From d21ca3e48095ebba72e2c40aff1437a49882a415 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Fri, 29 Jan 2021 15:14:20 +0100 Subject: libqasan and use target cross compiler to compile target qemu libs --- qemu_mode/libqasan/dlmalloc.c | 6332 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 6332 insertions(+) create mode 100644 qemu_mode/libqasan/dlmalloc.c (limited to 'qemu_mode/libqasan/dlmalloc.c') diff --git a/qemu_mode/libqasan/dlmalloc.c b/qemu_mode/libqasan/dlmalloc.c new file mode 100644 index 00000000..0cdd4a90 --- /dev/null +++ b/qemu_mode/libqasan/dlmalloc.c @@ -0,0 +1,6332 @@ +/* + This is a version (aka dlmalloc) of malloc/free/realloc written by + Doug Lea and released to the public domain, as explained at + http://creativecommons.org/publicdomain/zero/1.0/ Send questions, + comments, complaints, performance data, etc to dl@cs.oswego.edu + +* Version 2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea + Note: There may be an updated version of this malloc obtainable at + ftp://gee.cs.oswego.edu/pub/misc/malloc.c + Check before installing! + +* Quickstart + + This library is all in one file to simplify the most common usage: + ftp it, compile it (-O3), and link it into another program. All of + the compile-time options default to reasonable values for use on + most platforms. You might later want to step through various + compile-time and dynamic tuning options. + + For convenience, an include file for code using this malloc is at: + ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.6.h + You don't really need this .h file unless you call functions not + defined in your system include files. The .h file contains only the + excerpts from this file needed for using this malloc on ANSI C/C++ + systems, so long as you haven't changed compile-time options about + naming and tuning parameters. If you do, then you can create your + own malloc.h that does include all settings by cutting at the point + indicated below. Note that you may already by default be using a C + library containing a malloc that is based on some version of this + malloc (for example in linux). You might still want to use the one + in this file to customize settings or to avoid overheads associated + with library versions. + +* Vital statistics: + + Supported pointer/size_t representation: 4 or 8 bytes + size_t MUST be an unsigned type of the same width as + pointers. (If you are using an ancient system that declares + size_t as a signed type, or need it to be a different width + than pointers, you can use a previous release of this malloc + (e.g. 2.7.2) supporting these.) + + Alignment: 8 bytes (minimum) + This suffices for nearly all current machines and C compilers. + However, you can define MALLOC_ALIGNMENT to be wider than this + if necessary (up to 128bytes), at the expense of using more space. + + Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes) + 8 or 16 bytes (if 8byte sizes) + Each malloced chunk has a hidden word of overhead holding size + and status information, and additional cross-check word + if FOOTERS is defined. + + Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead) + 8-byte ptrs: 32 bytes (including overhead) + + Even a request for zero bytes (i.e., malloc(0)) returns a + pointer to something of the minimum allocatable size. 
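  [Editor's sketch, not part of the upstream comment: the minimum-size
  behavior described above can be observed through malloc_usable_size.
  The dl-prefixed names reflect the USE_DL_PREFIX build used in this file.]

    void show_minimum_chunk(void) {
      void* p = dlmalloc(0);                   // zero-byte request still returns a real chunk
      // usable size reflects the minimum chunk (16 or 32 bytes) minus its hidden overhead word(s)
      size_t usable = dlmalloc_usable_size(p);
      dlfree(p);
      (void)usable;
    }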
+ The maximum overhead wastage (i.e., number of extra bytes + allocated than were requested in malloc) is less than or equal + to the minimum size, except for requests >= mmap_threshold that + are serviced via mmap(), where the worst case wastage is about + 32 bytes plus the remainder from a system page (the minimal + mmap unit); typically 4096 or 8192 bytes. + + Security: static-safe; optionally more or less + The "security" of malloc refers to the ability of malicious + code to accentuate the effects of errors (for example, freeing + space that is not currently malloc'ed or overwriting past the + ends of chunks) in code that calls malloc. This malloc + guarantees not to modify any memory locations below the base of + heap, i.e., static variables, even in the presence of usage + errors. The routines additionally detect most improper frees + and reallocs. All this holds as long as the static bookkeeping + for malloc itself is not corrupted by some other means. This + is only one aspect of security -- these checks do not, and + cannot, detect all possible programming errors. + + If FOOTERS is defined nonzero, then each allocated chunk + carries an additional check word to verify that it was malloced + from its space. These check words are the same within each + execution of a program using malloc, but differ across + executions, so externally crafted fake chunks cannot be + freed. This improves security by rejecting frees/reallocs that + could corrupt heap memory, in addition to the checks preventing + writes to statics that are always on. This may further improve + security at the expense of time and space overhead. (Note that + FOOTERS may also be worth using with MSPACES.) + + By default detected errors cause the program to abort (calling + "abort()"). You can override this to instead proceed past + errors by defining PROCEED_ON_ERROR. In this case, a bad free + has no effect, and a malloc that encounters a bad address + caused by user overwrites will ignore the bad address by + dropping pointers and indices to all known memory. This may + be appropriate for programs that should continue if at all + possible in the face of programming errors, although they may + run out of memory because dropped memory is never reclaimed. + + If you don't like either of these options, you can define + CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything + else. And if if you are sure that your program using malloc has + no errors or vulnerabilities, you can define INSECURE to 1, + which might (or might not) provide a small performance improvement. + + It is also possible to limit the maximum total allocatable + space, using malloc_set_footprint_limit. This is not + designed as a security feature in itself (calls to set limits + are not screened or privileged), but may be useful as one + aspect of a secure implementation. + + Thread-safety: NOT thread-safe unless USE_LOCKS defined non-zero + When USE_LOCKS is defined, each public call to malloc, free, + etc is surrounded with a lock. By default, this uses a plain + pthread mutex, win32 critical section, or a spin-lock if if + available for the platform and not disabled by setting + USE_SPIN_LOCKS=0. However, if USE_RECURSIVE_LOCKS is defined, + recursive versions are used instead (which are not required for + base functionality but may be needed in layered extensions). + Using a global lock is not especially fast, and can be a major + bottleneck. 
It is designed only to provide minimal protection + in concurrent environments, and to provide a basis for + extensions. If you are using malloc in a concurrent program, + consider instead using nedmalloc + (http://www.nedprod.com/programs/portable/nedmalloc/) or + ptmalloc (See http://www.malloc.de), which are derived from + versions of this malloc. + + System requirements: Any combination of MORECORE and/or MMAP/MUNMAP + This malloc can use unix sbrk or any emulation (invoked using + the CALL_MORECORE macro) and/or mmap/munmap or any emulation + (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system + memory. On most unix systems, it tends to work best if both + MORECORE and MMAP are enabled. On Win32, it uses emulations + based on VirtualAlloc. It also uses common C library functions + like memset. + + Compliance: I believe it is compliant with the Single Unix Specification + (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably + others as well. + +* Overview of algorithms + + This is not the fastest, most space-conserving, most portable, or + most tunable malloc ever written. However it is among the fastest + while also being among the most space-conserving, portable and + tunable. Consistent balance across these factors results in a good + general-purpose allocator for malloc-intensive programs. + + In most ways, this malloc is a best-fit allocator. Generally, it + chooses the best-fitting existing chunk for a request, with ties + broken in approximately least-recently-used order. (This strategy + normally maintains low fragmentation.) However, for requests less + than 256bytes, it deviates from best-fit when there is not an + exactly fitting available chunk by preferring to use space adjacent + to that used for the previous small request, as well as by breaking + ties in approximately most-recently-used order. (These enhance + locality of series of small allocations.) And for very large requests + (>= 256Kb by default), it relies on system memory mapping + facilities, if supported. (This helps avoid carrying around and + possibly fragmenting memory used only for large chunks.) + + All operations (except malloc_stats and mallinfo) have execution + times that are bounded by a constant factor of the number of bits in + a size_t, not counting any clearing in calloc or copying in realloc, + or actions surrounding MORECORE and MMAP that have times + proportional to the number of non-contiguous regions returned by + system allocation routines, which is often just 1. In real-time + applications, you can optionally suppress segment traversals using + NO_SEGMENT_TRAVERSAL, which assures bounded execution even when + system allocators return non-contiguous spaces, at the typical + expense of carrying around more memory and increased fragmentation. + + The implementation is not very modular and seriously overuses + macros. Perhaps someday all C compilers will do as good a job + inlining modular code as can now be done by brute-force expansion, + but now, enough of them seem not to. + + Some compilers issue a lot of warnings about code that is + dead/unreachable only on some platforms, and also about intentional + uses of negation on unsigned types. All known cases of each can be + ignored. + + For a longer but out of date high-level description, see + http://gee.cs.oswego.edu/dl/html/malloc.html + +* MSPACES + If MSPACES is defined, then in addition to malloc, free, etc., + this file also defines mspace_malloc, mspace_free, etc. 
These + are versions of malloc routines that take an "mspace" argument + obtained using create_mspace, to control all internal bookkeeping. + If ONLY_MSPACES is defined, only these versions are compiled. + So if you would like to use this allocator for only some allocations, + and your system malloc for others, you can compile with + ONLY_MSPACES and then do something like... + static mspace mymspace = create_mspace(0,0); // for example + #define mymalloc(bytes) mspace_malloc(mymspace, bytes) + + (Note: If you only need one instance of an mspace, you can instead + use "USE_DL_PREFIX" to relabel the global malloc.) + + You can similarly create thread-local allocators by storing + mspaces as thread-locals. For example: + static __thread mspace tlms = 0; + void* tlmalloc(size_t bytes) { + if (tlms == 0) tlms = create_mspace(0, 0); + return mspace_malloc(tlms, bytes); + } + void tlfree(void* mem) { mspace_free(tlms, mem); } + + Unless FOOTERS is defined, each mspace is completely independent. + You cannot allocate from one and free to another (although + conformance is only weakly checked, so usage errors are not always + caught). If FOOTERS is defined, then each chunk carries around a tag + indicating its originating mspace, and frees are directed to their + originating spaces. Normally, this requires use of locks. + + ------------------------- Compile-time options --------------------------- + +Be careful in setting #define values for numerical constants of type +size_t. On some systems, literal values are not automatically extended +to size_t precision unless they are explicitly casted. You can also +use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below. + +WIN32 default: defined if _WIN32 defined + Defining WIN32 sets up defaults for MS environment and compilers. + Otherwise defaults are for unix. Beware that there seem to be some + cases where this malloc might not be a pure drop-in replacement for + Win32 malloc: Random-looking failures from Win32 GDI API's (eg; + SetDIBits()) may be due to bugs in some video driver implementations + when pixel buffers are malloc()ed, and the region spans more than + one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb) + default granularity, pixel buffers may straddle virtual allocation + regions more often than when using the Microsoft allocator. You can + avoid this by using VirtualAlloc() and VirtualFree() for all pixel + buffers rather than using malloc(). If this is not possible, + recompile this malloc with a larger DEFAULT_GRANULARITY. Note: + in cases where MSC and gcc (cygwin) are known to differ on WIN32, + conditions use _MSC_VER to distinguish them. + +DLMALLOC_EXPORT default: extern + Defines how public APIs are declared. If you want to export via a + Windows DLL, you might define this as + #define DLMALLOC_EXPORT extern __declspec(dllexport) + If you want a POSIX ELF shared object, you might use + #define DLMALLOC_EXPORT extern __attribute__((visibility("default"))) + +MALLOC_ALIGNMENT default: (size_t)(2 * sizeof(void *)) + Controls the minimum alignment for malloc'ed chunks. It must be a + power of two and at least 8, even on machines for which smaller + alignments would suffice. It may be defined as larger than this + though. Note however that code and data structures are optimized for + the case of 8-byte alignment. + +MSPACES default: 0 (false) + If true, compile in support for independent allocation spaces. + This is only supported if HAVE_MMAP is true. 
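  [Editor's sketch, not from the upstream text: one common way to wrap the
  MSPACES machinery described above into a private arena. All names here
  are illustrative.]

    static mspace arena;                        // one private heap
    void  arena_init(void)        { arena = create_mspace(0, 0); }
    void* arena_alloc(size_t n)   { return mspace_malloc(arena, n); }
    void  arena_free(void* p)     { mspace_free(arena, p); }
    void  arena_destroy_all(void) { destroy_mspace(arena); arena = 0; }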
+ +ONLY_MSPACES default: 0 (false) + If true, only compile in mspace versions, not regular versions. + +USE_LOCKS default: 0 (false) + Causes each call to each public routine to be surrounded with + pthread or WIN32 mutex lock/unlock. (If set true, this can be + overridden on a per-mspace basis for mspace versions.) If set to a + non-zero value other than 1, locks are used, but their + implementation is left out, so lock functions must be supplied manually, + as described below. + +USE_SPIN_LOCKS default: 1 iff USE_LOCKS and spin locks available + If true, uses custom spin locks for locking. This is currently + supported only gcc >= 4.1, older gccs on x86 platforms, and recent + MS compilers. Otherwise, posix locks or win32 critical sections are + used. + +USE_RECURSIVE_LOCKS default: not defined + If defined nonzero, uses recursive (aka reentrant) locks, otherwise + uses plain mutexes. This is not required for malloc proper, but may + be needed for layered allocators such as nedmalloc. + +LOCK_AT_FORK default: not defined + If defined nonzero, performs pthread_atfork upon initialization + to initialize child lock while holding parent lock. The implementation + assumes that pthread locks (not custom locks) are being used. In other + cases, you may need to customize the implementation. + +FOOTERS default: 0 + If true, provide extra checking and dispatching by placing + information in the footers of allocated chunks. This adds + space and time overhead. + +INSECURE default: 0 + If true, omit checks for usage errors and heap space overwrites. + +USE_DL_PREFIX default: NOT defined + Causes compiler to prefix all public routines with the string 'dl'. + This can be useful when you only want to use this malloc in one part + of a program, using your regular system malloc elsewhere. + +MALLOC_INSPECT_ALL default: NOT defined + If defined, compiles malloc_inspect_all and mspace_inspect_all, that + perform traversal of all heap space. Unless access to these + functions is otherwise restricted, you probably do not want to + include them in secure implementations. + +ABORT default: defined as abort() + Defines how to abort on failed checks. On most systems, a failed + check cannot die with an "assert" or even print an informative + message, because the underlying print routines in turn call malloc, + which will fail again. Generally, the best policy is to simply call + abort(). It's not very useful to do more than this because many + errors due to overwriting will show up as address faults (null, odd + addresses etc) rather than malloc-triggered checks, so will also + abort. Also, most compilers know that abort() does not return, so + can better optimize code conditionally calling it. + +PROCEED_ON_ERROR default: defined as 0 (false) + Controls whether detected bad addresses cause them to bypassed + rather than aborting. If set, detected bad arguments to free and + realloc are ignored. And all bookkeeping information is zeroed out + upon a detected overwrite of freed heap space, thus losing the + ability to ever return it from malloc again, but enabling the + application to proceed. If PROCEED_ON_ERROR is defined, the + static variable malloc_corruption_error_count is compiled in + and can be examined to see if errors have occurred. This option + generates slower code than the default abort policy. + +DEBUG default: NOT defined + The DEBUG setting is mainly intended for people trying to modify + this code or diagnose problems when porting to new platforms. 
+ However, it may also be able to better isolate user errors than just + using runtime checks. The assertions in the check routines spell + out in more detail the assumptions and invariants underlying the + algorithms. The checking is fairly extensive, and will slow down + execution noticeably. Calling malloc_stats or mallinfo with DEBUG + set will attempt to check every non-mmapped allocated and free chunk + in the course of computing the summaries. + +ABORT_ON_ASSERT_FAILURE default: defined as 1 (true) + Debugging assertion failures can be nearly impossible if your + version of the assert macro causes malloc to be called, which will + lead to a cascade of further failures, blowing the runtime stack. + ABORT_ON_ASSERT_FAILURE cause assertions failures to call abort(), + which will usually make debugging easier. + +MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32 + The action to take before "return 0" when malloc fails to be able to + return memory because there is none available. + +HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES + True if this system supports sbrk or an emulation of it. + +MORECORE default: sbrk + The name of the sbrk-style system routine to call to obtain more + memory. See below for guidance on writing custom MORECORE + functions. The type of the argument to sbrk/MORECORE varies across + systems. It cannot be size_t, because it supports negative + arguments, so it is normally the signed type of the same width as + size_t (sometimes declared as "intptr_t"). It doesn't much matter + though. Internally, we only call it with arguments less than half + the max value of a size_t, which should work across all reasonable + possibilities, although sometimes generating compiler warnings. + +MORECORE_CONTIGUOUS default: 1 (true) if HAVE_MORECORE + If true, take advantage of fact that consecutive calls to MORECORE + with positive arguments always return contiguous increasing + addresses. This is true of unix sbrk. It does not hurt too much to + set it true anyway, since malloc copes with non-contiguities. + Setting it false when definitely non-contiguous saves time + and possibly wasted space it would take to discover this though. + +MORECORE_CANNOT_TRIM default: NOT defined + True if MORECORE cannot release space back to the system when given + negative arguments. This is generally necessary only if you are + using a hand-crafted MORECORE function that cannot handle negative + arguments. + +NO_SEGMENT_TRAVERSAL default: 0 + If non-zero, suppresses traversals of memory segments + returned by either MORECORE or CALL_MMAP. This disables + merging of segments that are contiguous, and selectively + releasing them to the OS if unused, but bounds execution times. + +HAVE_MMAP default: 1 (true) + True if this system supports mmap or an emulation of it. If so, and + HAVE_MORECORE is not true, MMAP is used for all system + allocation. If set and HAVE_MORECORE is true as well, MMAP is + primarily used to directly allocate very large blocks. It is also + used as a backup strategy in cases where MORECORE fails to provide + space from system. Note: A single call to MUNMAP is assumed to be + able to unmap memory that may have be allocated using multiple calls + to MMAP, so long as they are adjacent. + +HAVE_MREMAP default: 1 on linux, else 0 + If true realloc() uses mremap() to re-allocate large blocks and + extend or shrink allocation spaces. + +MMAP_CLEARS default: 1 except on WINCE. + True if mmap clears memory so calloc doesn't need to. 
This is true + for standard unix mmap using /dev/zero and on WIN32 except for WINCE. + +USE_BUILTIN_FFS default: 0 (i.e., not used) + Causes malloc to use the builtin ffs() function to compute indices. + Some compilers may recognize and intrinsify ffs to be faster than the + supplied C version. Also, the case of x86 using gcc is special-cased + to an asm instruction, so is already as fast as it can be, and so + this setting has no effect. Similarly for Win32 under recent MS compilers. + (On most x86s, the asm version is only slightly faster than the C version.) + +malloc_getpagesize default: derive from system includes, or 4096. + The system page size. To the extent possible, this malloc manages + memory from the system in page-size units. This may be (and + usually is) a function rather than a constant. This is ignored + if WIN32, where page size is determined using getSystemInfo during + initialization. + +USE_DEV_RANDOM default: 0 (i.e., not used) + Causes malloc to use /dev/random to initialize secure magic seed for + stamping footers. Otherwise, the current time is used. + +NO_MALLINFO default: 0 + If defined, don't compile "mallinfo". This can be a simple way + of dealing with mismatches between system declarations and + those in this file. + +MALLINFO_FIELD_TYPE default: size_t + The type of the fields in the mallinfo struct. This was originally + defined as "int" in SVID etc, but is more usefully defined as + size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set + +NO_MALLOC_STATS default: 0 + If defined, don't compile "malloc_stats". This avoids calls to + fprintf and bringing in stdio dependencies you might not want. + +REALLOC_ZERO_BYTES_FREES default: not defined + This should be set if a call to realloc with zero bytes should + be the same as a call to free. Some people think it should. Otherwise, + since this malloc returns a unique pointer for malloc(0), so does + realloc(p, 0). + +LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H +LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H +LACKS_STDLIB_H LACKS_SCHED_H LACKS_TIME_H default: NOT defined unless on WIN32 + Define these if your system does not have these header files. + You might need to manually insert some of the declarations they provide. + +DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS, + system_info.dwAllocationGranularity in WIN32, + otherwise 64K. + Also settable using mallopt(M_GRANULARITY, x) + The unit for allocating and deallocating memory from the system. On + most systems with contiguous MORECORE, there is no reason to + make this more than a page. However, systems with MMAP tend to + either require or encourage larger granularities. You can increase + this value to prevent system allocation functions to be called so + often, especially if they are slow. The value must be at least one + page and must be a power of two. Setting to 0 causes initialization + to either page size or win32 region size. (Note: In previous + versions of malloc, the equivalent of this option was called + "TOP_PAD") + +DEFAULT_TRIM_THRESHOLD default: 2MB + Also settable using mallopt(M_TRIM_THRESHOLD, x) + The maximum amount of unused top-most memory to keep before + releasing via malloc_trim in free(). Automatic trimming is mainly + useful in long-lived programs using contiguous MORECORE. 
Because + trimming via sbrk can be slow on some systems, and can sometimes be + wasteful (in cases where programs immediately afterward allocate + more large chunks) the value should be high enough so that your + overall system performance would improve by releasing this much + memory. As a rough guide, you might set to a value close to the + average size of a process (program) running on your system. + Releasing this much memory would allow such a process to run in + memory. Generally, it is worth tuning trim thresholds when a + program undergoes phases where several large chunks are allocated + and released in ways that can reuse each other's storage, perhaps + mixed with phases where there are no such chunks at all. The trim + value must be greater than page size to have any useful effect. To + disable trimming completely, you can set to MAX_SIZE_T. Note that the trick + some people use of mallocing a huge space and then freeing it at + program startup, in an attempt to reserve system memory, doesn't + have the intended effect under automatic trimming, since that memory + will immediately be returned to the system. + +DEFAULT_MMAP_THRESHOLD default: 256K + Also settable using mallopt(M_MMAP_THRESHOLD, x) + The request size threshold for using MMAP to directly service a + request. Requests of at least this size that cannot be allocated + using already-existing space will be serviced via mmap. (If enough + normal freed space already exists it is used instead.) Using mmap + segregates relatively large chunks of memory so that they can be + individually obtained and released from the host system. A request + serviced through mmap is never reused by any other request (at least + not directly; the system may just so happen to remap successive + requests to the same locations). Segregating space in this way has + the benefits that: Mmapped space can always be individually released + back to the system, which helps keep the system level memory demands + of a long-lived program low. Also, mapped memory doesn't become + `locked' between other chunks, as can happen with normally allocated + chunks, which means that even trimming via malloc_trim would not + release them. However, it has the disadvantage that the space + cannot be reclaimed, consolidated, and then used to service later + requests, as happens with normal chunks. The advantages of mmap + nearly always outweigh disadvantages for "large" chunks, but the + value of "large" may vary across systems. The default is an + empirically derived value that works well in most systems. You can + disable mmap by setting to MAX_SIZE_T. + +MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP + The number of consolidated frees between checks to release + unused segments when freeing. When using non-contiguous segments, + especially with multiple mspaces, checking only for topmost space + doesn't always suffice to trigger trimming. To compensate for this, + free() will, with a period of MAX_RELEASE_CHECK_RATE (or the + current number of segments, if greater) try to release unused + segments to the OS when freeing chunks that result in + consolidation. The best value for this parameter is a compromise + between slowing down frees with relatively costly checks that + rarely trigger versus holding on to unused memory. To effectively + disable, set to MAX_SIZE_T. This may lead to a very slight speed + improvement at the expense of carrying around more memory. 
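  [Editor's sketch, not part of the upstream comment: the tunables above
  that list a mallopt equivalent can also be adjusted at run time. The
  dl-prefixed name reflects the USE_DL_PREFIX build in this file; the
  values are only examples.]

    void tune_allocator(void) {
      dlmallopt(M_TRIM_THRESHOLD, 128 * 1024);    // trim when more than 128KB sits unused on top
      dlmallopt(M_MMAP_THRESHOLD, 1024 * 1024);   // mmap requests of 1MB and larger
      dlmallopt(M_GRANULARITY,    64 * 1024);     // obtain system memory in 64KB units
    }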
+*/
+
+#define USE_DL_PREFIX
+
+/* Version identifier to allow people to support multiple versions */
+#ifndef DLMALLOC_VERSION
+#define DLMALLOC_VERSION 20806
+#endif /* DLMALLOC_VERSION */
+
+#ifndef DLMALLOC_EXPORT
+#define DLMALLOC_EXPORT extern
+#endif
+
+#ifndef WIN32
+#ifdef _WIN32
+#define WIN32 1
+#endif /* _WIN32 */
+#ifdef _WIN32_WCE
+#define LACKS_FCNTL_H
+#define WIN32 1
+#endif /* _WIN32_WCE */
+#endif /* WIN32 */
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <tchar.h>
+#define HAVE_MMAP 1
+#define HAVE_MORECORE 0
+#define LACKS_UNISTD_H
+#define LACKS_SYS_PARAM_H
+#define LACKS_SYS_MMAN_H
+#define LACKS_STRING_H
+#define LACKS_STRINGS_H
+#define LACKS_SYS_TYPES_H
+#define LACKS_ERRNO_H
+#define LACKS_SCHED_H
+#ifndef MALLOC_FAILURE_ACTION
+#define MALLOC_FAILURE_ACTION
+#endif /* MALLOC_FAILURE_ACTION */
+#ifndef MMAP_CLEARS
+#ifdef _WIN32_WCE /* WINCE reportedly does not clear */
+#define MMAP_CLEARS 0
+#else
+#define MMAP_CLEARS 1
+#endif /* _WIN32_WCE */
+#endif /* MMAP_CLEARS */
+#endif /* WIN32 */
+
+#if defined(DARWIN) || defined(_DARWIN)
+/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
+#ifndef HAVE_MORECORE
+#define HAVE_MORECORE 0
+#define HAVE_MMAP 1
+/* OSX allocators provide 16 byte alignment */
+#ifndef MALLOC_ALIGNMENT
+#define MALLOC_ALIGNMENT ((size_t)16U)
+#endif
+#endif /* HAVE_MORECORE */
+#endif /* DARWIN */
+
+#ifndef LACKS_SYS_TYPES_H
+#include <sys/types.h> /* For size_t */
+#endif /* LACKS_SYS_TYPES_H */
+
+/* The maximum possible size_t value has all bits set */
+#define MAX_SIZE_T (~(size_t)0)
+
+#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */
+#define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
+                   (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
+#endif /* USE_LOCKS */
+
+#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */
+#if ((defined(__GNUC__) &&                                          \
+      ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) ||  \
+       defined(__i386__) || defined(__x86_64__))) ||                \
+     (defined(_MSC_VER) && _MSC_VER >= 1310))
+#ifndef USE_SPIN_LOCKS
+#define USE_SPIN_LOCKS 1
+#endif /* USE_SPIN_LOCKS */
+#elif USE_SPIN_LOCKS
+#error "USE_SPIN_LOCKS defined without implementation"
+#endif /* ... locks available...
*/ +#elif !defined(USE_SPIN_LOCKS) +#define USE_SPIN_LOCKS 0 +#endif /* USE_LOCKS */ + +#ifndef ONLY_MSPACES +#define ONLY_MSPACES 0 +#endif /* ONLY_MSPACES */ +#ifndef MSPACES +#if ONLY_MSPACES +#define MSPACES 1 +#else /* ONLY_MSPACES */ +#define MSPACES 0 +#endif /* ONLY_MSPACES */ +#endif /* MSPACES */ +#ifndef MALLOC_ALIGNMENT +#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) +#endif /* MALLOC_ALIGNMENT */ +#ifndef FOOTERS +#define FOOTERS 0 +#endif /* FOOTERS */ +#ifndef ABORT +#define ABORT abort() +#endif /* ABORT */ +#ifndef ABORT_ON_ASSERT_FAILURE +#define ABORT_ON_ASSERT_FAILURE 1 +#endif /* ABORT_ON_ASSERT_FAILURE */ +#ifndef PROCEED_ON_ERROR +#define PROCEED_ON_ERROR 0 +#endif /* PROCEED_ON_ERROR */ + +#ifndef INSECURE +#define INSECURE 0 +#endif /* INSECURE */ +#ifndef MALLOC_INSPECT_ALL +#define MALLOC_INSPECT_ALL 0 +#endif /* MALLOC_INSPECT_ALL */ +#ifndef HAVE_MMAP +#define HAVE_MMAP 1 +#endif /* HAVE_MMAP */ +#ifndef MMAP_CLEARS +#define MMAP_CLEARS 1 +#endif /* MMAP_CLEARS */ +#ifndef HAVE_MREMAP +#ifdef linux +#define HAVE_MREMAP 1 +#define _GNU_SOURCE /* Turns on mremap() definition */ +#else /* linux */ +#define HAVE_MREMAP 0 +#endif /* linux */ +#endif /* HAVE_MREMAP */ +#ifndef MALLOC_FAILURE_ACTION +#define MALLOC_FAILURE_ACTION errno = ENOMEM; +#endif /* MALLOC_FAILURE_ACTION */ +#ifndef HAVE_MORECORE +#if ONLY_MSPACES +#define HAVE_MORECORE 0 +#else /* ONLY_MSPACES */ +#define HAVE_MORECORE 1 +#endif /* ONLY_MSPACES */ +#endif /* HAVE_MORECORE */ +#if !HAVE_MORECORE +#define MORECORE_CONTIGUOUS 0 +#else /* !HAVE_MORECORE */ +#define MORECORE_DEFAULT sbrk +#ifndef MORECORE_CONTIGUOUS +#define MORECORE_CONTIGUOUS 1 +#endif /* MORECORE_CONTIGUOUS */ +#endif /* HAVE_MORECORE */ +#ifndef DEFAULT_GRANULARITY +#if (MORECORE_CONTIGUOUS || defined(WIN32)) +#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ +#else /* MORECORE_CONTIGUOUS */ +#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) +#endif /* MORECORE_CONTIGUOUS */ +#endif /* DEFAULT_GRANULARITY */ +#ifndef DEFAULT_TRIM_THRESHOLD +#ifndef MORECORE_CANNOT_TRIM +#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) +#else /* MORECORE_CANNOT_TRIM */ +#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T +#endif /* MORECORE_CANNOT_TRIM */ +#endif /* DEFAULT_TRIM_THRESHOLD */ +#ifndef DEFAULT_MMAP_THRESHOLD +#if HAVE_MMAP +#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) +#else /* HAVE_MMAP */ +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T +#endif /* HAVE_MMAP */ +#endif /* DEFAULT_MMAP_THRESHOLD */ +#ifndef MAX_RELEASE_CHECK_RATE +#if HAVE_MMAP +#define MAX_RELEASE_CHECK_RATE 4095 +#else +#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T +#endif /* HAVE_MMAP */ +#endif /* MAX_RELEASE_CHECK_RATE */ +#ifndef USE_BUILTIN_FFS +#define USE_BUILTIN_FFS 0 +#endif /* USE_BUILTIN_FFS */ +#ifndef USE_DEV_RANDOM +#define USE_DEV_RANDOM 0 +#endif /* USE_DEV_RANDOM */ +#ifndef NO_MALLINFO +#define NO_MALLINFO 0 +#endif /* NO_MALLINFO */ +#ifndef MALLINFO_FIELD_TYPE +#define MALLINFO_FIELD_TYPE size_t +#endif /* MALLINFO_FIELD_TYPE */ +#ifndef NO_MALLOC_STATS +#define NO_MALLOC_STATS 0 +#endif /* NO_MALLOC_STATS */ +#ifndef NO_SEGMENT_TRAVERSAL +#define NO_SEGMENT_TRAVERSAL 0 +#endif /* NO_SEGMENT_TRAVERSAL */ + +/* + mallopt tuning options. SVID/XPG defines four standard parameter + numbers for mallopt, normally defined in malloc.h. None of these + are used in this malloc, so setting them has no effect. But this + malloc does support the following options. 
+*/ + +#undef M_TRIM_THRESHOLD +#undef M_GRANULARITY +#undef M_MMAP_THRESHOLD +#define M_TRIM_THRESHOLD (-1) +#define M_GRANULARITY (-2) +#define M_MMAP_THRESHOLD (-3) + +/* ------------------------ Mallinfo declarations ------------------------ */ + +#if !NO_MALLINFO +/* + This version of malloc supports the standard SVID/XPG mallinfo + routine that returns a struct containing usage properties and + statistics. It should work on any system that has a + /usr/include/malloc.h defining struct mallinfo. The main + declaration needed is the mallinfo struct that is returned (by-copy) + by mallinfo(). The malloinfo struct contains a bunch of fields that + are not even meaningful in this version of malloc. These fields are + are instead filled by mallinfo() with other numbers that might be of + interest. + + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a + /usr/include/malloc.h file that includes a declaration of struct + mallinfo. If so, it is included; else a compliant version is + declared below. These must be precisely the same for mallinfo() to + work. The original SVID version of this struct, defined on most + systems with mallinfo, declares all fields as ints. But some others + define as unsigned long. If your system defines the fields using a + type of different width than listed here, you MUST #include your + system version and #define HAVE_USR_INCLUDE_MALLOC_H. +*/ + +/* #define HAVE_USR_INCLUDE_MALLOC_H */ + +#ifdef HAVE_USR_INCLUDE_MALLOC_H +#include "/usr/include/malloc.h" +#else /* HAVE_USR_INCLUDE_MALLOC_H */ +#ifndef STRUCT_MALLINFO_DECLARED +/* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */ +#define _STRUCT_MALLINFO +#define STRUCT_MALLINFO_DECLARED 1 +struct mallinfo { + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ + MALLINFO_FIELD_TYPE smblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ + MALLINFO_FIELD_TYPE fordblks; /* total free space */ + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ +}; +#endif /* STRUCT_MALLINFO_DECLARED */ +#endif /* HAVE_USR_INCLUDE_MALLOC_H */ +#endif /* NO_MALLINFO */ + +/* + Try to persuade compilers to inline. The most critical functions for + inlining are defined as macros, so these aren't used for them. 
+*/ + +#ifndef FORCEINLINE + #if defined(__GNUC__) +#define FORCEINLINE __inline __attribute__ ((always_inline)) + #elif defined(_MSC_VER) + #define FORCEINLINE __forceinline + #endif +#endif +#ifndef NOINLINE + #if defined(__GNUC__) + #define NOINLINE __attribute__ ((noinline)) + #elif defined(_MSC_VER) + #define NOINLINE __declspec(noinline) + #else + #define NOINLINE + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#ifndef FORCEINLINE + #define FORCEINLINE inline +#endif +#endif /* __cplusplus */ +#ifndef FORCEINLINE + #define FORCEINLINE +#endif + +#if !ONLY_MSPACES + +/* ------------------- Declarations of public routines ------------------- */ + +#ifndef USE_DL_PREFIX +#define dlcalloc calloc +#define dlfree free +#define dlmalloc malloc +#define dlmemalign memalign +#define dlposix_memalign posix_memalign +#define dlrealloc realloc +#define dlrealloc_in_place realloc_in_place +#define dlvalloc valloc +#define dlpvalloc pvalloc +#define dlmallinfo mallinfo +#define dlmallopt mallopt +#define dlmalloc_trim malloc_trim +#define dlmalloc_stats malloc_stats +#define dlmalloc_usable_size malloc_usable_size +#define dlmalloc_footprint malloc_footprint +#define dlmalloc_max_footprint malloc_max_footprint +#define dlmalloc_footprint_limit malloc_footprint_limit +#define dlmalloc_set_footprint_limit malloc_set_footprint_limit +#define dlmalloc_inspect_all malloc_inspect_all +#define dlindependent_calloc independent_calloc +#define dlindependent_comalloc independent_comalloc +#define dlbulk_free bulk_free +#endif /* USE_DL_PREFIX */ + +/* + malloc(size_t n) + Returns a pointer to a newly allocated chunk of at least n bytes, or + null if no space is available, in which case errno is set to ENOMEM + on ANSI C systems. + + If n is zero, malloc returns a minimum-sized chunk. (The minimum + size is 16 bytes on most 32bit systems, and 32 bytes on 64bit + systems.) Note that size_t is an unsigned type, so calls with + arguments that would be negative if signed are interpreted as + requests for huge amounts of space, which will often fail. The + maximum supported value of n differs across systems, but is in all + cases less than the maximum representable value of a size_t. +*/ +DLMALLOC_EXPORT void* dlmalloc(size_t); + +/* + free(void* p) + Releases the chunk of memory pointed to by p, that had been previously + allocated using malloc or a related routine such as realloc. + It has no effect if p is null. If p was not malloced or already + freed, free(p) will by default cause the current program to abort. +*/ +DLMALLOC_EXPORT void dlfree(void*); + +/* + calloc(size_t n_elements, size_t element_size); + Returns a pointer to n_elements * element_size bytes, with all locations + set to zero. +*/ +DLMALLOC_EXPORT void* dlcalloc(size_t, size_t); + +/* + realloc(void* p, size_t n) + Returns a pointer to a chunk of size n that contains the same data + as does chunk p up to the minimum of (n, p's size) bytes, or null + if no space is available. + + The returned pointer may or may not be the same as p. The algorithm + prefers extending p in most cases when possible, otherwise it + employs the equivalent of a malloc-copy-free sequence. + + If p is null, realloc is equivalent to malloc. + + If space is not available, realloc returns null, errno is set (if on + ANSI) and p is NOT freed. + + if n is for fewer bytes than already held by p, the newly unused + space is lopped off and freed if possible. realloc with a size + argument of zero (re)allocates a minimum-sized chunk. 
+ + The old unix realloc convention of allowing the last-free'd chunk + to be used as an argument to realloc is not supported. +*/ +DLMALLOC_EXPORT void* dlrealloc(void*, size_t); + +/* + realloc_in_place(void* p, size_t n) + Resizes the space allocated for p to size n, only if this can be + done without moving p (i.e., only if there is adjacent space + available if n is greater than p's current allocated size, or n is + less than or equal to p's size). This may be used instead of plain + realloc if an alternative allocation strategy is needed upon failure + to expand space; for example, reallocation of a buffer that must be + memory-aligned or cleared. You can use realloc_in_place to trigger + these alternatives only when needed. + + Returns p if successful; otherwise null. +*/ +DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t); + +/* + memalign(size_t alignment, size_t n); + Returns a pointer to a newly allocated chunk of n bytes, aligned + in accord with the alignment argument. + + The alignment argument should be a power of two. If the argument is + not a power of two, the nearest greater power is used. + 8-byte alignment is guaranteed by normal malloc calls, so don't + bother calling memalign with an argument of 8 or less. + + Overreliance on memalign is a sure way to fragment space. +*/ +DLMALLOC_EXPORT void* dlmemalign(size_t, size_t); + +/* + int posix_memalign(void** pp, size_t alignment, size_t n); + Allocates a chunk of n bytes, aligned in accord with the alignment + argument. Differs from memalign only in that it (1) assigns the + allocated memory to *pp rather than returning it, (2) fails and + returns EINVAL if the alignment is not a power of two (3) fails and + returns ENOMEM if memory cannot be allocated. +*/ +DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t); + +/* + valloc(size_t n); + Equivalent to memalign(pagesize, n), where pagesize is the page + size of the system. If the pagesize is unknown, 4096 is used. +*/ +DLMALLOC_EXPORT void* dlvalloc(size_t); + +/* + mallopt(int parameter_number, int parameter_value) + Sets tunable parameters The format is to provide a + (parameter-number, parameter-value) pair. mallopt then sets the + corresponding parameter to the argument value if it can (i.e., so + long as the value is meaningful), and returns 1 if successful else + 0. To workaround the fact that mallopt is specified to use int, + not size_t parameters, the value -1 is specially treated as the + maximum unsigned size_t value. + + SVID/XPG/ANSI defines four standard param numbers for mallopt, + normally defined in malloc.h. None of these are use in this malloc, + so setting them has no effect. But this malloc also supports other + options in mallopt. See below for details. Briefly, supported + parameters are as follows (listed defaults are for "typical" + configurations). + + Symbol param # default allowed param values + M_TRIM_THRESHOLD -1 2*1024*1024 any (-1 disables) + M_GRANULARITY -2 page size any power of 2 >= page size + M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) +*/ +DLMALLOC_EXPORT int dlmallopt(int, int); + +/* + malloc_footprint(); + Returns the number of bytes obtained from the system. The total + number of bytes allocated by malloc, realloc etc., is less than this + value. Unlike mallinfo, this function returns only a precomputed + result, so can be called frequently to monitor memory consumption. + Even if locks are otherwise defined, this function does not use them, + so results might not be up to date. 
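  [Editor's sketch, not from the upstream comment: since the value is a
  precomputed counter, it is cheap enough to poll from a monitoring hook;
  fprintf is only illustrative and needs stdio.]

    void report_heap(void) {
      fprintf(stderr, "current footprint: %zu bytes\n", dlmalloc_footprint());
    }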
+*/ +DLMALLOC_EXPORT size_t dlmalloc_footprint(void); + +/* + malloc_max_footprint(); + Returns the maximum number of bytes obtained from the system. This + value will be greater than current footprint if deallocated space + has been reclaimed by the system. The peak number of bytes allocated + by malloc, realloc etc., is less than this value. Unlike mallinfo, + this function returns only a precomputed result, so can be called + frequently to monitor memory consumption. Even if locks are + otherwise defined, this function does not use them, so results might + not be up to date. +*/ +DLMALLOC_EXPORT size_t dlmalloc_max_footprint(void); + +/* + malloc_footprint_limit(); + Returns the number of bytes that the heap is allowed to obtain from + the system, returning the last value returned by + malloc_set_footprint_limit, or the maximum size_t value if + never set. The returned value reflects a permission. There is no + guarantee that this number of bytes can actually be obtained from + the system. +*/ +DLMALLOC_EXPORT size_t dlmalloc_footprint_limit(); + +/* + malloc_set_footprint_limit(); + Sets the maximum number of bytes to obtain from the system, causing + failure returns from malloc and related functions upon attempts to + exceed this value. The argument value may be subject to page + rounding to an enforceable limit; this actual value is returned. + Using an argument of the maximum possible size_t effectively + disables checks. If the argument is less than or equal to the + current malloc_footprint, then all future allocations that require + additional system memory will fail. However, invocation cannot + retroactively deallocate existing used memory. +*/ +DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes); + +#if MALLOC_INSPECT_ALL +/* + malloc_inspect_all(void(*handler)(void *start, + void *end, + size_t used_bytes, + void* callback_arg), + void* arg); + Traverses the heap and calls the given handler for each managed + region, skipping all bytes that are (or may be) used for bookkeeping + purposes. Traversal does not include include chunks that have been + directly memory mapped. Each reported region begins at the start + address, and continues up to but not including the end address. The + first used_bytes of the region contain allocated data. If + used_bytes is zero, the region is unallocated. The handler is + invoked with the given callback argument. If locks are defined, they + are held during the entire traversal. It is a bad idea to invoke + other malloc functions from within the handler. + + For example, to count the number of in-use chunks with size greater + than 1000, you could write: + static int count = 0; + void count_chunks(void* start, void* end, size_t used, void* arg) { + if (used >= 1000) ++count; + } + then: + malloc_inspect_all(count_chunks, NULL); + + malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined. +*/ +DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), + void* arg); + +#endif /* MALLOC_INSPECT_ALL */ + +#if !NO_MALLINFO +/* + mallinfo() + Returns (by copy) a struct containing various summary statistics: + + arena: current total non-mmapped bytes allocated from system + ordblks: the number of free chunks + smblks: always zero. + hblks: current number of mmapped regions + hblkhd: total bytes held in mmapped regions + usmblks: the maximum total allocated space. This will be greater + than current total if trimming has occurred. 
+ fsmblks: always zero + uordblks: current total allocated space (normal or mmapped) + fordblks: total free space + keepcost: the maximum number of bytes that could ideally be released + back to system via malloc_trim. ("ideally" means that + it ignores page restrictions etc.) + + Because these fields are ints, but internal bookkeeping may + be kept as longs, the reported values may wrap around zero and + thus be inaccurate. +*/ +DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); +#endif /* NO_MALLINFO */ + +/* + independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); + + independent_calloc is similar to calloc, but instead of returning a + single cleared space, it returns an array of pointers to n_elements + independent elements that can hold contents of size elem_size, each + of which starts out cleared, and can be independently freed, + realloc'ed etc. The elements are guaranteed to be adjacently + allocated (this is not guaranteed to occur with multiple callocs or + mallocs), which may also improve cache locality in some + applications. + + The "chunks" argument is optional (i.e., may be null, which is + probably the most typical usage). If it is null, the returned array + is itself dynamically allocated and should also be freed when it is + no longer needed. Otherwise, the chunks array must be of at least + n_elements in length. It is filled in with the pointers to the + chunks. + + In either case, independent_calloc returns this pointer array, or + null if the allocation failed. If n_elements is zero and "chunks" + is null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be freed when it is no longer needed. This can be + done all at once using bulk_free. + + independent_calloc simplifies and speeds up implementations of many + kinds of pools. It may also be useful when constructing large data + structures that initially have a fixed number of fixed-sized nodes, + but the number is not known at compile time, and some of the nodes + may later need to be freed. For example: + + struct Node { int item; struct Node* next; }; + + struct Node* build_list() { + struct Node** pool; + int n = read_number_of_nodes_needed(); + if (n <= 0) return 0; + pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0); + if (pool == 0) die(); + // organize into a linked list... + struct Node* first = pool[0]; + for (i = 0; i < n-1; ++i) + pool[i]->next = pool[i+1]; + free(pool); // Can now free the array (or not, if it is needed later) + return first; + } +*/ +DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); + +/* + independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); + + independent_comalloc allocates, all at once, a set of n_elements + chunks with sizes indicated in the "sizes" array. It returns + an array of pointers to these elements, each of which can be + independently freed, realloc'ed etc. The elements are guaranteed to + be adjacently allocated (this is not guaranteed to occur with + multiple callocs or mallocs), which may also improve cache locality + in some applications. + + The "chunks" argument is optional (i.e., may be null). If it is null + the returned array is itself dynamically allocated and should also + be freed when it is no longer needed. Otherwise, the chunks array + must be of at least n_elements in length. It is filled in with the + pointers to the chunks. 
+ + In either case, independent_comalloc returns this pointer array, or + null if the allocation failed. If n_elements is zero and chunks is + null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be freed when it is no longer needed. This can be + done all at once using bulk_free. + + independent_comallac differs from independent_calloc in that each + element may have a different size, and also that it does not + automatically clear elements. + + independent_comalloc can be used to speed up allocation in cases + where several structs or objects must always be allocated at the + same time. For example: + + struct Head { ... } + struct Foot { ... } + + void send_message(char* msg) { + int msglen = strlen(msg); + size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; + void* chunks[3]; + if (independent_comalloc(3, sizes, chunks) == 0) + die(); + struct Head* head = (struct Head*)(chunks[0]); + char* body = (char*)(chunks[1]); + struct Foot* foot = (struct Foot*)(chunks[2]); + // ... + } + + In general though, independent_comalloc is worth using only for + larger values of n_elements. For small values, you probably won't + detect enough difference from series of malloc calls to bother. + + Overuse of independent_comalloc can increase overall memory usage, + since it cannot reuse existing noncontiguous small chunks that + might be available for some of the elements. +*/ +DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**); + +/* + bulk_free(void* array[], size_t n_elements) + Frees and clears (sets to null) each non-null pointer in the given + array. This is likely to be faster than freeing them one-by-one. + If footers are used, pointers that have been allocated in different + mspaces are not freed or cleared, and the count of all such pointers + is returned. For large arrays of pointers with poor locality, it + may be worthwhile to sort this array before calling bulk_free. +*/ +DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements); + +/* + pvalloc(size_t n); + Equivalent to valloc(minimum-page-that-holds(n)), that is, + round up n to nearest pagesize. + */ +DLMALLOC_EXPORT void* dlpvalloc(size_t); + +/* + malloc_trim(size_t pad); + + If possible, gives memory back to the system (via negative arguments + to sbrk) if there is unused memory at the `high' end of the malloc + pool or in unused MMAP segments. You can call this after freeing + large blocks of memory to potentially reduce the system-level memory + requirements of a program. However, it cannot guarantee to reduce + memory. Under some allocation patterns, some large free blocks of + memory will be locked between two used chunks, so they cannot be + given back to the system. + + The `pad' argument to malloc_trim represents the amount of free + trailing space to leave untrimmed. If this argument is zero, only + the minimum amount of memory to maintain internal data structures + will be left. Non-zero arguments can be supplied to maintain enough + trailing space to service future expected allocations without having + to re-obtain memory from the system. + + Malloc_trim returns 1 if it actually released any memory, else 0. 
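  [Editor's sketch, not from the upstream comment: a typical place to call
  this is right after a phase that frees many large blocks.]

    void release_unused_memory(void) {
      // pad of 0: keep only what the internal data structures need
      if (dlmalloc_trim(0) == 0) {
        // nothing could be handed back (e.g. free space is locked between
        // in-use chunks); this is not an error
      }
    }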
+*/ +DLMALLOC_EXPORT int dlmalloc_trim(size_t); + +/* + malloc_stats(); + Prints on stderr the amount of space obtained from the system (both + via sbrk and mmap), the maximum amount (which may be more than + current if malloc_trim and/or munmap got called), and the current + number of bytes allocated via malloc (or realloc, etc) but not yet + freed. Note that this is the number of bytes allocated, not the + number requested. It will be larger than the number requested + because of alignment and bookkeeping overhead. Because it includes + alignment wastage as being in use, this figure may be greater than + zero even when no user-level chunks are allocated. + + The reported current and maximum system memory can be inaccurate if + a program makes other calls to system memory allocation functions + (normally sbrk) outside of malloc. + + malloc_stats prints only the most commonly interesting statistics. + More information can be obtained by calling mallinfo. +*/ +DLMALLOC_EXPORT void dlmalloc_stats(void); + +/* + malloc_usable_size(void* p); + + Returns the number of bytes you can actually use in + an allocated chunk, which may be more than you requested (although + often not) due to alignment and minimum size constraints. + You can use this many bytes without worrying about + overwriting other allocated objects. This is not a particularly great + programming practice. malloc_usable_size can be more useful in + debugging and assertions, for example: + + p = malloc(n); + assert(malloc_usable_size(p) >= 256); +*/ +size_t dlmalloc_usable_size(void*); + +#endif /* ONLY_MSPACES */ + +#if MSPACES + +/* + mspace is an opaque type representing an independent + region of space that supports mspace_malloc, etc. +*/ +typedef void* mspace; + +/* + create_mspace creates and returns a new independent space with the + given initial capacity, or, if 0, the default granularity size. It + returns null if there is no system memory available to create the + space. If argument locked is non-zero, the space uses a separate + lock to control access. The capacity of the space will grow + dynamically as needed to service mspace_malloc requests. You can + control the sizes of incremental increases of this space by + compiling with a different DEFAULT_GRANULARITY or dynamically + setting with mallopt(M_GRANULARITY, value). +*/ +DLMALLOC_EXPORT mspace create_mspace(size_t capacity, int locked); + +/* + destroy_mspace destroys the given space, and attempts to return all + of its memory back to the system, returning the total number of + bytes freed. After destruction, the results of access to all memory + used by the space become undefined. +*/ +DLMALLOC_EXPORT size_t destroy_mspace(mspace msp); + +/* + create_mspace_with_base uses the memory supplied as the initial base + of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this + space is used for bookkeeping, so the capacity must be at least this + large. (Otherwise 0 is returned.) When this initial space is + exhausted, additional memory will be obtained from the system. + Destroying this space will deallocate all additionally allocated + space (if possible) but not the initial base. +*/ +DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked); + +/* + mspace_track_large_chunks controls whether requests for large chunks + are allocated in their own untracked mmapped regions, separate from + others in this mspace. By default large chunks are not tracked, + which reduces fragmentation. 
However, such chunks are not + necessarily released to the system upon destroy_mspace. Enabling + tracking by setting to true may increase fragmentation, but avoids + leakage when relying on destroy_mspace to release all memory + allocated using this space. The function returns the previous + setting. +*/ +DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable); + + +/* + mspace_malloc behaves as malloc, but operates within + the given space. +*/ +DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes); + +/* + mspace_free behaves as free, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_free is not actually needed. + free may be called instead of mspace_free because freed chunks from + any space are handled by their originating spaces. +*/ +DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem); + +/* + mspace_realloc behaves as realloc, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_realloc is not actually + needed. realloc may be called instead of mspace_realloc because + realloced chunks from any space are handled by their originating + spaces. +*/ +DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize); + +/* + mspace_calloc behaves as calloc, but operates within + the given space. +*/ +DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); + +/* + mspace_memalign behaves as memalign, but operates within + the given space. +*/ +DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); + +/* + mspace_independent_calloc behaves as independent_calloc, but + operates within the given space. +*/ +DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]); + +/* + mspace_independent_comalloc behaves as independent_comalloc, but + operates within the given space. +*/ +DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]); + +/* + mspace_footprint() returns the number of bytes obtained from the + system for this space. +*/ +DLMALLOC_EXPORT size_t mspace_footprint(mspace msp); + +/* + mspace_max_footprint() returns the peak number of bytes obtained from the + system for this space. +*/ +DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp); + + +#if !NO_MALLINFO +/* + mspace_mallinfo behaves as mallinfo, but reports properties of + the given space. +*/ +DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp); +#endif /* NO_MALLINFO */ + +/* + malloc_usable_size(void* p) behaves the same as malloc_usable_size; +*/ +DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem); + +/* + mspace_malloc_stats behaves as malloc_stats, but reports + properties of the given space. +*/ +DLMALLOC_EXPORT void mspace_malloc_stats(mspace msp); + +/* + mspace_trim behaves as malloc_trim, but + operates within the given space. +*/ +DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad); + +/* + An alias for mallopt. +*/ +DLMALLOC_EXPORT int mspace_mallopt(int, int); + +#endif /* MSPACES */ + +#ifdef __cplusplus +} /* end of extern "C" */ +#endif /* __cplusplus */ + +/* + ======================================================================== + To make a fully customizable malloc.h header file, cut everything + above this line, put into file malloc.h, edit to suit, and #include it + on the next line, as well as in programs that use this malloc. 
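  [Editor's sketch, not from the upstream comment: the layout that results
  from the cut described above. File names are illustrative.]

    // malloc.h : everything above this banner, edited to suit
    // malloc.c : the remainder of this file, now beginning with
    #include "malloc.h"
    // client.c : programs using the allocator include the same header
    #include "malloc.h"
    int main(void) { void* p = dlmalloc(32); dlfree(p); return 0; }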
+  ========================================================================
+*/
+
+/* #include "malloc.h" */
+
+/*------------------------------ internal #includes ---------------------- */
+
+#ifdef _MSC_VER
+#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
+#endif /* _MSC_VER */
+#if !NO_MALLOC_STATS
+#include <stdio.h>      /* for printing in malloc_stats */
+#endif /* NO_MALLOC_STATS */
+#ifndef LACKS_ERRNO_H
+#include <errno.h>      /* for MALLOC_FAILURE_ACTION */
+#endif /* LACKS_ERRNO_H */
+#ifdef DEBUG
+#if ABORT_ON_ASSERT_FAILURE
+#undef assert
+#define assert(x) if(!(x)) ABORT
+#else /* ABORT_ON_ASSERT_FAILURE */
+#include <assert.h>
+#endif /* ABORT_ON_ASSERT_FAILURE */
+#else /* DEBUG */
+#ifndef assert
+#define assert(x)
+#endif
+#define DEBUG 0
+#endif /* DEBUG */
+#if !defined(WIN32) && !defined(LACKS_TIME_H)
+#include <time.h>       /* for magic initialization */
+#endif /* WIN32 */
+#ifndef LACKS_STDLIB_H
+#include <stdlib.h>     /* for abort() */
+#endif /* LACKS_STDLIB_H */
+#ifndef LACKS_STRING_H
+#include <string.h>     /* for memset etc */
+#endif /* LACKS_STRING_H */
+#if USE_BUILTIN_FFS
+#ifndef LACKS_STRINGS_H
+#include <strings.h>    /* for ffs */
+#endif /* LACKS_STRINGS_H */
+#endif /* USE_BUILTIN_FFS */
+#if HAVE_MMAP
+#ifndef LACKS_SYS_MMAN_H
+/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
+#if (defined(linux) && !defined(__USE_GNU))
+#define __USE_GNU 1
+#include <sys/mman.h>   /* for mmap */
+#undef __USE_GNU
+#else
+#include <sys/mman.h>   /* for mmap */
+#endif /* linux */
+#endif /* LACKS_SYS_MMAN_H */
+#ifndef LACKS_FCNTL_H
+#include <fcntl.h>
+#endif /* LACKS_FCNTL_H */
+#endif /* HAVE_MMAP */
+#ifndef LACKS_UNISTD_H
+#include <unistd.h>     /* for sbrk, sysconf */
+#else /* LACKS_UNISTD_H */
+#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
+extern void* sbrk(ptrdiff_t);
+#endif /* FreeBSD etc */
+#endif /* LACKS_UNISTD_H */
+
+/* Declarations for locking */
+#if USE_LOCKS
+#ifndef WIN32
+#if defined (__SVR4) && defined (__sun) /* solaris */
+#include <thread.h>
+#elif !defined(LACKS_SCHED_H)
+#include <sched.h>
+#endif /* solaris or LACKS_SCHED_H */
+#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
+#include <pthread.h>
+#endif /* USE_RECURSIVE_LOCKS ... */
+#elif defined(_MSC_VER)
+#ifndef _M_AMD64
+/* These are already defined on AMD64 builds */
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
+LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* _M_AMD64 */
+#pragma intrinsic (_InterlockedCompareExchange)
+#pragma intrinsic (_InterlockedExchange)
+#define interlockedcompareexchange _InterlockedCompareExchange
+#define interlockedexchange _InterlockedExchange
+#elif defined(WIN32) && defined(__GNUC__)
+#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
+#define interlockedexchange __sync_lock_test_and_set
+#endif /* Win32 */
+#else /* USE_LOCKS */
+#endif /* USE_LOCKS */
+
+#ifndef LOCK_AT_FORK
+#define LOCK_AT_FORK 0
+#endif
+
+/* Declarations for bit scanning on win32 */
+#if defined(_MSC_VER) && _MSC_VER>=1300
+#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
+unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#define BitScanForward _BitScanForward
+#define BitScanReverse _BitScanReverse
+#pragma intrinsic(_BitScanForward)
+#pragma intrinsic(_BitScanReverse)
+#endif /* BitScanForward */
+#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
+
+#ifndef WIN32
+#ifndef malloc_getpagesize
+#  ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
+#    ifndef _SC_PAGE_SIZE
+#      define _SC_PAGE_SIZE _SC_PAGESIZE
+#    endif
+#  endif
+#  ifdef _SC_PAGE_SIZE
+#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
+#  else
+#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
+       extern size_t getpagesize();
+#      define malloc_getpagesize getpagesize()
+#    else
+#      ifdef WIN32 /* use supplied emulation of getpagesize */
+#        define malloc_getpagesize getpagesize()
+#      else
+#        ifndef LACKS_SYS_PARAM_H
+#          include <sys/param.h>
+#        endif
+#        ifdef EXEC_PAGESIZE
+#          define malloc_getpagesize EXEC_PAGESIZE
+#        else
+#          ifdef NBPG
+#            ifndef CLSIZE
+#              define malloc_getpagesize NBPG
+#            else
+#              define malloc_getpagesize (NBPG * CLSIZE)
+#            endif
+#          else
+#            ifdef NBPC
+#              define malloc_getpagesize NBPC
+#            else
+#              ifdef PAGESIZE
+#                define malloc_getpagesize PAGESIZE
+#              else /* just guess */
+#                define malloc_getpagesize ((size_t)4096U)
+#              endif
+#            endif
+#          endif
+#        endif
+#      endif
+#    endif
+#  endif
+#endif
+#endif
+
+/* ------------------- size_t and alignment properties -------------------- */
+
+/* The byte and bit size of a size_t */
+#define SIZE_T_SIZE         (sizeof(size_t))
+#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
+
+/* Some constants coerced to size_t */
+/* Annoying but necessary to avoid errors on some platforms */
+#define SIZE_T_ZERO         ((size_t)0)
+#define SIZE_T_ONE          ((size_t)1)
+#define SIZE_T_TWO          ((size_t)2)
+#define SIZE_T_FOUR         ((size_t)4)
+#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
+#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
+#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
+#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
+
+/* The bit mask value corresponding to MALLOC_ALIGNMENT */
+#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
+
+/* True if address a has acceptable alignment */
+#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
+
+/* the number of bytes to offset an address to align it */
+#define align_offset(A)\
+ 
((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ + ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) + +/* -------------------------- MMAP preliminaries ------------------------- */ + +/* + If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and + checks to fail so compiler optimizer can delete code rather than + using so many "#if"s. +*/ + + +/* MORECORE and MMAP must return MFAIL on failure */ +#define MFAIL ((void*)(MAX_SIZE_T)) +#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ + +#if HAVE_MMAP + +#ifndef WIN32 +#define MMAP_PROT (PROT_READ|PROT_WRITE) +#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) +#define MAP_ANONYMOUS MAP_ANON +#endif /* MAP_ANON */ +#ifdef MAP_ANONYMOUS + +#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) + +static FORCEINLINE void* unixmmap(size_t size) { + void* result; + + result = mmap(0, size, MMAP_PROT, MMAP_FLAGS, -1, 0); + if (result == MFAIL) + return MFAIL; + + return result; +} + +static FORCEINLINE int unixmunmap(void* ptr, size_t size) { + int result; + + result = munmap(ptr, size); + if (result != 0) + return result; + + return result; +} + +#define MMAP_DEFAULT(s) unixmmap(s) +#define MUNMAP_DEFAULT(a, s) unixmunmap((a), (s)) + +#else /* MAP_ANONYMOUS */ +/* + Nearly all versions of mmap support MAP_ANONYMOUS, so the following + is unlikely to be needed, but is supplied just in case. +*/ +#define MMAP_FLAGS (MAP_PRIVATE) +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ +#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \ + (dev_zero_fd = open("/dev/zero", O_RDWR), \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) +#define MUNMAP_DEFAULT(a, s) munmap((a), (s)) +#endif /* MAP_ANONYMOUS */ + +#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) + +#else /* WIN32 */ + +/* Win32 MMAP via VirtualAlloc */ +static FORCEINLINE void* win32mmap(size_t size) { + void* ptr; + + ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); + if (ptr == 0) + return MFAIL; + + return ptr; +} + +/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ +static FORCEINLINE void* win32direct_mmap(size_t size) { + void* ptr; + + ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, + PAGE_READWRITE); + if (ptr == 0) + return MFAIL; + + return ptr; +} + +/* This function supports releasing coalesed segments */ +static FORCEINLINE int win32munmap(void* ptr, size_t size) { + MEMORY_BASIC_INFORMATION minfo; + char* cptr = (char*)ptr; + + while (size) { + if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) + return -1; + if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || + minfo.State != MEM_COMMIT || minfo.RegionSize > size) + return -1; + if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) + return -1; + cptr += minfo.RegionSize; + size -= minfo.RegionSize; + } + + return 0; +} + +#define MMAP_DEFAULT(s) win32mmap(s) +#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) +#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) +#endif /* WIN32 */ +#endif /* HAVE_MMAP */ + +#if HAVE_MREMAP +#ifndef WIN32 + +static FORCEINLINE void* dlmremap(void* old_address, size_t old_size, size_t new_size, int flags) { + void* result; + + result = mremap(old_address, old_size, new_size, flags); + if (result == MFAIL) + return MFAIL; + + return result; +} + +#define MREMAP_DEFAULT(addr, osz, nsz, mv) dlmremap((addr), (osz), (nsz), (mv)) +#endif /* WIN32 */ +#endif /* HAVE_MREMAP */ + +/** + * Define CALL_MORECORE + */ +#if HAVE_MORECORE + 
#ifdef MORECORE
+        #define CALL_MORECORE(S)    MORECORE(S)
+    #else  /* MORECORE */
+        #define CALL_MORECORE(S)    MORECORE_DEFAULT(S)
+    #endif /* MORECORE */
+#else  /* HAVE_MORECORE */
+    #define CALL_MORECORE(S)        MFAIL
+#endif /* HAVE_MORECORE */
+
+/**
+ * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
+ */
+#if HAVE_MMAP
+    #define USE_MMAP_BIT            (SIZE_T_ONE)
+
+    #ifdef MMAP
+        #define CALL_MMAP(s)        MMAP(s)
+    #else /* MMAP */
+        #define CALL_MMAP(s)        MMAP_DEFAULT(s)
+    #endif /* MMAP */
+    #ifdef MUNMAP
+        #define CALL_MUNMAP(a, s)   MUNMAP((a), (s))
+    #else /* MUNMAP */
+        #define CALL_MUNMAP(a, s)   MUNMAP_DEFAULT((a), (s))
+    #endif /* MUNMAP */
+    #ifdef DIRECT_MMAP
+        #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
+    #else /* DIRECT_MMAP */
+        #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
+    #endif /* DIRECT_MMAP */
+#else  /* HAVE_MMAP */
+    #define USE_MMAP_BIT            (SIZE_T_ZERO)
+
+    #define MMAP(s)                 MFAIL
+    #define MUNMAP(a, s)            (-1)
+    #define DIRECT_MMAP(s)          MFAIL
+    #define CALL_DIRECT_MMAP(s)     DIRECT_MMAP(s)
+    #define CALL_MMAP(s)            MMAP(s)
+    #define CALL_MUNMAP(a, s)       MUNMAP((a), (s))
+#endif /* HAVE_MMAP */
+
+/**
+ * Define CALL_MREMAP
+ */
+#if HAVE_MMAP && HAVE_MREMAP
+    #ifdef MREMAP
+        #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
+    #else /* MREMAP */
+        #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
+    #endif /* MREMAP */
+#else  /* HAVE_MMAP && HAVE_MREMAP */
+    #define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
+#endif /* HAVE_MMAP && HAVE_MREMAP */
+
+/* mstate bit set if contiguous morecore disabled or failed */
+#define USE_NONCONTIGUOUS_BIT (4U)
+
+/* segment bit set in create_mspace_with_base */
+#define EXTERN_BIT            (8U)
+
+
+/* --------------------------- Lock preliminaries ------------------------ */
+
+/*
+  When locks are defined, there is one global lock, plus
+  one per-mspace lock.
+
+  The global lock ensures that mparams.magic and other unique
+  mparams values are initialized only once. It also protects
+  sequences of calls to MORECORE.  In many cases sys_alloc requires
+  two calls that should not be interleaved with calls by other
+  threads.  This does not protect against direct calls to MORECORE
+  by other threads not using this lock, so there is still code to
+  cope as best we can with interference.
+
+  Per-mspace locks surround calls to malloc, free, etc.
+  By default, locks are simple non-reentrant mutexes.
+
+  Because lock-protected regions generally have bounded times, it is
+  OK to use the supplied simple spinlocks. Spinlocks are likely to
+  improve performance for lightly contended applications, but worsen
+  performance under heavy contention.
+
+  If USE_LOCKS is > 1, the definitions of lock routines here are
+  bypassed, in which case you will need to define the type MLOCK_T,
+  and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
+  and TRY_LOCK.  You must also declare a
+    static MLOCK_T malloc_global_mutex = { initialization values };.
+
+*/
+
+#if !USE_LOCKS
+#define USE_LOCK_BIT               (0U)
+#define INITIAL_LOCK(l)            (0)
+#define DESTROY_LOCK(l)            (0)
+#define ACQUIRE_MALLOC_GLOBAL_LOCK()
+#define RELEASE_MALLOC_GLOBAL_LOCK()
+
+#else
+#if USE_LOCKS > 1
+/* -----------------------  User-defined locks ------------------------ */
+/* Define your own lock implementation here */
+/* #define INITIAL_LOCK(lk)  ... */
+/* #define DESTROY_LOCK(lk)  ... */
+/* #define ACQUIRE_LOCK(lk)  ... */
+/* #define RELEASE_LOCK(lk)  ... */
+/* #define TRY_LOCK(lk) ... */
+/* static MLOCK_T malloc_global_mutex = ... 
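+
+   As an illustration only (an assumed example of what a USE_LOCKS > 1
+   build might supply here, not something this file defines), a C11
+   stdatomic.h based implementation could look roughly like:
+
+     static int c11_spin_acquire(atomic_flag *lk) {
+       while (atomic_flag_test_and_set(lk)) { }
+       return 0;
+     }
+     #define MLOCK_T           atomic_flag
+     #define INITIAL_LOCK(lk)  (atomic_flag_clear(lk), 0)
+     #define DESTROY_LOCK(lk)  (0)
+     #define ACQUIRE_LOCK(lk)  c11_spin_acquire(lk)
+     #define RELEASE_LOCK(lk)  atomic_flag_clear(lk)
+     #define TRY_LOCK(lk)      (!atomic_flag_test_and_set(lk))
+     static MLOCK_T malloc_global_mutex = ATOMIC_FLAG_INIT;
+
+   The helper name c11_spin_acquire is hypothetical; it mirrors the
+   return-0-on-success convention used by the spin-lock versions below.
+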
*/ + +#elif USE_SPIN_LOCKS + +/* First, define CAS_LOCK and CLEAR_LOCK on ints */ +/* Note CAS_LOCK defined to return 0 on success */ + +#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) +#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) +#define CLEAR_LOCK(sl) __sync_lock_release(sl) + +#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) +/* Custom spin locks for older gcc on x86 */ +static FORCEINLINE int x86_cas_lock(int *sl) { + int ret; + int val = 1; + int cmp = 0; + __asm__ __volatile__ ("lock; cmpxchgl %1, %2" + : "=a" (ret) + : "r" (val), "m" (*(sl)), "0"(cmp) + : "memory", "cc"); + return ret; +} + +static FORCEINLINE void x86_clear_lock(int* sl) { + assert(*sl != 0); + int prev = 0; + int ret; + __asm__ __volatile__ ("lock; xchgl %0, %1" + : "=r" (ret) + : "m" (*(sl)), "0"(prev) + : "memory"); +} + +#define CAS_LOCK(sl) x86_cas_lock(sl) +#define CLEAR_LOCK(sl) x86_clear_lock(sl) + +#else /* Win32 MSC */ +#define CAS_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)1) +#define CLEAR_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)0) + +#endif /* ... gcc spins locks ... */ + +/* How to yield for a spin lock */ +#define SPINS_PER_YIELD 63 +#if defined(_MSC_VER) +#define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ +#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) +#elif defined (__SVR4) && defined (__sun) /* solaris */ +#define SPIN_LOCK_YIELD thr_yield(); +#elif !defined(LACKS_SCHED_H) +#define SPIN_LOCK_YIELD sched_yield(); +#else +#define SPIN_LOCK_YIELD +#endif /* ... yield ... */ + +#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 +/* Plain spin locks use single word (embedded in malloc_states) */ +static int spin_acquire_lock(int *sl) { + int spins = 0; + while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) { + if ((++spins & SPINS_PER_YIELD) == 0) { + SPIN_LOCK_YIELD; + } + } + return 0; +} + +#define MLOCK_T int +#define TRY_LOCK(sl) !CAS_LOCK(sl) +#define RELEASE_LOCK(sl) CLEAR_LOCK(sl) +#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0) +#define INITIAL_LOCK(sl) (*sl = 0) +#define DESTROY_LOCK(sl) (0) +static MLOCK_T malloc_global_mutex = 0; + +#else /* USE_RECURSIVE_LOCKS */ +/* types for lock owners */ +#ifdef WIN32 +#define THREAD_ID_T DWORD +#define CURRENT_THREAD GetCurrentThreadId() +#define EQ_OWNER(X,Y) ((X) == (Y)) +#else +/* + Note: the following assume that pthread_t is a type that can be + initialized to (casted) zero. If this is not the case, you will need to + somehow redefine these or not use spin locks. 
+*/ +#define THREAD_ID_T pthread_t +#define CURRENT_THREAD pthread_self() +#define EQ_OWNER(X,Y) pthread_equal(X, Y) +#endif + +struct malloc_recursive_lock { + int sl; + unsigned int c; + THREAD_ID_T threadid; +}; + +#define MLOCK_T struct malloc_recursive_lock +static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0}; + +static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) { + assert(lk->sl != 0); + if (--lk->c == 0) { + CLEAR_LOCK(&lk->sl); + } +} + +static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) { + THREAD_ID_T mythreadid = CURRENT_THREAD; + int spins = 0; + for (;;) { + if (*((volatile int *)(&lk->sl)) == 0) { + if (!CAS_LOCK(&lk->sl)) { + lk->threadid = mythreadid; + lk->c = 1; + return 0; + } + } + else if (EQ_OWNER(lk->threadid, mythreadid)) { + ++lk->c; + return 0; + } + if ((++spins & SPINS_PER_YIELD) == 0) { + SPIN_LOCK_YIELD; + } + } +} + +static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) { + THREAD_ID_T mythreadid = CURRENT_THREAD; + if (*((volatile int *)(&lk->sl)) == 0) { + if (!CAS_LOCK(&lk->sl)) { + lk->threadid = mythreadid; + lk->c = 1; + return 1; + } + } + else if (EQ_OWNER(lk->threadid, mythreadid)) { + ++lk->c; + return 1; + } + return 0; +} + +#define RELEASE_LOCK(lk) recursive_release_lock(lk) +#define TRY_LOCK(lk) recursive_try_lock(lk) +#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) +#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) +#define DESTROY_LOCK(lk) (0) +#endif /* USE_RECURSIVE_LOCKS */ + +#elif defined(WIN32) /* Win32 critical sections */ +#define MLOCK_T CRITICAL_SECTION +#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) +#define RELEASE_LOCK(lk) LeaveCriticalSection(lk) +#define TRY_LOCK(lk) TryEnterCriticalSection(lk) +#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000)) +#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) +#define NEED_GLOBAL_LOCK_INIT + +static MLOCK_T malloc_global_mutex; +static volatile LONG malloc_global_mutex_status; + +/* Use spin loop to initialize global lock */ +static void init_malloc_global_mutex() { + for (;;) { + long stat = malloc_global_mutex_status; + if (stat > 0) + return; + /* transition to < 0 while initializing, then to > 0) */ + if (stat == 0 && + interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) { + InitializeCriticalSection(&malloc_global_mutex); + interlockedexchange(&malloc_global_mutex_status, (LONG)1); + return; + } + SleepEx(0, FALSE); + } +} + +#else /* pthreads-based locks */ +#define MLOCK_T pthread_mutex_t +#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) +#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) +#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) +#define INITIAL_LOCK(lk) pthread_init_lock(lk) +#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) + +#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) +/* Cope with old-style linux recursive lock initialization by adding */ +/* skipped internal declaration from pthread.h */ +extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr, + int __kind)); +#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP +#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y) +#endif /* USE_RECURSIVE_LOCKS ... 
*/ + +static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER; + +static int pthread_init_lock (MLOCK_T *lk) { + pthread_mutexattr_t attr; + if (pthread_mutexattr_init(&attr)) return 1; +#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 + if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1; +#endif + if (pthread_mutex_init(lk, &attr)) return 1; + if (pthread_mutexattr_destroy(&attr)) return 1; + return 0; +} + +#endif /* ... lock types ... */ + +/* Common code for all lock types */ +#define USE_LOCK_BIT (2U) + +#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK +#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); +#endif + +#ifndef RELEASE_MALLOC_GLOBAL_LOCK +#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); +#endif + +#endif /* USE_LOCKS */ + +/* ----------------------- Chunk representations ------------------------ */ + +/* + (The following includes lightly edited explanations by Colin Plumb.) + + The malloc_chunk declaration below is misleading (but accurate and + necessary). It declares a "view" into memory allowing access to + necessary fields at known offsets from a given base. + + Chunks of memory are maintained using a `boundary tag' method as + originally described by Knuth. (See the paper by Paul Wilson + ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such + techniques.) Sizes of free chunks are stored both in the front of + each chunk and at the end. This makes consolidating fragmented + chunks into bigger chunks fast. The head fields also hold bits + representing whether chunks are free or in use. + + Here are some pictures to make it clearer. They are "exploded" to + show that the state of a chunk can be thought of as extending from + the high 31 bits of the head field of its header through the + prev_foot and PINUSE_BIT bit of the following chunk header. + + A chunk that's in use looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk (if P = 0) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 1| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + +- -+ + | | + +- -+ + | : + +- size - sizeof(size_t) available payload bytes -+ + : | + chunk-> +- -+ + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| + | Size of next chunk (may or may not be in use) | +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + And if it's free, it looks like this: + + chunk-> +- -+ + | User payload (must be in use, or we would have merged!) 
| + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 0| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Next pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Prev pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- size - sizeof(struct chunk) unused bytes -+ + : | + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0| + | Size of next chunk (must be in use, or we would have merged)| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- User payload -+ + : | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |0| + +-+ + Note that since we always merge adjacent free chunks, the chunks + adjacent to a free chunk must be in use. + + Given a pointer to a chunk (which can be derived trivially from the + payload pointer) we can, in O(1) time, find out whether the adjacent + chunks are free, and if so, unlink them from the lists that they + are on and merge them with the current chunk. + + Chunks always begin on even word boundaries, so the mem portion + (which is returned to the user) is also on an even word boundary, and + thus at least double-word aligned. + + The P (PINUSE_BIT) bit, stored in the unused low-order bit of the + chunk size (which is always a multiple of two words), is an in-use + bit for the *previous* chunk. If that bit is *clear*, then the + word before the current chunk size contains the previous chunk + size, and can be used to find the front of the previous chunk. + The very first chunk allocated always has this bit set, preventing + access to non-existent (or non-owned) memory. If pinuse is set for + any given chunk, then you CANNOT determine the size of the + previous chunk, and might even get a memory addressing fault when + trying to do so. + + The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of + the chunk size redundantly records whether the current chunk is + inuse (unless the chunk is mmapped). This redundancy enables usage + checks within free and realloc, and reduces indirection when freeing + and consolidating chunks. + + Each freshly allocated chunk must have both cinuse and pinuse set. + That is, each allocated chunk borders either a previously allocated + and still in-use chunk, or the base of its memory arena. This is + ensured by making all allocations from the `lowest' part of any + found chunk. Further, no free chunk physically borders another one, + so each free chunk is known to be preceded and followed by either + inuse chunks or the ends of memory. + + Note that the `foot' of the current chunk is actually represented + as the prev_foot of the NEXT chunk. This makes it easier to + deal with alignments etc but can be very confusing when trying + to extend or adapt this code. + + The exceptions to all this are + + 1. The special chunk `top' is the top-most available chunk (i.e., + the one bordering the end of available memory). It is treated + specially. Top is never included in any bin, is used only if + no other chunk is available, and is released back to the + system if it is very large (see M_TRIM_THRESHOLD). 
In effect,
+        the top chunk is treated as larger (and thus less well
+        fitting) than any other available chunk.  The top chunk
+        doesn't update its trailing size field since there is no next
+        contiguous chunk that would have to index off it. However,
+        space is still allocated for it (TOP_FOOT_SIZE) to enable
+        separation or merging when space is extended.
+
+     2. Chunks allocated via mmap have both cinuse and pinuse bits
+        cleared in their head fields.  Because they are allocated
+        one-by-one, each must carry its own prev_foot field, which is
+        also used to hold the offset this chunk has within its mmapped
+        region, which is needed to preserve alignment. Each mmapped
+        chunk is trailed by the first two fields of a fake next-chunk
+        for sake of usage checks.
+
+*/
+
+struct malloc_chunk {
+  size_t               prev_foot;  /* Size of previous chunk (if free).  */
+  size_t               head;       /* Size and inuse bits. */
+  struct malloc_chunk* fd;         /* double links -- used only if free. */
+  struct malloc_chunk* bk;
+};
+
+typedef struct malloc_chunk  mchunk;
+typedef struct malloc_chunk* mchunkptr;
+typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
+typedef unsigned int bindex_t;         /* Described below */
+typedef unsigned int binmap_t;         /* Described below */
+typedef unsigned int flag_t;           /* The type of various bit flag sets */
+
+/* ------------------- Chunks sizes and alignments ----------------------- */
+
+#define MCHUNK_SIZE         (sizeof(mchunk))
+
+#if FOOTERS
+#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
+#else /* FOOTERS */
+#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
+#endif /* FOOTERS */
+
+/* MMapped chunks need a second word of overhead ... */
+#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
+/* ... and additional padding for fake next-chunk at foot */
+#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
+
+/* The smallest size we can malloc is an aligned minimal chunk */
+#define MIN_CHUNK_SIZE\
+  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* conversion from malloc headers to user pointers, and back */
+#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
+#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
+/* chunk associated with aligned address A */
+#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
+
+/* Bounds on request (not chunk) sizes. */
+#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
+#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
+
+/* pad request bytes into a usable size */
+#define pad_request(req) \
+   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* pad request, checking for minimum (but not maximum) */
+#define request2size(req) \
+  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
+
+
+/* ------------------ Operations on head and foot fields ----------------- */
+
+/*
+  The head field of a chunk is or'ed with PINUSE_BIT when the previous
+  adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in
+  use, unless mmapped, in which case both bits are cleared.
+
+  FLAG4_BIT is not used by this malloc, but might be useful in extensions.
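+
+  A small worked example (illustrative only): a 48-byte chunk that is
+  itself in use and whose previous neighbor is also in use stores
+  head = 48 | CINUSE_BIT | PINUSE_BIT = 0x33.  chunksize() recovers 48 by
+  masking off FLAG_BITS, cinuse()/pinuse() read the two low bits back, and
+  an mmapped chunk stores neither bit, which is what is_mmapped() tests.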
+*/ + +#define PINUSE_BIT (SIZE_T_ONE) +#define CINUSE_BIT (SIZE_T_TWO) +#define FLAG4_BIT (SIZE_T_FOUR) +#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) +#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT) + +/* Head value for fenceposts */ +#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) + +/* extraction of fields from head words */ +#define cinuse(p) ((p)->head & CINUSE_BIT) +#define pinuse(p) ((p)->head & PINUSE_BIT) +#define flag4inuse(p) ((p)->head & FLAG4_BIT) +#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) +#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) + +#define chunksize(p) ((p)->head & ~(FLAG_BITS)) + +#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) +#define set_flag4(p) ((p)->head |= FLAG4_BIT) +#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) + +/* Treat space at ptr +/- offset as a chunk */ +#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) +#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) + +/* Ptr to next or previous physical malloc_chunk. */ +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS))) +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) + +/* extract next chunk's pinuse bit */ +#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) + +/* Get/set size at footer */ +#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) +#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) + +/* Set size, pinuse bit, and foot */ +#define set_size_and_pinuse_of_free_chunk(p, s)\ + ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) + +/* Set size, pinuse bit, foot, and clear next pinuse */ +#define set_free_with_pinuse(p, s, n)\ + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) + +/* Get the internal overhead associated with chunk p */ +#define overhead_for(p)\ + (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) + +/* Return true if malloced space is not necessarily cleared */ +#if MMAP_CLEARS +#define calloc_must_clear(p) (!is_mmapped(p)) +#else /* MMAP_CLEARS */ +#define calloc_must_clear(p) (1) +#endif /* MMAP_CLEARS */ + +/* ---------------------- Overlaid data structures ----------------------- */ + +/* + When chunks are not in use, they are treated as nodes of either + lists or trees. + + "Small" chunks are stored in circular doubly-linked lists, and look + like this: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space (may be 0 bytes long) . + . . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Larger chunks are kept in a form of bitwise digital trees (aka + tries) keyed on chunksizes. Because malloc_tree_chunks are only for + free chunks greater than 256 bytes, their size doesn't impose any + constraints on user chunk sizes. 
Each node looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to left child (child[0]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to right child (child[1]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to parent | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | bin index of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Each tree holding treenodes is a tree of unique chunk sizes. Chunks + of the same size are arranged in a circularly-linked list, with only + the oldest chunk (the next to be used, in our FIFO ordering) + actually in the tree. (Tree members are distinguished by a non-null + parent pointer.) If a chunk with the same size an an existing node + is inserted, it is linked off the existing node using pointers that + work in the same way as fd/bk pointers of small chunks. + + Each tree contains a power of 2 sized range of chunk sizes (the + smallest is 0x100 <= x < 0x180), which is is divided in half at each + tree level, with the chunks in the smaller half of the range (0x100 + <= x < 0x140 for the top nose) in the left subtree and the larger + half (0x140 <= x < 0x180) in the right subtree. This is, of course, + done by inspecting individual bits. + + Using these rules, each node's left subtree contains all smaller + sizes than its right subtree. However, the node at the root of each + subtree has no particular ordering relationship to either. (The + dividing line between the subtree sizes is based on trie relation.) + If we remove the last chunk of a given size from the interior of the + tree, we need to replace it with a leaf node. The tree ordering + rules permit a node to be replaced by any leaf below it. + + The smallest chunk in a tree (a common operation in a best-fit + allocator) can be found by walking a path to the leftmost leaf in + the tree. Unlike a usual binary tree, where we follow left child + pointers until we reach a null, here we follow the right child + pointer any time the left one is null, until we reach a leaf with + both child pointers null. The smallest chunk in the tree will be + somewhere along that path. + + The worst case number of steps to add, find, or remove a node is + bounded by the number of bits differentiating chunks within + bins. Under current bin calculations, this ranges from 6 up to 21 + (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case + is of course much better. 
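+
+  As a rough sketch only (an illustration of the walk just described, using
+  the helpers defined below), the smallest chunk in a non-empty tree rooted
+  at t can be found with:
+
+    tchunkptr v = t;
+    while ((t = leftmost_child(t)) != 0)
+      if (chunksize(t) < chunksize(v)) v = t;
+
+  On exit v is the smallest candidate; the in-tree search code later in
+  this file follows this shape while also tracking remainder sizes.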
+*/ + +struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ + size_t prev_foot; + size_t head; + struct malloc_tree_chunk* fd; + struct malloc_tree_chunk* bk; + + struct malloc_tree_chunk* child[2]; + struct malloc_tree_chunk* parent; + bindex_t index; +}; + +typedef struct malloc_tree_chunk tchunk; +typedef struct malloc_tree_chunk* tchunkptr; +typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ + +/* A little helper macro for trees */ +#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) + +/* ----------------------------- Segments -------------------------------- */ + +/* + Each malloc space may include non-contiguous segments, held in a + list headed by an embedded malloc_segment record representing the + top-most space. Segments also include flags holding properties of + the space. Large chunks that are directly allocated by mmap are not + included in this list. They are instead independently created and + destroyed without otherwise keeping track of them. + + Segment management mainly comes into play for spaces allocated by + MMAP. Any call to MMAP might or might not return memory that is + adjacent to an existing segment. MORECORE normally contiguously + extends the current space, so this space is almost always adjacent, + which is simpler and faster to deal with. (This is why MORECORE is + used preferentially to MMAP when both are available -- see + sys_alloc.) When allocating using MMAP, we don't use any of the + hinting mechanisms (inconsistently) supported in various + implementations of unix mmap, or distinguish reserving from + committing memory. Instead, we just ask for space, and exploit + contiguity when we get it. It is probably possible to do + better than this on some systems, but no general scheme seems + to be significantly better. + + Management entails a simpler variant of the consolidation scheme + used for chunks to reduce fragmentation -- new adjacent memory is + normally prepended or appended to an existing segment. However, + there are limitations compared to chunk consolidation that mostly + reflect the fact that segment processing is relatively infrequent + (occurring only when getting memory from system) and that we + don't expect to have huge numbers of segments: + + * Segments are not indexed, so traversal requires linear scans. (It + would be possible to index these, but is not worth the extra + overhead and complexity for most programs on most platforms.) + * New segments are only appended to old ones when holding top-most + memory; if they cannot be prepended to others, they are held in + different segments. + + Except for the top-most segment of an mstate, each segment record + is kept at the tail of its segment. Segments are added by pushing + segment records onto the list headed by &mstate.seg for the + containing mstate. + + Segment flags control allocation/merge/deallocation policies: + * If EXTERN_BIT set, then we did not allocate this segment, + and so should not try to deallocate or merge with others. + (This currently holds only for the initial segment passed + into create_mspace_with_base.) + * If USE_MMAP_BIT set, the segment may be merged with + other surrounding mmapped segments and trimmed/de-allocated + using munmap. + * If neither bit is set, then the segment was obtained using + MORECORE so can be merged with surrounding MORECORE'd segments + and deallocated/trimmed using MORECORE with negative arguments. 
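+
+  As an illustration only (not code used verbatim by this malloc), the
+  segment list of a space m is walked the same way everywhere below, for
+  example to sum the mapped size of the space:
+
+    size_t total = 0;
+    msegmentptr sp = &m->seg;
+    while (sp != 0) {
+      total += sp->size;
+      sp = sp->next;
+    }
+
+  with a null next pointer terminating the list.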
+*/ + +struct malloc_segment { + char* base; /* base address */ + size_t size; /* allocated size */ + struct malloc_segment* next; /* ptr to next segment */ + flag_t sflags; /* mmap and extern flag */ +}; + +#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) +#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) + +typedef struct malloc_segment msegment; +typedef struct malloc_segment* msegmentptr; + +/* ---------------------------- malloc_state ----------------------------- */ + +/* + A malloc_state holds all of the bookkeeping for a space. + The main fields are: + + Top + The topmost chunk of the currently active segment. Its size is + cached in topsize. The actual size of topmost space is + topsize+TOP_FOOT_SIZE, which includes space reserved for adding + fenceposts and segment records if necessary when getting more + space from the system. The size at which to autotrim top is + cached from mparams in trim_check, except that it is disabled if + an autotrim fails. + + Designated victim (dv) + This is the preferred chunk for servicing small requests that + don't have exact fits. It is normally the chunk split off most + recently to service another small request. Its size is cached in + dvsize. The link fields of this chunk are not maintained since it + is not kept in a bin. + + SmallBins + An array of bin headers for free chunks. These bins hold chunks + with sizes less than MIN_LARGE_SIZE bytes. Each bin contains + chunks of all the same size, spaced 8 bytes apart. To simplify + use in double-linked lists, each bin header acts as a malloc_chunk + pointing to the real first node, if it exists (else pointing to + itself). This avoids special-casing for headers. But to avoid + waste, we allocate only the fd/bk pointers of bins, and then use + repositioning tricks to treat these as the fields of a chunk. + + TreeBins + Treebins are pointers to the roots of trees holding a range of + sizes. There are 2 equally spaced treebins for each power of two + from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything + larger. + + Bin maps + There is one bit map for small bins ("smallmap") and one for + treebins ("treemap). Each bin sets its bit when non-empty, and + clears the bit when empty. Bit operations are then used to avoid + bin-by-bin searching -- nearly all "search" is done without ever + looking at bins that won't be selected. The bit maps + conservatively use 32 bits per map word, even if on 64bit system. + For a good description of some of the bit-based techniques used + here, see Henry S. Warren Jr's book "Hacker's Delight" (and + supplement at http://hackersdelight.org/). Many of these are + intended to reduce the branchiness of paths through malloc etc, as + well as to reduce the number of memory locations read or written. + + Segments + A list of segments headed by an embedded malloc_segment record + representing the initial space. + + Address check support + The least_addr field is the least address ever obtained from + MORECORE or MMAP. Attempted frees and reallocs of any address less + than this are trapped (unless INSECURE is defined). + + Magic tag + A cross-check field that should always hold same value as mparams.magic. + + Max allowed footprint + The maximum allowed bytes to allocate from system (zero means no limit) + + Flags + Bits recording whether to use MMAP, locks, or contiguous MORECORE + + Statistics + Each space keeps track of current and maximum system memory + obtained via MORECORE or MMAP. 
+ + Trim support + Fields holding the amount of unused topmost memory that should trigger + trimming, and a counter to force periodic scanning to release unused + non-topmost segments. + + Locking + If USE_LOCKS is defined, the "mutex" lock is acquired and released + around every public call using this mspace. + + Extension support + A void* pointer and a size_t field that can be used to help implement + extensions to this malloc. +*/ + +/* Bin types, widths and sizes */ +#define NSMALLBINS (32U) +#define NTREEBINS (32U) +#define SMALLBIN_SHIFT (3U) +#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) +#define TREEBIN_SHIFT (8U) +#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) +#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) +#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) + +struct malloc_state { + binmap_t smallmap; + binmap_t treemap; + size_t dvsize; + size_t topsize; + char* least_addr; + mchunkptr dv; + mchunkptr top; + size_t trim_check; + size_t release_checks; + size_t magic; + mchunkptr smallbins[(NSMALLBINS+1)*2]; + tbinptr treebins[NTREEBINS]; + size_t footprint; + size_t max_footprint; + size_t footprint_limit; /* zero means no limit */ + flag_t mflags; +#if USE_LOCKS + MLOCK_T mutex; /* locate lock among fields that rarely change */ +#endif /* USE_LOCKS */ + msegment seg; + void* extp; /* Unused but available for extensions */ + size_t exts; +}; + +typedef struct malloc_state* mstate; + +/* ------------- Global malloc_state and malloc_params ------------------- */ + +/* + malloc_params holds global properties, including those that can be + dynamically set using mallopt. There is a single instance, mparams, + initialized in init_mparams. Note that the non-zeroness of "magic" + also serves as an initialization flag. 
+*/ + +struct malloc_params { + size_t magic; + size_t page_size; + size_t granularity; + size_t mmap_threshold; + size_t trim_threshold; + flag_t default_mflags; +}; + +static struct malloc_params mparams; + +/* Ensure mparams initialized */ +#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams()) + +#if !ONLY_MSPACES + +/* The global malloc_state used for all non-"mspace" calls */ +static struct malloc_state _gm_; +#define gm (&_gm_) +#define is_global(M) ((M) == &_gm_) + +#endif /* !ONLY_MSPACES */ + +#define is_initialized(M) ((M)->top != 0) + +/* -------------------------- system alloc setup ------------------------- */ + +/* Operations on mflags */ + +#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) +#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) +#if USE_LOCKS +#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) +#else +#define disable_lock(M) +#endif + +#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) +#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) +#if HAVE_MMAP +#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) +#else +#define disable_mmap(M) +#endif + +#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) +#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) + +#define set_lock(M,L)\ + ((M)->mflags = (L)?\ + ((M)->mflags | USE_LOCK_BIT) :\ + ((M)->mflags & ~USE_LOCK_BIT)) + +/* page-align a size */ +#define page_align(S)\ + (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE)) + +/* granularity-align a size */ +#define granularity_align(S)\ + (((S) + (mparams.granularity - SIZE_T_ONE))\ + & ~(mparams.granularity - SIZE_T_ONE)) + + +/* For mmap, use granularity alignment on windows, else page-align */ +#ifdef WIN32 +#define mmap_align(S) granularity_align(S) +#else +#define mmap_align(S) page_align(S) +#endif + +/* For sys_alloc, enough padding to ensure can malloc request on success */ +#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) + +#define is_page_aligned(S)\ + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) +#define is_granularity_aligned(S)\ + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) + +/* True if segment S holds address A */ +#define segment_holds(S, A)\ + ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) + +/* Return segment holding given address */ +static msegmentptr segment_holding(mstate m, char* addr) { + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= sp->base && addr < sp->base + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} + +/* Return true if segment contains a segment link */ +static int has_segment_link(mstate m, msegmentptr ss) { + msegmentptr sp = &m->seg; + for (;;) { + if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) + return 1; + if ((sp = sp->next) == 0) + return 0; + } +} + +#ifndef MORECORE_CANNOT_TRIM +#define should_trim(M,s) ((s) > (M)->trim_check) +#else /* MORECORE_CANNOT_TRIM */ +#define should_trim(M,s) (0) +#endif /* MORECORE_CANNOT_TRIM */ + +/* + TOP_FOOT_SIZE is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. +*/ +#define TOP_FOOT_SIZE\ + (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + + +/* ------------------------------- Hooks -------------------------------- */ + +/* + PREACTION should be defined to return 0 on success, and nonzero on + failure. 
If you are not using locking, you can redefine these to do + anything you like. +*/ + +#if USE_LOCKS +#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) +#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } +#else /* USE_LOCKS */ + +#ifndef PREACTION +#define PREACTION(M) (0) +#endif /* PREACTION */ + +#ifndef POSTACTION +#define POSTACTION(M) +#endif /* POSTACTION */ + +#endif /* USE_LOCKS */ + +/* + CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. + USAGE_ERROR_ACTION is triggered on detected bad frees and + reallocs. The argument p is an address that might have triggered the + fault. It is ignored by the two predefined actions, but might be + useful in custom actions that try to help diagnose errors. +*/ + +#if PROCEED_ON_ERROR + +/* A count of the number of corruption errors causing resets */ +int malloc_corruption_error_count; + +/* default corruption action */ +static void reset_on_error(mstate m); + +#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) +#define USAGE_ERROR_ACTION(m, p) + +#else /* PROCEED_ON_ERROR */ + +#ifndef CORRUPTION_ERROR_ACTION +#define CORRUPTION_ERROR_ACTION(m) ABORT +#endif /* CORRUPTION_ERROR_ACTION */ + +#ifndef USAGE_ERROR_ACTION +#define USAGE_ERROR_ACTION(m,p) ABORT +#endif /* USAGE_ERROR_ACTION */ + +#endif /* PROCEED_ON_ERROR */ + + +/* -------------------------- Debugging setup ---------------------------- */ + +#if ! DEBUG + +#define check_free_chunk(M,P) +#define check_inuse_chunk(M,P) +#define check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) +#define check_malloc_state(M) +#define check_top_chunk(M,P) + +#else /* DEBUG */ +#define check_free_chunk(M,P) do_check_free_chunk(M,P) +#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) +#define check_top_chunk(M,P) do_check_top_chunk(M,P) +#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) +#define check_malloc_state(M) do_check_malloc_state(M) + +static void do_check_any_chunk(mstate m, mchunkptr p); +static void do_check_top_chunk(mstate m, mchunkptr p); +static void do_check_mmapped_chunk(mstate m, mchunkptr p); +static void do_check_inuse_chunk(mstate m, mchunkptr p); +static void do_check_free_chunk(mstate m, mchunkptr p); +static void do_check_malloced_chunk(mstate m, void* mem, size_t s); +static void do_check_tree(mstate m, tchunkptr t); +static void do_check_treebin(mstate m, bindex_t i); +static void do_check_smallbin(mstate m, bindex_t i); +static void do_check_malloc_state(mstate m); +static int bin_find(mstate m, mchunkptr x); +static size_t traverse_and_check(mstate m); +#endif /* DEBUG */ + +/* ---------------------------- Indexing Bins ---------------------------- */ + +#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) +#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) +#define small_index2size(i) ((i) << SMALLBIN_SHIFT) +#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) + +/* addressing by index. See above about smallbin repositioning */ +#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) +#define treebin_at(M,i) (&((M)->treebins[i])) + +/* assign tree index for size S to variable I. 
Use x86 asm if possible */ +#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) +#define compute_tree_index(S, I)\ +{\ + unsigned int X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} + +#elif defined (__INTEL_COMPILER) +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K = _bit_scan_reverse (X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} + +#elif defined(_MSC_VER) && _MSC_VER>=1300 +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K;\ + _BitScanReverse((DWORD *) &K, (DWORD) X);\ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} + +#else /* GNUC */ +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int Y = (unsigned int)X;\ + unsigned int N = ((Y - 0x100) >> 16) & 8;\ + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ + N += K;\ + N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ + K = 14 - N + ((Y <<= K) >> 15);\ + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ + }\ +} +#endif /* GNUC */ + +/* Bit representing maximum resolved size in a treebin at i */ +#define bit_for_tree_index(i) \ + (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) + +/* Shift placing maximum resolved bit in a treebin at i as sign bit */ +#define leftshift_for_tree_index(i) \ + ((i == NTREEBINS-1)? 0 : \ + ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + +/* The size of the smallest chunk held in bin with index i */ +#define minsize_for_tree_index(i) \ + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ + (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) + + +/* ------------------------ Operations on bin maps ----------------------- */ + +/* bit corresponding to given index */ +#define idx2bit(i) ((binmap_t)(1) << (i)) + +/* Mark/Clear bits with given index */ +#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) +#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) +#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) + +#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) +#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) +#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) + +/* isolate the least set bit of a bitmap */ +#define least_bit(x) ((x) & -(x)) + +/* mask with all bits to left of least bit of x on */ +#define left_bits(x) ((x<<1) | -(x<<1)) + +/* mask with all bits to left of or equal to least bit of x on */ +#define same_or_left_bits(x) ((x) | -(x)) + +/* index corresponding to given bit. 
Use x86 asm if possible */
+
+#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+#define compute_bit2idx(X, I)\
+{\
+  unsigned int J;\
+  J = __builtin_ctz(X); \
+  I = (bindex_t)J;\
+}
+
+#elif defined (__INTEL_COMPILER)
+#define compute_bit2idx(X, I)\
+{\
+  unsigned int J;\
+  J = _bit_scan_forward (X); \
+  I = (bindex_t)J;\
+}
+
+#elif defined(_MSC_VER) && _MSC_VER>=1300
+#define compute_bit2idx(X, I)\
+{\
+  unsigned int J;\
+  _BitScanForward((DWORD *) &J, X);\
+  I = (bindex_t)J;\
+}
+
+#elif USE_BUILTIN_FFS
+#define compute_bit2idx(X, I) I = ffs(X)-1
+
+#else
+#define compute_bit2idx(X, I)\
+{\
+  unsigned int Y = X - 1;\
+  unsigned int K = Y >> (16-4) & 16;\
+  unsigned int N = K;        Y >>= K;\
+  N += K = Y >> (8-3) & 8;  Y >>= K;\
+  N += K = Y >> (4-2) & 4;  Y >>= K;\
+  N += K = Y >> (2-1) & 2;  Y >>= K;\
+  N += K = Y >> (1-0) & 1;  Y >>= K;\
+  I = (bindex_t)(N + Y);\
+}
+#endif /* GNUC */
+
+
+/* ----------------------- Runtime Check Support ------------------------- */
+
+/*
+  For security, the main invariant is that malloc/free/etc never
+  writes to a static address other than malloc_state, unless static
+  malloc_state itself has been corrupted, which cannot occur via
+  malloc (because of these checks). In essence this means that we
+  believe all pointers, sizes, maps etc held in malloc_state, but
+  check all of those linked or offsetted from other embedded data
+  structures.  These checks are interspersed with main code in a way
+  that tends to minimize their run-time cost.
+
+  When FOOTERS is defined, in addition to range checking, we also
+  verify footer fields of inuse chunks, which can be used to guarantee
+  that the mstate controlling malloc/free is intact.  This is a
+  streamlined version of the approach described by William Robertson
+  et al in "Run-time Detection of Heap-based Overflows" LISA'03
+  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
+  of an inuse chunk holds the xor of its mstate and a random seed,
+  which is checked upon calls to free() and realloc().  This is
+  (probabilistically) unguessable from outside the program, but can be
+  computed by any code successfully malloc'ing any chunk, so does not
+  itself provide protection against code that has already broken
+  security through some other means.  Unlike Robertson et al, we
+  always dynamically check addresses of all offset chunks (previous,
+  next, etc). This turns out to be cheaper than relying on hashes.
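+
+  As a rough sketch only (using macros defined elsewhere in this file), the
+  check performed on a call such as free(mem) when FOOTERS is nonzero is
+  essentially:
+
+    mchunkptr p = mem2chunk(mem);
+    mstate fm = get_mstate_for(p);
+    if (!ok_magic(fm))
+      USAGE_ERROR_ACTION(fm, p);
+
+  where get_mstate_for xors the stored footer with mparams.magic, and
+  USAGE_ERROR_ACTION aborts by default.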
+*/ + +#if !INSECURE +/* Check if address a is at least as high as any from MORECORE or MMAP */ +#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) +/* Check if address of next chunk n is higher than base chunk p */ +#define ok_next(p, n) ((char*)(p) < (char*)(n)) +/* Check if p has inuse status */ +#define ok_inuse(p) is_inuse(p) +/* Check if p has its pinuse bit on */ +#define ok_pinuse(p) pinuse(p) + +#else /* !INSECURE */ +#define ok_address(M, a) (1) +#define ok_next(b, n) (1) +#define ok_inuse(p) (1) +#define ok_pinuse(p) (1) +#endif /* !INSECURE */ + +#if (FOOTERS && !INSECURE) +/* Check if (alleged) mstate m has expected magic field */ +#define ok_magic(M) ((M)->magic == mparams.magic) +#else /* (FOOTERS && !INSECURE) */ +#define ok_magic(M) (1) +#endif /* (FOOTERS && !INSECURE) */ + +/* In gcc, use __builtin_expect to minimize impact of checks */ +#if !INSECURE +#if defined(__GNUC__) && __GNUC__ >= 3 +#define RTCHECK(e) __builtin_expect(e, 1) +#else /* GNUC */ +#define RTCHECK(e) (e) +#endif /* GNUC */ +#else /* !INSECURE */ +#define RTCHECK(e) (1) +#endif /* !INSECURE */ + +/* macros to set up inuse chunks with or without footers */ + +#if !FOOTERS + +#define mark_inuse_foot(M,p,s) + +/* Macros for setting head/foot of non-mmapped chunks */ + +/* Set cinuse bit and pinuse bit of next chunk */ +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set size, cinuse and pinuse bit of this chunk */ +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) + +#else /* FOOTERS */ + +/* Set foot of inuse chunk to be xor of mstate and seed */ +#define mark_inuse_foot(M,p,s)\ + (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) + +#define get_mstate_for(p)\ + ((mstate)(((mchunkptr)((char*)(p) +\ + (chunksize(p))))->prev_foot ^ mparams.magic)) + +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M,p,s)) + +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ + mark_inuse_foot(M,p,s)) + +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + mark_inuse_foot(M, p, s)) + +#endif /* !FOOTERS */ + +/* ---------------------------- setting mparams -------------------------- */ + +#if LOCK_AT_FORK +static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); } +static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); } +static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); } +#endif /* LOCK_AT_FORK */ + +/* Initialize mparams */ +static int init_mparams(void) { +#ifdef NEED_GLOBAL_LOCK_INIT + if (malloc_global_mutex_status <= 0) + init_malloc_global_mutex(); +#endif + + ACQUIRE_MALLOC_GLOBAL_LOCK(); + if (mparams.magic == 0) { + size_t magic; + size_t psize; + size_t gsize; + +#ifndef WIN32 + psize = malloc_getpagesize; + gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize); +#else /* WIN32 */ + { + SYSTEM_INFO system_info; + GetSystemInfo(&system_info); + psize = system_info.dwPageSize; + gsize = ((DEFAULT_GRANULARITY != 0)? 
+ DEFAULT_GRANULARITY : system_info.dwAllocationGranularity); + } +#endif /* WIN32 */ + + /* Sanity-check configuration: + size_t must be unsigned and as wide as pointer type. + ints must be at least 4 bytes. + alignment must be at least 8. + Alignment, min chunk size, and page size must all be powers of 2. + */ + if ((sizeof(size_t) != sizeof(char*)) || + (MAX_SIZE_T < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || + (MALLOC_ALIGNMENT < (size_t)8U) || + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || + ((gsize & (gsize-SIZE_T_ONE)) != 0) || + ((psize & (psize-SIZE_T_ONE)) != 0)) + ABORT; + mparams.granularity = gsize; + mparams.page_size = psize; + mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; + mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; +#if MORECORE_CONTIGUOUS + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; +#else /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; +#endif /* MORECORE_CONTIGUOUS */ + +#if !ONLY_MSPACES + /* Set up lock for main malloc area */ + gm->mflags = mparams.default_mflags; + (void)INITIAL_LOCK(&gm->mutex); +#endif +#if LOCK_AT_FORK + pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child); +#endif + + { +#if USE_DEV_RANDOM + int fd; + unsigned char buf[sizeof(size_t)]; + /* Try to use /dev/urandom, else fall back on using time */ + if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && + read(fd, buf, sizeof(buf)) == sizeof(buf)) { + magic = *((size_t *) buf); + close(fd); + } + else +#endif /* USE_DEV_RANDOM */ +#ifdef WIN32 + magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); +#elif defined(LACKS_TIME_H) + magic = (size_t)&magic ^ (size_t)0x55555555U; +#else + magic = (size_t)(time(0) ^ (size_t)0x55555555U); +#endif + magic |= (size_t)8U; /* ensure nonzero */ + magic &= ~(size_t)7U; /* improve chances of fault for bad values */ + /* Until memory modes commonly available, use volatile-write */ + (*(volatile size_t *)(&(mparams.magic))) = magic; + } + } + + RELEASE_MALLOC_GLOBAL_LOCK(); + return 1; +} + +/* support for mallopt */ +static int change_mparam(int param_number, int value) { + size_t val; + ensure_initialization(); + val = (value == -1)? MAX_SIZE_T : (size_t)value; + switch(param_number) { + case M_TRIM_THRESHOLD: + mparams.trim_threshold = val; + return 1; + case M_GRANULARITY: + if (val >= mparams.page_size && ((val & (val-1)) == 0)) { + mparams.granularity = val; + return 1; + } + else + return 0; + case M_MMAP_THRESHOLD: + mparams.mmap_threshold = val; + return 1; + default: + return 0; + } +} + +#if DEBUG +/* ------------------------- Debugging Support --------------------------- */ + +/* Check properties of any chunk, whether free, inuse, mmapped etc */ +static void do_check_any_chunk(mstate m, mchunkptr p) { + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); +} + +/* Check properties of top chunk */ +static void do_check_top_chunk(mstate m, mchunkptr p) { + msegmentptr sp = segment_holding(m, (char*)p); + size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! 
*/ + assert(sp != 0); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(sz == m->topsize); + assert(sz > 0); + assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); + assert(pinuse(p)); + assert(!pinuse(chunk_plus_offset(p, sz))); +} + +/* Check properties of (inuse) mmapped chunks */ +static void do_check_mmapped_chunk(mstate m, mchunkptr p) { + size_t sz = chunksize(p); + size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD); + assert(is_mmapped(p)); + assert(use_mmap(m)); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(!is_small(sz)); + assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); + assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); + assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); +} + +/* Check properties of inuse chunks */ +static void do_check_inuse_chunk(mstate m, mchunkptr p) { + do_check_any_chunk(m, p); + assert(is_inuse(p)); + assert(next_pinuse(p)); + /* If not pinuse and not mmapped, previous chunk has OK offset */ + assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); + if (is_mmapped(p)) + do_check_mmapped_chunk(m, p); +} + +/* Check properties of free chunks */ +static void do_check_free_chunk(mstate m, mchunkptr p) { + size_t sz = chunksize(p); + mchunkptr next = chunk_plus_offset(p, sz); + do_check_any_chunk(m, p); + assert(!is_inuse(p)); + assert(!next_pinuse(p)); + assert (!is_mmapped(p)); + if (p != m->dv && p != m->top) { + if (sz >= MIN_CHUNK_SIZE) { + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(is_aligned(chunk2mem(p))); + assert(next->prev_foot == sz); + assert(pinuse(p)); + assert (next == m->top || is_inuse(next)); + assert(p->fd->bk == p); + assert(p->bk->fd == p); + } + else /* markers are always of size SIZE_T_SIZE */ + assert(sz == SIZE_T_SIZE); + } +} + +/* Check properties of malloced chunks at the point they are malloced */ +static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + size_t sz = p->head & ~INUSE_BITS; + do_check_inuse_chunk(m, p); + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(sz >= MIN_CHUNK_SIZE); + assert(sz >= s); + /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ + assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); + } +} + +/* Check a tree and its subtrees. 
*/ +static void do_check_tree(mstate m, tchunkptr t) { + tchunkptr head = 0; + tchunkptr u = t; + bindex_t tindex = t->index; + size_t tsize = chunksize(t); + bindex_t idx; + compute_tree_index(tsize, idx); + assert(tindex == idx); + assert(tsize >= MIN_LARGE_SIZE); + assert(tsize >= minsize_for_tree_index(idx)); + assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); + + do { /* traverse through chain of same-sized nodes */ + do_check_any_chunk(m, ((mchunkptr)u)); + assert(u->index == tindex); + assert(chunksize(u) == tsize); + assert(!is_inuse(u)); + assert(!next_pinuse(u)); + assert(u->fd->bk == u); + assert(u->bk->fd == u); + if (u->parent == 0) { + assert(u->child[0] == 0); + assert(u->child[1] == 0); + } + else { + assert(head == 0); /* only one node on chain has parent */ + head = u; + assert(u->parent != u); + assert (u->parent->child[0] == u || + u->parent->child[1] == u || + *((tbinptr*)(u->parent)) == u); + if (u->child[0] != 0) { + assert(u->child[0]->parent == u); + assert(u->child[0] != u); + do_check_tree(m, u->child[0]); + } + if (u->child[1] != 0) { + assert(u->child[1]->parent == u); + assert(u->child[1] != u); + do_check_tree(m, u->child[1]); + } + if (u->child[0] != 0 && u->child[1] != 0) { + assert(chunksize(u->child[0]) < chunksize(u->child[1])); + } + } + u = u->fd; + } while (u != t); + assert(head != 0); +} + +/* Check all the chunks in a treebin. */ +static void do_check_treebin(mstate m, bindex_t i) { + tbinptr* tb = treebin_at(m, i); + tchunkptr t = *tb; + int empty = (m->treemap & (1U << i)) == 0; + if (t == 0) + assert(empty); + if (!empty) + do_check_tree(m, t); +} + +/* Check all the chunks in a smallbin. */ +static void do_check_smallbin(mstate m, bindex_t i) { + sbinptr b = smallbin_at(m, i); + mchunkptr p = b->bk; + unsigned int empty = (m->smallmap & (1U << i)) == 0; + if (p == b) + assert(empty); + if (!empty) { + for (; p != b; p = p->bk) { + size_t size = chunksize(p); + mchunkptr q; + /* each chunk claims to be free */ + do_check_free_chunk(m, p); + /* chunk belongs in bin */ + assert(small_index(size) == i); + assert(p->bk == b || chunksize(p->bk) == chunksize(p)); + /* chunk is followed by an inuse chunk */ + q = next_chunk(p); + if (q->head != FENCEPOST_HEAD) + do_check_inuse_chunk(m, q); + } + } +} + +/* Find x in a bin. Used in other check functions. 
*/ +static int bin_find(mstate m, mchunkptr x) { + size_t size = chunksize(x); + if (is_small(size)) { + bindex_t sidx = small_index(size); + sbinptr b = smallbin_at(m, sidx); + if (smallmap_is_marked(m, sidx)) { + mchunkptr p = b; + do { + if (p == x) + return 1; + } while ((p = p->fd) != b); + } + } + else { + bindex_t tidx; + compute_tree_index(size, tidx); + if (treemap_is_marked(m, tidx)) { + tchunkptr t = *treebin_at(m, tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); + while (t != 0 && chunksize(t) != size) { + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + sizebits <<= 1; + } + if (t != 0) { + tchunkptr u = t; + do { + if (u == (tchunkptr)x) + return 1; + } while ((u = u->fd) != t); + } + } + } + return 0; +} + +/* Traverse each chunk and check it; return total */ +static size_t traverse_and_check(mstate m) { + size_t sum = 0; + if (is_initialized(m)) { + msegmentptr s = &m->seg; + sum += m->topsize + TOP_FOOT_SIZE; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + mchunkptr lastq = 0; + assert(pinuse(q)); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + sum += chunksize(q); + if (is_inuse(q)) { + assert(!bin_find(m, q)); + do_check_inuse_chunk(m, q); + } + else { + assert(q == m->dv || bin_find(m, q)); + assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ + do_check_free_chunk(m, q); + } + lastq = q; + q = next_chunk(q); + } + s = s->next; + } + } + return sum; +} + + +/* Check all properties of malloc_state. */ +static void do_check_malloc_state(mstate m) { + bindex_t i; + size_t total; + /* check bins */ + for (i = 0; i < NSMALLBINS; ++i) + do_check_smallbin(m, i); + for (i = 0; i < NTREEBINS; ++i) + do_check_treebin(m, i); + + if (m->dvsize != 0) { /* check dv chunk */ + do_check_any_chunk(m, m->dv); + assert(m->dvsize == chunksize(m->dv)); + assert(m->dvsize >= MIN_CHUNK_SIZE); + assert(bin_find(m, m->dv) == 0); + } + + if (m->top != 0) { /* check top chunk */ + do_check_top_chunk(m, m->top); + /*assert(m->topsize == chunksize(m->top)); redundant */ + assert(m->topsize > 0); + assert(bin_find(m, m->top) == 0); + } + + total = traverse_and_check(m); + assert(total <= m->footprint); + assert(m->footprint <= m->max_footprint); +} +#endif /* DEBUG */ + +/* ----------------------------- statistics ------------------------------ */ + +#if !NO_MALLINFO +static struct mallinfo internal_mallinfo(mstate m) { + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + ensure_initialization(); + if (!PREACTION(m)) { + check_malloc_state(m); + if (is_initialized(m)) { + size_t nfree = SIZE_T_ONE; /* top always free */ + size_t mfree = m->topsize + TOP_FOOT_SIZE; + size_t sum = mfree; + msegmentptr s = &m->seg; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + size_t sz = chunksize(q); + sum += sz; + if (!is_inuse(q)) { + mfree += sz; + ++nfree; + } + q = next_chunk(q); + } + s = s->next; + } + + nm.arena = sum; + nm.ordblks = nfree; + nm.hblkhd = m->footprint - sum; + nm.usmblks = m->max_footprint; + nm.uordblks = m->footprint - mfree; + nm.fordblks = mfree; + nm.keepcost = m->topsize; + } + + POSTACTION(m); + } + return nm; +} +#endif /* !NO_MALLINFO */ + +#if !NO_MALLOC_STATS +static void internal_malloc_stats(mstate m) { + ensure_initialization(); + if (!PREACTION(m)) { + size_t maxfp = 0; + size_t fp = 0; + size_t used = 0; + check_malloc_state(m); + if (is_initialized(m)) { + msegmentptr s = &m->seg; + 
maxfp = m->max_footprint; + fp = m->footprint; + used = fp - (m->topsize + TOP_FOOT_SIZE); + + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + if (!is_inuse(q)) + used -= chunksize(q); + q = next_chunk(q); + } + s = s->next; + } + } + POSTACTION(m); /* drop lock */ + fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); + fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); + fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); + } +} +#endif /* NO_MALLOC_STATS */ + +/* ----------------------- Operations on smallbins ----------------------- */ + +/* + Various forms of linking and unlinking are defined as macros. Even + the ones for trees, which are very long but have very short typical + paths. This is ugly but reduces reliance on inlining support of + compilers. +*/ + +/* Link a free chunk into a smallbin */ +#define insert_small_chunk(M, P, S) {\ + bindex_t I = small_index(S);\ + mchunkptr B = smallbin_at(M, I);\ + mchunkptr F = B;\ + assert(S >= MIN_CHUNK_SIZE);\ + if (!smallmap_is_marked(M, I))\ + mark_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, B->fd)))\ + F = B->fd;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + B->fd = P;\ + F->bk = P;\ + P->fd = F;\ + P->bk = B;\ +} + +/* Unlink a chunk from a smallbin */ +#define unlink_small_chunk(M, P, S) {\ + mchunkptr F = P->fd;\ + mchunkptr B = P->bk;\ + bindex_t I = small_index(S);\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \ + if (B == F) {\ + clear_smallmap(M, I);\ + }\ + else if (RTCHECK(B == smallbin_at(M,I) ||\ + (ok_address(M, B) && B->fd == P))) {\ + F->bk = B;\ + B->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Unlink the first chunk from a smallbin */ +#define unlink_first_small_chunk(M, B, P, I) {\ + mchunkptr F = P->fd;\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (B == F) {\ + clear_smallmap(M, I);\ + }\ + else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\ + F->bk = B;\ + B->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Replace dv node, binning the old one */ +/* Used only when dvsize known to be small */ +#define replace_dv(M, P, S) {\ + size_t DVS = M->dvsize;\ + assert(is_small(DVS));\ + if (DVS != 0) {\ + mchunkptr DV = M->dv;\ + insert_small_chunk(M, DV, DVS);\ + }\ + M->dvsize = S;\ + M->dv = P;\ +} + +/* ------------------------- Operations on trees ------------------------- */ + +/* Insert chunk into tree */ +#define insert_large_chunk(M, X, S) {\ + tbinptr* H;\ + bindex_t I;\ + compute_tree_index(S, I);\ + H = treebin_at(M, I);\ + X->index = I;\ + X->child[0] = X->child[1] = 0;\ + if (!treemap_is_marked(M, I)) {\ + mark_treemap(M, I);\ + *H = X;\ + X->parent = (tchunkptr)H;\ + X->fd = X->bk = X;\ + }\ + else {\ + tchunkptr T = *H;\ + size_t K = S << leftshift_for_tree_index(I);\ + for (;;) {\ + if (chunksize(T) != S) {\ + tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ + K <<= 1;\ + if (*C != 0)\ + T = *C;\ + else if (RTCHECK(ok_address(M, C))) {\ + *C = X;\ + X->parent = T;\ + X->fd = X->bk = X;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + else {\ + tchunkptr F = T->fd;\ + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ + T->fd = F->bk = X;\ + X->fd = F;\ + X->bk 
= T;\ + X->parent = 0;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + }\ + }\ +} + +/* + Unlink steps: + + 1. If x is a chained node, unlink it from its same-sized fd/bk links + and choose its bk node as its replacement. + 2. If x was the last node of its size, but not a leaf node, it must + be replaced with a leaf node (not merely one with an open left or + right), to make sure that lefts and rights of descendents + correspond properly to bit masks. We use the rightmost descendent + of x. We could use any other leaf, but this is easy to locate and + tends to counteract removal of leftmosts elsewhere, and so keeps + paths shorter than minimally guaranteed. This doesn't loop much + because on average a node in a tree is near the bottom. + 3. If x is the base of a chain (i.e., has parent links) relink + x's parent and children to x's replacement (or null if none). +*/ + +#define unlink_large_chunk(M, X) {\ + tchunkptr XP = X->parent;\ + tchunkptr R;\ + if (X->bk != X) {\ + tchunkptr F = X->fd;\ + R = X->bk;\ + if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\ + F->bk = R;\ + R->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else {\ + tchunkptr* RP;\ + if (((R = *(RP = &(X->child[1]))) != 0) ||\ + ((R = *(RP = &(X->child[0]))) != 0)) {\ + tchunkptr* CP;\ + while ((*(CP = &(R->child[1])) != 0) ||\ + (*(CP = &(R->child[0])) != 0)) {\ + R = *(RP = CP);\ + }\ + if (RTCHECK(ok_address(M, RP)))\ + *RP = 0;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + }\ + if (XP != 0) {\ + tbinptr* H = treebin_at(M, X->index);\ + if (X == *H) {\ + if ((*H = R) == 0) \ + clear_treemap(M, X->index);\ + }\ + else if (RTCHECK(ok_address(M, XP))) {\ + if (XP->child[0] == X) \ + XP->child[0] = R;\ + else \ + XP->child[1] = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + if (R != 0) {\ + if (RTCHECK(ok_address(M, R))) {\ + tchunkptr C0, C1;\ + R->parent = XP;\ + if ((C0 = X->child[0]) != 0) {\ + if (RTCHECK(ok_address(M, C0))) {\ + R->child[0] = C0;\ + C0->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + if ((C1 = X->child[1]) != 0) {\ + if (RTCHECK(ok_address(M, C1))) {\ + R->child[1] = C1;\ + C1->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ +} + +/* Relays to large vs small bin operations */ + +#define insert_chunk(M, P, S)\ + if (is_small(S)) insert_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } + +#define unlink_chunk(M, P, S)\ + if (is_small(S)) unlink_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } + + +/* Relays to internal calls to malloc/free from realloc, memalign etc */ + +#if ONLY_MSPACES +#define internal_malloc(m, b) mspace_malloc(m, b) +#define internal_free(m, mem) mspace_free(m,mem); +#else /* ONLY_MSPACES */ +#if MSPACES +#define internal_malloc(m, b)\ + ((m == gm)? dlmalloc(b) : mspace_malloc(m, b)) +#define internal_free(m, mem)\ + if (m == gm) dlfree(mem); else mspace_free(m,mem); +#else /* MSPACES */ +#define internal_malloc(m, b) dlmalloc(b) +#define internal_free(m, mem) dlfree(mem) +#endif /* MSPACES */ +#endif /* ONLY_MSPACES */ + +/* ----------------------- Direct-mmapping chunks ----------------------- */ + +/* + Directly mmapped chunks are set up with an offset to the start of + the mmapped region stored in the prev_foot field of the chunk. 
This + allows reconstruction of the required argument to MUNMAP when freed, + and also allows adjustment of the returned chunk to meet alignment + requirements (especially in memalign). +*/ + +/* Malloc using mmap */ +static void* mmap_alloc(mstate m, size_t nb) { + size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + if (m->footprint_limit != 0) { + size_t fp = m->footprint + mmsize; + if (fp <= m->footprint || fp > m->footprint_limit) + return 0; + } + if (mmsize > nb) { /* Check for wrap around 0 */ + char* mm = (char*)(CALL_DIRECT_MMAP(mmsize)); + if (mm != CMFAIL) { + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - MMAP_FOOT_PAD; + mchunkptr p = (mchunkptr)(mm + offset); + p->prev_foot = offset; + p->head = psize; + mark_inuse_foot(m, p, psize); + chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; + + if (m->least_addr == 0 || mm < m->least_addr) + m->least_addr = mm; + if ((m->footprint += mmsize) > m->max_footprint) + m->max_footprint = m->footprint; + assert(is_aligned(chunk2mem(p))); + check_mmapped_chunk(m, p); + return chunk2mem(p); + } + } + return 0; +} + +/* Realloc using mmap */ +static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) { + size_t oldsize = chunksize(oldp); + (void)flags; /* placate people compiling -Wunused */ + if (is_small(nb)) /* Can't shrink mmap regions below small size */ + return 0; + /* Keep old chunk if big enough but not too big */ + if (oldsize >= nb + SIZE_T_SIZE && + (oldsize - nb) <= (mparams.granularity << 1)) + return oldp; + else { + size_t offset = oldp->prev_foot; + size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; + size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + char* cp = (char*)CALL_MREMAP((char*)oldp - offset, + oldmmsize, newmmsize, flags); + if (cp != CMFAIL) { + mchunkptr newp = (mchunkptr)(cp + offset); + size_t psize = newmmsize - offset - MMAP_FOOT_PAD; + newp->head = psize; + mark_inuse_foot(m, newp, psize); + chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; + + if (cp < m->least_addr) + m->least_addr = cp; + if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) + m->max_footprint = m->footprint; + check_mmapped_chunk(m, newp); + return newp; + } + } + return 0; +} + + +/* -------------------------- mspace management -------------------------- */ + +/* Initialize top chunk and its size */ +static void init_top(mstate m, mchunkptr p, size_t psize) { + /* Ensure alignment */ + size_t offset = align_offset(chunk2mem(p)); + p = (mchunkptr)((char*)p + offset); + psize -= offset; + + m->top = p; + m->topsize = psize; + p->head = psize | PINUSE_BIT; + /* set size of fake trailing chunk holding overhead space only once */ + chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; + m->trim_check = mparams.trim_threshold; /* reset on each update */ +} + +/* Initialize bins for a new mstate that is otherwise zeroed out */ +static void init_bins(mstate m) { + /* Establish circular links for smallbins */ + bindex_t i; + for (i = 0; i < NSMALLBINS; ++i) { + sbinptr bin = smallbin_at(m,i); + bin->fd = bin->bk = bin; + } +} + +#if PROCEED_ON_ERROR + +/* default corruption action */ +static void reset_on_error(mstate m) { + int i; + ++malloc_corruption_error_count; + /* Reinitialize fields to forget about all memory */ + m->smallmap = m->treemap = 0; + m->dvsize = m->topsize = 0; + m->seg.base = 0; + m->seg.size = 0; + 
m->seg.next = 0; + m->top = m->dv = 0; + for (i = 0; i < NTREEBINS; ++i) + *treebin_at(m, i) = 0; + init_bins(m); +} +#endif /* PROCEED_ON_ERROR */ + +/* Allocate chunk and prepend remainder with chunk in successor base. */ +static void* prepend_alloc(mstate m, char* newbase, char* oldbase, + size_t nb) { + mchunkptr p = align_as_chunk(newbase); + mchunkptr oldfirst = align_as_chunk(oldbase); + size_t psize = (char*)oldfirst - (char*)p; + mchunkptr q = chunk_plus_offset(p, nb); + size_t qsize = psize - nb; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + + assert((char*)oldfirst > (char*)q); + assert(pinuse(oldfirst)); + assert(qsize >= MIN_CHUNK_SIZE); + + /* consolidate remainder with first chunk of old base */ + if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; + m->top = q; + q->head = tsize | PINUSE_BIT; + check_top_chunk(m, q); + } + else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; + m->dv = q; + set_size_and_pinuse_of_free_chunk(q, dsize); + } + else { + if (!is_inuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); + unlink_chunk(m, oldfirst, nsize); + oldfirst = chunk_plus_offset(oldfirst, nsize); + qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); + insert_chunk(m, q, qsize); + check_free_chunk(m, q); + } + + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); +} + +/* Add a segment to hold a new noncontiguous region */ +static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { + /* Determine locations and sizes of segment, fenceposts, old top */ + char* old_top = (char*)m->top; + msegmentptr oldsp = segment_holding(m, old_top); + char* old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char* asp = rawsp + offset; + char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? 
old_top : asp; + mchunkptr sp = (mchunkptr)csp; + msegmentptr ss = (msegmentptr)(chunk2mem(sp)); + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + int nfences = 0; + + /* reset top to new space */ + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + + /* Set up segment record */ + assert(is_aligned(ss)); + set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); + *ss = m->seg; /* Push current record */ + m->seg.base = tbase; + m->seg.size = tsize; + m->seg.sflags = mmapped; + m->seg.next = ss; + + /* Insert trailing fenceposts */ + for (;;) { + mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); + p->head = FENCEPOST_HEAD; + ++nfences; + if ((char*)(&(nextp->head)) < old_end) + p = nextp; + else + break; + } + assert(nfences >= 2); + + /* Insert the rest of old top into a bin as an ordinary free chunk */ + if (csp != old_top) { + mchunkptr q = (mchunkptr)old_top; + size_t psize = csp - old_top; + mchunkptr tn = chunk_plus_offset(q, psize); + set_free_with_pinuse(q, psize, tn); + insert_chunk(m, q, psize); + } + + check_top_chunk(m, m->top); +} + +/* -------------------------- System allocation -------------------------- */ + +/* Get memory from system using MORECORE or MMAP */ +static void* sys_alloc(mstate m, size_t nb) { + char* tbase = CMFAIL; + size_t tsize = 0; + flag_t mmap_flag = 0; + size_t asize; /* allocation size */ + + ensure_initialization(); + + /* Directly map large chunks, but only if already initialized */ + if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) { + void* mem = mmap_alloc(m, nb); + if (mem != 0) + return mem; + } + + asize = granularity_align(nb + SYS_ALLOC_PADDING); + if (asize <= nb) + return 0; /* wraparound */ + if (m->footprint_limit != 0) { + size_t fp = m->footprint + asize; + if (fp <= m->footprint || fp > m->footprint_limit) + return 0; + } + + /* + Try getting memory in any of three ways (in most-preferred to + least-preferred order): + 1. A call to MORECORE that can normally contiguously extend memory. + (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or + or main space is mmapped or a previous contiguous call failed) + 2. A call to MMAP new space (disabled if not HAVE_MMAP). + Note that under the default settings, if MORECORE is unable to + fulfill a request, and HAVE_MMAP is true, then mmap is + used as a noncontiguous system allocator. This is a useful backup + strategy for systems with holes in address spaces -- in this case + sbrk cannot contiguously expand the heap, but mmap may be able to + find space. + 3. A call to MORECORE that cannot usually contiguously extend memory. + (disabled if not HAVE_MORECORE) + + In all cases, we need to request enough bytes from system to ensure + we can malloc nb bytes upon success, so pad with enough space for + top_foot, plus alignment-pad to make sure we don't lose bytes if + not on boundary, and round this up to a granularity unit. + */ + + if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { + char* br = CMFAIL; + size_t ssize = asize; /* sbrk call size */ + msegmentptr ss = (m->top == 0)? 
0 : segment_holding(m, (char*)m->top); + ACQUIRE_MALLOC_GLOBAL_LOCK(); + + if (ss == 0) { /* First time through or recovery */ + char* base = (char*)CALL_MORECORE(0); + if (base != CMFAIL) { + size_t fp; + /* Adjust to end on a page boundary */ + if (!is_page_aligned(base)) + ssize += (page_align((size_t)base) - (size_t)base); + fp = m->footprint + ssize; /* recheck limits */ + if (ssize > nb && ssize < HALF_MAX_SIZE_T && + (m->footprint_limit == 0 || + (fp > m->footprint && fp <= m->footprint_limit)) && + (br = (char*)(CALL_MORECORE(ssize))) == base) { + tbase = base; + tsize = ssize; + } + } + } + else { + /* Subtract out existing available top space from MORECORE request. */ + ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING); + /* Use mem here only if it did continuously extend old space */ + if (ssize < HALF_MAX_SIZE_T && + (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) { + tbase = br; + tsize = ssize; + } + } + + if (tbase == CMFAIL) { /* Cope with partial failure */ + if (br != CMFAIL) { /* Try to use/extend the space we did get */ + if (ssize < HALF_MAX_SIZE_T && + ssize < nb + SYS_ALLOC_PADDING) { + size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize); + if (esize < HALF_MAX_SIZE_T) { + char* end = (char*)CALL_MORECORE(esize); + if (end != CMFAIL) + ssize += esize; + else { /* Can't use; try to release */ + (void) CALL_MORECORE(-ssize); + br = CMFAIL; + } + } + } + } + if (br != CMFAIL) { /* Use the space we did get */ + tbase = br; + tsize = ssize; + } + else + disable_contiguous(m); /* Don't try contiguous path in the future */ + } + + RELEASE_MALLOC_GLOBAL_LOCK(); + } + + if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ + char* mp = (char*)(CALL_MMAP(asize)); + if (mp != CMFAIL) { + tbase = mp; + tsize = asize; + mmap_flag = USE_MMAP_BIT; + } + } + + if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ + if (asize < HALF_MAX_SIZE_T) { + char* br = CMFAIL; + char* end = CMFAIL; + ACQUIRE_MALLOC_GLOBAL_LOCK(); + br = (char*)(CALL_MORECORE(asize)); + end = (char*)(CALL_MORECORE(0)); + RELEASE_MALLOC_GLOBAL_LOCK(); + if (br != CMFAIL && end != CMFAIL && br < end) { + size_t ssize = end - br; + if (ssize > nb + TOP_FOOT_SIZE) { + tbase = br; + tsize = ssize; + } + } + } + } + + if (tbase != CMFAIL) { + + if ((m->footprint += tsize) > m->max_footprint) + m->max_footprint = m->footprint; + + if (!is_initialized(m)) { /* first-time initialization */ + if (m->least_addr == 0 || tbase < m->least_addr) + m->least_addr = tbase; + m->seg.base = tbase; + m->seg.size = tsize; + m->seg.sflags = mmap_flag; + m->magic = mparams.magic; + m->release_checks = MAX_RELEASE_CHECK_RATE; + init_bins(m); +#if !ONLY_MSPACES + if (is_global(m)) + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + else +#endif + { + /* Offset top by embedded malloc_state */ + mchunkptr mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); + } + } + + else { + /* Try to merge with an existing segment */ + msegmentptr sp = &m->seg; + /* Only consider most recent segment if traversal suppressed */ + while (sp != 0 && tbase != sp->base + sp->size) + sp = (NO_SEGMENT_TRAVERSAL) ? 
0 : sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + (sp->sflags & USE_MMAP_BIT) == mmap_flag && + segment_holds(sp, m->top)) { /* append */ + sp->size += tsize; + init_top(m, m->top, m->topsize + tsize); + } + else { + if (tbase < m->least_addr) + m->least_addr = tbase; + sp = &m->seg; + while (sp != 0 && sp->base != tbase + tsize) + sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + (sp->sflags & USE_MMAP_BIT) == mmap_flag) { + char* oldbase = sp->base; + sp->base = tbase; + sp->size += tsize; + return prepend_alloc(m, tbase, oldbase, nb); + } + else + add_segment(m, tbase, tsize, mmap_flag); + } + } + + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; + mchunkptr p = m->top; + mchunkptr r = m->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + check_top_chunk(m, m->top); + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); + } + } + + MALLOC_FAILURE_ACTION; + return 0; +} + +/* ----------------------- system deallocation -------------------------- */ + +/* Unmap and unlink any mmapped segments that don't contain used chunks */ +static size_t release_unused_segments(mstate m) { + size_t released = 0; + int nsegs = 0; + msegmentptr pred = &m->seg; + msegmentptr sp = pred->next; + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + msegmentptr next = sp->next; + ++nsegs; + if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { + mchunkptr p = align_as_chunk(base); + size_t psize = chunksize(p); + /* Can unmap if first chunk holds entire segment and not pinned */ + if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { + tchunkptr tp = (tchunkptr)p; + assert(segment_holds(sp, (char*)sp)); + if (p == m->dv) { + m->dv = 0; + m->dvsize = 0; + } + else { + unlink_large_chunk(m, tp); + } + if (CALL_MUNMAP(base, size) == 0) { + released += size; + m->footprint -= size; + /* unlink obsoleted record */ + sp = pred; + sp->next = next; + } + else { /* back out if cannot unmap */ + insert_large_chunk(m, tp, psize); + } + } + } + if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ + break; + pred = sp; + sp = next; + } + /* Reset check counter */ + m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)? 
+ (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE); + return released; +} + +static int sys_trim(mstate m, size_t pad) { + size_t released = 0; + ensure_initialization(); + if (pad < MAX_REQUEST && is_initialized(m)) { + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + + if (m->topsize > pad) { + /* Shrink top space in granularity-size units, keeping at least one */ + size_t unit = mparams.granularity; + size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - + SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char*)m->top); + + if (!is_extern_segment(sp)) { + if (is_mmapped_segment(sp)) { + if (HAVE_MMAP && + sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ + size_t newsize = sp->size - extra; + (void)newsize; /* placate people compiling -Wunused-variable */ + /* Prefer mremap, fall back to munmap */ + if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || + (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { + released = extra; + } + } + } + else if (HAVE_MORECORE) { + if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ + extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; + ACQUIRE_MALLOC_GLOBAL_LOCK(); + { + /* Make sure end of memory is where we last set it. */ + char* old_br = (char*)(CALL_MORECORE(0)); + if (old_br == sp->base + sp->size) { + char* rel_br = (char*)(CALL_MORECORE(-extra)); + char* new_br = (char*)(CALL_MORECORE(0)); + if (rel_br != CMFAIL && new_br < old_br) + released = old_br - new_br; + } + } + RELEASE_MALLOC_GLOBAL_LOCK(); + } + } + + if (released != 0) { + sp->size -= released; + m->footprint -= released; + init_top(m, m->top, m->topsize - released); + check_top_chunk(m, m->top); + } + } + + /* Unmap any unused mmapped segments */ + if (HAVE_MMAP) + released += release_unused_segments(m); + + /* On failure, disable autotrim to avoid repeated failed future calls */ + if (released == 0 && m->topsize > m->trim_check) + m->trim_check = MAX_SIZE_T; + } + + return (released != 0)? 1 : 0; +} + +/* Consolidate and bin a chunk. Differs from exported versions + of free mainly in that the chunk need not be marked as inuse. 
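+   (It is the helper used by try_realloc_chunk and internal_memalign
+   below to hand back split-off leaders and remainders, and by
+   internal_bulk_free to bin each freed pointer.)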
+*/ +static void dispose_chunk(mstate m, mchunkptr p, size_t psize) { + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + mchunkptr prev; + size_t prevsize = p->prev_foot; + if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + m->footprint -= psize; + return; + } + prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ + if (p != m->dv) { + unlink_chunk(m, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + m->dvsize = psize; + set_free_with_pinuse(p, psize, next); + return; + } + } + else { + CORRUPTION_ERROR_ACTION(m); + return; + } + } + if (RTCHECK(ok_address(m, next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == m->top) { + size_t tsize = m->topsize += psize; + m->top = p; + p->head = tsize | PINUSE_BIT; + if (p == m->dv) { + m->dv = 0; + m->dvsize = 0; + } + return; + } + else if (next == m->dv) { + size_t dsize = m->dvsize += psize; + m->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + return; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(m, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == m->dv) { + m->dvsize = psize; + return; + } + } + } + else { + set_free_with_pinuse(p, psize, next); + } + insert_chunk(m, p, psize); + } + else { + CORRUPTION_ERROR_ACTION(m); + } +} + +/* ---------------------------- malloc --------------------------- */ + +/* allocate a large request from the best fitting chunk in a treebin */ +static void* tmalloc_large(mstate m, size_t nb) { + tchunkptr v = 0; + size_t rsize = -nb; /* Unsigned negation */ + tchunkptr t; + bindex_t idx; + compute_tree_index(nb, idx); + if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ + for (;;) { + tchunkptr rt; + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + v = t; + if ((rsize = trem) == 0) + break; + } + rt = t->child[1]; + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) + rst = rt; + if (t == 0) { + t = rst; /* set t to least subtree holding sizes > nb */ + break; + } + sizebits <<= 1; + } + } + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; + if (leftbits != 0) { + bindex_t i; + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + t = *treebin_at(m, i); + } + } + + while (t != 0) { /* find smallest of tree or subtree */ + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + t = leftmost_child(t); + } + + /* If dv is a better fit, return 0 so malloc will use it */ + if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { + if (RTCHECK(ok_address(m, v))) { /* split */ + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + } + CORRUPTION_ERROR_ACTION(m); + } + return 0; +} + +/* allocate a small request from the best fitting chunk in a treebin */ +static void* tmalloc_small(mstate m, size_t 
nb) { + tchunkptr t, v; + size_t rsize; + bindex_t i; + binmap_t leastbit = least_bit(m->treemap); + compute_bit2idx(leastbit, i); + v = t = *treebin_at(m, i); + rsize = chunksize(t) - nb; + + while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + } + + if (RTCHECK(ok_address(m, v))) { + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(m, r, rsize); + } + return chunk2mem(v); + } + } + + CORRUPTION_ERROR_ACTION(m); + return 0; +} + +#if !ONLY_MSPACES + +void* dlmalloc(size_t bytes) { + /* + Basic algorithm: + If a small request (< 256 bytes minus per-chunk overhead): + 1. If one exists, use a remainderless chunk in associated smallbin. + (Remainderless means that there are too few excess bytes to + represent as a chunk.) + 2. If it is big enough, use the dv chunk, which is normally the + chunk adjacent to the one used for the most recent small request. + 3. If one exists, split the smallest available chunk in a bin, + saving remainder in dv. + 4. If it is big enough, use the top chunk. + 5. If available, get memory from system and use it + Otherwise, for a large request: + 1. Find the smallest available binned chunk that fits, and use it + if it is better fitting than dv chunk, splitting if necessary. + 2. If better fitting than any binned chunk, use the dv chunk. + 3. If it is big enough, use the top chunk. + 4. If request size >= mmap threshold, try to directly mmap this chunk. + 5. If available, get memory from system and use it + + The ugly goto's here ensure that postaction occurs along all paths. + */ + +#if USE_LOCKS + ensure_initialization(); /* initialize in sys_alloc if not using locks */ +#endif + + if (!PREACTION(gm)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = gm->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(gm, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(gm, b, p, idx); + set_inuse_and_pinuse(gm, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (nb > gm->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(gm, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(gm, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(gm, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(gm, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + } + + if (nb <= gm->dvsize) { + size_t rsize = gm->dvsize - nb; + mchunkptr p = gm->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = gm->dv = chunk_plus_offset(p, nb); + gm->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + } + else { /* exhaust dv */ + size_t dvs = gm->dvsize; + gm->dvsize = 0; + gm->dv = 0; + set_inuse_and_pinuse(gm, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (nb < gm->topsize) { /* Split top */ + size_t rsize = gm->topsize -= nb; + mchunkptr p = gm->top; + mchunkptr r = gm->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + mem = chunk2mem(p); + check_top_chunk(gm, gm->top); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + mem = sys_alloc(gm, nb); + + postaction: + POSTACTION(gm); + return mem; + } + + return 0; +} + +/* ---------------------------- free --------------------------- */ + +void dlfree(void* mem) { + /* + Consolidate freed chunks with preceeding or succeeding bordering + free chunks, if they exist, and then place in a bin. Intermixed + with special cases for top, dv, mmapped chunks, and usage errors. 
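+     The consolidation logic mirrors dispose_chunk above; in addition,
+     growing the top chunk here may trigger sys_trim via should_trim,
+     and with FOOTERS the owning mstate is first recovered and
+     validated through get_mstate_for/ok_magic before any state is
+     touched.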
+ */ + + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } +#else /* FOOTERS */ +#define fm gm +#endif /* FOOTERS */ + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + + if (is_small(psize)) { + insert_small_chunk(fm, p, psize); + check_free_chunk(fm, p); + } + else { + tchunkptr tp = (tchunkptr)p; + insert_large_chunk(fm, tp, psize); + check_free_chunk(fm, p); + if (--fm->release_checks == 0) + release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +#if !FOOTERS +#undef fm +#endif /* FOOTERS */ +} + +void* dlcalloc(size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = dlmalloc(req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + __builtin_memset(mem, 0, req); + return mem; +} + +#endif /* !ONLY_MSPACES */ + +/* ------------ Internal support for realloc, memalign, etc -------------- */ + +/* Try to realloc; only in-place unless can_move true */ +static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, + int can_move) { + mchunkptr newp = 0; + size_t oldsize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, oldsize); + if (RTCHECK(ok_address(m, p) && ok_inuse(p) && + ok_next(p, next) && ok_pinuse(next))) { + if (is_mmapped(p)) { + newp = mmap_resize(m, p, nb, can_move); + } + else if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; + if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ + mchunkptr r = chunk_plus_offset(p, nb); + set_inuse(m, p, nb); + set_inuse(m, r, rsize); + dispose_chunk(m, r, rsize); + } + newp = p; + } + else if (next == m->top) { /* extend into top */ + if (oldsize + m->topsize > nb) { + size_t newsize = oldsize + 
m->topsize; + size_t newtopsize = newsize - nb; + mchunkptr newtop = chunk_plus_offset(p, nb); + set_inuse(m, p, nb); + newtop->head = newtopsize |PINUSE_BIT; + m->top = newtop; + m->topsize = newtopsize; + newp = p; + } + } + else if (next == m->dv) { /* extend into dv */ + size_t dvs = m->dvsize; + if (oldsize + dvs >= nb) { + size_t dsize = oldsize + dvs - nb; + if (dsize >= MIN_CHUNK_SIZE) { + mchunkptr r = chunk_plus_offset(p, nb); + mchunkptr n = chunk_plus_offset(r, dsize); + set_inuse(m, p, nb); + set_size_and_pinuse_of_free_chunk(r, dsize); + clear_pinuse(n); + m->dvsize = dsize; + m->dv = r; + } + else { /* exhaust dv */ + size_t newsize = oldsize + dvs; + set_inuse(m, p, newsize); + m->dvsize = 0; + m->dv = 0; + } + newp = p; + } + } + else if (!cinuse(next)) { /* extend into next free chunk */ + size_t nextsize = chunksize(next); + if (oldsize + nextsize >= nb) { + size_t rsize = oldsize + nextsize - nb; + unlink_chunk(m, next, nextsize); + if (rsize < MIN_CHUNK_SIZE) { + size_t newsize = oldsize + nextsize; + set_inuse(m, p, newsize); + } + else { + mchunkptr r = chunk_plus_offset(p, nb); + set_inuse(m, p, nb); + set_inuse(m, r, rsize); + dispose_chunk(m, r, rsize); + } + newp = p; + } + } + } + else { + USAGE_ERROR_ACTION(m, chunk2mem(p)); + } + return newp; +} + +static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { + void* mem = 0; + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ + alignment = MIN_CHUNK_SIZE; + if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ + size_t a = MALLOC_ALIGNMENT << 1; + while (a < alignment) a <<= 1; + alignment = a; + } + if (bytes >= MAX_REQUEST - alignment) { + if (m != 0) { /* Test isn't needed but avoids compiler warning */ + MALLOC_FAILURE_ACTION; + } + } + else { + size_t nb = request2size(bytes); + size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; + mem = internal_malloc(m, req); + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + if (PREACTION(m)) + return 0; + if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ + /* + Find an aligned spot inside chunk. Since we need to give + back leading space in a chunk of at least MIN_CHUNK_SIZE, if + the first calculation places us at a spot with less than + MIN_CHUNK_SIZE leader, we can move to the next aligned spot. + We've allocated enough total room so that this is always + possible. + */ + char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment - + SIZE_T_ONE)) & + -alignment)); + char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? 
+ br : br+alignment; + mchunkptr newp = (mchunkptr)pos; + size_t leadsize = pos - (char*)(p); + size_t newsize = chunksize(p) - leadsize; + + if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ + newp->prev_foot = p->prev_foot + leadsize; + newp->head = newsize; + } + else { /* Otherwise, give back leader, use the rest */ + set_inuse(m, newp, newsize); + set_inuse(m, p, leadsize); + dispose_chunk(m, p, leadsize); + } + p = newp; + } + + /* Give back spare room at the end */ + if (!is_mmapped(p)) { + size_t size = chunksize(p); + if (size > nb + MIN_CHUNK_SIZE) { + size_t remainder_size = size - nb; + mchunkptr remainder = chunk_plus_offset(p, nb); + set_inuse(m, p, nb); + set_inuse(m, remainder, remainder_size); + dispose_chunk(m, remainder, remainder_size); + } + } + + mem = chunk2mem(p); + assert (chunksize(p) >= nb); + assert(((size_t)mem & (alignment - 1)) == 0); + check_inuse_chunk(m, p); + POSTACTION(m); + } + } + return mem; +} + +/* + Common support for independent_X routines, handling + all of the combinations that can result. + The opts arg has: + bit 0 set if all elements are same size (using sizes[0]) + bit 1 set if elements should be zeroed +*/ +static void** ialloc(mstate m, + size_t n_elements, + size_t* sizes, + int opts, + void* chunks[]) { + + size_t element_size; /* chunksize of each element, if all same */ + size_t contents_size; /* total size of elements */ + size_t array_size; /* request size of pointer array */ + void* mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + size_t remainder_size; /* remaining bytes while splitting */ + void** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + flag_t was_enabled; /* to disable mmap */ + size_t size; + size_t i; + + ensure_initialization(); + /* compute array length, if needed */ + if (chunks != 0) { + if (n_elements == 0) + return chunks; /* nothing to do */ + marray = chunks; + array_size = 0; + } + else { + /* if empty req, must still return chunk representing empty array */ + if (n_elements == 0) + return (void**)internal_malloc(m, 0); + marray = 0; + array_size = request2size(n_elements * (sizeof(void*))); + } + + /* compute total element size */ + if (opts & 0x1) { /* all-same-size */ + element_size = request2size(*sizes); + contents_size = n_elements * element_size; + } + else { /* add up all the sizes */ + element_size = 0; + contents_size = 0; + for (i = 0; i != n_elements; ++i) + contents_size += request2size(sizes[i]); + } + + size = contents_size + array_size; + + /* + Allocate the aggregate chunk. First disable direct-mmapping so + malloc won't use it, since we would not be able to later + free/realloc space internal to a segregated mmap region. 
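+     The previous setting is remembered in was_enabled and restored via
+     enable_mmap immediately after the aggregate allocation below.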
+ */ + was_enabled = use_mmap(m); + disable_mmap(m); + mem = internal_malloc(m, size - CHUNK_OVERHEAD); + if (was_enabled) + enable_mmap(m); + if (mem == 0) + return 0; + + if (PREACTION(m)) return 0; + p = mem2chunk(mem); + remainder_size = chunksize(p); + + assert(!is_mmapped(p)); + + if (opts & 0x2) { /* optionally clear the elements */ + __builtin_memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); + } + + /* If not provided, allocate the pointer array as final part of chunk */ + if (marray == 0) { + size_t array_chunk_size; + array_chunk = chunk_plus_offset(p, contents_size); + array_chunk_size = remainder_size - contents_size; + marray = (void**) (chunk2mem(array_chunk)); + set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); + remainder_size = contents_size; + } + + /* split out elements */ + for (i = 0; ; ++i) { + marray[i] = chunk2mem(p); + if (i != n_elements-1) { + if (element_size != 0) + size = element_size; + else + size = request2size(sizes[i]); + remainder_size -= size; + set_size_and_pinuse_of_inuse_chunk(m, p, size); + p = chunk_plus_offset(p, size); + } + else { /* the final element absorbs any overallocation slop */ + set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); + break; + } + } + +#if DEBUG + if (marray != chunks) { + /* final element must have exactly exhausted chunk */ + if (element_size != 0) { + assert(remainder_size == element_size); + } + else { + assert(remainder_size == request2size(sizes[i])); + } + check_inuse_chunk(m, mem2chunk(marray)); + } + for (i = 0; i != n_elements; ++i) + check_inuse_chunk(m, mem2chunk(marray[i])); + +#endif /* DEBUG */ + + POSTACTION(m); + return marray; +} + +/* Try to free all pointers in the given array. + Note: this could be made faster, by delaying consolidation, + at the price of disabling some user integrity checks, We + still optimize some consolidations by combining adjacent + chunks before freeing, which will occur often if allocated + with ialloc or the array is sorted. 
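+   Returns the number of pointers that could not be freed; array slots
+   that are freed are set to 0.  A minimal usage sketch through the
+   exported dlbulk_free wrapper (illustrative only):
+
+       void* a[2];
+       a[0] = dlmalloc(32);
+       a[1] = dlmalloc(64);
+       size_t unfreed = dlbulk_free(a, 2);   // expected to be 0 here
+
+   Consecutive array entries that refer to adjacent chunks are merged
+   before being binned, which is the optimization mentioned above.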
+*/ +static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) { + size_t unfreed = 0; + if (!PREACTION(m)) { + void** a; + void** fence = &(array[nelem]); + for (a = array; a != fence; ++a) { + void* mem = *a; + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + size_t psize = chunksize(p); +#if FOOTERS + if (get_mstate_for(p) != m) { + ++unfreed; + continue; + } +#endif + check_inuse_chunk(m, p); + *a = 0; + if (RTCHECK(ok_address(m, p) && ok_inuse(p))) { + void ** b = a + 1; /* try to merge with next chunk */ + mchunkptr next = next_chunk(p); + if (b != fence && *b == chunk2mem(next)) { + size_t newsize = chunksize(next) + psize; + set_inuse(m, p, newsize); + *b = chunk2mem(p); + } + else + dispose_chunk(m, p, psize); + } + else { + CORRUPTION_ERROR_ACTION(m); + break; + } + } + } + if (should_trim(m, m->topsize)) + sys_trim(m, 0); + POSTACTION(m); + } + return unfreed; +} + +/* Traversal */ +#if MALLOC_INSPECT_ALL +static void internal_inspect_all(mstate m, + void(*handler)(void *start, + void *end, + size_t used_bytes, + void* callback_arg), + void* arg) { + if (is_initialized(m)) { + mchunkptr top = m->top; + msegmentptr s; + for (s = &m->seg; s != 0; s = s->next) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) { + mchunkptr next = next_chunk(q); + size_t sz = chunksize(q); + size_t used; + void* start; + if (is_inuse(q)) { + used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ + start = chunk2mem(q); + } + else { + used = 0; + if (is_small(sz)) { /* offset by possible bookkeeping */ + start = (void*)((char*)q + sizeof(struct malloc_chunk)); + } + else { + start = (void*)((char*)q + sizeof(struct malloc_tree_chunk)); + } + } + if (start < (void*)next) /* skip if all space is bookkeeping */ + handler(start, next, used, arg); + if (q == top) + break; + q = next; + } + } + } +} +#endif /* MALLOC_INSPECT_ALL */ + +/* ------------------ Exported realloc, memalign, etc -------------------- */ + +#if !ONLY_MSPACES + +void* dlrealloc(void* oldmem, size_t bytes) { + void* mem = 0; + if (oldmem == 0) { + mem = dlmalloc(bytes); + } + else if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } +#ifdef REALLOC_ZERO_BYTES_FREES + else if (bytes == 0) { + dlfree(oldmem); + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { + size_t nb = request2size(bytes); + mchunkptr oldp = mem2chunk(oldmem); +#if ! FOOTERS + mstate m = gm; +#else /* FOOTERS */ + mstate m = get_mstate_for(oldp); + if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif /* FOOTERS */ + if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); + POSTACTION(m); + if (newp != 0) { + check_inuse_chunk(m, newp); + mem = chunk2mem(newp); + } + else { + mem = internal_malloc(m, bytes); + if (mem != 0) { + size_t oc = chunksize(oldp) - overhead_for(oldp); + __builtin_memcpy(mem, oldmem, (oc < bytes)? oc : bytes); + internal_free(m, oldmem); + } + } + } + } + return mem; +} + +void* dlrealloc_in_place(void* oldmem, size_t bytes) { + void* mem = 0; + if (oldmem != 0) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } + else { + size_t nb = request2size(bytes); + mchunkptr oldp = mem2chunk(oldmem); +#if ! 
FOOTERS + mstate m = gm; +#else /* FOOTERS */ + mstate m = get_mstate_for(oldp); + if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif /* FOOTERS */ + if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); + POSTACTION(m); + if (newp == oldp) { + check_inuse_chunk(m, newp); + mem = oldmem; + } + } + } + } + return mem; +} + +void* dlmemalign(size_t alignment, size_t bytes) { + if (alignment <= MALLOC_ALIGNMENT) { + return dlmalloc(bytes); + } + return internal_memalign(gm, alignment, bytes); +} + +int dlposix_memalign(void** pp, size_t alignment, size_t bytes) { + void* mem = 0; + if (alignment == MALLOC_ALIGNMENT) + mem = dlmalloc(bytes); + else { + size_t d = alignment / sizeof(void*); + size_t r = alignment % sizeof(void*); + if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0) + return EINVAL; + else if (bytes <= MAX_REQUEST - alignment) { + if (alignment < MIN_CHUNK_SIZE) + alignment = MIN_CHUNK_SIZE; + mem = internal_memalign(gm, alignment, bytes); + } + } + if (mem == 0) + return ENOMEM; + else { + *pp = mem; + return 0; + } +} + +void* dlvalloc(size_t bytes) { + size_t pagesz; + ensure_initialization(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, bytes); +} + +void* dlpvalloc(size_t bytes) { + size_t pagesz; + ensure_initialization(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); +} + +void** dlindependent_calloc(size_t n_elements, size_t elem_size, + void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + return ialloc(gm, n_elements, &sz, 3, chunks); +} + +void** dlindependent_comalloc(size_t n_elements, size_t sizes[], + void* chunks[]) { + return ialloc(gm, n_elements, sizes, 0, chunks); +} + +size_t dlbulk_free(void* array[], size_t nelem) { + return internal_bulk_free(gm, array, nelem); +} + +#if MALLOC_INSPECT_ALL +void dlmalloc_inspect_all(void(*handler)(void *start, + void *end, + size_t used_bytes, + void* callback_arg), + void* arg) { + ensure_initialization(); + if (!PREACTION(gm)) { + internal_inspect_all(gm, handler, arg); + POSTACTION(gm); + } +} +#endif /* MALLOC_INSPECT_ALL */ + +int dlmalloc_trim(size_t pad) { + int result = 0; + ensure_initialization(); + if (!PREACTION(gm)) { + result = sys_trim(gm, pad); + POSTACTION(gm); + } + return result; +} + +size_t dlmalloc_footprint(void) { + return gm->footprint; +} + +size_t dlmalloc_max_footprint(void) { + return gm->max_footprint; +} + +size_t dlmalloc_footprint_limit(void) { + size_t maf = gm->footprint_limit; + return maf == 0 ? 
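                          /* a stored limit of 0 means no limit has been
                             set, so report it to callers as MAX_SIZE_T */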
MAX_SIZE_T : maf; +} + +size_t dlmalloc_set_footprint_limit(size_t bytes) { + size_t result; /* invert sense of 0 */ + if (bytes == 0) + result = granularity_align(1); /* Use minimal size */ + if (bytes == MAX_SIZE_T) + result = 0; /* disable */ + else + result = granularity_align(bytes); + return gm->footprint_limit = result; +} + +#if !NO_MALLINFO +struct mallinfo dlmallinfo(void) { + return internal_mallinfo(gm); +} +#endif /* NO_MALLINFO */ + +#if !NO_MALLOC_STATS +void dlmalloc_stats() { + internal_malloc_stats(gm); +} +#endif /* NO_MALLOC_STATS */ + +int dlmallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +size_t dlmalloc_usable_size(void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + if (is_inuse(p)) + return chunksize(p) - overhead_for(p); + } + return 0; +} + +#endif /* !ONLY_MSPACES */ + +/* ----------------------------- user mspaces ---------------------------- */ + +#if MSPACES + +static mstate init_user_mstate(char* tbase, size_t tsize) { + size_t msize = pad_request(sizeof(struct malloc_state)); + mchunkptr mn; + mchunkptr msp = align_as_chunk(tbase); + mstate m = (mstate)(chunk2mem(msp)); + __builtin_memset(m, 0, msize); + (void)INITIAL_LOCK(&m->mutex); + msp->head = (msize|INUSE_BITS); + m->seg.base = m->least_addr = tbase; + m->seg.size = m->footprint = m->max_footprint = tsize; + m->magic = mparams.magic; + m->release_checks = MAX_RELEASE_CHECK_RATE; + m->mflags = mparams.default_mflags; + m->extp = 0; + m->exts = 0; + disable_contiguous(m); + init_bins(m); + mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); + check_top_chunk(m, m->top); + return m; +} + +mspace create_mspace(size_t capacity, int locked) { + mstate m = 0; + size_t msize; + ensure_initialization(); + msize = pad_request(sizeof(struct malloc_state)); + if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + size_t rs = ((capacity == 0)? 
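                          /* capacity 0 requests the smallest usable space:
                             one system allocation granularity unit */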
mparams.granularity : + (capacity + TOP_FOOT_SIZE + msize)); + size_t tsize = granularity_align(rs); + char* tbase = (char*)(CALL_MMAP(tsize)); + if (tbase != CMFAIL) { + m = init_user_mstate(tbase, tsize); + m->seg.sflags = USE_MMAP_BIT; + set_lock(m, locked); + } + } + return (mspace)m; +} + +mspace create_mspace_with_base(void* base, size_t capacity, int locked) { + mstate m = 0; + size_t msize; + ensure_initialization(); + msize = pad_request(sizeof(struct malloc_state)); + if (capacity > msize + TOP_FOOT_SIZE && + capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + m = init_user_mstate((char*)base, capacity); + m->seg.sflags = EXTERN_BIT; + set_lock(m, locked); + } + return (mspace)m; +} + +int mspace_track_large_chunks(mspace msp, int enable) { + int ret = 0; + mstate ms = (mstate)msp; + if (!PREACTION(ms)) { + if (!use_mmap(ms)) { + ret = 1; + } + if (!enable) { + enable_mmap(ms); + } else { + disable_mmap(ms); + } + POSTACTION(ms); + } + return ret; +} + +size_t destroy_mspace(mspace msp) { + size_t freed = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + msegmentptr sp = &ms->seg; + (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + flag_t flag = sp->sflags; + (void)base; /* placate people compiling -Wunused-variable */ + sp = sp->next; + if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) && + CALL_MUNMAP(base, size) == 0) + freed += size; + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return freed; +} + +/* + mspace versions of routines are near-clones of the global + versions. This is not so nice but better than the alternatives. +*/ + +void* mspace_malloc(mspace msp, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (!PREACTION(ms)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = ms->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
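       Bit 0 of smallbits means the exact bin for this request is
       non-empty, bit 1 means the next-larger bin is; falling back to the
       next bin (see the idx adjustment below) is still effectively
       remainderless, because adjacent smallbins differ by less than
       MIN_CHUNK_SIZE and the surplus cannot be split off as a free chunk.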
*/ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(ms, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(ms, b, p, idx); + set_inuse_and_pinuse(ms, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb > ms->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(ms, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(ms, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(ms, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + + if (nb <= ms->dvsize) { + size_t rsize = ms->dvsize - nb; + mchunkptr p = ms->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = ms->dv = chunk_plus_offset(p, nb); + ms->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + } + else { /* exhaust dv */ + size_t dvs = ms->dvsize; + ms->dvsize = 0; + ms->dv = 0; + set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; + mchunkptr p = ms->top; + mchunkptr r = ms->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + mem = chunk2mem(p); + check_top_chunk(ms, ms->top); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + mem = sys_alloc(ms, nb); + + postaction: + POSTACTION(ms); + return mem; + } + + return 0; +} + +void mspace_free(mspace msp, void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); + (void)msp; /* placate people compiling -Wunused */ +#else /* FOOTERS */ + mstate fm = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if 
((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + + if (is_small(psize)) { + insert_small_chunk(fm, p, psize); + check_free_chunk(fm, p); + } + else { + tchunkptr tp = (tchunkptr)p; + insert_large_chunk(fm, tp, psize); + check_free_chunk(fm, p); + if (--fm->release_checks == 0) + release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +} + +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = internal_malloc(ms, req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + __builtin_memset(mem, 0, req); + return mem; +} + +void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { + void* mem = 0; + if (oldmem == 0) { + mem = mspace_malloc(msp, bytes); + } + else if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } +#ifdef REALLOC_ZERO_BYTES_FREES + else if (bytes == 0) { + mspace_free(msp, oldmem); + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { + size_t nb = request2size(bytes); + mchunkptr oldp = mem2chunk(oldmem); +#if ! FOOTERS + mstate m = (mstate)msp; +#else /* FOOTERS */ + mstate m = get_mstate_for(oldp); + if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif /* FOOTERS */ + if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); + POSTACTION(m); + if (newp != 0) { + check_inuse_chunk(m, newp); + mem = chunk2mem(newp); + } + else { + mem = mspace_malloc(m, bytes); + if (mem != 0) { + size_t oc = chunksize(oldp) - overhead_for(oldp); + __builtin_memcpy(mem, oldmem, (oc < bytes)? oc : bytes); + mspace_free(m, oldmem); + } + } + } + } + return mem; +} + +void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) { + void* mem = 0; + if (oldmem != 0) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } + else { + size_t nb = request2size(bytes); + mchunkptr oldp = mem2chunk(oldmem); +#if ! 
FOOTERS + mstate m = (mstate)msp; +#else /* FOOTERS */ + mstate m = get_mstate_for(oldp); + (void)msp; /* placate people compiling -Wunused */ + if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif /* FOOTERS */ + if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); + POSTACTION(m); + if (newp == oldp) { + check_inuse_chunk(m, newp); + mem = oldmem; + } + } + } + } + return mem; +} + +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (alignment <= MALLOC_ALIGNMENT) + return mspace_malloc(msp, bytes); + return internal_memalign(ms, alignment, bytes); +} + +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, &sz, 3, chunks); +} + +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, sizes, 0, chunks); +} + +size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) { + return internal_bulk_free((mstate)msp, array, nelem); +} + +#if MALLOC_INSPECT_ALL +void mspace_inspect_all(mspace msp, + void(*handler)(void *start, + void *end, + size_t used_bytes, + void* callback_arg), + void* arg) { + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + if (!PREACTION(ms)) { + internal_inspect_all(ms, handler, arg); + POSTACTION(ms); + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } +} +#endif /* MALLOC_INSPECT_ALL */ + +int mspace_trim(mspace msp, size_t pad) { + int result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + if (!PREACTION(ms)) { + result = sys_trim(ms, pad); + POSTACTION(ms); + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +#if !NO_MALLOC_STATS +void mspace_malloc_stats(mspace msp) { + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + internal_malloc_stats(ms); + } + else { + USAGE_ERROR_ACTION(ms,ms); + } +} +#endif /* NO_MALLOC_STATS */ + +size_t mspace_footprint(mspace msp) { + size_t result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->footprint; + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +size_t mspace_max_footprint(mspace msp) { + size_t result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->max_footprint; + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +size_t mspace_footprint_limit(mspace msp) { + size_t result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + size_t maf = ms->footprint_limit; + result = (maf == 0) ? 
MAX_SIZE_T : maf; + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +size_t mspace_set_footprint_limit(mspace msp, size_t bytes) { + size_t result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + if (bytes == 0) + result = granularity_align(1); /* Use minimal size */ + if (bytes == MAX_SIZE_T) + result = 0; /* disable */ + else + result = granularity_align(bytes); + ms->footprint_limit = result; + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +#if !NO_MALLINFO +struct mallinfo mspace_mallinfo(mspace msp) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + } + return internal_mallinfo(ms); +} +#endif /* NO_MALLINFO */ + +size_t mspace_usable_size(const void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + if (is_inuse(p)) + return chunksize(p) - overhead_for(p); + } + return 0; +} + +int mspace_mallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +#endif /* MSPACES */ + + +/* -------------------- Alternative MORECORE functions ------------------- */ + +/* + Guidelines for creating a custom version of MORECORE: + + * For best performance, MORECORE should allocate in multiples of pagesize. + * MORECORE may allocate more memory than requested. (Or even less, + but this will usually result in a malloc failure.) + * MORECORE must not allocate memory when given argument zero, but + instead return one past the end address of memory from previous + nonzero call. + * For best performance, consecutive calls to MORECORE with positive + arguments should return increasing addresses, indicating that + space has been contiguously extended. + * Even though consecutive calls to MORECORE need not return contiguous + addresses, it must be OK for malloc'ed chunks to span multiple + regions in those cases where they do happen to be contiguous. + * MORECORE need not handle negative arguments -- it may instead + just return MFAIL when given negative arguments. + Negative arguments are always multiples of pagesize. MORECORE + must not misinterpret negative args as large positive unsigned + args. You can suppress all such calls from even occurring by defining + MORECORE_CANNOT_TRIM, + + As an example alternative MORECORE, here is a custom allocator + kindly contributed for pre-OSX macOS. It uses virtually but not + necessarily physically contiguous non-paged memory (locked in, + present and won't get swapped out). You can use it by uncommenting + this section, adding some #includes, and setting up the appropriate + defines above: + + #define MORECORE osMoreCore + + There is also a shutdown routine that should somehow be called for + cleanup upon program exit. 
+ + #define MAX_POOL_ENTRIES 100 + #define MINIMUM_MORECORE_SIZE (64 * 1024U) + static int next_os_pool; + void *our_os_pools[MAX_POOL_ENTRIES]; + + void *osMoreCore(int size) + { + void *ptr = 0; + static void *sbrk_top = 0; + + if (size > 0) + { + if (size < MINIMUM_MORECORE_SIZE) + size = MINIMUM_MORECORE_SIZE; + if (CurrentExecutionLevel() == kTaskLevel) + ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); + if (ptr == 0) + { + return (void *) MFAIL; + } + // save ptrs so they can be freed during cleanup + our_os_pools[next_os_pool] = ptr; + next_os_pool++; + ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); + sbrk_top = (char *) ptr + size; + return ptr; + } + else if (size < 0) + { + // we don't currently support shrink behavior + return (void *) MFAIL; + } + else + { + return sbrk_top; + } + } + + // cleanup any allocated memory pools + // called as last thing before shutting down driver + + void osCleanupMem(void) + { + void **ptr; + + for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) + if (*ptr) + { + PoolDeallocate(*ptr); + *ptr = 0; + } + } + +*/ + + +/* ----------------------------------------------------------------------- +History: + v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea + * fix bad comparison in dlposix_memalign + * don't reuse adjusted asize in sys_alloc + * add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion + * reduce compiler warnings -- thanks to all who reported/suggested these + + v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee) + * Always perform unlink checks unless INSECURE + * Add posix_memalign. + * Improve realloc to expand in more cases; expose realloc_in_place. + Thanks to Peter Buhr for the suggestion. + * Add footprint_limit, inspect_all, bulk_free. Thanks + to Barry Hayes and others for the suggestions. + * Internal refactorings to avoid calls while holding locks + * Use non-reentrant locks by default. Thanks to Roland McGrath + for the suggestion. + * Small fixes to mspace_destroy, reset_on_error. + * Various configuration extensions/changes. Thanks + to all who contributed these. + + V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu) + * Update Creative Commons URL + + V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee) + * Use zeros instead of prev foot for is_mmapped + * Add mspace_track_large_chunks; thanks to Jean Brouwers + * Fix set_inuse in internal_realloc; thanks to Jean Brouwers + * Fix insufficient sys_alloc padding when using 16byte alignment + * Fix bad error check in mspace_footprint + * Adaptations for ptmalloc; thanks to Wolfram Gloger. + * Reentrant spin locks; thanks to Earl Chew and others + * Win32 improvements; thanks to Niall Douglas and Earl Chew + * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options + * Extension hook in malloc_state + * Various small adjustments to reduce warnings on some compilers + * Various configuration extensions/changes for more platforms. Thanks + to all who contributed these. + + V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee) + * Add max_footprint functions + * Ensure all appropriate literals are size_t + * Fix conditional compilation problem for some #define settings + * Avoid concatenating segments with the one provided + in create_mspace_with_base + * Rename some variables to avoid compiler shadowing warnings + * Use explicit lock initialization. + * Better handling of sbrk interference. 
+ * Simplify and fix segment insertion, trimming and mspace_destroy + * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x + * Thanks especially to Dennis Flanagan for help on these. + + V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee) + * Fix memalign brace error. + + V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee) + * Fix improper #endif nesting in C++ + * Add explicit casts needed for C++ + + V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee) + * Use trees for large bins + * Support mspaces + * Use segments to unify sbrk-based and mmap-based system allocation, + removing need for emulation on most platforms without sbrk. + * Default safety checks + * Optional footer checks. Thanks to William Robertson for the idea. + * Internal code refactoring + * Incorporate suggestions and platform-specific changes. + Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas, + Aaron Bachmann, Emery Berger, and others. + * Speed up non-fastbin processing enough to remove fastbins. + * Remove useless cfree() to avoid conflicts with other apps. + * Remove internal memcpy, memset. Compilers handle builtins better. + * Remove some options that no one ever used and rename others. + + V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee) + * Fix malloc_state bitmap array misdeclaration + + V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee) + * Allow tuning of FIRST_SORTED_BIN_SIZE + * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte. + * Better detection and support for non-contiguousness of MORECORE. + Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger + * Bypass most of malloc if no frees. Thanks To Emery Berger. + * Fix freeing of old top non-contiguous chunk im sysmalloc. + * Raised default trim and map thresholds to 256K. + * Fix mmap-related #defines. Thanks to Lubos Lunak. + * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield. + * Branch-free bin calculation + * Default trim and mmap thresholds now 256K. + + V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee) + * Introduce independent_comalloc and independent_calloc. + Thanks to Michael Pachos for motivation and help. + * Make optional .h file available + * Allow > 2GB requests on 32bit systems. + * new WIN32 sbrk, mmap, munmap, lock code from . + Thanks also to Andreas Mueller , + and Anonymous. + * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for + helping test this.) + * memalign: check alignment arg + * realloc: don't try to shift chunks backwards, since this + leads to more fragmentation in some programs and doesn't + seem to help in any others. + * Collect all cases in malloc requiring system memory into sysmalloc + * Use mmap as backup to sbrk + * Place all internal state in malloc_state + * Introduce fastbins (although similar to 2.5.1) + * Many minor tunings and cosmetic improvements + * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK + * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS + Thanks to Tony E. Bennett and others. + * Include errno.h to support default failure action. + + V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee) + * return null for negative arguments + * Added Several WIN32 cleanups from Martin C. Fong + * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h' + (e.g. 
WIN32 platforms) + * Cleanup header file inclusion for WIN32 platforms + * Cleanup code to avoid Microsoft Visual C++ compiler complaints + * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing + memory allocation routines + * Set 'malloc_getpagesize' for WIN32 platforms (needs more work) + * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to + usage of 'assert' in non-WIN32 code + * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to + avoid infinite loop + * Always call 'fREe()' rather than 'free()' + + V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) + * Fixed ordering problem with boundary-stamping + + V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) + * Added pvalloc, as recommended by H.J. Liu + * Added 64bit pointer support mainly from Wolfram Gloger + * Added anonymously donated WIN32 sbrk emulation + * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen + * malloc_extend_top: fix mask error that caused wastage after + foreign sbrks + * Add linux mremap support code from HJ Liu + + V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) + * Integrated most documentation with the code. + * Add support for mmap, with help from + Wolfram Gloger (Gloger@lrz.uni-muenchen.de). + * Use last_remainder in more cases. + * Pack bins using idea from colin@nyx10.cs.du.edu + * Use ordered bins instead of best-fit threshhold + * Eliminate block-local decls to simplify tracing and debugging. + * Support another case of realloc via move into top + * Fix error occuring when initial sbrk_base not word-aligned. + * Rely on page size for units instead of SBRK_UNIT to + avoid surprises about sbrk alignment conventions. + * Add mallinfo, mallopt. Thanks to Raymond Nijssen + (raymond@es.ele.tue.nl) for the suggestion. + * Add `pad' argument to malloc_trim and top_pad mallopt parameter. + * More precautions for cases where other routines call sbrk, + courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). + * Added macros etc., allowing use in linux libc from + H.J. Lu (hjl@gnu.ai.mit.edu) + * Inverted this history list + + V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) + * Re-tuned and fixed to behave more nicely with V2.6.0 changes. + * Removed all preallocation code since under current scheme + the work required to undo bad preallocations exceeds + the work saved in good cases for most test programs. + * No longer use return list or unconsolidated bins since + no scheme using them consistently outperforms those that don't + given above changes. + * Use best fit for very large chunks to prevent some worst-cases. + * Added some support for debugging + + V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) + * Removed footers when chunks are in use. Thanks to + Paul Wilson (wilson@cs.texas.edu) for the suggestion. + + V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) + * Added malloc_trim, with help from Wolfram Gloger + (wmglo@Dent.MED.Uni-Muenchen.DE). 
+ + V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) + + V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) + * realloc: try to expand in both directions + * malloc: swap order of clean-bin strategy; + * realloc: only conditionally expand backwards + * Try not to scavenge used bins + * Use bin counts as a guide to preallocation + * Occasionally bin return list chunks in first scan + * Add a few optimizations from colin@nyx10.cs.du.edu + + V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) + * faster bin computation & slightly different binning + * merged all consolidations to one part of malloc proper + (eliminating old malloc_find_space & malloc_clean_bin) + * Scan 2 returns chunks (not just 1) + * Propagate failure in realloc if malloc returns 0 + * Add stuff to allow compilation on non-ANSI compilers + from kpv@research.att.com + + V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) + * removed potential for odd address access in prev_chunk + * removed dependency on getpagesize.h + * misc cosmetics and a bit more internal documentation + * anticosmetics: mangled names in macros to evade debugger strangeness + * tested on sparc, hp-700, dec-mips, rs6000 + with gcc & native cc (hp, dec only) allowing + Detlefs & Zorn comparison study (in SIGPLAN Notices.) + + Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) + * Based loosely on libg++-1.2X malloc. (It retains some of the overall + structure of old version, but most details differ.) + +*/ -- cgit 1.4.1 From cd95ee67bc38c0f4cc955bbaaa63a246b3a5c2d8 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Mon, 1 Feb 2021 23:00:45 +0100 Subject: code format qasan --- qemu_mode/libqasan/dlmalloc.c | 4761 +++++++++++++++++++++++++---------------- qemu_mode/libqasan/hooks.c | 1 + qemu_mode/libqasan/libqasan.h | 95 +- qemu_mode/libqasan/malloc.c | 91 +- qemu_mode/libqasan/patch.c | 46 +- qemu_mode/libqasan/string.c | 98 +- 6 files changed, 3032 insertions(+), 2060 deletions(-) (limited to 'qemu_mode/libqasan/dlmalloc.c') diff --git a/qemu_mode/libqasan/dlmalloc.c b/qemu_mode/libqasan/dlmalloc.c index 0cdd4a90..39ca4301 100644 --- a/qemu_mode/libqasan/dlmalloc.c +++ b/qemu_mode/libqasan/dlmalloc.c @@ -203,9 +203,12 @@ mspaces as thread-locals. For example: static __thread mspace tlms = 0; void* tlmalloc(size_t bytes) { + if (tlms == 0) tlms = create_mspace(0, 0); return mspace_malloc(tlms, bytes); + } + void tlfree(void* mem) { mspace_free(tlms, mem); } Unless FOOTERS is defined, each mspace is completely independent. 
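Editorial note, not part of the patch: the thread-local fragment shown in the
hunk above is easiest to read as a complete program. The sketch below assumes
this file is built with MSPACES enabled; the prototypes are declared locally
(matching the declarations in this file) instead of including malloc-2.8.6.h.

    #include <stddef.h>
    #include <stdio.h>

    typedef void *mspace;
    extern mspace create_mspace(size_t capacity, int locked);
    extern size_t destroy_mspace(mspace msp);
    extern void  *mspace_malloc(mspace msp, size_t bytes);
    extern void   mspace_free(mspace msp, void *mem);
    extern size_t mspace_footprint(mspace msp);

    int main(void) {
      mspace ms = create_mspace(0, 0);     /* default capacity, no locking */
      if (ms == 0) return 1;
      char *buf = mspace_malloc(ms, 256);  /* served from this space only */
      if (buf) { buf[0] = 0; mspace_free(ms, buf); }
      printf("footprint: %zu bytes\n", mspace_footprint(ms));
      destroy_mspace(ms);                  /* releases the whole space */
      return 0;
    }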
@@ -525,197 +528,198 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP /* Version identifier to allow people to support multiple versions */ #ifndef DLMALLOC_VERSION -#define DLMALLOC_VERSION 20806 -#endif /* DLMALLOC_VERSION */ + #define DLMALLOC_VERSION 20806 +#endif /* DLMALLOC_VERSION */ #ifndef DLMALLOC_EXPORT -#define DLMALLOC_EXPORT extern + #define DLMALLOC_EXPORT extern #endif #ifndef WIN32 -#ifdef _WIN32 -#define WIN32 1 -#endif /* _WIN32 */ -#ifdef _WIN32_WCE -#define LACKS_FCNTL_H -#define WIN32 1 -#endif /* _WIN32_WCE */ -#endif /* WIN32 */ + #ifdef _WIN32 + #define WIN32 1 + #endif /* _WIN32 */ + #ifdef _WIN32_WCE + #define LACKS_FCNTL_H + #define WIN32 1 + #endif /* _WIN32_WCE */ +#endif /* WIN32 */ #ifdef WIN32 -#define WIN32_LEAN_AND_MEAN -#include -#include -#define HAVE_MMAP 1 -#define HAVE_MORECORE 0 -#define LACKS_UNISTD_H -#define LACKS_SYS_PARAM_H -#define LACKS_SYS_MMAN_H -#define LACKS_STRING_H -#define LACKS_STRINGS_H -#define LACKS_SYS_TYPES_H -#define LACKS_ERRNO_H -#define LACKS_SCHED_H -#ifndef MALLOC_FAILURE_ACTION -#define MALLOC_FAILURE_ACTION -#endif /* MALLOC_FAILURE_ACTION */ -#ifndef MMAP_CLEARS -#ifdef _WIN32_WCE /* WINCE reportedly does not clear */ -#define MMAP_CLEARS 0 -#else -#define MMAP_CLEARS 1 -#endif /* _WIN32_WCE */ -#endif /*MMAP_CLEARS */ -#endif /* WIN32 */ + #define WIN32_LEAN_AND_MEAN + #include + #include + #define HAVE_MMAP 1 + #define HAVE_MORECORE 0 + #define LACKS_UNISTD_H + #define LACKS_SYS_PARAM_H + #define LACKS_SYS_MMAN_H + #define LACKS_STRING_H + #define LACKS_STRINGS_H + #define LACKS_SYS_TYPES_H + #define LACKS_ERRNO_H + #define LACKS_SCHED_H + #ifndef MALLOC_FAILURE_ACTION + #define MALLOC_FAILURE_ACTION + #endif /* MALLOC_FAILURE_ACTION */ + #ifndef MMAP_CLEARS + #ifdef _WIN32_WCE /* WINCE reportedly does not clear */ + #define MMAP_CLEARS 0 + #else + #define MMAP_CLEARS 1 + #endif /* _WIN32_WCE */ + #endif /*MMAP_CLEARS */ +#endif /* WIN32 */ #if defined(DARWIN) || defined(_DARWIN) -/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ -#ifndef HAVE_MORECORE -#define HAVE_MORECORE 0 -#define HAVE_MMAP 1 -/* OSX allocators provide 16 byte alignment */ -#ifndef MALLOC_ALIGNMENT -#define MALLOC_ALIGNMENT ((size_t)16U) -#endif -#endif /* HAVE_MORECORE */ -#endif /* DARWIN */ + /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ + #ifndef HAVE_MORECORE + #define HAVE_MORECORE 0 + #define HAVE_MMAP 1 + /* OSX allocators provide 16 byte alignment */ + #ifndef MALLOC_ALIGNMENT + #define MALLOC_ALIGNMENT ((size_t)16U) + #endif + #endif /* HAVE_MORECORE */ +#endif /* DARWIN */ #ifndef LACKS_SYS_TYPES_H -#include /* For size_t */ -#endif /* LACKS_SYS_TYPES_H */ + #include /* For size_t */ +#endif /* LACKS_SYS_TYPES_H */ /* The maximum possible size_t value has all bits set */ -#define MAX_SIZE_T (~(size_t)0) - -#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */ -#define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \ - (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0)) -#endif /* USE_LOCKS */ - -#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */ -#if ((defined(__GNUC__) && \ - ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \ - defined(__i386__) || defined(__x86_64__))) || \ - (defined(_MSC_VER) && _MSC_VER>=1310)) -#ifndef USE_SPIN_LOCKS -#define USE_SPIN_LOCKS 1 -#endif /* USE_SPIN_LOCKS */ -#elif USE_SPIN_LOCKS -#error "USE_SPIN_LOCKS defined without implementation" -#endif /* ... locks available... 
*/ +#define MAX_SIZE_T (~(size_t)0) + +#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */ + #define USE_LOCKS \ + ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \ + (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0)) +#endif /* USE_LOCKS */ + +#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */ + #if ((defined(__GNUC__) && \ + ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \ + defined(__i386__) || defined(__x86_64__))) || \ + (defined(_MSC_VER) && _MSC_VER >= 1310)) + #ifndef USE_SPIN_LOCKS + #define USE_SPIN_LOCKS 1 + #endif /* USE_SPIN_LOCKS */ + #elif USE_SPIN_LOCKS + #error "USE_SPIN_LOCKS defined without implementation" + #endif /* ... locks available... */ #elif !defined(USE_SPIN_LOCKS) -#define USE_SPIN_LOCKS 0 -#endif /* USE_LOCKS */ + #define USE_SPIN_LOCKS 0 +#endif /* USE_LOCKS */ #ifndef ONLY_MSPACES -#define ONLY_MSPACES 0 -#endif /* ONLY_MSPACES */ + #define ONLY_MSPACES 0 +#endif /* ONLY_MSPACES */ #ifndef MSPACES -#if ONLY_MSPACES -#define MSPACES 1 -#else /* ONLY_MSPACES */ -#define MSPACES 0 -#endif /* ONLY_MSPACES */ -#endif /* MSPACES */ + #if ONLY_MSPACES + #define MSPACES 1 + #else /* ONLY_MSPACES */ + #define MSPACES 0 + #endif /* ONLY_MSPACES */ +#endif /* MSPACES */ #ifndef MALLOC_ALIGNMENT -#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) -#endif /* MALLOC_ALIGNMENT */ + #define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) +#endif /* MALLOC_ALIGNMENT */ #ifndef FOOTERS -#define FOOTERS 0 -#endif /* FOOTERS */ + #define FOOTERS 0 +#endif /* FOOTERS */ #ifndef ABORT -#define ABORT abort() -#endif /* ABORT */ + #define ABORT abort() +#endif /* ABORT */ #ifndef ABORT_ON_ASSERT_FAILURE -#define ABORT_ON_ASSERT_FAILURE 1 -#endif /* ABORT_ON_ASSERT_FAILURE */ + #define ABORT_ON_ASSERT_FAILURE 1 +#endif /* ABORT_ON_ASSERT_FAILURE */ #ifndef PROCEED_ON_ERROR -#define PROCEED_ON_ERROR 0 -#endif /* PROCEED_ON_ERROR */ + #define PROCEED_ON_ERROR 0 +#endif /* PROCEED_ON_ERROR */ #ifndef INSECURE -#define INSECURE 0 -#endif /* INSECURE */ + #define INSECURE 0 +#endif /* INSECURE */ #ifndef MALLOC_INSPECT_ALL -#define MALLOC_INSPECT_ALL 0 -#endif /* MALLOC_INSPECT_ALL */ + #define MALLOC_INSPECT_ALL 0 +#endif /* MALLOC_INSPECT_ALL */ #ifndef HAVE_MMAP -#define HAVE_MMAP 1 -#endif /* HAVE_MMAP */ + #define HAVE_MMAP 1 +#endif /* HAVE_MMAP */ #ifndef MMAP_CLEARS -#define MMAP_CLEARS 1 -#endif /* MMAP_CLEARS */ + #define MMAP_CLEARS 1 +#endif /* MMAP_CLEARS */ #ifndef HAVE_MREMAP -#ifdef linux -#define HAVE_MREMAP 1 -#define _GNU_SOURCE /* Turns on mremap() definition */ -#else /* linux */ -#define HAVE_MREMAP 0 -#endif /* linux */ -#endif /* HAVE_MREMAP */ + #ifdef linux + #define HAVE_MREMAP 1 + #define _GNU_SOURCE /* Turns on mremap() definition */ + #else /* linux */ + #define HAVE_MREMAP 0 + #endif /* linux */ +#endif /* HAVE_MREMAP */ #ifndef MALLOC_FAILURE_ACTION -#define MALLOC_FAILURE_ACTION errno = ENOMEM; -#endif /* MALLOC_FAILURE_ACTION */ + #define MALLOC_FAILURE_ACTION errno = ENOMEM; +#endif /* MALLOC_FAILURE_ACTION */ #ifndef HAVE_MORECORE -#if ONLY_MSPACES -#define HAVE_MORECORE 0 -#else /* ONLY_MSPACES */ -#define HAVE_MORECORE 1 -#endif /* ONLY_MSPACES */ -#endif /* HAVE_MORECORE */ + #if ONLY_MSPACES + #define HAVE_MORECORE 0 + #else /* ONLY_MSPACES */ + #define HAVE_MORECORE 1 + #endif /* ONLY_MSPACES */ +#endif /* HAVE_MORECORE */ #if !HAVE_MORECORE -#define MORECORE_CONTIGUOUS 0 -#else /* !HAVE_MORECORE */ -#define MORECORE_DEFAULT sbrk -#ifndef MORECORE_CONTIGUOUS -#define 
MORECORE_CONTIGUOUS 1 -#endif /* MORECORE_CONTIGUOUS */ -#endif /* HAVE_MORECORE */ + #define MORECORE_CONTIGUOUS 0 +#else /* !HAVE_MORECORE */ + #define MORECORE_DEFAULT sbrk + #ifndef MORECORE_CONTIGUOUS + #define MORECORE_CONTIGUOUS 1 + #endif /* MORECORE_CONTIGUOUS */ +#endif /* HAVE_MORECORE */ #ifndef DEFAULT_GRANULARITY -#if (MORECORE_CONTIGUOUS || defined(WIN32)) -#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ -#else /* MORECORE_CONTIGUOUS */ -#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) -#endif /* MORECORE_CONTIGUOUS */ -#endif /* DEFAULT_GRANULARITY */ + #if (MORECORE_CONTIGUOUS || defined(WIN32)) + #define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ + #else /* MORECORE_CONTIGUOUS */ + #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) + #endif /* MORECORE_CONTIGUOUS */ +#endif /* DEFAULT_GRANULARITY */ #ifndef DEFAULT_TRIM_THRESHOLD -#ifndef MORECORE_CANNOT_TRIM -#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) -#else /* MORECORE_CANNOT_TRIM */ -#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T -#endif /* MORECORE_CANNOT_TRIM */ -#endif /* DEFAULT_TRIM_THRESHOLD */ + #ifndef MORECORE_CANNOT_TRIM + #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) + #else /* MORECORE_CANNOT_TRIM */ + #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T + #endif /* MORECORE_CANNOT_TRIM */ +#endif /* DEFAULT_TRIM_THRESHOLD */ #ifndef DEFAULT_MMAP_THRESHOLD -#if HAVE_MMAP -#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) -#else /* HAVE_MMAP */ -#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T -#endif /* HAVE_MMAP */ -#endif /* DEFAULT_MMAP_THRESHOLD */ + #if HAVE_MMAP + #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) + #else /* HAVE_MMAP */ + #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T + #endif /* HAVE_MMAP */ +#endif /* DEFAULT_MMAP_THRESHOLD */ #ifndef MAX_RELEASE_CHECK_RATE -#if HAVE_MMAP -#define MAX_RELEASE_CHECK_RATE 4095 -#else -#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T -#endif /* HAVE_MMAP */ -#endif /* MAX_RELEASE_CHECK_RATE */ + #if HAVE_MMAP + #define MAX_RELEASE_CHECK_RATE 4095 + #else + #define MAX_RELEASE_CHECK_RATE MAX_SIZE_T + #endif /* HAVE_MMAP */ +#endif /* MAX_RELEASE_CHECK_RATE */ #ifndef USE_BUILTIN_FFS -#define USE_BUILTIN_FFS 0 -#endif /* USE_BUILTIN_FFS */ + #define USE_BUILTIN_FFS 0 +#endif /* USE_BUILTIN_FFS */ #ifndef USE_DEV_RANDOM -#define USE_DEV_RANDOM 0 -#endif /* USE_DEV_RANDOM */ + #define USE_DEV_RANDOM 0 +#endif /* USE_DEV_RANDOM */ #ifndef NO_MALLINFO -#define NO_MALLINFO 0 -#endif /* NO_MALLINFO */ + #define NO_MALLINFO 0 +#endif /* NO_MALLINFO */ #ifndef MALLINFO_FIELD_TYPE -#define MALLINFO_FIELD_TYPE size_t -#endif /* MALLINFO_FIELD_TYPE */ + #define MALLINFO_FIELD_TYPE size_t +#endif /* MALLINFO_FIELD_TYPE */ #ifndef NO_MALLOC_STATS -#define NO_MALLOC_STATS 0 -#endif /* NO_MALLOC_STATS */ + #define NO_MALLOC_STATS 0 +#endif /* NO_MALLOC_STATS */ #ifndef NO_SEGMENT_TRAVERSAL -#define NO_SEGMENT_TRAVERSAL 0 -#endif /* NO_SEGMENT_TRAVERSAL */ + #define NO_SEGMENT_TRAVERSAL 0 +#endif /* NO_SEGMENT_TRAVERSAL */ /* mallopt tuning options. 
SVID/XPG defines four standard parameter @@ -727,9 +731,9 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP #undef M_TRIM_THRESHOLD #undef M_GRANULARITY #undef M_MMAP_THRESHOLD -#define M_TRIM_THRESHOLD (-1) -#define M_GRANULARITY (-2) -#define M_MMAP_THRESHOLD (-3) +#define M_TRIM_THRESHOLD (-1) +#define M_GRANULARITY (-2) +#define M_MMAP_THRESHOLD (-3) /* ------------------------ Mallinfo declarations ------------------------ */ @@ -758,28 +762,32 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP /* #define HAVE_USR_INCLUDE_MALLOC_H */ -#ifdef HAVE_USR_INCLUDE_MALLOC_H -#include "/usr/include/malloc.h" -#else /* HAVE_USR_INCLUDE_MALLOC_H */ -#ifndef STRUCT_MALLINFO_DECLARED -/* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */ -#define _STRUCT_MALLINFO -#define STRUCT_MALLINFO_DECLARED 1 + #ifdef HAVE_USR_INCLUDE_MALLOC_H + #include "/usr/include/malloc.h" + #else /* HAVE_USR_INCLUDE_MALLOC_H */ + #ifndef STRUCT_MALLINFO_DECLARED + /* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is + * defined */ + #define _STRUCT_MALLINFO + #define STRUCT_MALLINFO_DECLARED 1 struct mallinfo { - MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ - MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ - MALLINFO_FIELD_TYPE smblks; /* always 0 */ - MALLINFO_FIELD_TYPE hblks; /* always 0 */ - MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ - MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ - MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ - MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ - MALLINFO_FIELD_TYPE fordblks; /* total free space */ - MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ + + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ + MALLINFO_FIELD_TYPE smblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ + MALLINFO_FIELD_TYPE fordblks; /* total free space */ + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ + }; -#endif /* STRUCT_MALLINFO_DECLARED */ -#endif /* HAVE_USR_INCLUDE_MALLOC_H */ -#endif /* NO_MALLINFO */ + + #endif /* STRUCT_MALLINFO_DECLARED */ + #endif /* HAVE_USR_INCLUDE_MALLOC_H */ +#endif /* NO_MALLINFO */ /* Try to persuade compilers to inline. 
The most critical functions for @@ -788,14 +796,14 @@ struct mallinfo { #ifndef FORCEINLINE #if defined(__GNUC__) -#define FORCEINLINE __inline __attribute__ ((always_inline)) + #define FORCEINLINE __inline __attribute__((always_inline)) #elif defined(_MSC_VER) #define FORCEINLINE __forceinline #endif #endif #ifndef NOINLINE #if defined(__GNUC__) - #define NOINLINE __attribute__ ((noinline)) + #define NOINLINE __attribute__((noinline)) #elif defined(_MSC_VER) #define NOINLINE __declspec(noinline) #else @@ -805,42 +813,43 @@ struct mallinfo { #ifdef __cplusplus extern "C" { + + #ifndef FORCEINLINE + #define FORCEINLINE inline + #endif +#endif /* __cplusplus */ #ifndef FORCEINLINE - #define FORCEINLINE inline -#endif -#endif /* __cplusplus */ -#ifndef FORCEINLINE - #define FORCEINLINE + #define FORCEINLINE #endif #if !ONLY_MSPACES /* ------------------- Declarations of public routines ------------------- */ -#ifndef USE_DL_PREFIX -#define dlcalloc calloc -#define dlfree free -#define dlmalloc malloc -#define dlmemalign memalign -#define dlposix_memalign posix_memalign -#define dlrealloc realloc -#define dlrealloc_in_place realloc_in_place -#define dlvalloc valloc -#define dlpvalloc pvalloc -#define dlmallinfo mallinfo -#define dlmallopt mallopt -#define dlmalloc_trim malloc_trim -#define dlmalloc_stats malloc_stats -#define dlmalloc_usable_size malloc_usable_size -#define dlmalloc_footprint malloc_footprint -#define dlmalloc_max_footprint malloc_max_footprint -#define dlmalloc_footprint_limit malloc_footprint_limit -#define dlmalloc_set_footprint_limit malloc_set_footprint_limit -#define dlmalloc_inspect_all malloc_inspect_all -#define dlindependent_calloc independent_calloc -#define dlindependent_comalloc independent_comalloc -#define dlbulk_free bulk_free -#endif /* USE_DL_PREFIX */ + #ifndef USE_DL_PREFIX + #define dlcalloc calloc + #define dlfree free + #define dlmalloc malloc + #define dlmemalign memalign + #define dlposix_memalign posix_memalign + #define dlrealloc realloc + #define dlrealloc_in_place realloc_in_place + #define dlvalloc valloc + #define dlpvalloc pvalloc + #define dlmallinfo mallinfo + #define dlmallopt mallopt + #define dlmalloc_trim malloc_trim + #define dlmalloc_stats malloc_stats + #define dlmalloc_usable_size malloc_usable_size + #define dlmalloc_footprint malloc_footprint + #define dlmalloc_max_footprint malloc_max_footprint + #define dlmalloc_footprint_limit malloc_footprint_limit + #define dlmalloc_set_footprint_limit malloc_set_footprint_limit + #define dlmalloc_inspect_all malloc_inspect_all + #define dlindependent_calloc independent_calloc + #define dlindependent_comalloc independent_comalloc + #define dlbulk_free bulk_free + #endif /* USE_DL_PREFIX */ /* malloc(size_t n) @@ -856,7 +865,7 @@ extern "C" { maximum supported value of n differs across systems, but is in all cases less than the maximum representable value of a size_t. */ -DLMALLOC_EXPORT void* dlmalloc(size_t); +DLMALLOC_EXPORT void *dlmalloc(size_t); /* free(void* p) @@ -865,14 +874,14 @@ DLMALLOC_EXPORT void* dlmalloc(size_t); It has no effect if p is null. If p was not malloced or already freed, free(p) will by default cause the current program to abort. */ -DLMALLOC_EXPORT void dlfree(void*); +DLMALLOC_EXPORT void dlfree(void *); /* calloc(size_t n_elements, size_t element_size); Returns a pointer to n_elements * element_size bytes, with all locations set to zero. 
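  If n_elements * element_size would overflow size_t, the request is forced
  to MAX_SIZE_T so the allocation fails cleanly instead of returning a block
  smaller than the caller computed (the same overflow guard can be seen in
  the mspace_calloc definition in this file).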
*/ -DLMALLOC_EXPORT void* dlcalloc(size_t, size_t); +DLMALLOC_EXPORT void *dlcalloc(size_t, size_t); /* realloc(void* p, size_t n) @@ -896,7 +905,7 @@ DLMALLOC_EXPORT void* dlcalloc(size_t, size_t); The old unix realloc convention of allowing the last-free'd chunk to be used as an argument to realloc is not supported. */ -DLMALLOC_EXPORT void* dlrealloc(void*, size_t); +DLMALLOC_EXPORT void *dlrealloc(void *, size_t); /* realloc_in_place(void* p, size_t n) @@ -911,7 +920,7 @@ DLMALLOC_EXPORT void* dlrealloc(void*, size_t); Returns p if successful; otherwise null. */ -DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t); +DLMALLOC_EXPORT void *dlrealloc_in_place(void *, size_t); /* memalign(size_t alignment, size_t n); @@ -925,7 +934,7 @@ DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t); Overreliance on memalign is a sure way to fragment space. */ -DLMALLOC_EXPORT void* dlmemalign(size_t, size_t); +DLMALLOC_EXPORT void *dlmemalign(size_t, size_t); /* int posix_memalign(void** pp, size_t alignment, size_t n); @@ -935,14 +944,14 @@ DLMALLOC_EXPORT void* dlmemalign(size_t, size_t); returns EINVAL if the alignment is not a power of two (3) fails and returns ENOMEM if memory cannot be allocated. */ -DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t); +DLMALLOC_EXPORT int dlposix_memalign(void **, size_t, size_t); /* valloc(size_t n); Equivalent to memalign(pagesize, n), where pagesize is the page size of the system. If the pagesize is unknown, 4096 is used. */ -DLMALLOC_EXPORT void* dlvalloc(size_t); +DLMALLOC_EXPORT void *dlvalloc(size_t); /* mallopt(int parameter_number, int parameter_value) @@ -1017,7 +1026,7 @@ DLMALLOC_EXPORT size_t dlmalloc_footprint_limit(); */ DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes); -#if MALLOC_INSPECT_ALL + #if MALLOC_INSPECT_ALL /* malloc_inspect_all(void(*handler)(void *start, void *end, @@ -1039,19 +1048,23 @@ DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes); than 1000, you could write: static int count = 0; void count_chunks(void* start, void* end, size_t used, void* arg) { + if (used >= 1000) ++count; + } + then: malloc_inspect_all(count_chunks, NULL); malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined. */ -DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), - void* arg); +DLMALLOC_EXPORT void dlmalloc_inspect_all(void (*handler)(void *, void *, + size_t, void *), + void *arg); -#endif /* MALLOC_INSPECT_ALL */ + #endif /* MALLOC_INSPECT_ALL */ -#if !NO_MALLINFO + #if !NO_MALLINFO /* mallinfo() Returns (by copy) a struct containing various summary statistics: @@ -1075,7 +1088,7 @@ DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, thus be inaccurate. 
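  A minimal editorial example of reading the summary (the field type is
  MALLINFO_FIELD_TYPE, which defaults to size_t in this file):

    struct mallinfo mi = dlmallinfo();
    printf("allocated: %zu  free: %zu  mmapped: %zu\n",
           (size_t)mi.uordblks, (size_t)mi.fordblks, (size_t)mi.hblkhd);

  With the default name mapping (no USE_DL_PREFIX) the call is simply
  mallinfo().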
*/ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); -#endif /* NO_MALLINFO */ + #endif /* NO_MALLINFO */ /* independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); @@ -1113,6 +1126,7 @@ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); struct Node { int item; struct Node* next; }; struct Node* build_list() { + struct Node** pool; int n = read_number_of_nodes_needed(); if (n <= 0) return 0; @@ -1124,9 +1138,11 @@ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); pool[i]->next = pool[i+1]; free(pool); // Can now free the array (or not, if it is needed later) return first; + } + */ -DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); +DLMALLOC_EXPORT void **dlindependent_calloc(size_t, size_t, void **); /* independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); @@ -1165,6 +1181,7 @@ DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); struct Foot { ... } void send_message(char* msg) { + int msglen = strlen(msg); size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; void* chunks[3]; @@ -1174,6 +1191,7 @@ DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); char* body = (char*)(chunks[1]); struct Foot* foot = (struct Foot*)(chunks[2]); // ... + } In general though, independent_comalloc is worth using only for @@ -1184,7 +1202,7 @@ DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); since it cannot reuse existing noncontiguous small chunks that might be available for some of the elements. */ -DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**); +DLMALLOC_EXPORT void **dlindependent_comalloc(size_t, size_t *, void **); /* bulk_free(void* array[], size_t n_elements) @@ -1195,14 +1213,14 @@ DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**); is returned. For large arrays of pointers with poor locality, it may be worthwhile to sort this array before calling bulk_free. */ -DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements); +DLMALLOC_EXPORT size_t dlbulk_free(void **, size_t n_elements); /* pvalloc(size_t n); Equivalent to valloc(minimum-page-that-holds(n)), that is, round up n to nearest pagesize. */ -DLMALLOC_EXPORT void* dlpvalloc(size_t); +DLMALLOC_EXPORT void *dlpvalloc(size_t); /* malloc_trim(size_t pad); @@ -1225,7 +1243,7 @@ DLMALLOC_EXPORT void* dlpvalloc(size_t); Malloc_trim returns 1 if it actually released any memory, else 0. */ -DLMALLOC_EXPORT int dlmalloc_trim(size_t); +DLMALLOC_EXPORT int dlmalloc_trim(size_t); /* malloc_stats(); @@ -1246,7 +1264,7 @@ DLMALLOC_EXPORT int dlmalloc_trim(size_t); malloc_stats prints only the most commonly interesting statistics. More information can be obtained by calling mallinfo. */ -DLMALLOC_EXPORT void dlmalloc_stats(void); +DLMALLOC_EXPORT void dlmalloc_stats(void); /* malloc_usable_size(void* p); @@ -1262,9 +1280,9 @@ DLMALLOC_EXPORT void dlmalloc_stats(void); p = malloc(n); assert(malloc_usable_size(p) >= 256); */ -size_t dlmalloc_usable_size(void*); +size_t dlmalloc_usable_size(void *); -#endif /* ONLY_MSPACES */ +#endif /* ONLY_MSPACES */ #if MSPACES @@ -1272,7 +1290,7 @@ size_t dlmalloc_usable_size(void*); mspace is an opaque type representing an independent region of space that supports mspace_malloc, etc. 
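  For instance (editorial sketch, not part of the patch), a fixed arena can
  be carved out of caller-supplied storage and used as a private heap:

    static char arena[1 << 20];
    mspace ms = create_mspace_with_base(arena, sizeof(arena), 0);
    void*  p  = mspace_malloc(ms, 128);
    ...
    mspace_free(ms, p);
    destroy_mspace(ms);   /* frees extra segments, but not the arena itself */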
*/ -typedef void* mspace; +typedef void *mspace; /* create_mspace creates and returns a new independent space with the @@ -1304,7 +1322,8 @@ DLMALLOC_EXPORT size_t destroy_mspace(mspace msp); Destroying this space will deallocate all additionally allocated space (if possible) but not the initial base. */ -DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked); +DLMALLOC_EXPORT mspace create_mspace_with_base(void *base, size_t capacity, + int locked); /* mspace_track_large_chunks controls whether requests for large chunks @@ -1319,12 +1338,11 @@ DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int */ DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable); - /* mspace_malloc behaves as malloc, but operates within the given space. */ -DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes); +DLMALLOC_EXPORT void *mspace_malloc(mspace msp, size_t bytes); /* mspace_free behaves as free, but operates within @@ -1334,7 +1352,7 @@ DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes); free may be called instead of mspace_free because freed chunks from any space are handled by their originating spaces. */ -DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem); +DLMALLOC_EXPORT void mspace_free(mspace msp, void *mem); /* mspace_realloc behaves as realloc, but operates within @@ -1345,33 +1363,38 @@ DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem); realloced chunks from any space are handled by their originating spaces. */ -DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize); +DLMALLOC_EXPORT void *mspace_realloc(mspace msp, void *mem, size_t newsize); /* mspace_calloc behaves as calloc, but operates within the given space. */ -DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); +DLMALLOC_EXPORT void *mspace_calloc(mspace msp, size_t n_elements, + size_t elem_size); /* mspace_memalign behaves as memalign, but operates within the given space. */ -DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); +DLMALLOC_EXPORT void *mspace_memalign(mspace msp, size_t alignment, + size_t bytes); /* mspace_independent_calloc behaves as independent_calloc, but operates within the given space. */ -DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements, - size_t elem_size, void* chunks[]); +DLMALLOC_EXPORT void **mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, + void * chunks[]); /* mspace_independent_comalloc behaves as independent_comalloc, but operates within the given space. */ -DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements, - size_t sizes[], void* chunks[]); +DLMALLOC_EXPORT void **mspace_independent_comalloc(mspace msp, + size_t n_elements, + size_t sizes[], + void * chunks[]); /* mspace_footprint() returns the number of bytes obtained from the @@ -1385,19 +1408,18 @@ DLMALLOC_EXPORT size_t mspace_footprint(mspace msp); */ DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp); - -#if !NO_MALLINFO + #if !NO_MALLINFO /* mspace_mallinfo behaves as mallinfo, but reports properties of the given space. 
*/ DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp); -#endif /* NO_MALLINFO */ + #endif /* NO_MALLINFO */ /* malloc_usable_size(void* p) behaves the same as malloc_usable_size; */ -DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem); +DLMALLOC_EXPORT size_t mspace_usable_size(const void *mem); /* mspace_malloc_stats behaves as malloc_stats, but reports @@ -1416,11 +1438,13 @@ DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad); */ DLMALLOC_EXPORT int mspace_mallopt(int, int); -#endif /* MSPACES */ +#endif /* MSPACES */ #ifdef __cplusplus -} /* end of extern "C" */ -#endif /* __cplusplus */ + +} /* end of extern "C" */ + +#endif /* __cplusplus */ /* ======================================================================== @@ -1435,195 +1459,207 @@ DLMALLOC_EXPORT int mspace_mallopt(int, int); /*------------------------------ internal #includes ---------------------- */ #ifdef _MSC_VER -#pragma warning( disable : 4146 ) /* no "unsigned" warnings */ -#endif /* _MSC_VER */ + #pragma warning(disable : 4146) /* no "unsigned" warnings */ +#endif /* _MSC_VER */ #if !NO_MALLOC_STATS -#include /* for printing in malloc_stats */ -#endif /* NO_MALLOC_STATS */ + #include /* for printing in malloc_stats */ +#endif /* NO_MALLOC_STATS */ #ifndef LACKS_ERRNO_H -#include /* for MALLOC_FAILURE_ACTION */ -#endif /* LACKS_ERRNO_H */ + #include /* for MALLOC_FAILURE_ACTION */ +#endif /* LACKS_ERRNO_H */ #ifdef DEBUG -#if ABORT_ON_ASSERT_FAILURE -#undef assert -#define assert(x) if(!(x)) ABORT -#else /* ABORT_ON_ASSERT_FAILURE */ -#include -#endif /* ABORT_ON_ASSERT_FAILURE */ -#else /* DEBUG */ -#ifndef assert -#define assert(x) -#endif -#define DEBUG 0 -#endif /* DEBUG */ + #if ABORT_ON_ASSERT_FAILURE + #undef assert + #define assert(x) \ + if (!(x)) ABORT + #else /* ABORT_ON_ASSERT_FAILURE */ + #include + #endif /* ABORT_ON_ASSERT_FAILURE */ +#else /* DEBUG */ + #ifndef assert + #define assert(x) + #endif + #define DEBUG 0 +#endif /* DEBUG */ #if !defined(WIN32) && !defined(LACKS_TIME_H) -#include /* for magic initialization */ -#endif /* WIN32 */ + #include /* for magic initialization */ +#endif /* WIN32 */ #ifndef LACKS_STDLIB_H -#include /* for abort() */ -#endif /* LACKS_STDLIB_H */ + #include /* for abort() */ +#endif /* LACKS_STDLIB_H */ #ifndef LACKS_STRING_H -#include /* for memset etc */ -#endif /* LACKS_STRING_H */ + #include /* for memset etc */ +#endif /* LACKS_STRING_H */ #if USE_BUILTIN_FFS -#ifndef LACKS_STRINGS_H -#include /* for ffs */ -#endif /* LACKS_STRINGS_H */ -#endif /* USE_BUILTIN_FFS */ + #ifndef LACKS_STRINGS_H + #include /* for ffs */ + #endif /* LACKS_STRINGS_H */ +#endif /* USE_BUILTIN_FFS */ #if HAVE_MMAP -#ifndef LACKS_SYS_MMAN_H -/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */ -#if (defined(linux) && !defined(__USE_GNU)) -#define __USE_GNU 1 -#include /* for mmap */ -#undef __USE_GNU -#else -#include /* for mmap */ -#endif /* linux */ -#endif /* LACKS_SYS_MMAN_H */ -#ifndef LACKS_FCNTL_H -#include -#endif /* LACKS_FCNTL_H */ -#endif /* HAVE_MMAP */ + #ifndef LACKS_SYS_MMAN_H + /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */ + #if (defined(linux) && !defined(__USE_GNU)) + #define __USE_GNU 1 + #include /* for mmap */ + #undef __USE_GNU + #else + #include /* for mmap */ + #endif /* linux */ + #endif /* LACKS_SYS_MMAN_H */ + #ifndef LACKS_FCNTL_H + #include + #endif /* LACKS_FCNTL_H */ +#endif /* HAVE_MMAP */ #ifndef LACKS_UNISTD_H -#include /* for sbrk, sysconf */ -#else /* LACKS_UNISTD_H */ -#if 
!defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) -extern void* sbrk(ptrdiff_t); -#endif /* FreeBSD etc */ -#endif /* LACKS_UNISTD_H */ + #include /* for sbrk, sysconf */ +#else /* LACKS_UNISTD_H */ + #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) +extern void *sbrk(ptrdiff_t); + #endif /* FreeBSD etc */ +#endif /* LACKS_UNISTD_H */ /* Declarations for locking */ #if USE_LOCKS -#ifndef WIN32 -#if defined (__SVR4) && defined (__sun) /* solaris */ -#include -#elif !defined(LACKS_SCHED_H) -#include -#endif /* solaris or LACKS_SCHED_H */ -#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS -#include -#endif /* USE_RECURSIVE_LOCKS ... */ -#elif defined(_MSC_VER) -#ifndef _M_AMD64 -/* These are already defined on AMD64 builds */ -#ifdef __cplusplus + #ifndef WIN32 + #if defined(__SVR4) && defined(__sun) /* solaris */ + #include + #elif !defined(LACKS_SCHED_H) + #include + #endif /* solaris or LACKS_SCHED_H */ + #if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || \ + !USE_SPIN_LOCKS + #include + #endif /* USE_RECURSIVE_LOCKS ... */ + #elif defined(_MSC_VER) + #ifndef _M_AMD64 + /* These are already defined on AMD64 builds */ + #ifdef __cplusplus extern "C" { -#endif /* __cplusplus */ -LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp); + + #endif /* __cplusplus */ +LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, + LONG Comp); LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value); -#ifdef __cplusplus + #ifdef __cplusplus + } -#endif /* __cplusplus */ -#endif /* _M_AMD64 */ -#pragma intrinsic (_InterlockedCompareExchange) -#pragma intrinsic (_InterlockedExchange) -#define interlockedcompareexchange _InterlockedCompareExchange -#define interlockedexchange _InterlockedExchange -#elif defined(WIN32) && defined(__GNUC__) -#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b) -#define interlockedexchange __sync_lock_test_and_set -#endif /* Win32 */ -#else /* USE_LOCKS */ -#endif /* USE_LOCKS */ + + #endif /* __cplusplus */ + #endif /* _M_AMD64 */ + #pragma intrinsic(_InterlockedCompareExchange) + #pragma intrinsic(_InterlockedExchange) + #define interlockedcompareexchange _InterlockedCompareExchange + #define interlockedexchange _InterlockedExchange + #elif defined(WIN32) && defined(__GNUC__) + #define interlockedcompareexchange(a, b, c) \ + __sync_val_compare_and_swap(a, c, b) + #define interlockedexchange __sync_lock_test_and_set + #endif /* Win32 */ +#else /* USE_LOCKS */ +#endif /* USE_LOCKS */ #ifndef LOCK_AT_FORK -#define LOCK_AT_FORK 0 + #define LOCK_AT_FORK 0 #endif /* Declarations for bit scanning on win32 */ -#if defined(_MSC_VER) && _MSC_VER>=1300 -#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ -#ifdef __cplusplus +#if defined(_MSC_VER) && _MSC_VER >= 1300 + #ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ + #ifdef __cplusplus extern "C" { -#endif /* __cplusplus */ + + #endif /* __cplusplus */ unsigned char _BitScanForward(unsigned long *index, unsigned long mask); unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); -#ifdef __cplusplus + #ifdef __cplusplus + } -#endif /* __cplusplus */ -#define BitScanForward _BitScanForward -#define BitScanReverse _BitScanReverse -#pragma intrinsic(_BitScanForward) -#pragma intrinsic(_BitScanReverse) -#endif /* BitScanForward */ -#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ + #endif /* __cplusplus */ + + 
#define BitScanForward _BitScanForward + #define BitScanReverse _BitScanReverse + #pragma intrinsic(_BitScanForward) + #pragma intrinsic(_BitScanReverse) + #endif /* BitScanForward */ +#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ #ifndef WIN32 -#ifndef malloc_getpagesize -# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ -# ifndef _SC_PAGE_SIZE -# define _SC_PAGE_SIZE _SC_PAGESIZE -# endif -# endif -# ifdef _SC_PAGE_SIZE -# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) -# else -# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) - extern size_t getpagesize(); -# define malloc_getpagesize getpagesize() -# else -# ifdef WIN32 /* use supplied emulation of getpagesize */ -# define malloc_getpagesize getpagesize() -# else -# ifndef LACKS_SYS_PARAM_H -# include -# endif -# ifdef EXEC_PAGESIZE -# define malloc_getpagesize EXEC_PAGESIZE -# else -# ifdef NBPG -# ifndef CLSIZE -# define malloc_getpagesize NBPG -# else -# define malloc_getpagesize (NBPG * CLSIZE) -# endif -# else -# ifdef NBPC -# define malloc_getpagesize NBPC -# else -# ifdef PAGESIZE -# define malloc_getpagesize PAGESIZE -# else /* just guess */ -# define malloc_getpagesize ((size_t)4096U) -# endif -# endif -# endif -# endif -# endif -# endif -# endif -#endif + #ifndef malloc_getpagesize + #ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ + #ifndef _SC_PAGE_SIZE + #define _SC_PAGE_SIZE _SC_PAGESIZE + #endif + #endif + #ifdef _SC_PAGE_SIZE + #define malloc_getpagesize sysconf(_SC_PAGE_SIZE) + #else + #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) +extern size_t getpagesize(); + #define malloc_getpagesize getpagesize() + #else + #ifdef WIN32 /* use supplied emulation of getpagesize */ + #define malloc_getpagesize getpagesize() + #else + #ifndef LACKS_SYS_PARAM_H + #include + #endif + #ifdef EXEC_PAGESIZE + #define malloc_getpagesize EXEC_PAGESIZE + #else + #ifdef NBPG + #ifndef CLSIZE + #define malloc_getpagesize NBPG + #else + #define malloc_getpagesize (NBPG * CLSIZE) + #endif + #else + #ifdef NBPC + #define malloc_getpagesize NBPC + #else + #ifdef PAGESIZE + #define malloc_getpagesize PAGESIZE + #else /* just guess */ + #define malloc_getpagesize ((size_t)4096U) + #endif + #endif + #endif + #endif + #endif + #endif + #endif + #endif #endif /* ------------------- size_t and alignment properties -------------------- */ /* The byte and bit size of a size_t */ -#define SIZE_T_SIZE (sizeof(size_t)) -#define SIZE_T_BITSIZE (sizeof(size_t) << 3) +#define SIZE_T_SIZE (sizeof(size_t)) +#define SIZE_T_BITSIZE (sizeof(size_t) << 3) /* Some constants coerced to size_t */ /* Annoying but necessary to avoid errors on some platforms */ -#define SIZE_T_ZERO ((size_t)0) -#define SIZE_T_ONE ((size_t)1) -#define SIZE_T_TWO ((size_t)2) -#define SIZE_T_FOUR ((size_t)4) -#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) -#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) -#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) -#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) +#define SIZE_T_ZERO ((size_t)0) +#define SIZE_T_ONE ((size_t)1) +#define SIZE_T_TWO ((size_t)2) +#define SIZE_T_FOUR ((size_t)4) +#define TWO_SIZE_T_SIZES (SIZE_T_SIZE << 1) +#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE << 2) +#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES + TWO_SIZE_T_SIZES) +#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) /* The bit mask value corresponding to MALLOC_ALIGNMENT */ -#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) +#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) /* True if address a has acceptable 
alignment */ -#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) +#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) /* the number of bytes to offset an address to align it */ -#define align_offset(A)\ - ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ - ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) +#define align_offset(A) \ + ((((size_t)(A)&CHUNK_ALIGN_MASK) == 0) \ + ? 0 \ + : ((MALLOC_ALIGNMENT - ((size_t)(A)&CHUNK_ALIGN_MASK)) & \ + CHUNK_ALIGN_MASK)) /* -------------------------- MMAP preliminaries ------------------------- */ @@ -1633,193 +1669,202 @@ unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); using so many "#if"s. */ - /* MORECORE and MMAP must return MFAIL on failure */ -#define MFAIL ((void*)(MAX_SIZE_T)) -#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ +#define MFAIL ((void *)(MAX_SIZE_T)) +#define CMFAIL ((char *)(MFAIL)) /* defined for convenience */ #if HAVE_MMAP -#ifndef WIN32 -#define MMAP_PROT (PROT_READ|PROT_WRITE) -#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) -#define MAP_ANONYMOUS MAP_ANON -#endif /* MAP_ANON */ -#ifdef MAP_ANONYMOUS + #ifndef WIN32 + #define MMAP_PROT (PROT_READ | PROT_WRITE) + #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) + #define MAP_ANONYMOUS MAP_ANON + #endif /* MAP_ANON */ + #ifdef MAP_ANONYMOUS + + #define MMAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS) -#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) +static FORCEINLINE void *unixmmap(size_t size) { -static FORCEINLINE void* unixmmap(size_t size) { - void* result; + void *result; result = mmap(0, size, MMAP_PROT, MMAP_FLAGS, -1, 0); - if (result == MFAIL) - return MFAIL; + if (result == MFAIL) return MFAIL; return result; + } -static FORCEINLINE int unixmunmap(void* ptr, size_t size) { +static FORCEINLINE int unixmunmap(void *ptr, size_t size) { + int result; result = munmap(ptr, size); - if (result != 0) - return result; + if (result != 0) return result; return result; + } -#define MMAP_DEFAULT(s) unixmmap(s) -#define MUNMAP_DEFAULT(a, s) unixmunmap((a), (s)) + #define MMAP_DEFAULT(s) unixmmap(s) + #define MUNMAP_DEFAULT(a, s) unixmunmap((a), (s)) -#else /* MAP_ANONYMOUS */ -/* - Nearly all versions of mmap support MAP_ANONYMOUS, so the following - is unlikely to be needed, but is supplied just in case. -*/ -#define MMAP_FLAGS (MAP_PRIVATE) -static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ -#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \ - (dev_zero_fd = open("/dev/zero", O_RDWR), \ - mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ - mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) -#define MUNMAP_DEFAULT(a, s) munmap((a), (s)) -#endif /* MAP_ANONYMOUS */ + #else /* MAP_ANONYMOUS */ + /* + Nearly all versions of mmap support MAP_ANONYMOUS, so the following + is unlikely to be needed, but is supplied just in case. + */ + #define MMAP_FLAGS (MAP_PRIVATE) +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ + #define MMAP_DEFAULT(s) \ + ((dev_zero_fd < 0) \ + ? 
(dev_zero_fd = open("/dev/zero", O_RDWR), \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) \ + : mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) + #define MUNMAP_DEFAULT(a, s) munmap((a), (s)) + #endif /* MAP_ANONYMOUS */ -#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) + #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) -#else /* WIN32 */ + #else /* WIN32 */ /* Win32 MMAP via VirtualAlloc */ -static FORCEINLINE void* win32mmap(size_t size) { - void* ptr; +static FORCEINLINE void *win32mmap(size_t size) { + + void *ptr; - ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); - if (ptr == 0) - return MFAIL; + ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); + if (ptr == 0) return MFAIL; return ptr; + } /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ -static FORCEINLINE void* win32direct_mmap(size_t size) { - void* ptr; +static FORCEINLINE void *win32direct_mmap(size_t size) { + + void *ptr; - ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, - PAGE_READWRITE); - if (ptr == 0) - return MFAIL; + ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, + PAGE_READWRITE); + if (ptr == 0) return MFAIL; return ptr; + } /* This function supports releasing coalesed segments */ -static FORCEINLINE int win32munmap(void* ptr, size_t size) { +static FORCEINLINE int win32munmap(void *ptr, size_t size) { + MEMORY_BASIC_INFORMATION minfo; - char* cptr = (char*)ptr; + char *cptr = (char *)ptr; while (size) { - if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) - return -1; + + if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) return -1; if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || minfo.State != MEM_COMMIT || minfo.RegionSize > size) return -1; - if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) - return -1; + if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) return -1; cptr += minfo.RegionSize; size -= minfo.RegionSize; + } return 0; + } -#define MMAP_DEFAULT(s) win32mmap(s) -#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) -#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) -#endif /* WIN32 */ -#endif /* HAVE_MMAP */ + #define MMAP_DEFAULT(s) win32mmap(s) + #define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) + #define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) + #endif /* WIN32 */ +#endif /* HAVE_MMAP */ #if HAVE_MREMAP -#ifndef WIN32 + #ifndef WIN32 -static FORCEINLINE void* dlmremap(void* old_address, size_t old_size, size_t new_size, int flags) { - void* result; +static FORCEINLINE void *dlmremap(void *old_address, size_t old_size, + size_t new_size, int flags) { + + void *result; result = mremap(old_address, old_size, new_size, flags); - if (result == MFAIL) - return MFAIL; + if (result == MFAIL) return MFAIL; return result; + } -#define MREMAP_DEFAULT(addr, osz, nsz, mv) dlmremap((addr), (osz), (nsz), (mv)) -#endif /* WIN32 */ -#endif /* HAVE_MREMAP */ + #define MREMAP_DEFAULT(addr, osz, nsz, mv) \ + dlmremap((addr), (osz), (nsz), (mv)) + #endif /* WIN32 */ +#endif /* HAVE_MREMAP */ /** * Define CALL_MORECORE */ #if HAVE_MORECORE - #ifdef MORECORE - #define CALL_MORECORE(S) MORECORE(S) - #else /* MORECORE */ - #define CALL_MORECORE(S) MORECORE_DEFAULT(S) - #endif /* MORECORE */ -#else /* HAVE_MORECORE */ - #define CALL_MORECORE(S) MFAIL -#endif /* HAVE_MORECORE */ + #ifdef MORECORE + #define CALL_MORECORE(S) MORECORE(S) + #else /* MORECORE */ + #define CALL_MORECORE(S) MORECORE_DEFAULT(S) + #endif /* MORECORE */ +#else /* HAVE_MORECORE */ + #define CALL_MORECORE(S) MFAIL +#endif /* HAVE_MORECORE */ 
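For reference, the Unix MMAP_DEFAULT/MUNMAP_DEFAULT path above (and the MFAIL fallback when no MORECORE is available) boils down to requesting a private anonymous mapping from the kernel and handing whole segments back with munmap. The short standalone sketch below illustrates that pattern; it is not part of the patch, it assumes a POSIX system that provides MAP_ANONYMOUS, and demo_mmap/demo_munmap are illustrative names that merely mirror the macros in the diff.

/* Minimal sketch of the anonymous-mmap segment path (assumption: POSIX
   with MAP_ANONYMOUS; illustrative only, not code from dlmalloc.c). */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define DEMO_MFAIL ((void *)(~(size_t)0))   /* same failure sentinel style as MFAIL */

static void *demo_mmap(size_t size) {
  /* private, anonymous, read-write mapping -- mirrors MMAP_PROT/MMAP_FLAGS */
  void *p = mmap(0, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return (p == MAP_FAILED) ? DEMO_MFAIL : p;  /* MAP_FAILED is (void *)-1 */
}

static int demo_munmap(void *ptr, size_t size) {
  return munmap(ptr, size);                   /* 0 on success, -1 on failure */
}

int main(void) {
  size_t len = 1 << 20;                       /* one 1 MiB segment */
  void  *seg = demo_mmap(len);
  if (seg == DEMO_MFAIL) return 1;
  memset(seg, 0xAA, len);                     /* segment is immediately usable */
  printf("mapped %zu bytes at %p\n", len, seg);
  return demo_munmap(seg, len) ? 1 : 0;
}

A segment obtained this way is what the CALL_MMAP/CALL_MUNMAP macros defined next hand to the rest of the allocator; on Windows the same role is played by the VirtualAlloc/VirtualFree wrappers shown above.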
/** * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP */ #if HAVE_MMAP - #define USE_MMAP_BIT (SIZE_T_ONE) - - #ifdef MMAP - #define CALL_MMAP(s) MMAP(s) - #else /* MMAP */ - #define CALL_MMAP(s) MMAP_DEFAULT(s) - #endif /* MMAP */ - #ifdef MUNMAP - #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) - #else /* MUNMAP */ - #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) - #endif /* MUNMAP */ - #ifdef DIRECT_MMAP - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) - #else /* DIRECT_MMAP */ - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) - #endif /* DIRECT_MMAP */ -#else /* HAVE_MMAP */ - #define USE_MMAP_BIT (SIZE_T_ZERO) - - #define MMAP(s) MFAIL - #define MUNMAP(a, s) (-1) - #define DIRECT_MMAP(s) MFAIL - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) - #define CALL_MMAP(s) MMAP(s) - #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) -#endif /* HAVE_MMAP */ + #define USE_MMAP_BIT (SIZE_T_ONE) + + #ifdef MMAP + #define CALL_MMAP(s) MMAP(s) + #else /* MMAP */ + #define CALL_MMAP(s) MMAP_DEFAULT(s) + #endif /* MMAP */ + #ifdef MUNMAP + #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) + #else /* MUNMAP */ + #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) + #endif /* MUNMAP */ + #ifdef DIRECT_MMAP + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) + #else /* DIRECT_MMAP */ + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) + #endif /* DIRECT_MMAP */ +#else /* HAVE_MMAP */ + #define USE_MMAP_BIT (SIZE_T_ZERO) + + #define MMAP(s) MFAIL + #define MUNMAP(a, s) (-1) + #define DIRECT_MMAP(s) MFAIL + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) + #define CALL_MMAP(s) MMAP(s) + #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) +#endif /* HAVE_MMAP */ /** * Define CALL_MREMAP */ #if HAVE_MMAP && HAVE_MREMAP - #ifdef MREMAP - #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) - #else /* MREMAP */ - #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) - #endif /* MREMAP */ -#else /* HAVE_MMAP && HAVE_MREMAP */ - #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL -#endif /* HAVE_MMAP && HAVE_MREMAP */ + #ifdef MREMAP + #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) + #else /* MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) \ + MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) + #endif /* MREMAP */ +#else /* HAVE_MMAP && HAVE_MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL +#endif /* HAVE_MMAP && HAVE_MREMAP */ /* mstate bit set if continguous morecore disabled or failed */ #define USE_NONCONTIGUOUS_BIT (4U) /* segment bit set in create_mspace_with_base */ -#define EXTERN_BIT (8U) - +#define EXTERN_BIT (8U) /* --------------------------- Lock preliminaries ------------------------ */ @@ -1852,247 +1897,284 @@ static FORCEINLINE void* dlmremap(void* old_address, size_t old_size, size_t new */ #if !USE_LOCKS -#define USE_LOCK_BIT (0U) -#define INITIAL_LOCK(l) (0) -#define DESTROY_LOCK(l) (0) -#define ACQUIRE_MALLOC_GLOBAL_LOCK() -#define RELEASE_MALLOC_GLOBAL_LOCK() + #define USE_LOCK_BIT (0U) + #define INITIAL_LOCK(l) (0) + #define DESTROY_LOCK(l) (0) + #define ACQUIRE_MALLOC_GLOBAL_LOCK() + #define RELEASE_MALLOC_GLOBAL_LOCK() #else -#if USE_LOCKS > 1 -/* ----------------------- User-defined locks ------------------------ */ -/* Define your own lock implementation here */ -/* #define INITIAL_LOCK(lk) ... */ -/* #define DESTROY_LOCK(lk) ... */ -/* #define ACQUIRE_LOCK(lk) ... */ -/* #define RELEASE_LOCK(lk) ... */ -/* #define TRY_LOCK(lk) ... */ -/* static MLOCK_T malloc_global_mutex = ... 
*/ - -#elif USE_SPIN_LOCKS - -/* First, define CAS_LOCK and CLEAR_LOCK on ints */ -/* Note CAS_LOCK defined to return 0 on success */ - -#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) -#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) -#define CLEAR_LOCK(sl) __sync_lock_release(sl) - -#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) + #if USE_LOCKS > 1 + /* ----------------------- User-defined locks ------------------------ */ + /* Define your own lock implementation here */ + /* #define INITIAL_LOCK(lk) ... */ + /* #define DESTROY_LOCK(lk) ... */ + /* #define ACQUIRE_LOCK(lk) ... */ + /* #define RELEASE_LOCK(lk) ... */ + /* #define TRY_LOCK(lk) ... */ + /* static MLOCK_T malloc_global_mutex = ... */ + + #elif USE_SPIN_LOCKS + + /* First, define CAS_LOCK and CLEAR_LOCK on ints */ + /* Note CAS_LOCK defined to return 0 on success */ + + #if defined(__GNUC__) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) + #define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) + #define CLEAR_LOCK(sl) __sync_lock_release(sl) + + #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) /* Custom spin locks for older gcc on x86 */ static FORCEINLINE int x86_cas_lock(int *sl) { + int ret; int val = 1; int cmp = 0; - __asm__ __volatile__ ("lock; cmpxchgl %1, %2" - : "=a" (ret) - : "r" (val), "m" (*(sl)), "0"(cmp) - : "memory", "cc"); + __asm__ __volatile__("lock; cmpxchgl %1, %2" + : "=a"(ret) + : "r"(val), "m"(*(sl)), "0"(cmp) + : "memory", "cc"); return ret; + } -static FORCEINLINE void x86_clear_lock(int* sl) { +static FORCEINLINE void x86_clear_lock(int *sl) { + assert(*sl != 0); int prev = 0; int ret; - __asm__ __volatile__ ("lock; xchgl %0, %1" - : "=r" (ret) - : "m" (*(sl)), "0"(prev) - : "memory"); -} - -#define CAS_LOCK(sl) x86_cas_lock(sl) -#define CLEAR_LOCK(sl) x86_clear_lock(sl) - -#else /* Win32 MSC */ -#define CAS_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)1) -#define CLEAR_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)0) - -#endif /* ... gcc spins locks ... */ + __asm__ __volatile__("lock; xchgl %0, %1" + : "=r"(ret) + : "m"(*(sl)), "0"(prev) + : "memory"); -/* How to yield for a spin lock */ -#define SPINS_PER_YIELD 63 -#if defined(_MSC_VER) -#define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ -#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) -#elif defined (__SVR4) && defined (__sun) /* solaris */ -#define SPIN_LOCK_YIELD thr_yield(); -#elif !defined(LACKS_SCHED_H) -#define SPIN_LOCK_YIELD sched_yield(); -#else -#define SPIN_LOCK_YIELD -#endif /* ... yield ... */ +} -#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 + #define CAS_LOCK(sl) x86_cas_lock(sl) + #define CLEAR_LOCK(sl) x86_clear_lock(sl) + + #else /* Win32 MSC */ + #define CAS_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)1) + #define CLEAR_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)0) + + #endif /* ... gcc spins locks ... */ + + /* How to yield for a spin lock */ + #define SPINS_PER_YIELD 63 + #if defined(_MSC_VER) + #define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ + #define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) + #elif defined(__SVR4) && defined(__sun) /* solaris */ + #define SPIN_LOCK_YIELD thr_yield(); + #elif !defined(LACKS_SCHED_H) + #define SPIN_LOCK_YIELD sched_yield(); + #else + #define SPIN_LOCK_YIELD + #endif /* ... yield ... 
*/ + + #if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 /* Plain spin locks use single word (embedded in malloc_states) */ static int spin_acquire_lock(int *sl) { + int spins = 0; while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) { - if ((++spins & SPINS_PER_YIELD) == 0) { - SPIN_LOCK_YIELD; - } + + if ((++spins & SPINS_PER_YIELD) == 0) { SPIN_LOCK_YIELD; } + } + return 0; + } -#define MLOCK_T int -#define TRY_LOCK(sl) !CAS_LOCK(sl) -#define RELEASE_LOCK(sl) CLEAR_LOCK(sl) -#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0) -#define INITIAL_LOCK(sl) (*sl = 0) -#define DESTROY_LOCK(sl) (0) + #define MLOCK_T int + #define TRY_LOCK(sl) !CAS_LOCK(sl) + #define RELEASE_LOCK(sl) CLEAR_LOCK(sl) + #define ACQUIRE_LOCK(sl) (CAS_LOCK(sl) ? spin_acquire_lock(sl) : 0) + #define INITIAL_LOCK(sl) (*sl = 0) + #define DESTROY_LOCK(sl) (0) static MLOCK_T malloc_global_mutex = 0; -#else /* USE_RECURSIVE_LOCKS */ -/* types for lock owners */ -#ifdef WIN32 -#define THREAD_ID_T DWORD -#define CURRENT_THREAD GetCurrentThreadId() -#define EQ_OWNER(X,Y) ((X) == (Y)) -#else -/* - Note: the following assume that pthread_t is a type that can be - initialized to (casted) zero. If this is not the case, you will need to - somehow redefine these or not use spin locks. -*/ -#define THREAD_ID_T pthread_t -#define CURRENT_THREAD pthread_self() -#define EQ_OWNER(X,Y) pthread_equal(X, Y) -#endif + #else /* USE_RECURSIVE_LOCKS */ + /* types for lock owners */ + #ifdef WIN32 + #define THREAD_ID_T DWORD + #define CURRENT_THREAD GetCurrentThreadId() + #define EQ_OWNER(X, Y) ((X) == (Y)) + #else + /* + Note: the following assume that pthread_t is a type that can be + initialized to (casted) zero. If this is not the case, you will need + to somehow redefine these or not use spin locks. 
+ */ + #define THREAD_ID_T pthread_t + #define CURRENT_THREAD pthread_self() + #define EQ_OWNER(X, Y) pthread_equal(X, Y) + #endif struct malloc_recursive_lock { - int sl; + + int sl; unsigned int c; - THREAD_ID_T threadid; + THREAD_ID_T threadid; + }; -#define MLOCK_T struct malloc_recursive_lock -static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0}; + #define MLOCK_T struct malloc_recursive_lock +static MLOCK_T malloc_global_mutex = {0, 0, (THREAD_ID_T)0}; static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) { + assert(lk->sl != 0); - if (--lk->c == 0) { - CLEAR_LOCK(&lk->sl); - } + if (--lk->c == 0) { CLEAR_LOCK(&lk->sl); } + } static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) { + THREAD_ID_T mythreadid = CURRENT_THREAD; - int spins = 0; + int spins = 0; for (;;) { + if (*((volatile int *)(&lk->sl)) == 0) { + if (!CAS_LOCK(&lk->sl)) { + lk->threadid = mythreadid; lk->c = 1; return 0; + } - } - else if (EQ_OWNER(lk->threadid, mythreadid)) { + + } else if (EQ_OWNER(lk->threadid, mythreadid)) { + ++lk->c; return 0; + } - if ((++spins & SPINS_PER_YIELD) == 0) { - SPIN_LOCK_YIELD; - } + + if ((++spins & SPINS_PER_YIELD) == 0) { SPIN_LOCK_YIELD; } + } + } static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) { + THREAD_ID_T mythreadid = CURRENT_THREAD; if (*((volatile int *)(&lk->sl)) == 0) { + if (!CAS_LOCK(&lk->sl)) { + lk->threadid = mythreadid; lk->c = 1; return 1; + } - } - else if (EQ_OWNER(lk->threadid, mythreadid)) { + + } else if (EQ_OWNER(lk->threadid, mythreadid)) { + ++lk->c; return 1; + } + return 0; + } -#define RELEASE_LOCK(lk) recursive_release_lock(lk) -#define TRY_LOCK(lk) recursive_try_lock(lk) -#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) -#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) -#define DESTROY_LOCK(lk) (0) -#endif /* USE_RECURSIVE_LOCKS */ - -#elif defined(WIN32) /* Win32 critical sections */ -#define MLOCK_T CRITICAL_SECTION -#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) -#define RELEASE_LOCK(lk) LeaveCriticalSection(lk) -#define TRY_LOCK(lk) TryEnterCriticalSection(lk) -#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000)) -#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) -#define NEED_GLOBAL_LOCK_INIT - -static MLOCK_T malloc_global_mutex; + #define RELEASE_LOCK(lk) recursive_release_lock(lk) + #define TRY_LOCK(lk) recursive_try_lock(lk) + #define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) + #define INITIAL_LOCK(lk) \ + ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) + #define DESTROY_LOCK(lk) (0) + #endif /* USE_RECURSIVE_LOCKS */ + + #elif defined(WIN32) /* Win32 critical sections */ + #define MLOCK_T CRITICAL_SECTION + #define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) + #define RELEASE_LOCK(lk) LeaveCriticalSection(lk) + #define TRY_LOCK(lk) TryEnterCriticalSection(lk) + #define INITIAL_LOCK(lk) \ + (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000 | 4000)) + #define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) + #define NEED_GLOBAL_LOCK_INIT + +static MLOCK_T malloc_global_mutex; static volatile LONG malloc_global_mutex_status; /* Use spin loop to initialize global lock */ static void init_malloc_global_mutex() { + for (;;) { + long stat = malloc_global_mutex_status; - if (stat > 0) - return; + if (stat > 0) return; /* transition to < 0 while initializing, then to > 0) */ - if (stat == 0 && - interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) { + if (stat == 0 && 
interlockedcompareexchange(&malloc_global_mutex_status, + (LONG)-1, (LONG)0) == 0) { + InitializeCriticalSection(&malloc_global_mutex); interlockedexchange(&malloc_global_mutex_status, (LONG)1); return; + } + SleepEx(0, FALSE); + } + } -#else /* pthreads-based locks */ -#define MLOCK_T pthread_mutex_t -#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) -#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) -#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) -#define INITIAL_LOCK(lk) pthread_init_lock(lk) -#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) + #else /* pthreads-based locks */ + #define MLOCK_T pthread_mutex_t + #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) + #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) + #define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) + #define INITIAL_LOCK(lk) pthread_init_lock(lk) + #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) -#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) + #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && \ + defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) /* Cope with old-style linux recursive lock initialization by adding */ /* skipped internal declaration from pthread.h */ -extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr, - int __kind)); -#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP -#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y) -#endif /* USE_RECURSIVE_LOCKS ... */ +extern int pthread_mutexattr_setkind_np __P((pthread_mutexattr_t * __attr, + int __kind)); + #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP + #define pthread_mutexattr_settype(x, y) pthread_mutexattr_setkind_np(x, y) + #endif /* USE_RECURSIVE_LOCKS ... */ static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER; -static int pthread_init_lock (MLOCK_T *lk) { +static int pthread_init_lock(MLOCK_T *lk) { + pthread_mutexattr_t attr; if (pthread_mutexattr_init(&attr)) return 1; -#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 + #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1; -#endif + #endif if (pthread_mutex_init(lk, &attr)) return 1; if (pthread_mutexattr_destroy(&attr)) return 1; return 0; + } -#endif /* ... lock types ... */ + #endif /* ... lock types ... */ -/* Common code for all lock types */ -#define USE_LOCK_BIT (2U) + /* Common code for all lock types */ + #define USE_LOCK_BIT (2U) -#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK -#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); -#endif + #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK + #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); + #endif -#ifndef RELEASE_MALLOC_GLOBAL_LOCK -#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); -#endif + #ifndef RELEASE_MALLOC_GLOBAL_LOCK + #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); + #endif -#endif /* USE_LOCKS */ +#endif /* USE_LOCKS */ /* ----------------------- Chunk representations ------------------------ */ @@ -2232,56 +2314,56 @@ static int pthread_init_lock (MLOCK_T *lk) { */ struct malloc_chunk { - size_t prev_foot; /* Size of previous chunk (if free). */ - size_t head; /* Size and inuse bits. */ - struct malloc_chunk* fd; /* double links -- used only if free. */ - struct malloc_chunk* bk; + + size_t prev_foot; /* Size of previous chunk (if free). */ + size_t head; /* Size and inuse bits. 
*/ + struct malloc_chunk *fd; /* double links -- used only if free. */ + struct malloc_chunk *bk; + }; typedef struct malloc_chunk mchunk; -typedef struct malloc_chunk* mchunkptr; -typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ -typedef unsigned int bindex_t; /* Described below */ -typedef unsigned int binmap_t; /* Described below */ -typedef unsigned int flag_t; /* The type of various bit flag sets */ +typedef struct malloc_chunk *mchunkptr; +typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */ +typedef unsigned int bindex_t; /* Described below */ +typedef unsigned int binmap_t; /* Described below */ +typedef unsigned int flag_t; /* The type of various bit flag sets */ /* ------------------- Chunks sizes and alignments ----------------------- */ -#define MCHUNK_SIZE (sizeof(mchunk)) +#define MCHUNK_SIZE (sizeof(mchunk)) #if FOOTERS -#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) -#else /* FOOTERS */ -#define CHUNK_OVERHEAD (SIZE_T_SIZE) -#endif /* FOOTERS */ + #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +#else /* FOOTERS */ + #define CHUNK_OVERHEAD (SIZE_T_SIZE) +#endif /* FOOTERS */ /* MMapped chunks need a second word of overhead ... */ #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) /* ... and additional padding for fake next-chunk at foot */ -#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) +#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) /* The smallest size we can malloc is an aligned minimal chunk */ -#define MIN_CHUNK_SIZE\ - ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) +#define MIN_CHUNK_SIZE ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) /* conversion from malloc headers to user pointers, and back */ -#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) -#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) +#define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES)) +#define mem2chunk(mem) ((mchunkptr)((char *)(mem)-TWO_SIZE_T_SIZES)) /* chunk associated with aligned address A */ -#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) +#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) /* Bounds on request (not chunk) sizes. */ -#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) -#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) +#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) +#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) /* pad request bytes into a usable size */ #define pad_request(req) \ - (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) /* pad request, checking for minimum (but not maximum) */ #define request2size(req) \ - (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) - + (((req) < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(req)) /* ------------------ Operations on head and foot fields ----------------- */ @@ -2293,61 +2375,60 @@ typedef unsigned int flag_t; /* The type of various bit flag sets */ FLAG4_BIT is not used by this malloc, but might be useful in extensions. 
*/ -#define PINUSE_BIT (SIZE_T_ONE) -#define CINUSE_BIT (SIZE_T_TWO) -#define FLAG4_BIT (SIZE_T_FOUR) -#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) -#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT) +#define PINUSE_BIT (SIZE_T_ONE) +#define CINUSE_BIT (SIZE_T_TWO) +#define FLAG4_BIT (SIZE_T_FOUR) +#define INUSE_BITS (PINUSE_BIT | CINUSE_BIT) +#define FLAG_BITS (PINUSE_BIT | CINUSE_BIT | FLAG4_BIT) /* Head value for fenceposts */ -#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) +#define FENCEPOST_HEAD (INUSE_BITS | SIZE_T_SIZE) /* extraction of fields from head words */ -#define cinuse(p) ((p)->head & CINUSE_BIT) -#define pinuse(p) ((p)->head & PINUSE_BIT) -#define flag4inuse(p) ((p)->head & FLAG4_BIT) -#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) -#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) +#define cinuse(p) ((p)->head & CINUSE_BIT) +#define pinuse(p) ((p)->head & PINUSE_BIT) +#define flag4inuse(p) ((p)->head & FLAG4_BIT) +#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) +#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) -#define chunksize(p) ((p)->head & ~(FLAG_BITS)) +#define chunksize(p) ((p)->head & ~(FLAG_BITS)) -#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) -#define set_flag4(p) ((p)->head |= FLAG4_BIT) -#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) +#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) +#define set_flag4(p) ((p)->head |= FLAG4_BIT) +#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) /* Treat space at ptr +/- offset as a chunk */ -#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) -#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) +#define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s))) +#define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s))) /* Ptr to next or previous physical malloc_chunk. */ -#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS))) -#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) +#define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~FLAG_BITS))) +#define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot))) /* extract next chunk's pinuse bit */ -#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) +#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) /* Get/set size at footer */ -#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) -#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) +#define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot) +#define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s)) /* Set size, pinuse bit, and foot */ -#define set_size_and_pinuse_of_free_chunk(p, s)\ - ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) +#define set_size_and_pinuse_of_free_chunk(p, s) \ + ((p)->head = (s | PINUSE_BIT), set_foot(p, s)) /* Set size, pinuse bit, foot, and clear next pinuse */ -#define set_free_with_pinuse(p, s, n)\ +#define set_free_with_pinuse(p, s, n) \ (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) /* Get the internal overhead associated with chunk p */ -#define overhead_for(p)\ - (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) +#define overhead_for(p) (is_mmapped(p) ? 
MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) /* Return true if malloced space is not necessarily cleared */ #if MMAP_CLEARS -#define calloc_must_clear(p) (!is_mmapped(p)) -#else /* MMAP_CLEARS */ -#define calloc_must_clear(p) (1) -#endif /* MMAP_CLEARS */ + #define calloc_must_clear(p) (!is_mmapped(p)) +#else /* MMAP_CLEARS */ + #define calloc_must_clear(p) (1) +#endif /* MMAP_CLEARS */ /* ---------------------- Overlaid data structures ----------------------- */ @@ -2441,23 +2522,25 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ size_t prev_foot; size_t head; - struct malloc_tree_chunk* fd; - struct malloc_tree_chunk* bk; + struct malloc_tree_chunk *fd; + struct malloc_tree_chunk *bk; - struct malloc_tree_chunk* child[2]; - struct malloc_tree_chunk* parent; + struct malloc_tree_chunk *child[2]; + struct malloc_tree_chunk *parent; bindex_t index; + }; typedef struct malloc_tree_chunk tchunk; -typedef struct malloc_tree_chunk* tchunkptr; -typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ +typedef struct malloc_tree_chunk *tchunkptr; +typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */ /* A little helper macro for trees */ -#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) +#define leftmost_child(t) ((t)->child[0] != 0 ? (t)->child[0] : (t)->child[1]) /* ----------------------------- Segments -------------------------------- */ @@ -2517,17 +2600,19 @@ typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ */ struct malloc_segment { - char* base; /* base address */ - size_t size; /* allocated size */ - struct malloc_segment* next; /* ptr to next segment */ - flag_t sflags; /* mmap and extern flag */ + + char * base; /* base address */ + size_t size; /* allocated size */ + struct malloc_segment *next; /* ptr to next segment */ + flag_t sflags; /* mmap and extern flag */ + }; -#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) -#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) +#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) +#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) typedef struct malloc_segment msegment; -typedef struct malloc_segment* msegmentptr; +typedef struct malloc_segment *msegmentptr; /* ---------------------------- malloc_state ----------------------------- */ @@ -2617,41 +2702,43 @@ typedef struct malloc_segment* msegmentptr; */ /* Bin types, widths and sizes */ -#define NSMALLBINS (32U) -#define NTREEBINS (32U) -#define SMALLBIN_SHIFT (3U) -#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) -#define TREEBIN_SHIFT (8U) -#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) -#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) +#define NSMALLBINS (32U) +#define NTREEBINS (32U) +#define SMALLBIN_SHIFT (3U) +#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) +#define TREEBIN_SHIFT (8U) +#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) +#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) struct malloc_state { - binmap_t smallmap; - binmap_t treemap; - size_t dvsize; - size_t topsize; - char* least_addr; - mchunkptr dv; - mchunkptr top; - size_t trim_check; - size_t release_checks; - size_t magic; - mchunkptr smallbins[(NSMALLBINS+1)*2]; - tbinptr treebins[NTREEBINS]; - size_t footprint; - size_t max_footprint; - size_t footprint_limit; /* zero 
means no limit */ - flag_t mflags; + + binmap_t smallmap; + binmap_t treemap; + size_t dvsize; + size_t topsize; + char * least_addr; + mchunkptr dv; + mchunkptr top; + size_t trim_check; + size_t release_checks; + size_t magic; + mchunkptr smallbins[(NSMALLBINS + 1) * 2]; + tbinptr treebins[NTREEBINS]; + size_t footprint; + size_t max_footprint; + size_t footprint_limit; /* zero means no limit */ + flag_t mflags; #if USE_LOCKS - MLOCK_T mutex; /* locate lock among fields that rarely change */ -#endif /* USE_LOCKS */ - msegment seg; - void* extp; /* Unused but available for extensions */ - size_t exts; + MLOCK_T mutex; /* locate lock among fields that rarely change */ +#endif /* USE_LOCKS */ + msegment seg; + void * extp; /* Unused but available for extensions */ + size_t exts; + }; -typedef struct malloc_state* mstate; +typedef struct malloc_state *mstate; /* ------------- Global malloc_state and malloc_params ------------------- */ @@ -2663,12 +2750,14 @@ typedef struct malloc_state* mstate; */ struct malloc_params { + size_t magic; size_t page_size; size_t granularity; size_t mmap_threshold; size_t trim_threshold; flag_t default_mflags; + }; static struct malloc_params mparams; @@ -2680,106 +2769,108 @@ static struct malloc_params mparams; /* The global malloc_state used for all non-"mspace" calls */ static struct malloc_state _gm_; -#define gm (&_gm_) -#define is_global(M) ((M) == &_gm_) + #define gm (&_gm_) + #define is_global(M) ((M) == &_gm_) -#endif /* !ONLY_MSPACES */ +#endif /* !ONLY_MSPACES */ -#define is_initialized(M) ((M)->top != 0) +#define is_initialized(M) ((M)->top != 0) /* -------------------------- system alloc setup ------------------------- */ /* Operations on mflags */ -#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) -#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) +#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) +#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) #if USE_LOCKS -#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) + #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) #else -#define disable_lock(M) + #define disable_lock(M) #endif -#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) -#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) +#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) +#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) #if HAVE_MMAP -#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) + #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) #else -#define disable_mmap(M) + #define disable_mmap(M) #endif -#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) -#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) +#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) +#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) -#define set_lock(M,L)\ - ((M)->mflags = (L)?\ - ((M)->mflags | USE_LOCK_BIT) :\ - ((M)->mflags & ~USE_LOCK_BIT)) +#define set_lock(M, L) \ + ((M)->mflags = \ + (L) ? 
((M)->mflags | USE_LOCK_BIT) : ((M)->mflags & ~USE_LOCK_BIT)) /* page-align a size */ -#define page_align(S)\ - (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE)) +#define page_align(S) \ + (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE)) /* granularity-align a size */ -#define granularity_align(S)\ - (((S) + (mparams.granularity - SIZE_T_ONE))\ - & ~(mparams.granularity - SIZE_T_ONE)) - +#define granularity_align(S) \ + (((S) + (mparams.granularity - SIZE_T_ONE)) & \ + ~(mparams.granularity - SIZE_T_ONE)) /* For mmap, use granularity alignment on windows, else page-align */ #ifdef WIN32 -#define mmap_align(S) granularity_align(S) + #define mmap_align(S) granularity_align(S) #else -#define mmap_align(S) page_align(S) + #define mmap_align(S) page_align(S) #endif /* For sys_alloc, enough padding to ensure can malloc request on success */ #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) -#define is_page_aligned(S)\ - (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) -#define is_granularity_aligned(S)\ - (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) +#define is_page_aligned(S) \ + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) +#define is_granularity_aligned(S) \ + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) /* True if segment S holds address A */ -#define segment_holds(S, A)\ - ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) +#define segment_holds(S, A) \ + ((char *)(A) >= S->base && (char *)(A) < S->base + S->size) /* Return segment holding given address */ -static msegmentptr segment_holding(mstate m, char* addr) { +static msegmentptr segment_holding(mstate m, char *addr) { + msegmentptr sp = &m->seg; for (;;) { - if (addr >= sp->base && addr < sp->base + sp->size) - return sp; - if ((sp = sp->next) == 0) - return 0; + + if (addr >= sp->base && addr < sp->base + sp->size) return sp; + if ((sp = sp->next) == 0) return 0; + } + } /* Return true if segment contains a segment link */ static int has_segment_link(mstate m, msegmentptr ss) { + msegmentptr sp = &m->seg; for (;;) { - if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) - return 1; - if ((sp = sp->next) == 0) - return 0; + + if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size) return 1; + if ((sp = sp->next) == 0) return 0; + } + } #ifndef MORECORE_CANNOT_TRIM -#define should_trim(M,s) ((s) > (M)->trim_check) -#else /* MORECORE_CANNOT_TRIM */ -#define should_trim(M,s) (0) -#endif /* MORECORE_CANNOT_TRIM */ + #define should_trim(M, s) ((s) > (M)->trim_check) +#else /* MORECORE_CANNOT_TRIM */ + #define should_trim(M, s) (0) +#endif /* MORECORE_CANNOT_TRIM */ /* TOP_FOOT_SIZE is padding at the end of a segment, including space that may be needed to place segment records and fenceposts when new noncontiguous segments are added. */ -#define TOP_FOOT_SIZE\ - (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) - +#define TOP_FOOT_SIZE \ + (align_offset(chunk2mem(0)) + pad_request(sizeof(struct malloc_segment)) + \ + MIN_CHUNK_SIZE) /* ------------------------------- Hooks -------------------------------- */ @@ -2790,19 +2881,24 @@ static int has_segment_link(mstate m, msegmentptr ss) { */ #if USE_LOCKS -#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) -#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } -#else /* USE_LOCKS */ + #define PREACTION(M) ((use_lock(M)) ? 
ACQUIRE_LOCK(&(M)->mutex) : 0) + #define POSTACTION(M) \ + { \ + \ + if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); \ + \ + } +#else /* USE_LOCKS */ -#ifndef PREACTION -#define PREACTION(M) (0) -#endif /* PREACTION */ + #ifndef PREACTION + #define PREACTION(M) (0) + #endif /* PREACTION */ -#ifndef POSTACTION -#define POSTACTION(M) -#endif /* POSTACTION */ + #ifndef POSTACTION + #define POSTACTION(M) + #endif /* POSTACTION */ -#endif /* USE_LOCKS */ +#endif /* USE_LOCKS */ /* CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. @@ -2820,164 +2916,180 @@ int malloc_corruption_error_count; /* default corruption action */ static void reset_on_error(mstate m); -#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) -#define USAGE_ERROR_ACTION(m, p) + #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) + #define USAGE_ERROR_ACTION(m, p) -#else /* PROCEED_ON_ERROR */ +#else /* PROCEED_ON_ERROR */ -#ifndef CORRUPTION_ERROR_ACTION -#define CORRUPTION_ERROR_ACTION(m) ABORT -#endif /* CORRUPTION_ERROR_ACTION */ + #ifndef CORRUPTION_ERROR_ACTION + #define CORRUPTION_ERROR_ACTION(m) ABORT + #endif /* CORRUPTION_ERROR_ACTION */ -#ifndef USAGE_ERROR_ACTION -#define USAGE_ERROR_ACTION(m,p) ABORT -#endif /* USAGE_ERROR_ACTION */ - -#endif /* PROCEED_ON_ERROR */ + #ifndef USAGE_ERROR_ACTION + #define USAGE_ERROR_ACTION(m, p) ABORT + #endif /* USAGE_ERROR_ACTION */ +#endif /* PROCEED_ON_ERROR */ /* -------------------------- Debugging setup ---------------------------- */ -#if ! DEBUG +#if !DEBUG -#define check_free_chunk(M,P) -#define check_inuse_chunk(M,P) -#define check_malloced_chunk(M,P,N) -#define check_mmapped_chunk(M,P) -#define check_malloc_state(M) -#define check_top_chunk(M,P) + #define check_free_chunk(M, P) + #define check_inuse_chunk(M, P) + #define check_malloced_chunk(M, P, N) + #define check_mmapped_chunk(M, P) + #define check_malloc_state(M) + #define check_top_chunk(M, P) -#else /* DEBUG */ -#define check_free_chunk(M,P) do_check_free_chunk(M,P) -#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) -#define check_top_chunk(M,P) do_check_top_chunk(M,P) -#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) -#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) -#define check_malloc_state(M) do_check_malloc_state(M) +#else /* DEBUG */ + #define check_free_chunk(M, P) do_check_free_chunk(M, P) + #define check_inuse_chunk(M, P) do_check_inuse_chunk(M, P) + #define check_top_chunk(M, P) do_check_top_chunk(M, P) + #define check_malloced_chunk(M, P, N) do_check_malloced_chunk(M, P, N) + #define check_mmapped_chunk(M, P) do_check_mmapped_chunk(M, P) + #define check_malloc_state(M) do_check_malloc_state(M) static void do_check_any_chunk(mstate m, mchunkptr p); static void do_check_top_chunk(mstate m, mchunkptr p); static void do_check_mmapped_chunk(mstate m, mchunkptr p); static void do_check_inuse_chunk(mstate m, mchunkptr p); static void do_check_free_chunk(mstate m, mchunkptr p); -static void do_check_malloced_chunk(mstate m, void* mem, size_t s); +static void do_check_malloced_chunk(mstate m, void *mem, size_t s); static void do_check_tree(mstate m, tchunkptr t); static void do_check_treebin(mstate m, bindex_t i); static void do_check_smallbin(mstate m, bindex_t i); static void do_check_malloc_state(mstate m); static int bin_find(mstate m, mchunkptr x); static size_t traverse_and_check(mstate m); -#endif /* DEBUG */ +#endif /* DEBUG */ /* ---------------------------- Indexing Bins ---------------------------- */ -#define is_small(s) (((s) >> SMALLBIN_SHIFT) < 
NSMALLBINS) -#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) -#define small_index2size(i) ((i) << SMALLBIN_SHIFT) -#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) +#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) +#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) +#define small_index2size(i) ((i) << SMALLBIN_SHIFT) +#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) /* addressing by index. See above about smallbin repositioning */ -#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) -#define treebin_at(M,i) (&((M)->treebins[i])) +#define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i) << 1]))) +#define treebin_at(M, i) (&((M)->treebins[i])) /* assign tree index for size S to variable I. Use x86 asm if possible */ #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) -#define compute_tree_index(S, I)\ -{\ - unsigned int X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} + #define compute_tree_index(S, I) \ + { \ + \ + unsigned int X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int K = (unsigned)sizeof(X) * __CHAR_BIT__ - 1 - \ + (unsigned)__builtin_clz(X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ + \ + } \ + \ + } -#elif defined (__INTEL_COMPILER) -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K = _bit_scan_reverse (X); \ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} +#elif defined(__INTEL_COMPILER) + #define compute_tree_index(S, I) \ + { \ + \ + size_t X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int K = _bit_scan_reverse(X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ + \ + } \ + \ + } -#elif defined(_MSC_VER) && _MSC_VER>=1300 -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K;\ - _BitScanReverse((DWORD *) &K, (DWORD) X);\ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} +#elif defined(_MSC_VER) && _MSC_VER >= 1300 + #define compute_tree_index(S, I) \ + { \ + \ + size_t X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int K; \ + _BitScanReverse((DWORD *)&K, (DWORD)X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ + \ + } \ + \ + } -#else /* GNUC */ -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int Y = (unsigned int)X;\ - unsigned int N = ((Y - 0x100) >> 16) & 8;\ - unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ - N += K;\ - N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ - K = 14 - N + ((Y <<= K) >> 15);\ - I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ - }\ -} -#endif /* GNUC */ +#else /* GNUC */ + #define compute_tree_index(S, I) \ + { \ + \ + size_t X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { 
\ + \ + unsigned int Y = (unsigned int)X; \ + unsigned int N = ((Y - 0x100) >> 16) & 8; \ + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4; \ + N += K; \ + N += K = (((Y <<= K) - 0x4000) >> 16) & 2; \ + K = 14 - N + ((Y <<= K) >> 15); \ + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1)); \ + \ + } \ + \ + } +#endif /* GNUC */ /* Bit representing maximum resolved size in a treebin at i */ #define bit_for_tree_index(i) \ - (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) + (i == NTREEBINS - 1) ? (SIZE_T_BITSIZE - 1) : (((i) >> 1) + TREEBIN_SHIFT - 2) /* Shift placing maximum resolved bit in a treebin at i as sign bit */ #define leftshift_for_tree_index(i) \ - ((i == NTREEBINS-1)? 0 : \ - ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + ((i == NTREEBINS - 1) \ + ? 0 \ + : ((SIZE_T_BITSIZE - SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) /* The size of the smallest chunk held in bin with index i */ -#define minsize_for_tree_index(i) \ - ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ - (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) - +#define minsize_for_tree_index(i) \ + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ + (((size_t)((i)&SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) /* ------------------------ Operations on bin maps ----------------------- */ /* bit corresponding to given index */ -#define idx2bit(i) ((binmap_t)(1) << (i)) +#define idx2bit(i) ((binmap_t)(1) << (i)) /* Mark/Clear bits with given index */ -#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) -#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) -#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) +#define mark_smallmap(M, i) ((M)->smallmap |= idx2bit(i)) +#define clear_smallmap(M, i) ((M)->smallmap &= ~idx2bit(i)) +#define smallmap_is_marked(M, i) ((M)->smallmap & idx2bit(i)) -#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) -#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) -#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) +#define mark_treemap(M, i) ((M)->treemap |= idx2bit(i)) +#define clear_treemap(M, i) ((M)->treemap &= ~idx2bit(i)) +#define treemap_is_marked(M, i) ((M)->treemap & idx2bit(i)) /* isolate the least set bit of a bitmap */ -#define least_bit(x) ((x) & -(x)) +#define least_bit(x) ((x) & -(x)) /* mask with all bits to left of least bit of x on */ -#define left_bits(x) ((x<<1) | -(x<<1)) +#define left_bits(x) ((x << 1) | -(x << 1)) /* mask with all bits to left of or equal to least bit of x on */ #define same_or_left_bits(x) ((x) | -(x)) @@ -2985,46 +3097,58 @@ static size_t traverse_and_check(mstate m); /* index corresponding to given bit. 
Use x86 asm if possible */ #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - J = __builtin_ctz(X); \ - I = (bindex_t)J;\ -} + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int J; \ + J = __builtin_ctz(X); \ + I = (bindex_t)J; \ + \ + } -#elif defined (__INTEL_COMPILER) -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - J = _bit_scan_forward (X); \ - I = (bindex_t)J;\ -} +#elif defined(__INTEL_COMPILER) + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int J; \ + J = _bit_scan_forward(X); \ + I = (bindex_t)J; \ + \ + } -#elif defined(_MSC_VER) && _MSC_VER>=1300 -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - _BitScanForward((DWORD *) &J, X);\ - I = (bindex_t)J;\ -} +#elif defined(_MSC_VER) && _MSC_VER >= 1300 + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int J; \ + _BitScanForward((DWORD *)&J, X); \ + I = (bindex_t)J; \ + \ + } #elif USE_BUILTIN_FFS -#define compute_bit2idx(X, I) I = ffs(X)-1 + #define compute_bit2idx(X, I) I = ffs(X) - 1 #else -#define compute_bit2idx(X, I)\ -{\ - unsigned int Y = X - 1;\ - unsigned int K = Y >> (16-4) & 16;\ - unsigned int N = K; Y >>= K;\ - N += K = Y >> (8-3) & 8; Y >>= K;\ - N += K = Y >> (4-2) & 4; Y >>= K;\ - N += K = Y >> (2-1) & 2; Y >>= K;\ - N += K = Y >> (1-0) & 1; Y >>= K;\ - I = (bindex_t)(N + Y);\ -} -#endif /* GNUC */ - + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int Y = X - 1; \ + unsigned int K = Y >> (16 - 4) & 16; \ + unsigned int N = K; \ + Y >>= K; \ + N += K = Y >> (8 - 3) & 8; \ + Y >>= K; \ + N += K = Y >> (4 - 2) & 4; \ + Y >>= K; \ + N += K = Y >> (2 - 1) & 2; \ + Y >>= K; \ + N += K = Y >> (1 - 0) & 1; \ + Y >>= K; \ + I = (bindex_t)(N + Y); \ + \ + } +#endif /* GNUC */ /* ----------------------- Runtime Check Support ------------------------- */ @@ -3055,121 +3179,141 @@ static size_t traverse_and_check(mstate m); */ #if !INSECURE -/* Check if address a is at least as high as any from MORECORE or MMAP */ -#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) -/* Check if address of next chunk n is higher than base chunk p */ -#define ok_next(p, n) ((char*)(p) < (char*)(n)) -/* Check if p has inuse status */ -#define ok_inuse(p) is_inuse(p) -/* Check if p has its pinuse bit on */ -#define ok_pinuse(p) pinuse(p) - -#else /* !INSECURE */ -#define ok_address(M, a) (1) -#define ok_next(b, n) (1) -#define ok_inuse(p) (1) -#define ok_pinuse(p) (1) -#endif /* !INSECURE */ + /* Check if address a is at least as high as any from MORECORE or MMAP */ + #define ok_address(M, a) ((char *)(a) >= (M)->least_addr) + /* Check if address of next chunk n is higher than base chunk p */ + #define ok_next(p, n) ((char *)(p) < (char *)(n)) + /* Check if p has inuse status */ + #define ok_inuse(p) is_inuse(p) + /* Check if p has its pinuse bit on */ + #define ok_pinuse(p) pinuse(p) + +#else /* !INSECURE */ + #define ok_address(M, a) (1) + #define ok_next(b, n) (1) + #define ok_inuse(p) (1) + #define ok_pinuse(p) (1) +#endif /* !INSECURE */ #if (FOOTERS && !INSECURE) -/* Check if (alleged) mstate m has expected magic field */ -#define ok_magic(M) ((M)->magic == mparams.magic) -#else /* (FOOTERS && !INSECURE) */ -#define ok_magic(M) (1) -#endif /* (FOOTERS && !INSECURE) */ + /* Check if (alleged) mstate m has expected magic field */ + #define ok_magic(M) ((M)->magic == mparams.magic) +#else /* (FOOTERS && !INSECURE) */ + #define ok_magic(M) (1) +#endif /* (FOOTERS && !INSECURE) */ /* In gcc, use __builtin_expect to minimize impact 
of checks */ #if !INSECURE -#if defined(__GNUC__) && __GNUC__ >= 3 -#define RTCHECK(e) __builtin_expect(e, 1) -#else /* GNUC */ -#define RTCHECK(e) (e) -#endif /* GNUC */ -#else /* !INSECURE */ -#define RTCHECK(e) (1) -#endif /* !INSECURE */ + #if defined(__GNUC__) && __GNUC__ >= 3 + #define RTCHECK(e) __builtin_expect(e, 1) + #else /* GNUC */ + #define RTCHECK(e) (e) + #endif /* GNUC */ +#else /* !INSECURE */ + #define RTCHECK(e) (1) +#endif /* !INSECURE */ /* macros to set up inuse chunks with or without footers */ #if !FOOTERS -#define mark_inuse_foot(M,p,s) + #define mark_inuse_foot(M, p, s) -/* Macros for setting head/foot of non-mmapped chunks */ + /* Macros for setting head/foot of non-mmapped chunks */ -/* Set cinuse bit and pinuse bit of next chunk */ -#define set_inuse(M,p,s)\ - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ - ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + /* Set cinuse bit and pinuse bit of next chunk */ + #define set_inuse(M, p, s) \ + ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \ + ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) -/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ -#define set_inuse_and_pinuse(M,p,s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + /* Set cinuse and pinuse of this chunk and pinuse of next chunk */ + #define set_inuse_and_pinuse(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \ + ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) -/* Set size, cinuse and pinuse bit of this chunk */ -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) + /* Set size, cinuse and pinuse bit of this chunk */ + #define set_size_and_pinuse_of_inuse_chunk(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT)) -#else /* FOOTERS */ +#else /* FOOTERS */ -/* Set foot of inuse chunk to be xor of mstate and seed */ -#define mark_inuse_foot(M,p,s)\ - (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) + /* Set foot of inuse chunk to be xor of mstate and seed */ + #define mark_inuse_foot(M, p, s) \ + (((mchunkptr)((char *)(p) + (s)))->prev_foot = \ + ((size_t)(M) ^ mparams.magic)) -#define get_mstate_for(p)\ - ((mstate)(((mchunkptr)((char*)(p) +\ - (chunksize(p))))->prev_foot ^ mparams.magic)) + #define get_mstate_for(p) \ + ((mstate)(((mchunkptr)((char *)(p) + (chunksize(p))))->prev_foot ^ \ + mparams.magic)) -#define set_inuse(M,p,s)\ - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ - (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ - mark_inuse_foot(M,p,s)) + #define set_inuse(M, p, s) \ + ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \ + (((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M, p, s)) -#define set_inuse_and_pinuse(M,p,s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ - mark_inuse_foot(M,p,s)) + #define set_inuse_and_pinuse(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \ + (((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M, p, s)) -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - mark_inuse_foot(M, p, s)) + #define set_size_and_pinuse_of_inuse_chunk(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), mark_inuse_foot(M, p, s)) -#endif /* !FOOTERS */ +#endif /* !FOOTERS */ /* ---------------------------- setting mparams -------------------------- */ #if 
LOCK_AT_FORK -static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); } -static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); } -static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); } -#endif /* LOCK_AT_FORK */ +static void pre_fork(void) { + + ACQUIRE_LOCK(&(gm)->mutex); + +} + +static void post_fork_parent(void) { + + RELEASE_LOCK(&(gm)->mutex); + +} + +static void post_fork_child(void) { + + INITIAL_LOCK(&(gm)->mutex); + +} + +#endif /* LOCK_AT_FORK */ /* Initialize mparams */ static int init_mparams(void) { + #ifdef NEED_GLOBAL_LOCK_INIT - if (malloc_global_mutex_status <= 0) - init_malloc_global_mutex(); + if (malloc_global_mutex_status <= 0) init_malloc_global_mutex(); #endif ACQUIRE_MALLOC_GLOBAL_LOCK(); if (mparams.magic == 0) { + size_t magic; size_t psize; size_t gsize; #ifndef WIN32 psize = malloc_getpagesize; - gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize); -#else /* WIN32 */ + gsize = ((DEFAULT_GRANULARITY != 0) ? DEFAULT_GRANULARITY : psize); +#else /* WIN32 */ { + SYSTEM_INFO system_info; GetSystemInfo(&system_info); psize = system_info.dwPageSize; - gsize = ((DEFAULT_GRANULARITY != 0)? - DEFAULT_GRANULARITY : system_info.dwAllocationGranularity); + gsize = + ((DEFAULT_GRANULARITY != 0) ? DEFAULT_GRANULARITY + : system_info.dwAllocationGranularity); + } -#endif /* WIN32 */ + +#endif /* WIN32 */ /* Sanity-check configuration: size_t must be unsigned and as wide as pointer type. @@ -3177,24 +3321,23 @@ static int init_mparams(void) { alignment must be at least 8. Alignment, min chunk size, and page size must all be powers of 2. */ - if ((sizeof(size_t) != sizeof(char*)) || - (MAX_SIZE_T < MIN_CHUNK_SIZE) || - (sizeof(int) < 4) || - (MALLOC_ALIGNMENT < (size_t)8U) || - ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || - ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || - ((gsize & (gsize-SIZE_T_ONE)) != 0) || - ((psize & (psize-SIZE_T_ONE)) != 0)) + if ((sizeof(size_t) != sizeof(char *)) || (MAX_SIZE_T < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || (MALLOC_ALIGNMENT < (size_t)8U) || + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) || + ((gsize & (gsize - SIZE_T_ONE)) != 0) || + ((psize & (psize - SIZE_T_ONE)) != 0)) ABORT; mparams.granularity = gsize; mparams.page_size = psize; mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; #if MORECORE_CONTIGUOUS - mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; -#else /* MORECORE_CONTIGUOUS */ - mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; -#endif /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT; +#else /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = + USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT; +#endif /* MORECORE_CONTIGUOUS */ #if !ONLY_MSPACES /* Set up lock for main malloc area */ @@ -3206,57 +3349,69 @@ static int init_mparams(void) { #endif { + #if USE_DEV_RANDOM - int fd; + int fd; unsigned char buf[sizeof(size_t)]; /* Try to use /dev/urandom, else fall back on using time */ if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && read(fd, buf, sizeof(buf)) == sizeof(buf)) { - magic = *((size_t *) buf); + + magic = *((size_t *)buf); close(fd); - } - else -#endif /* USE_DEV_RANDOM */ + + } else + +#endif /* USE_DEV_RANDOM */ #ifdef WIN32 - magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); + magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); #elif defined(LACKS_TIME_H) magic = 
(size_t)&magic ^ (size_t)0x55555555U; #else magic = (size_t)(time(0) ^ (size_t)0x55555555U); #endif - magic |= (size_t)8U; /* ensure nonzero */ - magic &= ~(size_t)7U; /* improve chances of fault for bad values */ + magic |= (size_t)8U; /* ensure nonzero */ + magic &= ~(size_t)7U; /* improve chances of fault for bad values */ /* Until memory modes commonly available, use volatile-write */ (*(volatile size_t *)(&(mparams.magic))) = magic; + } + } RELEASE_MALLOC_GLOBAL_LOCK(); return 1; + } /* support for mallopt */ static int change_mparam(int param_number, int value) { + size_t val; ensure_initialization(); - val = (value == -1)? MAX_SIZE_T : (size_t)value; - switch(param_number) { - case M_TRIM_THRESHOLD: - mparams.trim_threshold = val; - return 1; - case M_GRANULARITY: - if (val >= mparams.page_size && ((val & (val-1)) == 0)) { - mparams.granularity = val; + val = (value == -1) ? MAX_SIZE_T : (size_t)value; + switch (param_number) { + + case M_TRIM_THRESHOLD: + mparams.trim_threshold = val; return 1; - } - else + case M_GRANULARITY: + if (val >= mparams.page_size && ((val & (val - 1)) == 0)) { + + mparams.granularity = val; + return 1; + + } else + + return 0; + case M_MMAP_THRESHOLD: + mparams.mmap_threshold = val; + return 1; + default: return 0; - case M_MMAP_THRESHOLD: - mparams.mmap_threshold = val; - return 1; - default: - return 0; + } + } #if DEBUG @@ -3264,100 +3419,118 @@ static int change_mparam(int param_number, int value) { /* Check properties of any chunk, whether free, inuse, mmapped etc */ static void do_check_any_chunk(mstate m, mchunkptr p) { + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); + } /* Check properties of top chunk */ static void do_check_top_chunk(mstate m, mchunkptr p) { - msegmentptr sp = segment_holding(m, (char*)p); - size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */ + + msegmentptr sp = segment_holding(m, (char *)p); + size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! 
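For orientation only, not part of this patch: chunk sizes are multiples of the malloc alignment, so the low bits of the head word are free to carry status flags and the size is recovered by masking them off, as in the line just above. The flag names in this sketch are stand-ins for dlmalloc's PINUSE_BIT, CINUSE_BIT and the extra low bit mentioned here.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

enum {
  PREV_INUSE = 1,   // stand-in for PINUSE_BIT
  THIS_INUSE = 2,   // stand-in for CINUSE_BIT
  EXTRA_FLAG = 4,   // stand-in for the third low bit noted above
  ALL_FLAGS  = PREV_INUSE | THIS_INUSE | EXTRA_FLAG
};

int main(void) {
  size_t head = (size_t)160 | PREV_INUSE | EXTRA_FLAG;  // a 160-byte chunk
  size_t size = head & ~(size_t)ALL_FLAGS;              // strip the status bits
  assert(size == 160);
  printf("size=%zu prev_inuse=%d this_inuse=%d\n", size,
         (head & PREV_INUSE) != 0, (head & THIS_INUSE) != 0);
  return 0;
}
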
*/ assert(sp != 0); assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); assert(sz == m->topsize); assert(sz > 0); - assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); + assert(sz == ((sp->base + sp->size) - (char *)p) - TOP_FOOT_SIZE); assert(pinuse(p)); assert(!pinuse(chunk_plus_offset(p, sz))); + } /* Check properties of (inuse) mmapped chunks */ static void do_check_mmapped_chunk(mstate m, mchunkptr p) { - size_t sz = chunksize(p); + + size_t sz = chunksize(p); size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD); assert(is_mmapped(p)); assert(use_mmap(m)); assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); assert(!is_small(sz)); - assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); + assert((len & (mparams.page_size - SIZE_T_ONE)) == 0); assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); - assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); + assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0); + } /* Check properties of inuse chunks */ static void do_check_inuse_chunk(mstate m, mchunkptr p) { + do_check_any_chunk(m, p); assert(is_inuse(p)); assert(next_pinuse(p)); /* If not pinuse and not mmapped, previous chunk has OK offset */ assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); - if (is_mmapped(p)) - do_check_mmapped_chunk(m, p); + if (is_mmapped(p)) do_check_mmapped_chunk(m, p); + } /* Check properties of free chunks */ static void do_check_free_chunk(mstate m, mchunkptr p) { - size_t sz = chunksize(p); + + size_t sz = chunksize(p); mchunkptr next = chunk_plus_offset(p, sz); do_check_any_chunk(m, p); assert(!is_inuse(p)); assert(!next_pinuse(p)); - assert (!is_mmapped(p)); + assert(!is_mmapped(p)); if (p != m->dv && p != m->top) { + if (sz >= MIN_CHUNK_SIZE) { + assert((sz & CHUNK_ALIGN_MASK) == 0); assert(is_aligned(chunk2mem(p))); assert(next->prev_foot == sz); assert(pinuse(p)); - assert (next == m->top || is_inuse(next)); + assert(next == m->top || is_inuse(next)); assert(p->fd->bk == p); assert(p->bk->fd == p); - } - else /* markers are always of size SIZE_T_SIZE */ + + } else /* markers are always of size SIZE_T_SIZE */ + assert(sz == SIZE_T_SIZE); + } + } /* Check properties of malloced chunks at the point they are malloced */ -static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { +static void do_check_malloced_chunk(mstate m, void *mem, size_t s) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); - size_t sz = p->head & ~INUSE_BITS; + size_t sz = p->head & ~INUSE_BITS; do_check_inuse_chunk(m, p); assert((sz & CHUNK_ALIGN_MASK) == 0); assert(sz >= MIN_CHUNK_SIZE); assert(sz >= s); /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); + } + } /* Check a tree and its subtrees. 
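As a reference point, not taken from this patch: the index that the first assertions below expect from compute_tree_index can be computed with a plain loop instead of the bit tricks. tree_index_ref is a made-up helper, and it assumes dlmalloc's usual TREEBIN_SHIFT of 8 and 32 tree bins, two per power of two.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

// Plain-loop equivalent of the tree-bin index: K is the position of the
// highest set bit of S >> 8, and the next bit down selects the odd bin.
static unsigned tree_index_ref(size_t S) {
  size_t   X = S >> 8;                  // TREEBIN_SHIFT
  unsigned K = 0;
  if (X == 0) return 0;
  if (X > 0xFFFF) return 31;            // NTREEBINS - 1
  while ((X >> (K + 1)) != 0) K++;      // K = floor(log2(X))
  return (K << 1) | ((S >> (K + 7)) & 1);
}

int main(void) {
  assert(tree_index_ref(256) == 0);     // smallest "large" sizes
  assert(tree_index_ref(384) == 1);     // upper half of [256, 512)
  assert(tree_index_ref(512) == 2);
  printf("size 640 -> tree bin %u\n", tree_index_ref(640));
  return 0;
}
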
*/ static void do_check_tree(mstate m, tchunkptr t) { + tchunkptr head = 0; tchunkptr u = t; - bindex_t tindex = t->index; - size_t tsize = chunksize(t); - bindex_t idx; + bindex_t tindex = t->index; + size_t tsize = chunksize(t); + bindex_t idx; compute_tree_index(tsize, idx); assert(tindex == idx); assert(tsize >= MIN_LARGE_SIZE); assert(tsize >= minsize_for_tree_index(idx)); - assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); + assert((idx == NTREEBINS - 1) || (tsize < minsize_for_tree_index((idx + 1)))); - do { /* traverse through chain of same-sized nodes */ + do { /* traverse through chain of same-sized nodes */ do_check_any_chunk(m, ((mchunkptr)u)); assert(u->index == tindex); assert(chunksize(u) == tsize); @@ -3366,56 +3539,72 @@ static void do_check_tree(mstate m, tchunkptr t) { assert(u->fd->bk == u); assert(u->bk->fd == u); if (u->parent == 0) { + assert(u->child[0] == 0); assert(u->child[1] == 0); - } - else { - assert(head == 0); /* only one node on chain has parent */ - head = u; - assert(u->parent != u); - assert (u->parent->child[0] == u || - u->parent->child[1] == u || - *((tbinptr*)(u->parent)) == u); + + } else { + + assert(head == 0); /* only one node on chain has parent */ + head = u; + assert(u->parent != u); + assert(u->parent->child[0] == u || u->parent->child[1] == u || + *((tbinptr *)(u->parent)) == u); if (u->child[0] != 0) { + assert(u->child[0]->parent == u); assert(u->child[0] != u); do_check_tree(m, u->child[0]); + } + if (u->child[1] != 0) { + assert(u->child[1]->parent == u); assert(u->child[1] != u); do_check_tree(m, u->child[1]); + } + if (u->child[0] != 0 && u->child[1] != 0) { + assert(chunksize(u->child[0]) < chunksize(u->child[1])); + } + } + u = u->fd; + } while (u != t); + assert(head != 0); + } /* Check all the chunks in a treebin. */ static void do_check_treebin(mstate m, bindex_t i) { - tbinptr* tb = treebin_at(m, i); + + tbinptr * tb = treebin_at(m, i); tchunkptr t = *tb; - int empty = (m->treemap & (1U << i)) == 0; - if (t == 0) - assert(empty); - if (!empty) - do_check_tree(m, t); + int empty = (m->treemap & (1U << i)) == 0; + if (t == 0) assert(empty); + if (!empty) do_check_tree(m, t); + } /* Check all the chunks in a smallbin. */ static void do_check_smallbin(mstate m, bindex_t i) { - sbinptr b = smallbin_at(m, i); - mchunkptr p = b->bk; + + sbinptr b = smallbin_at(m, i); + mchunkptr p = b->bk; unsigned int empty = (m->smallmap & (1U << i)) == 0; - if (p == b) - assert(empty); + if (p == b) assert(empty); if (!empty) { + for (; p != b; p = p->bk) { - size_t size = chunksize(p); + + size_t size = chunksize(p); mchunkptr q; /* each chunk claims to be free */ do_check_free_chunk(m, p); @@ -3424,185 +3613,249 @@ static void do_check_smallbin(mstate m, bindex_t i) { assert(p->bk == b || chunksize(p->bk) == chunksize(p)); /* chunk is followed by an inuse chunk */ q = next_chunk(p); - if (q->head != FENCEPOST_HEAD) - do_check_inuse_chunk(m, q); + if (q->head != FENCEPOST_HEAD) do_check_inuse_chunk(m, q); + } + } + } /* Find x in a bin. Used in other check functions. 
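For orientation only, not part of this patch: the smallbin half of this lookup is a walk around a circular doubly linked list whose header acts as a sentinel. The link type and ring_find below are simplified stand-ins for malloc_chunk and the loop in bin_find.

#include <assert.h>
#include <stdio.h>

typedef struct link {
  struct link *fd, *bk;   // forward / backward pointers, as in a chunk
} link;

// Walk the ring starting at the first real node; stop when it wraps back
// around to the bin header.
static int ring_find(link *bin, link *x) {
  for (link *p = bin->fd; p != bin; p = p->fd)
    if (p == x) return 1;
  return 0;
}

int main(void) {
  link bin, a, b;
  bin.fd = &a; a.fd = &b; b.fd = &bin;   // forward ring: bin -> a -> b -> bin
  bin.bk = &b; b.bk = &a; a.bk = &bin;   // backward ring mirrors it
  link stray = {&stray, &stray};         // a chunk that is not in the bin
  assert(ring_find(&bin, &b) == 1);
  assert(ring_find(&bin, &stray) == 0);
  printf("b in bin: %d, stray in bin: %d\n", ring_find(&bin, &b),
         ring_find(&bin, &stray));
  return 0;
}
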
*/ static int bin_find(mstate m, mchunkptr x) { + size_t size = chunksize(x); if (is_small(size)) { + bindex_t sidx = small_index(size); - sbinptr b = smallbin_at(m, sidx); + sbinptr b = smallbin_at(m, sidx); if (smallmap_is_marked(m, sidx)) { + mchunkptr p = b; do { - if (p == x) - return 1; + + if (p == x) return 1; + } while ((p = p->fd) != b); + } - } - else { + + } else { + bindex_t tidx; compute_tree_index(size, tidx); if (treemap_is_marked(m, tidx)) { + tchunkptr t = *treebin_at(m, tidx); - size_t sizebits = size << leftshift_for_tree_index(tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); while (t != 0 && chunksize(t) != size) { - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + + t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]; sizebits <<= 1; + } + if (t != 0) { + tchunkptr u = t; do { - if (u == (tchunkptr)x) - return 1; + + if (u == (tchunkptr)x) return 1; + } while ((u = u->fd) != t); + } + } + } + return 0; + } /* Traverse each chunk and check it; return total */ static size_t traverse_and_check(mstate m) { + size_t sum = 0; if (is_initialized(m)) { + msegmentptr s = &m->seg; sum += m->topsize + TOP_FOOT_SIZE; while (s != 0) { + mchunkptr q = align_as_chunk(s->base); mchunkptr lastq = 0; assert(pinuse(q)); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { + while (segment_holds(s, q) && q != m->top && q->head != FENCEPOST_HEAD) { + sum += chunksize(q); if (is_inuse(q)) { + assert(!bin_find(m, q)); do_check_inuse_chunk(m, q); - } - else { + + } else { + assert(q == m->dv || bin_find(m, q)); - assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ + assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ do_check_free_chunk(m, q); + } + lastq = q; q = next_chunk(q); + } + s = s->next; + } + } + return sum; -} +} /* Check all properties of malloc_state. 
*/ static void do_check_malloc_state(mstate m) { + bindex_t i; - size_t total; + size_t total; /* check bins */ for (i = 0; i < NSMALLBINS; ++i) do_check_smallbin(m, i); for (i = 0; i < NTREEBINS; ++i) do_check_treebin(m, i); - if (m->dvsize != 0) { /* check dv chunk */ + if (m->dvsize != 0) { /* check dv chunk */ do_check_any_chunk(m, m->dv); assert(m->dvsize == chunksize(m->dv)); assert(m->dvsize >= MIN_CHUNK_SIZE); assert(bin_find(m, m->dv) == 0); + } - if (m->top != 0) { /* check top chunk */ + if (m->top != 0) { /* check top chunk */ do_check_top_chunk(m, m->top); /*assert(m->topsize == chunksize(m->top)); redundant */ assert(m->topsize > 0); assert(bin_find(m, m->top) == 0); + } total = traverse_and_check(m); assert(total <= m->footprint); assert(m->footprint <= m->max_footprint); + } -#endif /* DEBUG */ + +#endif /* DEBUG */ /* ----------------------------- statistics ------------------------------ */ #if !NO_MALLINFO static struct mallinfo internal_mallinfo(mstate m) { - struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + struct mallinfo nm = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; ensure_initialization(); if (!PREACTION(m)) { + check_malloc_state(m); if (is_initialized(m)) { - size_t nfree = SIZE_T_ONE; /* top always free */ - size_t mfree = m->topsize + TOP_FOOT_SIZE; - size_t sum = mfree; + + size_t nfree = SIZE_T_ONE; /* top always free */ + size_t mfree = m->topsize + TOP_FOOT_SIZE; + size_t sum = mfree; msegmentptr s = &m->seg; while (s != 0) { + mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { + while (segment_holds(s, q) && q != m->top && + q->head != FENCEPOST_HEAD) { + size_t sz = chunksize(q); sum += sz; if (!is_inuse(q)) { + mfree += sz; ++nfree; + } + q = next_chunk(q); + } + s = s->next; + } - nm.arena = sum; - nm.ordblks = nfree; - nm.hblkhd = m->footprint - sum; - nm.usmblks = m->max_footprint; + nm.arena = sum; + nm.ordblks = nfree; + nm.hblkhd = m->footprint - sum; + nm.usmblks = m->max_footprint; nm.uordblks = m->footprint - mfree; nm.fordblks = mfree; nm.keepcost = m->topsize; + } POSTACTION(m); + } + return nm; + } -#endif /* !NO_MALLINFO */ + +#endif /* !NO_MALLINFO */ #if !NO_MALLOC_STATS static void internal_malloc_stats(mstate m) { + ensure_initialization(); if (!PREACTION(m)) { + size_t maxfp = 0; size_t fp = 0; size_t used = 0; check_malloc_state(m); if (is_initialized(m)) { + msegmentptr s = &m->seg; maxfp = m->max_footprint; fp = m->footprint; used = fp - (m->topsize + TOP_FOOT_SIZE); while (s != 0) { + mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { - if (!is_inuse(q)) - used -= chunksize(q); + while (segment_holds(s, q) && q != m->top && + q->head != FENCEPOST_HEAD) { + + if (!is_inuse(q)) used -= chunksize(q); q = next_chunk(q); + } + s = s->next; + } + } - POSTACTION(m); /* drop lock */ + + POSTACTION(m); /* drop lock */ fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); + } + } -#endif /* NO_MALLOC_STATS */ + +#endif /* NO_MALLOC_STATS */ /* ----------------------- Operations on smallbins ----------------------- */ @@ -3614,134 +3867,179 @@ static void internal_malloc_stats(mstate m) { */ /* Link a free chunk into a smallbin */ -#define insert_small_chunk(M, P, S) {\ - bindex_t I = small_index(S);\ - mchunkptr B = smallbin_at(M, I);\ - mchunkptr F = B;\ - assert(S >= 
MIN_CHUNK_SIZE);\ - if (!smallmap_is_marked(M, I))\ - mark_smallmap(M, I);\ - else if (RTCHECK(ok_address(M, B->fd)))\ - F = B->fd;\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - B->fd = P;\ - F->bk = P;\ - P->fd = F;\ - P->bk = B;\ -} +#define insert_small_chunk(M, P, S) \ + { \ + \ + bindex_t I = small_index(S); \ + mchunkptr B = smallbin_at(M, I); \ + mchunkptr F = B; \ + assert(S >= MIN_CHUNK_SIZE); \ + if (!smallmap_is_marked(M, I)) \ + mark_smallmap(M, I); \ + else if (RTCHECK(ok_address(M, B->fd))) \ + F = B->fd; \ + else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + B->fd = P; \ + F->bk = P; \ + P->fd = F; \ + P->bk = B; \ + \ + } /* Unlink a chunk from a smallbin */ -#define unlink_small_chunk(M, P, S) {\ - mchunkptr F = P->fd;\ - mchunkptr B = P->bk;\ - bindex_t I = small_index(S);\ - assert(P != B);\ - assert(P != F);\ - assert(chunksize(P) == small_index2size(I));\ - if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \ - if (B == F) {\ - clear_smallmap(M, I);\ - }\ - else if (RTCHECK(B == smallbin_at(M,I) ||\ - (ok_address(M, B) && B->fd == P))) {\ - F->bk = B;\ - B->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ -} +#define unlink_small_chunk(M, P, S) \ + { \ + \ + mchunkptr F = P->fd; \ + mchunkptr B = P->bk; \ + bindex_t I = small_index(S); \ + assert(P != B); \ + assert(P != F); \ + assert(chunksize(P) == small_index2size(I)); \ + if (RTCHECK(F == smallbin_at(M, I) || (ok_address(M, F) && F->bk == P))) { \ + \ + if (B == F) { \ + \ + clear_smallmap(M, I); \ + \ + } else if (RTCHECK(B == smallbin_at(M, I) || \ + (ok_address(M, B) && B->fd == P))) { \ + \ + F->bk = B; \ + B->fd = F; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } /* Unlink the first chunk from a smallbin */ -#define unlink_first_small_chunk(M, B, P, I) {\ - mchunkptr F = P->fd;\ - assert(P != B);\ - assert(P != F);\ - assert(chunksize(P) == small_index2size(I));\ - if (B == F) {\ - clear_smallmap(M, I);\ - }\ - else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\ - F->bk = B;\ - B->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ -} +#define unlink_first_small_chunk(M, B, P, I) \ + { \ + \ + mchunkptr F = P->fd; \ + assert(P != B); \ + assert(P != F); \ + assert(chunksize(P) == small_index2size(I)); \ + if (B == F) { \ + \ + clear_smallmap(M, I); \ + \ + } else if (RTCHECK(ok_address(M, F) && F->bk == P)) { \ + \ + F->bk = B; \ + B->fd = F; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } /* Replace dv node, binning the old one */ /* Used only when dvsize known to be small */ -#define replace_dv(M, P, S) {\ - size_t DVS = M->dvsize;\ - assert(is_small(DVS));\ - if (DVS != 0) {\ - mchunkptr DV = M->dv;\ - insert_small_chunk(M, DV, DVS);\ - }\ - M->dvsize = S;\ - M->dv = P;\ -} +#define replace_dv(M, P, S) \ + { \ + \ + size_t DVS = M->dvsize; \ + assert(is_small(DVS)); \ + if (DVS != 0) { \ + \ + mchunkptr DV = M->dv; \ + insert_small_chunk(M, DV, DVS); \ + \ + } \ + M->dvsize = S; \ + M->dv = P; \ + \ + } /* ------------------------- Operations on trees ------------------------- */ /* Insert chunk into tree */ -#define insert_large_chunk(M, X, S) {\ - tbinptr* H;\ - bindex_t I;\ - compute_tree_index(S, I);\ - H = treebin_at(M, I);\ - X->index = I;\ - X->child[0] = X->child[1] = 0;\ - if (!treemap_is_marked(M, I)) {\ - mark_treemap(M, I);\ - *H = X;\ - X->parent = (tchunkptr)H;\ - X->fd = X->bk = 
X;\ - }\ - else {\ - tchunkptr T = *H;\ - size_t K = S << leftshift_for_tree_index(I);\ - for (;;) {\ - if (chunksize(T) != S) {\ - tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ - K <<= 1;\ - if (*C != 0)\ - T = *C;\ - else if (RTCHECK(ok_address(M, C))) {\ - *C = X;\ - X->parent = T;\ - X->fd = X->bk = X;\ - break;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - break;\ - }\ - }\ - else {\ - tchunkptr F = T->fd;\ - if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ - T->fd = F->bk = X;\ - X->fd = F;\ - X->bk = T;\ - X->parent = 0;\ - break;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - break;\ - }\ - }\ - }\ - }\ -} +#define insert_large_chunk(M, X, S) \ + { \ + \ + tbinptr *H; \ + bindex_t I; \ + compute_tree_index(S, I); \ + H = treebin_at(M, I); \ + X->index = I; \ + X->child[0] = X->child[1] = 0; \ + if (!treemap_is_marked(M, I)) { \ + \ + mark_treemap(M, I); \ + *H = X; \ + X->parent = (tchunkptr)H; \ + X->fd = X->bk = X; \ + \ + } else { \ + \ + tchunkptr T = *H; \ + size_t K = S << leftshift_for_tree_index(I); \ + for (;;) { \ + \ + if (chunksize(T) != S) { \ + \ + tchunkptr *C = \ + &(T->child[(K >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]); \ + K <<= 1; \ + if (*C != 0) \ + T = *C; \ + else if (RTCHECK(ok_address(M, C))) { \ + \ + *C = X; \ + X->parent = T; \ + X->fd = X->bk = X; \ + break; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + break; \ + \ + } \ + \ + } else { \ + \ + tchunkptr F = T->fd; \ + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) { \ + \ + T->fd = F->bk = X; \ + X->fd = F; \ + X->bk = T; \ + X->parent = 0; \ + break; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + break; \ + \ + } \ + \ + } \ + \ + } \ + \ + } \ + \ + } /* Unlink steps: @@ -3760,104 +4058,141 @@ static void internal_malloc_stats(mstate m) { x's parent and children to x's replacement (or null if none). 
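The replacement search in the macro below, the RP/CP loop, can be sketched on an ordinary binary tree. This is an illustration only, not dlmalloc code; node and rightmost_descendant are made-up names.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef struct node {
  struct node *child[2];
  int          id;
} node;

// Mirror of the macro's replacement step: start from child[1] if present,
// otherwise child[0], then keep descending the same way until a leaf is hit.
static node *rightmost_descendant(node *x) {
  node *r = x->child[1] ? x->child[1] : x->child[0];
  while (r) {
    node *next = r->child[1] ? r->child[1] : r->child[0];
    if (next == NULL) break;
    r = next;
  }
  return r;   // NULL when x is itself a leaf
}

int main(void) {
  node leaf = {{NULL, NULL}, 3};
  node mid  = {{&leaf, NULL}, 2};
  node root = {{NULL, &mid}, 1};
  assert(rightmost_descendant(&root) == &leaf);
  assert(rightmost_descendant(&leaf) == NULL);
  printf("replacement for node %d is node %d\n", root.id,
         rightmost_descendant(&root)->id);
  return 0;
}
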
*/ -#define unlink_large_chunk(M, X) {\ - tchunkptr XP = X->parent;\ - tchunkptr R;\ - if (X->bk != X) {\ - tchunkptr F = X->fd;\ - R = X->bk;\ - if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\ - F->bk = R;\ - R->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else {\ - tchunkptr* RP;\ - if (((R = *(RP = &(X->child[1]))) != 0) ||\ - ((R = *(RP = &(X->child[0]))) != 0)) {\ - tchunkptr* CP;\ - while ((*(CP = &(R->child[1])) != 0) ||\ - (*(CP = &(R->child[0])) != 0)) {\ - R = *(RP = CP);\ - }\ - if (RTCHECK(ok_address(M, RP)))\ - *RP = 0;\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - }\ - if (XP != 0) {\ - tbinptr* H = treebin_at(M, X->index);\ - if (X == *H) {\ - if ((*H = R) == 0) \ - clear_treemap(M, X->index);\ - }\ - else if (RTCHECK(ok_address(M, XP))) {\ - if (XP->child[0] == X) \ - XP->child[0] = R;\ - else \ - XP->child[1] = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - if (R != 0) {\ - if (RTCHECK(ok_address(M, R))) {\ - tchunkptr C0, C1;\ - R->parent = XP;\ - if ((C0 = X->child[0]) != 0) {\ - if (RTCHECK(ok_address(M, C0))) {\ - R->child[0] = C0;\ - C0->parent = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - if ((C1 = X->child[1]) != 0) {\ - if (RTCHECK(ok_address(M, C1))) {\ - R->child[1] = C1;\ - C1->parent = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ -} +#define unlink_large_chunk(M, X) \ + { \ + \ + tchunkptr XP = X->parent; \ + tchunkptr R; \ + if (X->bk != X) { \ + \ + tchunkptr F = X->fd; \ + R = X->bk; \ + if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) { \ + \ + F->bk = R; \ + R->fd = F; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } else { \ + \ + tchunkptr *RP; \ + if (((R = *(RP = &(X->child[1]))) != 0) || \ + ((R = *(RP = &(X->child[0]))) != 0)) { \ + \ + tchunkptr *CP; \ + while ((*(CP = &(R->child[1])) != 0) || \ + (*(CP = &(R->child[0])) != 0)) { \ + \ + R = *(RP = CP); \ + \ + } \ + if (RTCHECK(ok_address(M, RP))) \ + *RP = 0; \ + else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } \ + \ + } \ + if (XP != 0) { \ + \ + tbinptr *H = treebin_at(M, X->index); \ + if (X == *H) { \ + \ + if ((*H = R) == 0) clear_treemap(M, X->index); \ + \ + } else if (RTCHECK(ok_address(M, XP))) { \ + \ + if (XP->child[0] == X) \ + XP->child[0] = R; \ + else \ + XP->child[1] = R; \ + \ + } else \ + CORRUPTION_ERROR_ACTION(M); \ + if (R != 0) { \ + \ + if (RTCHECK(ok_address(M, R))) { \ + \ + tchunkptr C0, C1; \ + R->parent = XP; \ + if ((C0 = X->child[0]) != 0) { \ + \ + if (RTCHECK(ok_address(M, C0))) { \ + \ + R->child[0] = C0; \ + C0->parent = R; \ + \ + } else \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + if ((C1 = X->child[1]) != 0) { \ + \ + if (RTCHECK(ok_address(M, C1))) { \ + \ + R->child[1] = C1; \ + C1->parent = R; \ + \ + } else \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } else \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } \ + \ + } /* Relays to large vs small bin operations */ -#define insert_chunk(M, P, S)\ - if (is_small(S)) insert_small_chunk(M, P, S)\ - else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } - -#define unlink_chunk(M, P, S)\ - if (is_small(S)) unlink_small_chunk(M, P, S)\ - else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } +#define insert_chunk(M, P, S) \ + if (is_small(S)) insert_small_chunk(M, P, S) else { \ + \ + tchunkptr TP = (tchunkptr)(P); \ + insert_large_chunk(M, TP, S); \ + \ + } +#define unlink_chunk(M, P, S) \ + if (is_small(S)) 
unlink_small_chunk(M, P, S) else { \ + \ + tchunkptr TP = (tchunkptr)(P); \ + unlink_large_chunk(M, TP); \ + \ + } /* Relays to internal calls to malloc/free from realloc, memalign etc */ #if ONLY_MSPACES -#define internal_malloc(m, b) mspace_malloc(m, b) -#define internal_free(m, mem) mspace_free(m,mem); -#else /* ONLY_MSPACES */ -#if MSPACES -#define internal_malloc(m, b)\ - ((m == gm)? dlmalloc(b) : mspace_malloc(m, b)) -#define internal_free(m, mem)\ - if (m == gm) dlfree(mem); else mspace_free(m,mem); -#else /* MSPACES */ -#define internal_malloc(m, b) dlmalloc(b) -#define internal_free(m, mem) dlfree(mem) -#endif /* MSPACES */ -#endif /* ONLY_MSPACES */ + #define internal_malloc(m, b) mspace_malloc(m, b) + #define internal_free(m, mem) mspace_free(m, mem); +#else /* ONLY_MSPACES */ + #if MSPACES + #define internal_malloc(m, b) \ + ((m == gm) ? dlmalloc(b) : mspace_malloc(m, b)) + #define internal_free(m, mem) \ + if (m == gm) \ + dlfree(mem); \ + else \ + mspace_free(m, mem); + #else /* MSPACES */ + #define internal_malloc(m, b) dlmalloc(b) + #define internal_free(m, mem) dlfree(mem) + #endif /* MSPACES */ +#endif /* ONLY_MSPACES */ /* ----------------------- Direct-mmapping chunks ----------------------- */ @@ -3870,80 +4205,93 @@ static void internal_malloc_stats(mstate m) { */ /* Malloc using mmap */ -static void* mmap_alloc(mstate m, size_t nb) { +static void *mmap_alloc(mstate m, size_t nb) { + size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); if (m->footprint_limit != 0) { + size_t fp = m->footprint + mmsize; - if (fp <= m->footprint || fp > m->footprint_limit) - return 0; + if (fp <= m->footprint || fp > m->footprint_limit) return 0; + } - if (mmsize > nb) { /* Check for wrap around 0 */ - char* mm = (char*)(CALL_DIRECT_MMAP(mmsize)); + + if (mmsize > nb) { /* Check for wrap around 0 */ + char *mm = (char *)(CALL_DIRECT_MMAP(mmsize)); if (mm != CMFAIL) { - size_t offset = align_offset(chunk2mem(mm)); - size_t psize = mmsize - offset - MMAP_FOOT_PAD; + + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - MMAP_FOOT_PAD; mchunkptr p = (mchunkptr)(mm + offset); p->prev_foot = offset; p->head = psize; mark_inuse_foot(m, p, psize); chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; + chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0; - if (m->least_addr == 0 || mm < m->least_addr) - m->least_addr = mm; + if (m->least_addr == 0 || mm < m->least_addr) m->least_addr = mm; if ((m->footprint += mmsize) > m->max_footprint) m->max_footprint = m->footprint; assert(is_aligned(chunk2mem(p))); check_mmapped_chunk(m, p); return chunk2mem(p); + } + } + return 0; + } /* Realloc using mmap */ static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) { + size_t oldsize = chunksize(oldp); - (void)flags; /* placate people compiling -Wunused */ - if (is_small(nb)) /* Can't shrink mmap regions below small size */ + (void)flags; /* placate people compiling -Wunused */ + if (is_small(nb)) /* Can't shrink mmap regions below small size */ return 0; /* Keep old chunk if big enough but not too big */ if (oldsize >= nb + SIZE_T_SIZE && (oldsize - nb) <= (mparams.granularity << 1)) return oldp; else { + size_t offset = oldp->prev_foot; size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - char* cp = (char*)CALL_MREMAP((char*)oldp - offset, - oldmmsize, newmmsize, flags); + char * cp = + (char *)CALL_MREMAP((char 
*)oldp - offset, oldmmsize, newmmsize, flags); if (cp != CMFAIL) { + mchunkptr newp = (mchunkptr)(cp + offset); - size_t psize = newmmsize - offset - MMAP_FOOT_PAD; + size_t psize = newmmsize - offset - MMAP_FOOT_PAD; newp->head = psize; mark_inuse_foot(m, newp, psize); chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; + chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0; - if (cp < m->least_addr) - m->least_addr = cp; + if (cp < m->least_addr) m->least_addr = cp; if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) m->max_footprint = m->footprint; check_mmapped_chunk(m, newp); return newp; + } + } + return 0; -} +} /* -------------------------- mspace management -------------------------- */ /* Initialize top chunk and its size */ static void init_top(mstate m, mchunkptr p, size_t psize) { + /* Ensure alignment */ size_t offset = align_offset(chunk2mem(p)); - p = (mchunkptr)((char*)p + offset); + p = (mchunkptr)((char *)p + offset); psize -= offset; m->top = p; @@ -3951,23 +4299,29 @@ static void init_top(mstate m, mchunkptr p, size_t psize) { p->head = psize | PINUSE_BIT; /* set size of fake trailing chunk holding overhead space only once */ chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; - m->trim_check = mparams.trim_threshold; /* reset on each update */ + m->trim_check = mparams.trim_threshold; /* reset on each update */ + } /* Initialize bins for a new mstate that is otherwise zeroed out */ static void init_bins(mstate m) { + /* Establish circular links for smallbins */ bindex_t i; for (i = 0; i < NSMALLBINS; ++i) { - sbinptr bin = smallbin_at(m,i); + + sbinptr bin = smallbin_at(m, i); bin->fd = bin->bk = bin; + } + } #if PROCEED_ON_ERROR /* default corruption action */ static void reset_on_error(mstate m) { + int i; ++malloc_corruption_error_count; /* Reinitialize fields to forget about all memory */ @@ -3980,67 +4334,78 @@ static void reset_on_error(mstate m) { for (i = 0; i < NTREEBINS; ++i) *treebin_at(m, i) = 0; init_bins(m); + } -#endif /* PROCEED_ON_ERROR */ + +#endif /* PROCEED_ON_ERROR */ /* Allocate chunk and prepend remainder with chunk in successor base. 
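A toy version of the size arithmetic used by this function, illustrative only: it ignores chunk headers and the alignment offsets the real code applies, and just shows how the remainder in front of the old base is measured.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

int main(void) {
  static char space[4096];
  char  *newbase = space;               // freshly obtained region
  char  *oldbase = space + 1024;        // start of the previously used region
  size_t nb      = 160;                 // granted request size

  char  *p     = newbase;               // chunk handed to the caller
  char  *q     = p + nb;                // remainder in front of oldbase
  size_t psize = (size_t)(oldbase - p); // whole span being carved up
  size_t qsize = psize - nb;            // amount consolidated forward

  assert(q + qsize == oldbase);         // remainder ends exactly at the old base
  printf("allocated %zu bytes, %zu bytes merge into the old first chunk\n",
         nb, qsize);
  return 0;
}
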
*/ -static void* prepend_alloc(mstate m, char* newbase, char* oldbase, - size_t nb) { +static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb) { + mchunkptr p = align_as_chunk(newbase); mchunkptr oldfirst = align_as_chunk(oldbase); - size_t psize = (char*)oldfirst - (char*)p; + size_t psize = (char *)oldfirst - (char *)p; mchunkptr q = chunk_plus_offset(p, nb); - size_t qsize = psize - nb; + size_t qsize = psize - nb; set_size_and_pinuse_of_inuse_chunk(m, p, nb); - assert((char*)oldfirst > (char*)q); + assert((char *)oldfirst > (char *)q); assert(pinuse(oldfirst)); assert(qsize >= MIN_CHUNK_SIZE); /* consolidate remainder with first chunk of old base */ if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; m->top = q; q->head = tsize | PINUSE_BIT; check_top_chunk(m, q); - } - else if (oldfirst == m->dv) { + + } else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; m->dv = q; set_size_and_pinuse_of_free_chunk(q, dsize); - } - else { + + } else { + if (!is_inuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); unlink_chunk(m, oldfirst, nsize); oldfirst = chunk_plus_offset(oldfirst, nsize); qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); insert_chunk(m, q, qsize); check_free_chunk(m, q); + } check_malloced_chunk(m, chunk2mem(p), nb); return chunk2mem(p); + } /* Add a segment to hold a new noncontiguous region */ -static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { +static void add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped) { + /* Determine locations and sizes of segment, fenceposts, old top */ - char* old_top = (char*)m->top; + char * old_top = (char *)m->top; msegmentptr oldsp = segment_holding(m, old_top); - char* old_end = oldsp->base + oldsp->size; - size_t ssize = pad_request(sizeof(struct malloc_segment)); - char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - size_t offset = align_offset(chunk2mem(rawsp)); - char* asp = rawsp + offset; - char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; - mchunkptr sp = (mchunkptr)csp; + char * old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char * rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char * asp = rawsp + offset; + char * csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? 
old_top : asp; + mchunkptr sp = (mchunkptr)csp; msegmentptr ss = (msegmentptr)(chunk2mem(sp)); - mchunkptr tnext = chunk_plus_offset(sp, ssize); - mchunkptr p = tnext; - int nfences = 0; + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + int nfences = 0; /* reset top to new space */ init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); @@ -4048,7 +4413,7 @@ static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { /* Set up segment record */ assert(is_aligned(ss)); set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); - *ss = m->seg; /* Push current record */ + *ss = m->seg; /* Push current record */ m->seg.base = tbase; m->seg.size = tsize; m->seg.sflags = mmapped; @@ -4056,53 +4421,61 @@ static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { /* Insert trailing fenceposts */ for (;;) { + mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); p->head = FENCEPOST_HEAD; ++nfences; - if ((char*)(&(nextp->head)) < old_end) + if ((char *)(&(nextp->head)) < old_end) p = nextp; else break; + } + assert(nfences >= 2); /* Insert the rest of old top into a bin as an ordinary free chunk */ if (csp != old_top) { + mchunkptr q = (mchunkptr)old_top; - size_t psize = csp - old_top; + size_t psize = csp - old_top; mchunkptr tn = chunk_plus_offset(q, psize); set_free_with_pinuse(q, psize, tn); insert_chunk(m, q, psize); + } check_top_chunk(m, m->top); + } /* -------------------------- System allocation -------------------------- */ /* Get memory from system using MORECORE or MMAP */ -static void* sys_alloc(mstate m, size_t nb) { - char* tbase = CMFAIL; +static void *sys_alloc(mstate m, size_t nb) { + + char * tbase = CMFAIL; size_t tsize = 0; flag_t mmap_flag = 0; - size_t asize; /* allocation size */ + size_t asize; /* allocation size */ ensure_initialization(); /* Directly map large chunks, but only if already initialized */ if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) { - void* mem = mmap_alloc(m, nb); - if (mem != 0) - return mem; + + void *mem = mmap_alloc(m, nb); + if (mem != 0) return mem; + } asize = granularity_align(nb + SYS_ALLOC_PADDING); - if (asize <= nb) - return 0; /* wraparound */ + if (asize <= nb) return 0; /* wraparound */ if (m->footprint_limit != 0) { + size_t fp = m->footprint + asize; - if (fp <= m->footprint || fp > m->footprint_limit) - return 0; + if (fp <= m->footprint || fp > m->footprint_limit) return 0; + } /* @@ -4128,91 +4501,119 @@ static void* sys_alloc(mstate m, size_t nb) { */ if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { - char* br = CMFAIL; - size_t ssize = asize; /* sbrk call size */ - msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); + + char * br = CMFAIL; + size_t ssize = asize; /* sbrk call size */ + msegmentptr ss = (m->top == 0) ? 
0 : segment_holding(m, (char *)m->top); ACQUIRE_MALLOC_GLOBAL_LOCK(); - if (ss == 0) { /* First time through or recovery */ - char* base = (char*)CALL_MORECORE(0); + if (ss == 0) { /* First time through or recovery */ + char *base = (char *)CALL_MORECORE(0); if (base != CMFAIL) { + size_t fp; /* Adjust to end on a page boundary */ if (!is_page_aligned(base)) ssize += (page_align((size_t)base) - (size_t)base); - fp = m->footprint + ssize; /* recheck limits */ + fp = m->footprint + ssize; /* recheck limits */ if (ssize > nb && ssize < HALF_MAX_SIZE_T && (m->footprint_limit == 0 || (fp > m->footprint && fp <= m->footprint_limit)) && - (br = (char*)(CALL_MORECORE(ssize))) == base) { + (br = (char *)(CALL_MORECORE(ssize))) == base) { + tbase = base; tsize = ssize; + } + } - } - else { + + } else { + /* Subtract out existing available top space from MORECORE request. */ ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING); /* Use mem here only if it did continuously extend old space */ if (ssize < HALF_MAX_SIZE_T && - (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) { + (br = (char *)(CALL_MORECORE(ssize))) == ss->base + ss->size) { + tbase = br; tsize = ssize; + } + } - if (tbase == CMFAIL) { /* Cope with partial failure */ - if (br != CMFAIL) { /* Try to use/extend the space we did get */ - if (ssize < HALF_MAX_SIZE_T && - ssize < nb + SYS_ALLOC_PADDING) { + if (tbase == CMFAIL) { /* Cope with partial failure */ + if (br != CMFAIL) { /* Try to use/extend the space we did get */ + if (ssize < HALF_MAX_SIZE_T && ssize < nb + SYS_ALLOC_PADDING) { + size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize); if (esize < HALF_MAX_SIZE_T) { - char* end = (char*)CALL_MORECORE(esize); + + char *end = (char *)CALL_MORECORE(esize); if (end != CMFAIL) ssize += esize; - else { /* Can't use; try to release */ - (void) CALL_MORECORE(-ssize); + else { /* Can't use; try to release */ + (void)CALL_MORECORE(-ssize); br = CMFAIL; + } + } + } + } - if (br != CMFAIL) { /* Use the space we did get */ + + if (br != CMFAIL) { /* Use the space we did get */ tbase = br; tsize = ssize; - } - else - disable_contiguous(m); /* Don't try contiguous path in the future */ + + } else + + disable_contiguous(m); /* Don't try contiguous path in the future */ + } RELEASE_MALLOC_GLOBAL_LOCK(); + } - if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ - char* mp = (char*)(CALL_MMAP(asize)); + if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ + char *mp = (char *)(CALL_MMAP(asize)); if (mp != CMFAIL) { + tbase = mp; tsize = asize; mmap_flag = USE_MMAP_BIT; + } + } - if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ + if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ if (asize < HALF_MAX_SIZE_T) { - char* br = CMFAIL; - char* end = CMFAIL; + + char *br = CMFAIL; + char *end = CMFAIL; ACQUIRE_MALLOC_GLOBAL_LOCK(); - br = (char*)(CALL_MORECORE(asize)); - end = (char*)(CALL_MORECORE(0)); + br = (char *)(CALL_MORECORE(asize)); + end = (char *)(CALL_MORECORE(0)); RELEASE_MALLOC_GLOBAL_LOCK(); if (br != CMFAIL && end != CMFAIL && br < end) { + size_t ssize = end - br; if (ssize > nb + TOP_FOOT_SIZE) { + tbase = br; tsize = ssize; + } + } + } + } if (tbase != CMFAIL) { @@ -4220,9 +4621,8 @@ static void* sys_alloc(mstate m, size_t nb) { if ((m->footprint += tsize) > m->max_footprint) m->max_footprint = m->footprint; - if (!is_initialized(m)) { /* first-time initialization */ - if (m->least_addr == 0 || tbase < m->least_addr) - m->least_addr = tbase; + if 
(!is_initialized(m)) { /* first-time initialization */ + if (m->least_addr == 0 || tbase < m->least_addr) m->least_addr = tbase; m->seg.base = tbase; m->seg.size = tsize; m->seg.sflags = mmap_flag; @@ -4235,46 +4635,52 @@ static void* sys_alloc(mstate m, size_t nb) { else #endif { + /* Offset top by embedded malloc_state */ mchunkptr mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); + init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE); + } + } else { + /* Try to merge with an existing segment */ msegmentptr sp = &m->seg; /* Only consider most recent segment if traversal suppressed */ while (sp != 0 && tbase != sp->base + sp->size) sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; - if (sp != 0 && - !is_extern_segment(sp) && + if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & USE_MMAP_BIT) == mmap_flag && - segment_holds(sp, m->top)) { /* append */ + segment_holds(sp, m->top)) { /* append */ sp->size += tsize; init_top(m, m->top, m->topsize + tsize); - } - else { - if (tbase < m->least_addr) - m->least_addr = tbase; + + } else { + + if (tbase < m->least_addr) m->least_addr = tbase; sp = &m->seg; while (sp != 0 && sp->base != tbase + tsize) sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; - if (sp != 0 && - !is_extern_segment(sp) && + if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & USE_MMAP_BIT) == mmap_flag) { - char* oldbase = sp->base; + + char *oldbase = sp->base; sp->base = tbase; sp->size += tsize; return prepend_alloc(m, tbase, oldbase, nb); - } - else + + } else + add_segment(m, tbase, tsize, mmap_flag); + } + } - if (nb < m->topsize) { /* Allocate from new or extended top space */ - size_t rsize = m->topsize -= nb; + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; mchunkptr p = m->top; mchunkptr r = m->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -4282,313 +4688,421 @@ static void* sys_alloc(mstate m, size_t nb) { check_top_chunk(m, m->top); check_malloced_chunk(m, chunk2mem(p), nb); return chunk2mem(p); + } + } MALLOC_FAILURE_ACTION; return 0; + } /* ----------------------- system deallocation -------------------------- */ /* Unmap and unlink any mmapped segments that don't contain used chunks */ static size_t release_unused_segments(mstate m) { - size_t released = 0; - int nsegs = 0; + + size_t released = 0; + int nsegs = 0; msegmentptr pred = &m->seg; msegmentptr sp = pred->next; while (sp != 0) { - char* base = sp->base; - size_t size = sp->size; + + char * base = sp->base; + size_t size = sp->size; msegmentptr next = sp->next; ++nsegs; if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { + mchunkptr p = align_as_chunk(base); - size_t psize = chunksize(p); + size_t psize = chunksize(p); /* Can unmap if first chunk holds entire segment and not pinned */ - if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { + if (!is_inuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) { + tchunkptr tp = (tchunkptr)p; - assert(segment_holds(sp, (char*)sp)); + assert(segment_holds(sp, (char *)sp)); if (p == m->dv) { + m->dv = 0; m->dvsize = 0; - } - else { + + } else { + unlink_large_chunk(m, tp); + } + if (CALL_MUNMAP(base, size) == 0) { + released += size; m->footprint -= size; /* unlink obsoleted record */ sp = pred; sp->next = next; - } - else { /* back out if cannot unmap */ + + } else { /* back out if cannot unmap */ + insert_large_chunk(m, tp, psize); + } + } + } - if (NO_SEGMENT_TRAVERSAL) /* scan only first 
segment */ + + if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ break; pred = sp; sp = next; + } + /* Reset check counter */ - m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)? - (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE); + m->release_checks = (((size_t)nsegs > (size_t)MAX_RELEASE_CHECK_RATE) + ? (size_t)nsegs + : (size_t)MAX_RELEASE_CHECK_RATE); return released; + } static int sys_trim(mstate m, size_t pad) { + size_t released = 0; ensure_initialization(); if (pad < MAX_REQUEST && is_initialized(m)) { - pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ if (m->topsize > pad) { + /* Shrink top space in granularity-size units, keeping at least one */ size_t unit = mparams.granularity; - size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - - SIZE_T_ONE) * unit; - msegmentptr sp = segment_holding(m, (char*)m->top); + size_t extra = + ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char *)m->top); if (!is_extern_segment(sp)) { + if (is_mmapped_segment(sp)) { - if (HAVE_MMAP && - sp->size >= extra && - !has_segment_link(m, sp)) { /* can't shrink if pinned */ + + if (HAVE_MMAP && sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ size_t newsize = sp->size - extra; - (void)newsize; /* placate people compiling -Wunused-variable */ + (void)newsize; /* placate people compiling -Wunused-variable */ /* Prefer mremap, fall back to munmap */ if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { + released = extra; + } + } - } - else if (HAVE_MORECORE) { - if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ - extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; + + } else if (HAVE_MORECORE) { + + if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ + extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; ACQUIRE_MALLOC_GLOBAL_LOCK(); { + /* Make sure end of memory is where we last set it. */ - char* old_br = (char*)(CALL_MORECORE(0)); + char *old_br = (char *)(CALL_MORECORE(0)); if (old_br == sp->base + sp->size) { - char* rel_br = (char*)(CALL_MORECORE(-extra)); - char* new_br = (char*)(CALL_MORECORE(0)); + + char *rel_br = (char *)(CALL_MORECORE(-extra)); + char *new_br = (char *)(CALL_MORECORE(0)); if (rel_br != CMFAIL && new_br < old_br) released = old_br - new_br; + } + } + RELEASE_MALLOC_GLOBAL_LOCK(); + } + } if (released != 0) { + sp->size -= released; m->footprint -= released; init_top(m, m->top, m->topsize - released); check_top_chunk(m, m->top); + } + } /* Unmap any unused mmapped segments */ - if (HAVE_MMAP) - released += release_unused_segments(m); + if (HAVE_MMAP) released += release_unused_segments(m); /* On failure, disable autotrim to avoid repeated failed future calls */ - if (released == 0 && m->topsize > m->trim_check) - m->trim_check = MAX_SIZE_T; + if (released == 0 && m->topsize > m->trim_check) m->trim_check = MAX_SIZE_T; + } - return (released != 0)? 1 : 0; + return (released != 0) ? 1 : 0; + } /* Consolidate and bin a chunk. Differs from exported versions of free mainly in that the chunk need not be marked as inuse. 
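The backward-consolidation step in the function below leans on the boundary tag: a free chunk's size is mirrored in the prev_foot field of the chunk that follows it, so the predecessor is reachable with one subtraction. The sketch here uses a simplified two-word header, not dlmalloc's real layout.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  size_t prev_foot;   // size of the previous chunk, valid when it is free
  size_t head;        // this chunk's own size (status bits omitted here)
} hdr;

int main(void) {
  char *arena = malloc(256);
  if (arena == NULL) return 1;

  hdr *prev = (hdr *)arena;             // a free chunk occupying 64 bytes
  hdr *p    = (hdr *)(arena + 64);      // the chunk being disposed of

  prev->head   = 64;                    // the free chunk records its size...
  p->prev_foot = 64;                    // ...and its successor mirrors it

  hdr *back = (hdr *)((char *)p - p->prev_foot);  // one subtraction back
  assert(back == prev);                 // ready to merge with the neighbour
  printf("previous chunk found %zu bytes below\n", p->prev_foot);

  free(arena);
  return 0;
}
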
*/ static void dispose_chunk(mstate m, mchunkptr p, size_t psize) { + mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { + mchunkptr prev; - size_t prevsize = p->prev_foot; + size_t prevsize = p->prev_foot; if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) - m->footprint -= psize; + if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) m->footprint -= psize; return; + } + prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ if (p != m->dv) { + unlink_chunk(m, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { + + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + m->dvsize = psize; set_free_with_pinuse(p, psize, next); return; + } - } - else { + + } else { + CORRUPTION_ERROR_ACTION(m); return; + } + } + if (RTCHECK(ok_address(m, next))) { - if (!cinuse(next)) { /* consolidate forward */ + + if (!cinuse(next)) { /* consolidate forward */ if (next == m->top) { + size_t tsize = m->topsize += psize; m->top = p; p->head = tsize | PINUSE_BIT; if (p == m->dv) { + m->dv = 0; m->dvsize = 0; + } + return; - } - else if (next == m->dv) { + + } else if (next == m->dv) { + size_t dsize = m->dvsize += psize; m->dv = p; set_size_and_pinuse_of_free_chunk(p, dsize); return; - } - else { + + } else { + size_t nsize = chunksize(next); psize += nsize; unlink_chunk(m, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == m->dv) { + m->dvsize = psize; return; + } + } - } - else { + + } else { + set_free_with_pinuse(p, psize, next); + } + insert_chunk(m, p, psize); - } - else { + + } else { + CORRUPTION_ERROR_ACTION(m); + } + } /* ---------------------------- malloc --------------------------- */ /* allocate a large request from the best fitting chunk in a treebin */ -static void* tmalloc_large(mstate m, size_t nb) { +static void *tmalloc_large(mstate m, size_t nb) { + tchunkptr v = 0; - size_t rsize = -nb; /* Unsigned negation */ + size_t rsize = -nb; /* Unsigned negation */ tchunkptr t; - bindex_t idx; + bindex_t idx; compute_tree_index(nb, idx); if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ - size_t sizebits = nb << leftshift_for_tree_index(idx); - tchunkptr rst = 0; /* The deepest untaken right subtree */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ for (;;) { + tchunkptr rt; - size_t trem = chunksize(t) - nb; + size_t trem = chunksize(t) - nb; if (trem < rsize) { + v = t; - if ((rsize = trem) == 0) - break; + if ((rsize = trem) == 0) break; + } + rt = t->child[1]; - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; - if (rt != 0 && rt != t) - rst = rt; + t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) rst = rt; if (t == 0) { - t = rst; /* set t to least subtree holding sizes > nb */ + + t = rst; /* set t to least subtree holding sizes > nb */ break; + } + sizebits <<= 1; + } + } - if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; if (leftbits != 0) { + bindex_t i; binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); t = *treebin_at(m, i); + } + } - while (t != 0) { /* find smallest of tree or subtree */ + 
while (t != 0) { /* find smallest of tree or subtree */ size_t trem = chunksize(t) - nb; if (trem < rsize) { + rsize = trem; v = t; + } + t = leftmost_child(t); + } /* If dv is a better fit, return 0 so malloc will use it */ if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { - if (RTCHECK(ok_address(m, v))) { /* split */ + + if (RTCHECK(ok_address(m, v))) { /* split */ mchunkptr r = chunk_plus_offset(v, nb); assert(chunksize(v) == rsize + nb); if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); if (rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(m, v, (rsize + nb)); else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); set_size_and_pinuse_of_free_chunk(r, rsize); insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + } + CORRUPTION_ERROR_ACTION(m); + } + return 0; + } /* allocate a small request from the best fitting chunk in a treebin */ -static void* tmalloc_small(mstate m, size_t nb) { +static void *tmalloc_small(mstate m, size_t nb) { + tchunkptr t, v; - size_t rsize; - bindex_t i; - binmap_t leastbit = least_bit(m->treemap); + size_t rsize; + bindex_t i; + binmap_t leastbit = least_bit(m->treemap); compute_bit2idx(leastbit, i); v = t = *treebin_at(m, i); rsize = chunksize(t) - nb; while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; if (trem < rsize) { + rsize = trem; v = t; + } + } if (RTCHECK(ok_address(m, v))) { + mchunkptr r = chunk_plus_offset(v, nb); assert(chunksize(v) == rsize + nb); if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); if (rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(m, v, (rsize + nb)); else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(m, r, rsize); + } + return chunk2mem(v); + } + } CORRUPTION_ERROR_ACTION(m); return 0; + } #if !ONLY_MSPACES -void* dlmalloc(size_t bytes) { +void *dlmalloc(size_t bytes) { + /* Basic algorithm: If a small request (< 256 bytes minus per-chunk overhead): @@ -4612,23 +5126,25 @@ void* dlmalloc(size_t bytes) { The ugly goto's here ensure that postaction occurs along all paths. */ -#if USE_LOCKS - ensure_initialization(); /* initialize in sys_alloc if not using locks */ -#endif + #if USE_LOCKS + ensure_initialization(); /* initialize in sys_alloc if not using locks */ + #endif if (!PREACTION(gm)) { - void* mem; + + void * mem; size_t nb; if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; binmap_t smallbits; - nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes); idx = small_index(nb); smallbits = gm->smallmap >> idx; - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
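The shortcut taken at this point can be sketched in isolation; this is not part of the patch and assumes the usual 8-byte smallbin spacing. After shifting the bin bitmap down by the wanted index, bit 0 says the exact bin is populated and bit 1 says the next one is, and ~smallbits & 1 bumps the index when only the next bin qualifies.

#include <assert.h>
#include <stdio.h>

int main(void) {
  unsigned smallmap = 0;
  smallmap |= 1u << 6;              // pretend bin 6 (48-byte chunks) is populated

  unsigned idx       = 5;           // bin wanted for a 40-byte chunk
  unsigned smallbits = smallmap >> idx;

  if ((smallbits & 0x3u) != 0) {    // this bin or the next one holds a fit
    idx += ~smallbits & 1;          // exact bin empty, so take the next one
    assert(idx == 6);
    printf("serving the request from bin %u\n", idx);
  }

  return 0;
}
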
*/ mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ + idx += ~smallbits & 1; /* Uses next bin if idx empty */ b = smallbin_at(gm, idx); p = b->fd; assert(chunksize(p) == small_index2size(idx)); @@ -4637,15 +5153,17 @@ void* dlmalloc(size_t bytes) { mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; + } else if (nb > gm->dvsize) { - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ mchunkptr b, p, r; - size_t rsize; - bindex_t i; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - binmap_t leastbit = least_bit(leftbits); + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); b = smallbin_at(gm, i); p = b->fd; @@ -4656,54 +5174,71 @@ void* dlmalloc(size_t bytes) { if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(gm, p, small_index2size(i)); else { + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); r = chunk_plus_offset(p, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(gm, r, rsize); + } + mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; + } else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); goto postaction; + } + } - } - else if (bytes >= MAX_REQUEST) + + } else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ else { + nb = pad_request(bytes); if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); goto postaction; + } + } if (nb <= gm->dvsize) { - size_t rsize = gm->dvsize - nb; + + size_t rsize = gm->dvsize - nb; mchunkptr p = gm->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ mchunkptr r = gm->dv = chunk_plus_offset(p, nb); gm->dvsize = rsize; set_size_and_pinuse_of_free_chunk(r, rsize); set_size_and_pinuse_of_inuse_chunk(gm, p, nb); - } - else { /* exhaust dv */ + + } else { /* exhaust dv */ + size_t dvs = gm->dvsize; gm->dvsize = 0; gm->dv = 0; set_inuse_and_pinuse(gm, p, dvs); + } + mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; + } - else if (nb < gm->topsize) { /* Split top */ - size_t rsize = gm->topsize -= nb; + else if (nb < gm->topsize) { /* Split top */ + size_t rsize = gm->topsize -= nb; mchunkptr p = gm->top; mchunkptr r = gm->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -4712,6 +5247,7 @@ void* dlmalloc(size_t bytes) { check_top_chunk(gm, gm->top); check_malloced_chunk(gm, mem, nb); goto postaction; + } mem = sys_alloc(gm, nb); @@ -4719,14 +5255,17 @@ void* dlmalloc(size_t bytes) { postaction: POSTACTION(gm); return mem; + } return 0; + } /* ---------------------------- free --------------------------- */ -void dlfree(void* mem) { +void dlfree(void *mem) { + /* Consolidate freed chunks with preceeding or succeeding bordering free chunks, if they exist, and then place in a bin. 
Intermixed @@ -4734,164 +5273,216 @@ void dlfree(void* mem) { */ if (mem != 0) { - mchunkptr p = mem2chunk(mem); -#if FOOTERS + + mchunkptr p = mem2chunk(mem); + #if FOOTERS mstate fm = get_mstate_for(p); if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); return; + } -#else /* FOOTERS */ -#define fm gm -#endif /* FOOTERS */ + + #else /* FOOTERS */ + #define fm gm + #endif /* FOOTERS */ if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { - size_t psize = chunksize(p); + + size_t psize = chunksize(p); mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { + size_t prevsize = p->prev_foot; if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) fm->footprint -= psize; goto postaction; - } - else { + + } else { + mchunkptr prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { + + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; set_free_with_pinuse(p, psize, next); goto postaction; + } - } - else + + } else + goto erroraction; + } + } if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { - if (!cinuse(next)) { /* consolidate forward */ + + if (!cinuse(next)) { /* consolidate forward */ if (next == fm->top) { + size_t tsize = fm->topsize += psize; fm->top = p; p->head = tsize | PINUSE_BIT; if (p == fm->dv) { + fm->dv = 0; fm->dvsize = 0; + } - if (should_trim(fm, tsize)) - sys_trim(fm, 0); + + if (should_trim(fm, tsize)) sys_trim(fm, 0); goto postaction; - } - else if (next == fm->dv) { + + } else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; fm->dv = p; set_size_and_pinuse_of_free_chunk(p, dsize); goto postaction; - } - else { + + } else { + size_t nsize = chunksize(next); psize += nsize; unlink_chunk(fm, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == fm->dv) { + fm->dvsize = psize; goto postaction; + } + } - } - else + + } else + set_free_with_pinuse(p, psize, next); if (is_small(psize)) { + insert_small_chunk(fm, p, psize); check_free_chunk(fm, p); - } - else { + + } else { + tchunkptr tp = (tchunkptr)p; insert_large_chunk(fm, tp, psize); check_free_chunk(fm, p); - if (--fm->release_checks == 0) - release_unused_segments(fm); + if (--fm->release_checks == 0) release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: USAGE_ERROR_ACTION(fm, p); postaction: POSTACTION(fm); + } + } -#if !FOOTERS -#undef fm -#endif /* FOOTERS */ + + #if !FOOTERS + #undef fm + #endif /* FOOTERS */ + } -void* dlcalloc(size_t n_elements, size_t elem_size) { - void* mem; +void *dlcalloc(size_t n_elements, size_t elem_size) { + + void * mem; size_t req = 0; if (n_elements != 0) { + req = n_elements * elem_size; if (((n_elements | elem_size) & ~(size_t)0xffff) && (req / n_elements != elem_size)) - req = MAX_SIZE_T; /* force downstream failure on overflow */ + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = dlmalloc(req); if (mem != 0 && calloc_must_clear(mem2chunk(mem))) __builtin_memset(mem, 0, req); return mem; + } -#endif /* !ONLY_MSPACES */ +#endif /* !ONLY_MSPACES */ /* ------------ Internal support for realloc, memalign, etc -------------- */ /* Try to realloc; only in-place unless can_move true */ 
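A minimal standalone sketch (not part of this patch) of the overflow guard used in the dlcalloc hunk above: the product n_elements * elem_size is divided back and re-checked only when either operand has bits above 0xffff, since two values that each fit in 16 bits cannot overflow a 32-bit or wider size_t. The helper name checked_mul_size is hypothetical and only illustrates the idiom; dlcalloc itself simply forces req to MAX_SIZE_T so the downstream allocation fails.

  #include <stddef.h>

  /* Returns 1 and stores n*m in *out when the product fits in size_t,
     0 on overflow (mirrors the cheap dlcalloc check, plus an n != 0
     guard so the division is always defined). */
  static int checked_mul_size(size_t n, size_t m, size_t *out) {

    size_t req = n * m;
    if (((n | m) & ~(size_t)0xffff) && n != 0 && req / n != m) return 0;
    *out = req;
    return 1;

  }
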
static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, int can_move) { + mchunkptr newp = 0; - size_t oldsize = chunksize(p); + size_t oldsize = chunksize(p); mchunkptr next = chunk_plus_offset(p, oldsize); - if (RTCHECK(ok_address(m, p) && ok_inuse(p) && - ok_next(p, next) && ok_pinuse(next))) { + if (RTCHECK(ok_address(m, p) && ok_inuse(p) && ok_next(p, next) && + ok_pinuse(next))) { + if (is_mmapped(p)) { + newp = mmap_resize(m, p, nb, can_move); - } - else if (oldsize >= nb) { /* already big enough */ + + } else if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; - if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ + if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ mchunkptr r = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, r, rsize); dispose_chunk(m, r, rsize); + } + newp = p; - } - else if (next == m->top) { /* extend into top */ + + } else if (next == m->top) { /* extend into top */ + if (oldsize + m->topsize > nb) { - size_t newsize = oldsize + m->topsize; - size_t newtopsize = newsize - nb; + + size_t newsize = oldsize + m->topsize; + size_t newtopsize = newsize - nb; mchunkptr newtop = chunk_plus_offset(p, nb); set_inuse(m, p, nb); - newtop->head = newtopsize |PINUSE_BIT; + newtop->head = newtopsize | PINUSE_BIT; m->top = newtop; m->topsize = newtopsize; newp = p; + } - } - else if (next == m->dv) { /* extend into dv */ + + } else if (next == m->dv) { /* extend into dv */ + size_t dvs = m->dvsize; if (oldsize + dvs >= nb) { + size_t dsize = oldsize + dvs - nb; if (dsize >= MIN_CHUNK_SIZE) { + mchunkptr r = chunk_plus_offset(p, nb); mchunkptr n = chunk_plus_offset(r, dsize); set_inuse(m, p, nb); @@ -4899,64 +5490,87 @@ static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, clear_pinuse(n); m->dvsize = dsize; m->dv = r; - } - else { /* exhaust dv */ + + } else { /* exhaust dv */ + size_t newsize = oldsize + dvs; set_inuse(m, p, newsize); m->dvsize = 0; m->dv = 0; + } + newp = p; + } - } - else if (!cinuse(next)) { /* extend into next free chunk */ + + } else if (!cinuse(next)) { /* extend into next free chunk */ + size_t nextsize = chunksize(next); if (oldsize + nextsize >= nb) { + size_t rsize = oldsize + nextsize - nb; unlink_chunk(m, next, nextsize); if (rsize < MIN_CHUNK_SIZE) { + size_t newsize = oldsize + nextsize; set_inuse(m, p, newsize); - } - else { + + } else { + mchunkptr r = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, r, rsize); dispose_chunk(m, r, rsize); + } + newp = p; + } + } - } - else { + + } else { + USAGE_ERROR_ACTION(m, chunk2mem(p)); + } + return newp; + } -static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { - void* mem = 0; - if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ +static void *internal_memalign(mstate m, size_t alignment, size_t bytes) { + + void *mem = 0; + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ alignment = MIN_CHUNK_SIZE; - if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ + if ((alignment & (alignment - SIZE_T_ONE)) != 0) { /* Ensure a power of 2 */ size_t a = MALLOC_ALIGNMENT << 1; - while (a < alignment) a <<= 1; + while (a < alignment) + a <<= 1; alignment = a; + } + if (bytes >= MAX_REQUEST - alignment) { - if (m != 0) { /* Test isn't needed but avoids compiler warning */ + + if (m != 0) { /* Test isn't needed but avoids compiler warning */ MALLOC_FAILURE_ACTION; + } - } - else { + + } else { + size_t nb = request2size(bytes); size_t 
req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; mem = internal_malloc(m, req); if (mem != 0) { + mchunkptr p = mem2chunk(mem); - if (PREACTION(m)) - return 0; - if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ + if (PREACTION(m)) return 0; + if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ /* Find an aligned spot inside chunk. Since we need to give back leading space in a chunk of at least MIN_CHUNK_SIZE, if @@ -4965,47 +5579,59 @@ static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { We've allocated enough total room so that this is always possible. */ - char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment - - SIZE_T_ONE)) & - -alignment)); - char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? - br : br+alignment; + char * br = (char *)mem2chunk((size_t)( + ((size_t)((char *)mem + alignment - SIZE_T_ONE)) & -alignment)); + char * pos = ((size_t)(br - (char *)(p)) >= MIN_CHUNK_SIZE) + ? br + : br + alignment; mchunkptr newp = (mchunkptr)pos; - size_t leadsize = pos - (char*)(p); - size_t newsize = chunksize(p) - leadsize; + size_t leadsize = pos - (char *)(p); + size_t newsize = chunksize(p) - leadsize; - if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ + if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ newp->prev_foot = p->prev_foot + leadsize; newp->head = newsize; - } - else { /* Otherwise, give back leader, use the rest */ + + } else { /* Otherwise, give back leader, use the rest */ + set_inuse(m, newp, newsize); set_inuse(m, p, leadsize); dispose_chunk(m, p, leadsize); + } + p = newp; + } /* Give back spare room at the end */ if (!is_mmapped(p)) { + size_t size = chunksize(p); if (size > nb + MIN_CHUNK_SIZE) { - size_t remainder_size = size - nb; + + size_t remainder_size = size - nb; mchunkptr remainder = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, remainder, remainder_size); dispose_chunk(m, remainder, remainder_size); + } + } mem = chunk2mem(p); - assert (chunksize(p) >= nb); + assert(chunksize(p) >= nb); assert(((size_t)mem & (alignment - 1)) == 0); check_inuse_chunk(m, p); POSTACTION(m); + } + } + return mem; + } /* @@ -5015,50 +5641,50 @@ static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { bit 0 set if all elements are same size (using sizes[0]) bit 1 set if elements should be zeroed */ -static void** ialloc(mstate m, - size_t n_elements, - size_t* sizes, - int opts, - void* chunks[]) { - - size_t element_size; /* chunksize of each element, if all same */ - size_t contents_size; /* total size of elements */ - size_t array_size; /* request size of pointer array */ - void* mem; /* malloced aggregate space */ - mchunkptr p; /* corresponding chunk */ - size_t remainder_size; /* remaining bytes while splitting */ - void** marray; /* either "chunks" or malloced ptr array */ - mchunkptr array_chunk; /* chunk for malloced ptr array */ - flag_t was_enabled; /* to disable mmap */ +static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts, + void *chunks[]) { + + size_t element_size; /* chunksize of each element, if all same */ + size_t contents_size; /* total size of elements */ + size_t array_size; /* request size of pointer array */ + void * mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + size_t remainder_size; /* remaining bytes while splitting */ + void ** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + flag_t 
was_enabled; /* to disable mmap */ size_t size; size_t i; ensure_initialization(); /* compute array length, if needed */ if (chunks != 0) { - if (n_elements == 0) - return chunks; /* nothing to do */ + + if (n_elements == 0) return chunks; /* nothing to do */ marray = chunks; array_size = 0; - } - else { + + } else { + /* if empty req, must still return chunk representing empty array */ - if (n_elements == 0) - return (void**)internal_malloc(m, 0); + if (n_elements == 0) return (void **)internal_malloc(m, 0); marray = 0; - array_size = request2size(n_elements * (sizeof(void*))); + array_size = request2size(n_elements * (sizeof(void *))); + } /* compute total element size */ - if (opts & 0x1) { /* all-same-size */ + if (opts & 0x1) { /* all-same-size */ element_size = request2size(*sizes); contents_size = n_elements * element_size; - } - else { /* add up all the sizes */ + + } else { /* add up all the sizes */ + element_size = 0; contents_size = 0; for (i = 0; i != n_elements; ++i) contents_size += request2size(sizes[i]); + } size = contents_size + array_size; @@ -5071,10 +5697,8 @@ static void** ialloc(mstate m, was_enabled = use_mmap(m); disable_mmap(m); mem = internal_malloc(m, size - CHUNK_OVERHEAD); - if (was_enabled) - enable_mmap(m); - if (mem == 0) - return 0; + if (was_enabled) enable_mmap(m); + if (mem == 0) return 0; if (PREACTION(m)) return 0; p = mem2chunk(mem); @@ -5082,24 +5706,30 @@ static void** ialloc(mstate m, assert(!is_mmapped(p)); - if (opts & 0x2) { /* optionally clear the elements */ - __builtin_memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); + if (opts & 0x2) { /* optionally clear the elements */ + __builtin_memset((size_t *)mem, 0, + remainder_size - SIZE_T_SIZE - array_size); + } /* If not provided, allocate the pointer array as final part of chunk */ if (marray == 0) { - size_t array_chunk_size; + + size_t array_chunk_size; array_chunk = chunk_plus_offset(p, contents_size); array_chunk_size = remainder_size - contents_size; - marray = (void**) (chunk2mem(array_chunk)); + marray = (void **)(chunk2mem(array_chunk)); set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); remainder_size = contents_size; + } /* split out elements */ - for (i = 0; ; ++i) { + for (i = 0;; ++i) { + marray[i] = chunk2mem(p); - if (i != n_elements-1) { + if (i != n_elements - 1) { + if (element_size != 0) size = element_size; else @@ -5107,31 +5737,42 @@ static void** ialloc(mstate m, remainder_size -= size; set_size_and_pinuse_of_inuse_chunk(m, p, size); p = chunk_plus_offset(p, size); - } - else { /* the final element absorbs any overallocation slop */ + + } else { /* the final element absorbs any overallocation slop */ + set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); break; + } + } #if DEBUG if (marray != chunks) { + /* final element must have exactly exhausted chunk */ if (element_size != 0) { + assert(remainder_size == element_size); - } - else { + + } else { + assert(remainder_size == request2size(sizes[i])); + } + check_inuse_chunk(m, mem2chunk(marray)); + } + for (i = 0; i != n_elements; ++i) check_inuse_chunk(m, mem2chunk(marray[i])); -#endif /* DEBUG */ +#endif /* DEBUG */ POSTACTION(m); return marray; + } /* Try to free all pointers in the given array. @@ -5141,316 +5782,431 @@ static void** ialloc(mstate m, chunks before freeing, which will occur often if allocated with ialloc or the array is sorted. 
*/ -static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) { +static size_t internal_bulk_free(mstate m, void *array[], size_t nelem) { + size_t unfreed = 0; if (!PREACTION(m)) { - void** a; - void** fence = &(array[nelem]); + + void **a; + void **fence = &(array[nelem]); for (a = array; a != fence; ++a) { - void* mem = *a; + + void *mem = *a; if (mem != 0) { + mchunkptr p = mem2chunk(mem); - size_t psize = chunksize(p); + size_t psize = chunksize(p); #if FOOTERS if (get_mstate_for(p) != m) { + ++unfreed; continue; + } + #endif check_inuse_chunk(m, p); *a = 0; if (RTCHECK(ok_address(m, p) && ok_inuse(p))) { - void ** b = a + 1; /* try to merge with next chunk */ + + void ** b = a + 1; /* try to merge with next chunk */ mchunkptr next = next_chunk(p); if (b != fence && *b == chunk2mem(next)) { + size_t newsize = chunksize(next) + psize; set_inuse(m, p, newsize); *b = chunk2mem(p); - } - else + + } else + dispose_chunk(m, p, psize); - } - else { + + } else { + CORRUPTION_ERROR_ACTION(m); break; + } + } + } - if (should_trim(m, m->topsize)) - sys_trim(m, 0); + + if (should_trim(m, m->topsize)) sys_trim(m, 0); POSTACTION(m); + } + return unfreed; + } /* Traversal */ #if MALLOC_INSPECT_ALL static void internal_inspect_all(mstate m, - void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { + void (*handler)(void *start, void *end, + size_t used_bytes, + void * callback_arg), + void *arg) { + if (is_initialized(m)) { - mchunkptr top = m->top; + + mchunkptr top = m->top; msegmentptr s; for (s = &m->seg; s != 0; s = s->next) { + mchunkptr q = align_as_chunk(s->base); while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) { + mchunkptr next = next_chunk(q); - size_t sz = chunksize(q); - size_t used; - void* start; + size_t sz = chunksize(q); + size_t used; + void * start; if (is_inuse(q)) { - used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ + + used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ start = chunk2mem(q); - } - else { + + } else { + used = 0; - if (is_small(sz)) { /* offset by possible bookkeeping */ - start = (void*)((char*)q + sizeof(struct malloc_chunk)); - } - else { - start = (void*)((char*)q + sizeof(struct malloc_tree_chunk)); + if (is_small(sz)) { /* offset by possible bookkeeping */ + start = (void *)((char *)q + sizeof(struct malloc_chunk)); + + } else { + + start = (void *)((char *)q + sizeof(struct malloc_tree_chunk)); + } + } - if (start < (void*)next) /* skip if all space is bookkeeping */ + + if (start < (void *)next) /* skip if all space is bookkeeping */ handler(start, next, used, arg); - if (q == top) - break; + if (q == top) break; q = next; + } + } + } + } -#endif /* MALLOC_INSPECT_ALL */ + +#endif /* MALLOC_INSPECT_ALL */ /* ------------------ Exported realloc, memalign, etc -------------------- */ #if !ONLY_MSPACES -void* dlrealloc(void* oldmem, size_t bytes) { - void* mem = 0; +void *dlrealloc(void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem == 0) { + mem = dlmalloc(bytes); - } - else if (bytes >= MAX_REQUEST) { + + } else if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } -#ifdef REALLOC_ZERO_BYTES_FREES + + #ifdef REALLOC_ZERO_BYTES_FREES else if (bytes == 0) { + dlfree(oldmem); + } -#endif /* REALLOC_ZERO_BYTES_FREES */ + + #endif /* REALLOC_ZERO_BYTES_FREES */ else { - size_t nb = request2size(bytes); + + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); -#if ! 
FOOTERS + #if !FOOTERS mstate m = gm; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); POSTACTION(m); if (newp != 0) { + check_inuse_chunk(m, newp); mem = chunk2mem(newp); - } - else { + + } else { + mem = internal_malloc(m, bytes); if (mem != 0) { + size_t oc = chunksize(oldp) - overhead_for(oldp); - __builtin_memcpy(mem, oldmem, (oc < bytes)? oc : bytes); + __builtin_memcpy(mem, oldmem, (oc < bytes) ? oc : bytes); internal_free(m, oldmem); + } + } + } + } + return mem; + } -void* dlrealloc_in_place(void* oldmem, size_t bytes) { - void* mem = 0; +void *dlrealloc_in_place(void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem != 0) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; - } - else { - size_t nb = request2size(bytes); + + } else { + + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); -#if ! FOOTERS + #if !FOOTERS mstate m = gm; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); POSTACTION(m); if (newp == oldp) { + check_inuse_chunk(m, newp); mem = oldmem; + } + } + } + } + return mem; + } -void* dlmemalign(size_t alignment, size_t bytes) { - if (alignment <= MALLOC_ALIGNMENT) { - return dlmalloc(bytes); - } +void *dlmemalign(size_t alignment, size_t bytes) { + + if (alignment <= MALLOC_ALIGNMENT) { return dlmalloc(bytes); } return internal_memalign(gm, alignment, bytes); + } -int dlposix_memalign(void** pp, size_t alignment, size_t bytes) { - void* mem = 0; +int dlposix_memalign(void **pp, size_t alignment, size_t bytes) { + + void *mem = 0; if (alignment == MALLOC_ALIGNMENT) mem = dlmalloc(bytes); else { - size_t d = alignment / sizeof(void*); - size_t r = alignment % sizeof(void*); - if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0) + + size_t d = alignment / sizeof(void *); + size_t r = alignment % sizeof(void *); + if (r != 0 || d == 0 || (d & (d - SIZE_T_ONE)) != 0) return EINVAL; else if (bytes <= MAX_REQUEST - alignment) { - if (alignment < MIN_CHUNK_SIZE) - alignment = MIN_CHUNK_SIZE; + + if (alignment < MIN_CHUNK_SIZE) alignment = MIN_CHUNK_SIZE; mem = internal_memalign(gm, alignment, bytes); + } + } + if (mem == 0) return ENOMEM; else { + *pp = mem; return 0; + } + } -void* dlvalloc(size_t bytes) { +void *dlvalloc(size_t bytes) { + size_t pagesz; ensure_initialization(); pagesz = mparams.page_size; return dlmemalign(pagesz, bytes); + } -void* dlpvalloc(size_t bytes) { +void *dlpvalloc(size_t bytes) { + size_t pagesz; ensure_initialization(); pagesz = mparams.page_size; - return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); + return dlmemalign(pagesz, + (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); + } -void** dlindependent_calloc(size_t n_elements, size_t elem_size, - void* chunks[]) { - size_t sz = elem_size; /* serves as 1-element array */ +void **dlindependent_calloc(size_t n_elements, size_t elem_size, + void *chunks[]) { + + size_t sz = elem_size; /* serves as 1-element array */ return ialloc(gm, n_elements, &sz, 3, chunks); + } -void** dlindependent_comalloc(size_t n_elements, size_t sizes[], - void* chunks[]) { +void **dlindependent_comalloc(size_t n_elements, size_t 
sizes[], + void *chunks[]) { + return ialloc(gm, n_elements, sizes, 0, chunks); + } -size_t dlbulk_free(void* array[], size_t nelem) { +size_t dlbulk_free(void *array[], size_t nelem) { + return internal_bulk_free(gm, array, nelem); + } -#if MALLOC_INSPECT_ALL -void dlmalloc_inspect_all(void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { + #if MALLOC_INSPECT_ALL +void dlmalloc_inspect_all(void (*handler)(void *start, void *end, + size_t used_bytes, + void * callback_arg), + void *arg) { + ensure_initialization(); if (!PREACTION(gm)) { + internal_inspect_all(gm, handler, arg); POSTACTION(gm); + } + } -#endif /* MALLOC_INSPECT_ALL */ + + #endif /* MALLOC_INSPECT_ALL */ int dlmalloc_trim(size_t pad) { + int result = 0; ensure_initialization(); if (!PREACTION(gm)) { + result = sys_trim(gm, pad); POSTACTION(gm); + } + return result; + } size_t dlmalloc_footprint(void) { + return gm->footprint; + } size_t dlmalloc_max_footprint(void) { + return gm->max_footprint; + } size_t dlmalloc_footprint_limit(void) { + size_t maf = gm->footprint_limit; return maf == 0 ? MAX_SIZE_T : maf; + } size_t dlmalloc_set_footprint_limit(size_t bytes) { - size_t result; /* invert sense of 0 */ - if (bytes == 0) - result = granularity_align(1); /* Use minimal size */ + + size_t result; /* invert sense of 0 */ + if (bytes == 0) result = granularity_align(1); /* Use minimal size */ if (bytes == MAX_SIZE_T) - result = 0; /* disable */ + result = 0; /* disable */ else result = granularity_align(bytes); return gm->footprint_limit = result; + } -#if !NO_MALLINFO + #if !NO_MALLINFO struct mallinfo dlmallinfo(void) { + return internal_mallinfo(gm); + } -#endif /* NO_MALLINFO */ -#if !NO_MALLOC_STATS + #endif /* NO_MALLINFO */ + + #if !NO_MALLOC_STATS void dlmalloc_stats() { + internal_malloc_stats(gm); + } -#endif /* NO_MALLOC_STATS */ + + #endif /* NO_MALLOC_STATS */ int dlmallopt(int param_number, int value) { + return change_mparam(param_number, value); + } -size_t dlmalloc_usable_size(void* mem) { +size_t dlmalloc_usable_size(void *mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); - if (is_inuse(p)) - return chunksize(p) - overhead_for(p); + if (is_inuse(p)) return chunksize(p) - overhead_for(p); + } + return 0; + } -#endif /* !ONLY_MSPACES */ +#endif /* !ONLY_MSPACES */ /* ----------------------------- user mspaces ---------------------------- */ #if MSPACES -static mstate init_user_mstate(char* tbase, size_t tsize) { - size_t msize = pad_request(sizeof(struct malloc_state)); +static mstate init_user_mstate(char *tbase, size_t tsize) { + + size_t msize = pad_request(sizeof(struct malloc_state)); mchunkptr mn; mchunkptr msp = align_as_chunk(tbase); - mstate m = (mstate)(chunk2mem(msp)); + mstate m = (mstate)(chunk2mem(msp)); __builtin_memset(m, 0, msize); (void)INITIAL_LOCK(&m->mutex); - msp->head = (msize|INUSE_BITS); + msp->head = (msize | INUSE_BITS); m->seg.base = m->least_addr = tbase; m->seg.size = m->footprint = m->max_footprint = tsize; m->magic = mparams.magic; @@ -5461,82 +6217,111 @@ static mstate init_user_mstate(char* tbase, size_t tsize) { disable_contiguous(m); init_bins(m); mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); + init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE); check_top_chunk(m, m->top); return m; + } mspace create_mspace(size_t capacity, int locked) { + mstate m = 0; size_t msize; ensure_initialization(); msize = pad_request(sizeof(struct malloc_state)); - if 
(capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { - size_t rs = ((capacity == 0)? mparams.granularity : - (capacity + TOP_FOOT_SIZE + msize)); + if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) { + + size_t rs = ((capacity == 0) ? mparams.granularity + : (capacity + TOP_FOOT_SIZE + msize)); size_t tsize = granularity_align(rs); - char* tbase = (char*)(CALL_MMAP(tsize)); + char * tbase = (char *)(CALL_MMAP(tsize)); if (tbase != CMFAIL) { + m = init_user_mstate(tbase, tsize); m->seg.sflags = USE_MMAP_BIT; set_lock(m, locked); + } + } + return (mspace)m; + } -mspace create_mspace_with_base(void* base, size_t capacity, int locked) { +mspace create_mspace_with_base(void *base, size_t capacity, int locked) { + mstate m = 0; size_t msize; ensure_initialization(); msize = pad_request(sizeof(struct malloc_state)); if (capacity > msize + TOP_FOOT_SIZE && - capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { - m = init_user_mstate((char*)base, capacity); + capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) { + + m = init_user_mstate((char *)base, capacity); m->seg.sflags = EXTERN_BIT; set_lock(m, locked); + } + return (mspace)m; + } int mspace_track_large_chunks(mspace msp, int enable) { - int ret = 0; + + int ret = 0; mstate ms = (mstate)msp; if (!PREACTION(ms)) { - if (!use_mmap(ms)) { - ret = 1; - } + + if (!use_mmap(ms)) { ret = 1; } if (!enable) { + enable_mmap(ms); + } else { + disable_mmap(ms); + } + POSTACTION(ms); + } + return ret; + } size_t destroy_mspace(mspace msp) { + size_t freed = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + msegmentptr sp = &ms->seg; - (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ + (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ while (sp != 0) { - char* base = sp->base; + + char * base = sp->base; size_t size = sp->size; flag_t flag = sp->sflags; - (void)base; /* placate people compiling -Wunused-variable */ + (void)base; /* placate people compiling -Wunused-variable */ sp = sp->next; if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) && CALL_MUNMAP(base, size) == 0) freed += size; + } + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return freed; + } /* @@ -5544,25 +6329,31 @@ size_t destroy_mspace(mspace msp) { versions. This is not so nice but better than the alternatives. */ -void* mspace_malloc(mspace msp, size_t bytes) { +void *mspace_malloc(mspace msp, size_t bytes) { + mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + if (!PREACTION(ms)) { - void* mem; + + void * mem; size_t nb; if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; binmap_t smallbits; - nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes); idx = small_index(nb); smallbits = ms->smallmap >> idx; - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ + idx += ~smallbits & 1; /* Uses next bin if idx empty */ b = smallbin_at(ms, idx); p = b->fd; assert(chunksize(p) == small_index2size(idx)); @@ -5571,15 +6362,17 @@ void* mspace_malloc(mspace msp, size_t bytes) { mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; + } else if (nb > ms->dvsize) { - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ mchunkptr b, p, r; - size_t rsize; - bindex_t i; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - binmap_t leastbit = least_bit(leftbits); + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); b = smallbin_at(ms, i); p = b->fd; @@ -5590,54 +6383,71 @@ void* mspace_malloc(mspace msp, size_t bytes) { if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(ms, p, small_index2size(i)); else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); r = chunk_plus_offset(p, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; + } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); goto postaction; + } + } - } - else if (bytes >= MAX_REQUEST) + + } else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ else { + nb = pad_request(bytes); if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); goto postaction; + } + } if (nb <= ms->dvsize) { - size_t rsize = ms->dvsize - nb; + + size_t rsize = ms->dvsize - nb; mchunkptr p = ms->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ mchunkptr r = ms->dv = chunk_plus_offset(p, nb); ms->dvsize = rsize; set_size_and_pinuse_of_free_chunk(r, rsize); set_size_and_pinuse_of_inuse_chunk(ms, p, nb); - } - else { /* exhaust dv */ + + } else { /* exhaust dv */ + size_t dvs = ms->dvsize; ms->dvsize = 0; ms->dv = 0; set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; + } - else if (nb < ms->topsize) { /* Split top */ - size_t rsize = ms->topsize -= nb; + else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; mchunkptr p = ms->top; mchunkptr r = ms->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -5646,6 +6456,7 @@ void* mspace_malloc(mspace msp, size_t bytes) { check_top_chunk(ms, ms->top); check_malloced_chunk(ms, mem, nb); goto postaction; + } mem = sys_alloc(ms, nb); @@ -5653,372 +6464,519 @@ void* mspace_malloc(mspace msp, size_t bytes) { postaction: POSTACTION(ms); return mem; + } return 0; + } -void mspace_free(mspace msp, void* mem) { +void mspace_free(mspace msp, void *mem) { + if (mem != 0) { - mchunkptr p = mem2chunk(mem); -#if FOOTERS + + mchunkptr p = mem2chunk(mem); + #if FOOTERS mstate fm = get_mstate_for(p); - (void)msp; /* placate people compiling -Wunused */ -#else /* FOOTERS */ + (void)msp; /* placate people compiling -Wunused */ + #else /* FOOTERS */ mstate fm = (mstate)msp; -#endif /* FOOTERS */ + #endif /* FOOTERS */ if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); return; + } + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { - size_t psize = 
chunksize(p); + + size_t psize = chunksize(p); mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { + size_t prevsize = p->prev_foot; if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) fm->footprint -= psize; goto postaction; - } - else { + + } else { + mchunkptr prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { + + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; set_free_with_pinuse(p, psize, next); goto postaction; + } - } - else + + } else + goto erroraction; + } + } if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { - if (!cinuse(next)) { /* consolidate forward */ + + if (!cinuse(next)) { /* consolidate forward */ if (next == fm->top) { + size_t tsize = fm->topsize += psize; fm->top = p; p->head = tsize | PINUSE_BIT; if (p == fm->dv) { + fm->dv = 0; fm->dvsize = 0; + } - if (should_trim(fm, tsize)) - sys_trim(fm, 0); + + if (should_trim(fm, tsize)) sys_trim(fm, 0); goto postaction; - } - else if (next == fm->dv) { + + } else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; fm->dv = p; set_size_and_pinuse_of_free_chunk(p, dsize); goto postaction; - } - else { + + } else { + size_t nsize = chunksize(next); psize += nsize; unlink_chunk(fm, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == fm->dv) { + fm->dvsize = psize; goto postaction; + } + } - } - else + + } else + set_free_with_pinuse(p, psize, next); if (is_small(psize)) { + insert_small_chunk(fm, p, psize); check_free_chunk(fm, p); - } - else { + + } else { + tchunkptr tp = (tchunkptr)p; insert_large_chunk(fm, tp, psize); check_free_chunk(fm, p); - if (--fm->release_checks == 0) - release_unused_segments(fm); + if (--fm->release_checks == 0) release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: USAGE_ERROR_ACTION(fm, p); postaction: POSTACTION(fm); + } + } + } -void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { - void* mem; +void *mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { + + void * mem; size_t req = 0; mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + if (n_elements != 0) { + req = n_elements * elem_size; if (((n_elements | elem_size) & ~(size_t)0xffff) && (req / n_elements != elem_size)) - req = MAX_SIZE_T; /* force downstream failure on overflow */ + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = internal_malloc(ms, req); if (mem != 0 && calloc_must_clear(mem2chunk(mem))) __builtin_memset(mem, 0, req); return mem; + } -void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { - void* mem = 0; +void *mspace_realloc(mspace msp, void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem == 0) { + mem = mspace_malloc(msp, bytes); - } - else if (bytes >= MAX_REQUEST) { + + } else if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } -#ifdef REALLOC_ZERO_BYTES_FREES + + #ifdef REALLOC_ZERO_BYTES_FREES else if (bytes == 0) { + mspace_free(msp, oldmem); + } -#endif /* REALLOC_ZERO_BYTES_FREES */ + + #endif /* REALLOC_ZERO_BYTES_FREES */ else { - size_t nb = request2size(bytes); + + size_t nb = request2size(bytes); mchunkptr oldp = 
mem2chunk(oldmem); -#if ! FOOTERS + #if !FOOTERS mstate m = (mstate)msp; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); POSTACTION(m); if (newp != 0) { + check_inuse_chunk(m, newp); mem = chunk2mem(newp); - } - else { + + } else { + mem = mspace_malloc(m, bytes); if (mem != 0) { + size_t oc = chunksize(oldp) - overhead_for(oldp); - __builtin_memcpy(mem, oldmem, (oc < bytes)? oc : bytes); + __builtin_memcpy(mem, oldmem, (oc < bytes) ? oc : bytes); mspace_free(m, oldmem); + } + } + } + } + return mem; + } -void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) { - void* mem = 0; +void *mspace_realloc_in_place(mspace msp, void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem != 0) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; - } - else { - size_t nb = request2size(bytes); + + } else { + + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); -#if ! FOOTERS + #if !FOOTERS mstate m = (mstate)msp; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); - (void)msp; /* placate people compiling -Wunused */ + (void)msp; /* placate people compiling -Wunused */ if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); POSTACTION(m); if (newp == oldp) { + check_inuse_chunk(m, newp); mem = oldmem; + } + } + } + } + return mem; + } -void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { +void *mspace_memalign(mspace msp, size_t alignment, size_t bytes) { + mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } - if (alignment <= MALLOC_ALIGNMENT) - return mspace_malloc(msp, bytes); + + if (alignment <= MALLOC_ALIGNMENT) return mspace_malloc(msp, bytes); return internal_memalign(ms, alignment, bytes); + } -void** mspace_independent_calloc(mspace msp, size_t n_elements, - size_t elem_size, void* chunks[]) { - size_t sz = elem_size; /* serves as 1-element array */ +void **mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void *chunks[]) { + + size_t sz = elem_size; /* serves as 1-element array */ mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + return ialloc(ms, n_elements, &sz, 3, chunks); + } -void** mspace_independent_comalloc(mspace msp, size_t n_elements, - size_t sizes[], void* chunks[]) { +void **mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void *chunks[]) { + mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + return ialloc(ms, n_elements, sizes, 0, chunks); + } -size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) { +size_t mspace_bulk_free(mspace msp, void *array[], size_t nelem) { + return internal_bulk_free((mstate)msp, array, nelem); + } -#if MALLOC_INSPECT_ALL + #if MALLOC_INSPECT_ALL void mspace_inspect_all(mspace msp, - void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { + void (*handler)(void *start, void *end, + size_t used_bytes, void *callback_arg), + void *arg) { + mstate ms = (mstate)msp; if (ok_magic(ms)) { + if (!PREACTION(ms)) { + 
internal_inspect_all(ms, handler, arg); POSTACTION(ms); + } + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + } -#endif /* MALLOC_INSPECT_ALL */ + + #endif /* MALLOC_INSPECT_ALL */ int mspace_trim(mspace msp, size_t pad) { - int result = 0; + + int result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + if (!PREACTION(ms)) { + result = sys_trim(ms, pad); POSTACTION(ms); + } + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } -#if !NO_MALLOC_STATS + #if !NO_MALLOC_STATS void mspace_malloc_stats(mspace msp) { + mstate ms = (mstate)msp; if (ok_magic(ms)) { + internal_malloc_stats(ms); + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + } -#endif /* NO_MALLOC_STATS */ + + #endif /* NO_MALLOC_STATS */ size_t mspace_footprint(mspace msp) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + result = ms->footprint; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } size_t mspace_max_footprint(mspace msp) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + result = ms->max_footprint; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } size_t mspace_footprint_limit(mspace msp) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + size_t maf = ms->footprint_limit; result = (maf == 0) ? MAX_SIZE_T : maf; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } size_t mspace_set_footprint_limit(mspace msp, size_t bytes) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { - if (bytes == 0) - result = granularity_align(1); /* Use minimal size */ + + if (bytes == 0) result = granularity_align(1); /* Use minimal size */ if (bytes == MAX_SIZE_T) - result = 0; /* disable */ + result = 0; /* disable */ else result = granularity_align(bytes); ms->footprint_limit = result; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } -#if !NO_MALLINFO + #if !NO_MALLINFO struct mallinfo mspace_mallinfo(mspace msp) { + mstate ms = (mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - } + if (!ok_magic(ms)) { USAGE_ERROR_ACTION(ms, ms); } return internal_mallinfo(ms); + } -#endif /* NO_MALLINFO */ -size_t mspace_usable_size(const void* mem) { + #endif /* NO_MALLINFO */ + +size_t mspace_usable_size(const void *mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); - if (is_inuse(p)) - return chunksize(p) - overhead_for(p); + if (is_inuse(p)) return chunksize(p) - overhead_for(p); + } + return 0; + } int mspace_mallopt(int param_number, int value) { + return change_mparam(param_number, value); -} -#endif /* MSPACES */ +} +#endif /* MSPACES */ /* -------------------- Alternative MORECORE functions ------------------- */ @@ -6063,35 +7021,48 @@ int mspace_mallopt(int param_number, int value) { void *osMoreCore(int size) { + void *ptr = 0; static void *sbrk_top = 0; if (size > 0) { + if (size < MINIMUM_MORECORE_SIZE) size = MINIMUM_MORECORE_SIZE; if (CurrentExecutionLevel() == kTaskLevel) ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); if (ptr == 0) { + return (void *) MFAIL; + } + // save ptrs so they can be freed during cleanup our_os_pools[next_os_pool] = ptr; next_os_pool++; ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); sbrk_top = (char *) ptr + 
size; return ptr; + } + else if (size < 0) { + // we don't currently support shrink behavior return (void *) MFAIL; + } + else { + return sbrk_top; + } + } // cleanup any allocated memory pools @@ -6099,19 +7070,22 @@ int mspace_mallopt(int param_number, int value) { void osCleanupMem(void) { + void **ptr; for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) if (*ptr) { + PoolDeallocate(*ptr); *ptr = 0; + } + } */ - /* ----------------------------------------------------------------------- History: v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea @@ -6330,3 +7304,4 @@ History: structure of old version, but most details differ.) */ + diff --git a/qemu_mode/libqasan/hooks.c b/qemu_mode/libqasan/hooks.c index b1ee2e0e..3bb4cc42 100644 --- a/qemu_mode/libqasan/hooks.c +++ b/qemu_mode/libqasan/hooks.c @@ -306,6 +306,7 @@ void bzero(void *s, size_t n) { __libqasan_memset(s, 0, n); } + #endif void explicit_bzero(void *s, size_t n) { diff --git a/qemu_mode/libqasan/libqasan.h b/qemu_mode/libqasan/libqasan.h index 0761e157..43b7adb5 100644 --- a/qemu_mode/libqasan/libqasan.h +++ b/qemu_mode/libqasan/libqasan.h @@ -54,28 +54,29 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. } while (0) #ifdef DEBUG -#define QASAN_DEBUG(msg...) \ - do { \ - \ - if (__qasan_debug) { \ - \ - fprintf(stderr, "==%d== ", getpid()); \ - fprintf(stderr, msg); \ - \ - } \ - \ - } while (0) + #define QASAN_DEBUG(msg...) \ + do { \ + \ + if (__qasan_debug) { \ + \ + fprintf(stderr, "==%d== ", getpid()); \ + fprintf(stderr, msg); \ + \ + } \ + \ + } while (0) + #else -#define QASAN_DEBUG(msg...) \ - do { \ - \ - } while (0) + #define QASAN_DEBUG(msg...) \ + do { \ + \ + } while (0) #endif #define ASSERT_DLSYM(name) \ ({ \ \ - void* a = (void*)dlsym(RTLD_NEXT, #name); \ + void *a = (void *)dlsym(RTLD_NEXT, #name); \ if (!a) { \ \ fprintf(stderr, \ @@ -95,37 +96,37 @@ void __libqasan_init_malloc(void); void __libqasan_hotpatch(void); -size_t __libqasan_malloc_usable_size(void* ptr); -void* __libqasan_malloc(size_t size); -void __libqasan_free(void* ptr); -void* __libqasan_calloc(size_t nmemb, size_t size); -void* __libqasan_realloc(void* ptr, size_t size); -int __libqasan_posix_memalign(void** ptr, size_t align, size_t len); -void* __libqasan_memalign(size_t align, size_t len); -void* __libqasan_aligned_alloc(size_t align, size_t len); - -void* __libqasan_memcpy(void* dest, const void* src, size_t n); -void* __libqasan_memmove(void* dest, const void* src, size_t n); -void* __libqasan_memset(void* s, int c, size_t n); -void* __libqasan_memchr(const void* s, int c, size_t n); -void* __libqasan_memrchr(const void* s, int c, size_t n); -size_t __libqasan_strlen(const char* s); -size_t __libqasan_strnlen(const char* s, size_t len); -int __libqasan_strcmp(const char* str1, const char* str2); -int __libqasan_strncmp(const char* str1, const char* str2, size_t len); -int __libqasan_strcasecmp(const char* str1, const char* str2); -int __libqasan_strncasecmp(const char* str1, const char* str2, size_t len); -int __libqasan_memcmp(const void* mem1, const void* mem2, size_t len); -int __libqasan_bcmp(const void* mem1, const void* mem2, size_t len); -char* __libqasan_strstr(const char* haystack, const char* needle); -char* __libqasan_strcasestr(const char* haystack, const char* needle); -void* __libqasan_memmem(const void* haystack, size_t haystack_len, - const void* needle, size_t needle_len); -char* __libqasan_strchr(const char* s, int c); -char* __libqasan_strrchr(const char* s, int c); -size_t 
__libqasan_wcslen(const wchar_t* s); -wchar_t* __libqasan_wcscpy(wchar_t* d, const wchar_t* s); -int __libqasan_wcscmp(const wchar_t* s1, const wchar_t* s2); +size_t __libqasan_malloc_usable_size(void *ptr); +void * __libqasan_malloc(size_t size); +void __libqasan_free(void *ptr); +void * __libqasan_calloc(size_t nmemb, size_t size); +void * __libqasan_realloc(void *ptr, size_t size); +int __libqasan_posix_memalign(void **ptr, size_t align, size_t len); +void * __libqasan_memalign(size_t align, size_t len); +void * __libqasan_aligned_alloc(size_t align, size_t len); + +void * __libqasan_memcpy(void *dest, const void *src, size_t n); +void * __libqasan_memmove(void *dest, const void *src, size_t n); +void * __libqasan_memset(void *s, int c, size_t n); +void * __libqasan_memchr(const void *s, int c, size_t n); +void * __libqasan_memrchr(const void *s, int c, size_t n); +size_t __libqasan_strlen(const char *s); +size_t __libqasan_strnlen(const char *s, size_t len); +int __libqasan_strcmp(const char *str1, const char *str2); +int __libqasan_strncmp(const char *str1, const char *str2, size_t len); +int __libqasan_strcasecmp(const char *str1, const char *str2); +int __libqasan_strncasecmp(const char *str1, const char *str2, size_t len); +int __libqasan_memcmp(const void *mem1, const void *mem2, size_t len); +int __libqasan_bcmp(const void *mem1, const void *mem2, size_t len); +char * __libqasan_strstr(const char *haystack, const char *needle); +char * __libqasan_strcasestr(const char *haystack, const char *needle); +void * __libqasan_memmem(const void *haystack, size_t haystack_len, + const void *needle, size_t needle_len); +char * __libqasan_strchr(const char *s, int c); +char * __libqasan_strrchr(const char *s, int c); +size_t __libqasan_wcslen(const wchar_t *s); +wchar_t *__libqasan_wcscpy(wchar_t *d, const wchar_t *s); +int __libqasan_wcscmp(const wchar_t *s1, const wchar_t *s2); #endif diff --git a/qemu_mode/libqasan/malloc.c b/qemu_mode/libqasan/malloc.c index a53dbb4b..f8237826 100644 --- a/qemu_mode/libqasan/malloc.c +++ b/qemu_mode/libqasan/malloc.c @@ -50,9 +50,9 @@ typedef struct { struct chunk_begin { size_t requested_size; - void* aligned_orig; // NULL if not aligned - struct chunk_begin* next; - struct chunk_begin* prev; + void * aligned_orig; // NULL if not aligned + struct chunk_begin *next; + struct chunk_begin *prev; char redzone[REDZONE_SIZE]; }; @@ -66,29 +66,29 @@ struct chunk_struct { }; // From dlmalloc.c -void* dlmalloc(size_t); -void dlfree(void*); +void *dlmalloc(size_t); +void dlfree(void *); int __libqasan_malloc_initialized; -static struct chunk_begin* quarantine_top; -static struct chunk_begin* quarantine_end; +static struct chunk_begin *quarantine_top; +static struct chunk_begin *quarantine_end; static size_t quarantine_bytes; #ifdef __BIONIC__ -static pthread_mutex_t quarantine_lock; -#define LOCK_TRY pthread_mutex_trylock -#define LOCK_INIT pthread_mutex_init -#define LOCK_UNLOCK pthread_mutex_unlock +static pthread_mutex_t quarantine_lock; + #define LOCK_TRY pthread_mutex_trylock + #define LOCK_INIT pthread_mutex_init + #define LOCK_UNLOCK pthread_mutex_unlock #else -static pthread_spinlock_t quarantine_lock; -#define LOCK_TRY pthread_spin_trylock -#define LOCK_INIT pthread_spin_init -#define LOCK_UNLOCK pthread_spin_unlock +static pthread_spinlock_t quarantine_lock; + #define LOCK_TRY pthread_spin_trylock + #define LOCK_INIT pthread_spin_init + #define LOCK_UNLOCK pthread_spin_unlock #endif // need qasan disabled -static int quanratine_push(struct chunk_begin* 
ck) { +static int quanratine_push(struct chunk_begin *ck) { if (ck->requested_size >= QUARANTINE_MAX_BYTES) return 0; @@ -96,7 +96,7 @@ static int quanratine_push(struct chunk_begin* ck) { while (ck->requested_size + quarantine_bytes >= QUARANTINE_MAX_BYTES) { - struct chunk_begin* tmp = quarantine_end; + struct chunk_begin *tmp = quarantine_end; quarantine_end = tmp->prev; quarantine_bytes -= tmp->requested_size; @@ -131,29 +131,24 @@ void __libqasan_init_malloc(void) { } -size_t __libqasan_malloc_usable_size(void* ptr) { +size_t __libqasan_malloc_usable_size(void *ptr) { - char* p = ptr; + char *p = ptr; p -= sizeof(struct chunk_begin); - return ((struct chunk_begin*)p)->requested_size; + return ((struct chunk_begin *)p)->requested_size; } -void* __libqasan_malloc(size_t size) { +void *__libqasan_malloc(size_t size) { - if (!__libqasan_malloc_initialized) { - - __libqasan_init_malloc(); + if (!__libqasan_malloc_initialized) { __libqasan_init_malloc(); } - } - - if (!__libqasan_malloc_initialized) - __libqasan_init_malloc(); + if (!__libqasan_malloc_initialized) __libqasan_init_malloc(); int state = QASAN_SWAP(QASAN_DISABLED); // disable qasan for this thread - struct chunk_begin* p = dlmalloc(sizeof(struct chunk_struct) + size); + struct chunk_begin *p = dlmalloc(sizeof(struct chunk_struct) + size); QASAN_SWAP(state); @@ -165,14 +160,14 @@ void* __libqasan_malloc(size_t size) { p->aligned_orig = NULL; p->next = p->prev = NULL; - QASAN_ALLOC(&p[1], (char*)&p[1] + size); + QASAN_ALLOC(&p[1], (char *)&p[1] + size); QASAN_POISON(p->redzone, REDZONE_SIZE, ASAN_HEAP_LEFT_RZ); if (size & (ALLOC_ALIGN_SIZE - 1)) - QASAN_POISON((char*)&p[1] + size, + QASAN_POISON((char *)&p[1] + size, (size & ~(ALLOC_ALIGN_SIZE - 1)) + 8 - size + REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ); else - QASAN_POISON((char*)&p[1] + size, REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ); + QASAN_POISON((char *)&p[1] + size, REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ); __builtin_memset(&p[1], 0xff, size); @@ -180,11 +175,11 @@ void* __libqasan_malloc(size_t size) { } -void __libqasan_free(void* ptr) { +void __libqasan_free(void *ptr) { if (!ptr) return; - struct chunk_begin* p = ptr; + struct chunk_begin *p = ptr; p -= 1; size_t n = p->requested_size; @@ -211,11 +206,11 @@ void __libqasan_free(void* ptr) { } -void* __libqasan_calloc(size_t nmemb, size_t size) { +void *__libqasan_calloc(size_t nmemb, size_t size) { size *= nmemb; - char* p = __libqasan_malloc(size); + char *p = __libqasan_malloc(size); if (!p) return NULL; __builtin_memset(p, 0, size); @@ -224,14 +219,14 @@ void* __libqasan_calloc(size_t nmemb, size_t size) { } -void* __libqasan_realloc(void* ptr, size_t size) { +void *__libqasan_realloc(void *ptr, size_t size) { - char* p = __libqasan_malloc(size); + char *p = __libqasan_malloc(size); if (!p) return NULL; if (!ptr) return p; - size_t n = ((struct chunk_begin*)ptr)[-1].requested_size; + size_t n = ((struct chunk_begin *)ptr)[-1].requested_size; if (size < n) n = size; __builtin_memcpy(p, ptr, n); @@ -241,9 +236,9 @@ void* __libqasan_realloc(void* ptr, size_t size) { } -int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) { +int __libqasan_posix_memalign(void **ptr, size_t align, size_t len) { - if ((align % 2) || (align % sizeof(void*))) return EINVAL; + if ((align % 2) || (align % sizeof(void *))) return EINVAL; if (len == 0) { *ptr = NULL; @@ -257,7 +252,7 @@ int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) { int state = QASAN_SWAP(QASAN_DISABLED); // disable qasan for this thread - char* orig = 
dlmalloc(sizeof(struct chunk_struct) + size); + char *orig = dlmalloc(sizeof(struct chunk_struct) + size); QASAN_SWAP(state); @@ -265,10 +260,10 @@ int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) { QASAN_UNPOISON(orig, sizeof(struct chunk_struct) + size); - char* data = orig + sizeof(struct chunk_begin); + char *data = orig + sizeof(struct chunk_begin); data += align - ((uintptr_t)data % align); - struct chunk_begin* p = (struct chunk_begin*)data - 1; + struct chunk_begin *p = (struct chunk_begin *)data - 1; p->requested_size = len; p->aligned_orig = orig; @@ -291,9 +286,9 @@ int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) { } -void* __libqasan_memalign(size_t align, size_t len) { +void *__libqasan_memalign(size_t align, size_t len) { - void* ret = NULL; + void *ret = NULL; __libqasan_posix_memalign(&ret, align, len); @@ -301,9 +296,9 @@ void* __libqasan_memalign(size_t align, size_t len) { } -void* __libqasan_aligned_alloc(size_t align, size_t len) { +void *__libqasan_aligned_alloc(size_t align, size_t len) { - void* ret = NULL; + void *ret = NULL; if ((len % align)) return NULL; diff --git a/qemu_mode/libqasan/patch.c b/qemu_mode/libqasan/patch.c index ed783292..fbc09c99 100644 --- a/qemu_mode/libqasan/patch.c +++ b/qemu_mode/libqasan/patch.c @@ -28,12 +28,12 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifdef __x86_64__ -uint8_t* __libqasan_patch_jump(uint8_t* addr, uint8_t* dest) { +uint8_t *__libqasan_patch_jump(uint8_t *addr, uint8_t *dest) { // mov rax, dest addr[0] = 0x48; addr[1] = 0xb8; - *(uint8_t**)&addr[2] = dest; + *(uint8_t **)&addr[2] = dest; // jmp rax addr[10] = 0xff; @@ -45,11 +45,11 @@ uint8_t* __libqasan_patch_jump(uint8_t* addr, uint8_t* dest) { #elif __i386__ -uint8_t* __libqasan_patch_jump(uint8_t* addr, uint8_t* dest) { +uint8_t *__libqasan_patch_jump(uint8_t *addr, uint8_t *dest) { // mov eax, dest addr[0] = 0xb8; - *(uint8_t**)&addr[1] = dest; + *(uint8_t **)&addr[1] = dest; // jmp eax addr[5] = 0xff; @@ -64,7 +64,7 @@ uint8_t* __libqasan_patch_jump(uint8_t* addr, uint8_t* dest) { // in ARM, r12 is a scratch register used by the linker to jump, // so let's use it in our stub -uint8_t* __libqasan_patch_jump(uint8_t* addr, uint8_t* dest) { +uint8_t *__libqasan_patch_jump(uint8_t *addr, uint8_t *dest) { // ldr r12, OFF addr[0] = 0x0; @@ -79,7 +79,7 @@ uint8_t* __libqasan_patch_jump(uint8_t* addr, uint8_t* dest) { addr[7] = 0xe0; // OFF: .word dest - *(uint32_t*)&addr[8] = (uint32_t)dest; + *(uint32_t *)&addr[8] = (uint32_t)dest; return &addr[12]; @@ -90,7 +90,7 @@ uint8_t* __libqasan_patch_jump(uint8_t* addr, uint8_t* dest) { // in ARM64, x16 is a scratch register used by the linker to jump, // so let's use it in our stub -uint8_t* __libqasan_patch_jump(uint8_t* addr, uint8_t* dest) { +uint8_t *__libqasan_patch_jump(uint8_t *addr, uint8_t *dest) { // ldr x16, OFF addr[0] = 0x50; @@ -105,7 +105,7 @@ uint8_t* __libqasan_patch_jump(uint8_t* addr, uint8_t* dest) { addr[7] = 0xd6; // OFF: .dword dest - *(uint64_t*)&addr[8] = (uint64_t)dest; + *(uint64_t *)&addr[8] = (uint64_t)dest; return &addr[16]; @@ -113,7 +113,7 @@ uint8_t* __libqasan_patch_jump(uint8_t* addr, uint8_t* dest) { #else -#define CANNOT_HOTPATCH + #define CANNOT_HOTPATCH #endif @@ -130,8 +130,8 @@ int libc_perms; static void find_libc(void) { - FILE* fp; - char* line = NULL; + FILE * fp; + char * line = NULL; size_t len = 0; ssize_t read; @@ -156,8 +156,8 @@ static void find_libc(void) { if (flag_x == 'x' && (__libqasan_strstr(path, "/libc.so") || 
__libqasan_strstr(path, "/libc-"))) { - libc_start = (void*)min; - libc_end = (void*)max; + libc_start = (void *)min; + libc_end = (void *)max; libc_perms = PROT_EXEC; if (flag_w == 'w') libc_perms |= PROT_WRITE; @@ -190,30 +190,30 @@ void __libqasan_hotpatch(void) { PROT_READ | PROT_WRITE | PROT_EXEC) < 0) return; - void* libc = dlopen("libc.so.6", RTLD_LAZY); + void *libc = dlopen("libc.so.6", RTLD_LAZY); -#define HOTPATCH(fn) \ - uint8_t* p_##fn = (uint8_t*)dlsym(libc, #fn); \ - if (p_##fn) __libqasan_patch_jump(p_##fn, (uint8_t*)&(fn)); + #define HOTPATCH(fn) \ + uint8_t *p_##fn = (uint8_t *)dlsym(libc, #fn); \ + if (p_##fn) __libqasan_patch_jump(p_##fn, (uint8_t *)&(fn)); HOTPATCH(memcmp) HOTPATCH(memmove) - uint8_t* p_memcpy = (uint8_t*)dlsym(libc, "memcpy"); + uint8_t *p_memcpy = (uint8_t *)dlsym(libc, "memcpy"); // fuck you libc if (p_memcpy && p_memmove != p_memcpy) - __libqasan_patch_jump(p_memcpy, (uint8_t*)&memcpy); + __libqasan_patch_jump(p_memcpy, (uint8_t *)&memcpy); HOTPATCH(memchr) HOTPATCH(memrchr) HOTPATCH(memmem) -#ifndef __BIONIC__ + #ifndef __BIONIC__ HOTPATCH(bzero) HOTPATCH(explicit_bzero) HOTPATCH(mempcpy) HOTPATCH(bcmp) -#endif - + #endif + HOTPATCH(strchr) HOTPATCH(strrchr) HOTPATCH(strcasecmp) @@ -233,7 +233,7 @@ void __libqasan_hotpatch(void) { HOTPATCH(wcscpy) HOTPATCH(wcscmp) -#undef HOTPATCH + #undef HOTPATCH mprotect(libc_start, libc_end - libc_start, libc_perms); diff --git a/qemu_mode/libqasan/string.c b/qemu_mode/libqasan/string.c index 520f0342..c850463b 100644 --- a/qemu_mode/libqasan/string.c +++ b/qemu_mode/libqasan/string.c @@ -26,10 +26,10 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "libqasan.h" #include -void* __libqasan_memcpy(void* dest, const void* src, size_t n) { +void *__libqasan_memcpy(void *dest, const void *src, size_t n) { - unsigned char* d = dest; - const unsigned char* s = src; + unsigned char * d = dest; + const unsigned char *s = src; if (!n) return dest; @@ -45,10 +45,10 @@ void* __libqasan_memcpy(void* dest, const void* src, size_t n) { } -void* __libqasan_memmove(void* dest, const void* src, size_t n) { +void *__libqasan_memmove(void *dest, const void *src, size_t n) { - unsigned char* d = dest; - const unsigned char* s = src; + unsigned char * d = dest; + const unsigned char *s = src; if (!n) return dest; @@ -65,18 +65,18 @@ void* __libqasan_memmove(void* dest, const void* src, size_t n) { } -void* __libqasan_memset(void* s, int c, size_t n) { +void *__libqasan_memset(void *s, int c, size_t n) { - unsigned char* b = s; + unsigned char *b = s; while (n--) *(b++) = (unsigned char)c; return s; } -void* __libqasan_memchr(const void* s, int c, size_t n) { +void *__libqasan_memchr(const void *s, int c, size_t n) { - unsigned char* m = (unsigned char*)s; + unsigned char *m = (unsigned char *)s; size_t i; for (i = 0; i < n; ++i) if (m[i] == (unsigned char)c) return &m[i]; @@ -84,9 +84,9 @@ void* __libqasan_memchr(const void* s, int c, size_t n) { } -void* __libqasan_memrchr(const void* s, int c, size_t n) { +void *__libqasan_memrchr(const void *s, int c, size_t n) { - unsigned char* m = (unsigned char*)s; + unsigned char *m = (unsigned char *)s; long i; for (i = n; i >= 0; --i) if (m[i] == (unsigned char)c) return &m[i]; @@ -94,16 +94,16 @@ void* __libqasan_memrchr(const void* s, int c, size_t n) { } -size_t __libqasan_strlen(const char* s) { +size_t __libqasan_strlen(const char *s) { - const char* i = s; + const char *i = s; while (*(i++)) ; return i - s - 1; } -size_t __libqasan_strnlen(const char* s, 
size_t len) { +size_t __libqasan_strnlen(const char *s, size_t len) { size_t r = 0; while (len-- && *(s++)) @@ -112,7 +112,7 @@ size_t __libqasan_strnlen(const char* s, size_t len) { } -int __libqasan_strcmp(const char* str1, const char* str2) { +int __libqasan_strcmp(const char *str1, const char *str2) { while (1) { @@ -129,7 +129,7 @@ int __libqasan_strcmp(const char* str1, const char* str2) { } -int __libqasan_strncmp(const char* str1, const char* str2, size_t len) { +int __libqasan_strncmp(const char *str1, const char *str2, size_t len) { while (len--) { @@ -146,7 +146,7 @@ int __libqasan_strncmp(const char* str1, const char* str2, size_t len) { } -int __libqasan_strcasecmp(const char* str1, const char* str2) { +int __libqasan_strcasecmp(const char *str1, const char *str2) { while (1) { @@ -163,7 +163,7 @@ int __libqasan_strcasecmp(const char* str1, const char* str2) { } -int __libqasan_strncasecmp(const char* str1, const char* str2, size_t len) { +int __libqasan_strncasecmp(const char *str1, const char *str2, size_t len) { while (len--) { @@ -180,10 +180,10 @@ int __libqasan_strncasecmp(const char* str1, const char* str2, size_t len) { } -int __libqasan_memcmp(const void* mem1, const void* mem2, size_t len) { +int __libqasan_memcmp(const void *mem1, const void *mem2, size_t len) { - const char* strmem1 = (const char*)mem1; - const char* strmem2 = (const char*)mem2; + const char *strmem1 = (const char *)mem1; + const char *strmem2 = (const char *)mem2; while (len--) { @@ -198,10 +198,10 @@ int __libqasan_memcmp(const void* mem1, const void* mem2, size_t len) { } -int __libqasan_bcmp(const void* mem1, const void* mem2, size_t len) { +int __libqasan_bcmp(const void *mem1, const void *mem2, size_t len) { - const char* strmem1 = (const char*)mem1; - const char* strmem2 = (const char*)mem2; + const char *strmem1 = (const char *)mem1; + const char *strmem2 = (const char *)mem2; while (len--) { @@ -216,17 +216,17 @@ int __libqasan_bcmp(const void* mem1, const void* mem2, size_t len) { } -char* __libqasan_strstr(const char* haystack, const char* needle) { +char *__libqasan_strstr(const char *haystack, const char *needle) { do { - const char* n = needle; - const char* h = haystack; + const char *n = needle; + const char *h = haystack; while (*n && *h && *n == *h) n++, h++; - if (!*n) return (char*)haystack; + if (!*n) return (char *)haystack; } while (*(haystack++)); @@ -234,17 +234,17 @@ char* __libqasan_strstr(const char* haystack, const char* needle) { } -char* __libqasan_strcasestr(const char* haystack, const char* needle) { +char *__libqasan_strcasestr(const char *haystack, const char *needle) { do { - const char* n = needle; - const char* h = haystack; + const char *n = needle; + const char *h = haystack; while (*n && *h && tolower(*n) == tolower(*h)) n++, h++; - if (!*n) return (char*)haystack; + if (!*n) return (char *)haystack; } while (*(haystack++)); @@ -252,22 +252,22 @@ char* __libqasan_strcasestr(const char* haystack, const char* needle) { } -void* __libqasan_memmem(const void* haystack, size_t haystack_len, - const void* needle, size_t needle_len) { +void *__libqasan_memmem(const void *haystack, size_t haystack_len, + const void *needle, size_t needle_len) { - const char* n = (const char*)needle; - const char* h = (const char*)haystack; + const char *n = (const char *)needle; + const char *h = (const char *)haystack; if (haystack_len < needle_len) return 0; - if (needle_len == 0) return (void*)haystack; + if (needle_len == 0) return (void *)haystack; if (needle_len == 1) return 
memchr(haystack, *n, haystack_len); - const char* end = h + (haystack_len - needle_len); + const char *end = h + (haystack_len - needle_len); do { if (*h == *n) { - if (memcmp(h, n, needle_len) == 0) return (void*)h; + if (memcmp(h, n, needle_len) == 0) return (void *)h; } @@ -277,26 +277,26 @@ void* __libqasan_memmem(const void* haystack, size_t haystack_len, } -char* __libqasan_strchr(const char* s, int c) { +char *__libqasan_strchr(const char *s, int c) { while (*s != (char)c) if (!*s++) return 0; - return (char*)s; + return (char *)s; } -char* __libqasan_strrchr(const char* s, int c) { +char *__libqasan_strrchr(const char *s, int c) { - char* r = NULL; + char *r = NULL; do - if (*s == (char)c) r = (char*)s; + if (*s == (char)c) r = (char *)s; while (*s++); return r; } -size_t __libqasan_wcslen(const wchar_t* s) { +size_t __libqasan_wcslen(const wchar_t *s) { size_t len = 0; @@ -313,16 +313,16 @@ size_t __libqasan_wcslen(const wchar_t* s) { } -wchar_t* __libqasan_wcscpy(wchar_t* d, const wchar_t* s) { +wchar_t *__libqasan_wcscpy(wchar_t *d, const wchar_t *s) { - wchar_t* a = d; + wchar_t *a = d; while ((*d++ = *s++)) ; return a; } -int __libqasan_wcscmp(const wchar_t* s1, const wchar_t* s2) { +int __libqasan_wcscmp(const wchar_t *s1, const wchar_t *s2) { wchar_t c1, c2; do { -- cgit 1.4.1 From 209c5ba4657b641bf261da7ac9ce7d3f809109c2 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sun, 7 Feb 2021 05:33:02 +0100 Subject: larger map, stats reload fix, code format --- docs/Changelog.md | 2 + instrumentation/afl-compiler-rt.o.c | 2 +- instrumentation/afl-llvm-lto-instrumentation.so.cc | 3 +- qemu_mode/libqasan/dlmalloc.c | 5 ++ src/afl-fuzz-bitmap.c | 3 +- src/afl-fuzz-statsd.c | 63 ++++++++++++---------- utils/afl_untracer/afl-untracer.c | 10 ++-- 7 files changed, 52 insertions(+), 36 deletions(-) (limited to 'qemu_mode/libqasan/dlmalloc.c') diff --git a/docs/Changelog.md b/docs/Changelog.md index e9efdf38..f2041917 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -26,6 +26,8 @@ sending a mail to . `-i` or resumes (as these have most likely already been done) - fix crash for very, very fast targets+systems (thanks to mhlakhani for reporting) + - on restarts (-i)/autoresume (AFL_AUTORESUME) the stats are now + reloaded and used, thanks to Vimal Joseph for this PR! - if determinstic mode is active (-D, or -M without -d) then we sync after every queue entry as this can take very long time otherwise - better detection if a target needs a large shared map diff --git a/instrumentation/afl-compiler-rt.o.c b/instrumentation/afl-compiler-rt.o.c index 65a5d3d2..059691ec 100644 --- a/instrumentation/afl-compiler-rt.o.c +++ b/instrumentation/afl-compiler-rt.o.c @@ -70,7 +70,7 @@ run. It will end up as .comm, so it shouldn't be too wasteful. 
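Looping back to the libqasan string routines earlier in this patch: they re-implement the libc primitives byte by byte so that every access stays visible to the sanitizer. Purely as an illustration of the memmem contract shown in string.c (empty needle returns the haystack, a one-byte needle degrades to memchr, an oversized needle never matches), a small test harness such as the following could be linked against libqasan; the harness itself is hypothetical:

#include <assert.h>
#include <stddef.h>

/* Declared in libqasan's string.c (see the hunk above); repeated here so
   the fragment stands alone. Linking against libqasan is assumed. */
extern void *__libqasan_memmem(const void *haystack, size_t haystack_len,
                               const void *needle, size_t needle_len);

int main(void) {

  const char buf[] = "deadbeef";

  /* empty needle: the haystack pointer itself is returned */
  assert(__libqasan_memmem(buf, 8, "", 0) == buf);
  /* one-byte needle: degrades to a memchr()-style scan */
  assert(__libqasan_memmem(buf, 8, "b", 1) == buf + 4);
  /* needle longer than the haystack: never matches */
  assert(__libqasan_memmem(buf, 8, "deadbeefXX", 10) == NULL);
  return 0;

}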
*/ #if MAP_SIZE <= 65536 - #define MAP_INITIAL_SIZE 1048576 + #define MAP_INITIAL_SIZE 2097152 #else #define MAP_INITIAL_SIZE MAP_SIZE #endif diff --git a/instrumentation/afl-llvm-lto-instrumentation.so.cc b/instrumentation/afl-llvm-lto-instrumentation.so.cc index fa494f44..841d52e5 100644 --- a/instrumentation/afl-llvm-lto-instrumentation.so.cc +++ b/instrumentation/afl-llvm-lto-instrumentation.so.cc @@ -69,7 +69,8 @@ class AFLLTOPass : public ModulePass { if (getenv("AFL_DEBUG")) debug = 1; if ((ptr = getenv("AFL_LLVM_LTO_STARTID")) != NULL) - if ((afl_global_id = (uint32_t)atoi(ptr)) < 0 || afl_global_id >= MAP_SIZE) + if ((afl_global_id = (uint32_t)atoi(ptr)) < 0 || + afl_global_id >= MAP_SIZE) FATAL("AFL_LLVM_LTO_STARTID value of \"%s\" is not between 0 and %u\n", ptr, MAP_SIZE - 1); diff --git a/qemu_mode/libqasan/dlmalloc.c b/qemu_mode/libqasan/dlmalloc.c index 39ca4301..ce94451d 100644 --- a/qemu_mode/libqasan/dlmalloc.c +++ b/qemu_mode/libqasan/dlmalloc.c @@ -3907,6 +3907,7 @@ static void internal_malloc_stats(mstate m) { clear_smallmap(M, I); \ \ } else if (RTCHECK(B == smallbin_at(M, I) || \ + \ (ok_address(M, B) && B->fd == P))) { \ \ F->bk = B; \ @@ -4117,6 +4118,7 @@ static void internal_malloc_stats(mstate m) { XP->child[1] = R; \ \ } else \ + \ CORRUPTION_ERROR_ACTION(M); \ if (R != 0) { \ \ @@ -4132,6 +4134,7 @@ static void internal_malloc_stats(mstate m) { C0->parent = R; \ \ } else \ + \ CORRUPTION_ERROR_ACTION(M); \ \ } \ @@ -4143,11 +4146,13 @@ static void internal_malloc_stats(mstate m) { C1->parent = R; \ \ } else \ + \ CORRUPTION_ERROR_ACTION(M); \ \ } \ \ } else \ + \ CORRUPTION_ERROR_ACTION(M); \ \ } \ diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c index 0c4a114e..4ed59364 100644 --- a/src/afl-fuzz-bitmap.c +++ b/src/afl-fuzz-bitmap.c @@ -325,7 +325,8 @@ u8 *describe_op(afl_state_t *afl, u8 new_bits, size_t max_description_len) { } - sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - afl->start_time); + sprintf(ret + strlen(ret), ",time:%llu", + get_cur_time() + afl->prev_run_time - afl->start_time); if (afl->current_custom_fuzz && afl->current_custom_fuzz->afl_custom_describe) { diff --git a/src/afl-fuzz-statsd.c b/src/afl-fuzz-statsd.c index 69cafd90..461bbbf6 100644 --- a/src/afl-fuzz-statsd.c +++ b/src/afl-fuzz-statsd.c @@ -1,3 +1,8 @@ +/* + * This implements rpc.statsd support, see docs/rpc_statsd.md + * + */ + #include #include #include @@ -226,37 +231,39 @@ int statsd_format_metric(afl_state_t *afl, char *buff, size_t bufflen) { */ if (afl->statsd_metric_format_type == STATSD_TAGS_TYPE_SUFFIX) { - snprintf(buff, bufflen, afl->statsd_metric_format, - afl->queue_cycle ? (afl->queue_cycle - 1) : 0, tags, - afl->cycles_wo_finds, tags, afl->fsrv.total_execs, tags, - afl->fsrv.total_execs / - ((double)(get_cur_time() - afl->start_time) / 1000), - tags, afl->queued_paths, tags, afl->queued_favored, tags, - afl->queued_discovered, tags, afl->queued_imported, tags, - afl->max_depth, tags, afl->current_entry, tags, - afl->pending_favored, tags, afl->pending_not_fuzzed, tags, - afl->queued_variable, tags, afl->unique_crashes, tags, - afl->unique_hangs, tags, afl->total_crashes, tags, - afl->slowest_exec_ms, tags, - count_non_255_bytes(afl, afl->virgin_bits), tags, - afl->var_byte_count, tags, afl->expand_havoc, tags); + snprintf( + buff, bufflen, afl->statsd_metric_format, + afl->queue_cycle ? 
(afl->queue_cycle - 1) : 0, tags, + afl->cycles_wo_finds, tags, afl->fsrv.total_execs, tags, + afl->fsrv.total_execs / + ((double)(get_cur_time() + afl->prev_run_time - afl->start_time) / + 1000), + tags, afl->queued_paths, tags, afl->queued_favored, tags, + afl->queued_discovered, tags, afl->queued_imported, tags, + afl->max_depth, tags, afl->current_entry, tags, afl->pending_favored, + tags, afl->pending_not_fuzzed, tags, afl->queued_variable, tags, + afl->unique_crashes, tags, afl->unique_hangs, tags, afl->total_crashes, + tags, afl->slowest_exec_ms, tags, + count_non_255_bytes(afl, afl->virgin_bits), tags, afl->var_byte_count, + tags, afl->expand_havoc, tags); } else if (afl->statsd_metric_format_type == STATSD_TAGS_TYPE_MID) { - snprintf(buff, bufflen, afl->statsd_metric_format, tags, - afl->queue_cycle ? (afl->queue_cycle - 1) : 0, tags, - afl->cycles_wo_finds, tags, afl->fsrv.total_execs, tags, - afl->fsrv.total_execs / - ((double)(get_cur_time() - afl->start_time) / 1000), - tags, afl->queued_paths, tags, afl->queued_favored, tags, - afl->queued_discovered, tags, afl->queued_imported, tags, - afl->max_depth, tags, afl->current_entry, tags, - afl->pending_favored, tags, afl->pending_not_fuzzed, tags, - afl->queued_variable, tags, afl->unique_crashes, tags, - afl->unique_hangs, tags, afl->total_crashes, tags, - afl->slowest_exec_ms, tags, - count_non_255_bytes(afl, afl->virgin_bits), tags, - afl->var_byte_count, tags, afl->expand_havoc); + snprintf( + buff, bufflen, afl->statsd_metric_format, tags, + afl->queue_cycle ? (afl->queue_cycle - 1) : 0, tags, + afl->cycles_wo_finds, tags, afl->fsrv.total_execs, tags, + afl->fsrv.total_execs / + ((double)(get_cur_time() + afl->prev_run_time - afl->start_time) / + 1000), + tags, afl->queued_paths, tags, afl->queued_favored, tags, + afl->queued_discovered, tags, afl->queued_imported, tags, + afl->max_depth, tags, afl->current_entry, tags, afl->pending_favored, + tags, afl->pending_not_fuzzed, tags, afl->queued_variable, tags, + afl->unique_crashes, tags, afl->unique_hangs, tags, afl->total_crashes, + tags, afl->slowest_exec_ms, tags, + count_non_255_bytes(afl, afl->virgin_bits), tags, afl->var_byte_count, + tags, afl->expand_havoc); } diff --git a/utils/afl_untracer/afl-untracer.c b/utils/afl_untracer/afl-untracer.c index 1f1a10ea..2baeb58d 100644 --- a/utils/afl_untracer/afl-untracer.c +++ b/utils/afl_untracer/afl-untracer.c @@ -480,9 +480,9 @@ void setup_trap_instrumentation(void) { // Index into the coverage bitmap for the current trap instruction. #ifdef __aarch64__ uint64_t bitmap_index = 0; -#ifdef __APPLE__ + #ifdef __APPLE__ pthread_jit_write_protect_np(0); -#endif + #endif #else uint32_t bitmap_index = 0; #endif @@ -627,13 +627,13 @@ static void sigtrap_handler(int signum, siginfo_t *si, void *context) { // Must re-execute the instruction, so decrement PC by one instruction. 
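For the stats-reload fix shown in afl-fuzz-bitmap.c and afl-fuzz-statsd.c above, the relevant detail is that prev_run_time (the milliseconds accumulated by earlier runs of a resumed campaign) is added back into the elapsed time before the executions-per-second figure is derived. A hedged stand-alone sketch of that calculation, with hypothetical parameter names that simply mirror the afl-fuzz fields used in the snprintf calls:

/* Sketch of the corrected rate calculation: time already spent by previous
   runs is counted, so a resume does not report an inflated exec rate. */
static double execs_per_sec(unsigned long long total_execs,
                            unsigned long long cur_time_ms,
                            unsigned long long prev_run_time_ms,
                            unsigned long long start_time_ms) {

  unsigned long long elapsed_ms =
      cur_time_ms + prev_run_time_ms - start_time_ms;
  if (!elapsed_ms) return 0;               /* avoid /0 in this sketch */
  return total_execs / ((double)elapsed_ms / 1000);

}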
ucontext_t *ctx = (ucontext_t *)context; #if defined(__APPLE__) && defined(__LP64__) -#if defined(__x86_64__) + #if defined(__x86_64__) ctx->uc_mcontext->__ss.__rip -= 1; addr = ctx->uc_mcontext->__ss.__rip; -#else + #else ctx->uc_mcontext->__ss.__pc -= 4; addr = ctx->uc_mcontext->__ss.__pc; -#endif + #endif #elif defined(__linux__) #if defined(__x86_64__) || defined(__i386__) ctx->uc_mcontext.gregs[REG_RIP] -= 1; -- cgit 1.4.1 From c2c65fd9c1cc3604200bc6ae62e2a1a7e6950a0e Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sun, 7 Feb 2021 09:42:28 +0100 Subject: mark llvm 13 as unsupported (yet) --- GNUmakefile.llvm | 2 +- README.md | 3 +++ qemu_mode/libqasan/dlmalloc.c | 5 +++++ 3 files changed, 9 insertions(+), 1 deletion(-) (limited to 'qemu_mode/libqasan/dlmalloc.c') diff --git a/GNUmakefile.llvm b/GNUmakefile.llvm index a9092579..d3691658 100644 --- a/GNUmakefile.llvm +++ b/GNUmakefile.llvm @@ -43,7 +43,7 @@ endif LLVMVER = $(shell $(LLVM_CONFIG) --version 2>/dev/null | sed 's/git//' | sed 's/svn//' ) LLVM_MAJOR = $(shell $(LLVM_CONFIG) --version 2>/dev/null | sed 's/\..*//' ) LLVM_MINOR = $(shell $(LLVM_CONFIG) --version 2>/dev/null | sed 's/.*\.//' | sed 's/git//' | sed 's/svn//' | sed 's/ .*//' ) -LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^3\.[0-3]|^19' && echo 1 || echo 0 ) +LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^3\.[0-3]|^1[3-9]' && echo 1 || echo 0 ) LLVM_NEW_API = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^1[0-9]' && echo 1 || echo 0 ) LLVM_10_OK = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^1[1-9]|^10\.[1-9]|^10\.0.[1-9]' && echo 1 || echo 0 ) LLVM_HAVE_LTO = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^1[1-9]' && echo 1 || echo 0 ) diff --git a/README.md b/README.md index 118a619d..e3886ca7 100644 --- a/README.md +++ b/README.md @@ -730,6 +730,9 @@ campaigns as these are much shorter runnings. 1. Always: * LTO has a much longer compile time which is diametrical to short fuzzing - hence use afl-clang-fast instead. + * If you compile with CMPLOG then you can save fuzzing time and reuse that + compiled target for both the -c option and the main fuzz target. + This will impact the speed by ~15% though. * `AFL_FAST_CAL` - Enable fast calibration, this halfs the time the saturated corpus needs to be loaded. 
* `AFL_CMPLOG_ONLY_NEW` - only perform cmplog on new found paths, not the diff --git a/qemu_mode/libqasan/dlmalloc.c b/qemu_mode/libqasan/dlmalloc.c index ce94451d..3c7dcea8 100644 --- a/qemu_mode/libqasan/dlmalloc.c +++ b/qemu_mode/libqasan/dlmalloc.c @@ -3908,6 +3908,7 @@ static void internal_malloc_stats(mstate m) { \ } else if (RTCHECK(B == smallbin_at(M, I) || \ \ + \ (ok_address(M, B) && B->fd == P))) { \ \ F->bk = B; \ @@ -4119,6 +4120,7 @@ static void internal_malloc_stats(mstate m) { \ } else \ \ + \ CORRUPTION_ERROR_ACTION(M); \ if (R != 0) { \ \ @@ -4135,6 +4137,7 @@ static void internal_malloc_stats(mstate m) { \ } else \ \ + \ CORRUPTION_ERROR_ACTION(M); \ \ } \ @@ -4147,12 +4150,14 @@ static void internal_malloc_stats(mstate m) { \ } else \ \ + \ CORRUPTION_ERROR_ACTION(M); \ \ } \ \ } else \ \ + \ CORRUPTION_ERROR_ACTION(M); \ \ } \ -- cgit 1.4.1 From 267b085f80074e61bdacf1e85e99014b6b2cdad2 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Wed, 10 Feb 2021 15:15:16 +0100 Subject: dlmalloc only for non glibc qasan and AFL_QEMU_FORCE_DFL --- docs/env_variables.md | 6 + qemu_mode/QEMUAFL_VERSION | 2 +- qemu_mode/libqasan/dlmalloc.c | 4769 ++++++++++++++++------------------------- qemu_mode/libqasan/malloc.c | 147 +- qemu_mode/qemuafl | 2 +- 5 files changed, 2003 insertions(+), 2923 deletions(-) (limited to 'qemu_mode/libqasan/dlmalloc.c') diff --git a/docs/env_variables.md b/docs/env_variables.md index 4c3b1cfb..ab56c178 100644 --- a/docs/env_variables.md +++ b/docs/env_variables.md @@ -514,6 +514,12 @@ The QEMU wrapper used to instrument binary-only code supports several settings: stack pointer in which QEMU can find the return address when `start addr` is hit. + - With `AFL_USE_QASAN` you can enable QEMU AddressSanitizer for dynamically + linked binaries. + + - With `AFL_QEMU_FORCE_DFL` you force QEMU to ignore the registered singal + handlers of the target. + ## 6) Settings for afl-cmin The corpus minimization script offers very little customization: diff --git a/qemu_mode/QEMUAFL_VERSION b/qemu_mode/QEMUAFL_VERSION index 97184973..d9f0ec33 100644 --- a/qemu_mode/QEMUAFL_VERSION +++ b/qemu_mode/QEMUAFL_VERSION @@ -1 +1 @@ -6ab6bf28de +47722f64e4 diff --git a/qemu_mode/libqasan/dlmalloc.c b/qemu_mode/libqasan/dlmalloc.c index 3c7dcea8..7e3cb159 100644 --- a/qemu_mode/libqasan/dlmalloc.c +++ b/qemu_mode/libqasan/dlmalloc.c @@ -1,3 +1,7 @@ +#include + +#ifndef __GLIBC__ + /* This is a version (aka dlmalloc) of malloc/free/realloc written by Doug Lea and released to the public domain, as explained at @@ -203,12 +207,9 @@ mspaces as thread-locals. For example: static __thread mspace tlms = 0; void* tlmalloc(size_t bytes) { - if (tlms == 0) tlms = create_mspace(0, 0); return mspace_malloc(tlms, bytes); - } - void tlfree(void* mem) { mspace_free(tlms, mem); } Unless FOOTERS is defined, each mspace is completely independent. 
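Since the mspace machinery keeps coming up in this file, here is a hedged sketch (not part of the patch) of the API it documents: carving allocations out of a caller-supplied buffer via create_mspace_with_base. It assumes the file is compiled with MSPACES enabled; the declarations are repeated only to keep the fragment self-contained.

#include <stddef.h>

/* mspace API as declared further below in dlmalloc.c */
typedef void *mspace;
extern mspace create_mspace_with_base(void *base, size_t capacity, int locked);
extern void *mspace_malloc(mspace msp, size_t bytes);

static char   arena[64 * 1024];
static mspace arena_ms;

void *arena_alloc(size_t n) {

  /* lazily create one unlocked space on top of the static buffer */
  if (!arena_ms) arena_ms = create_mspace_with_base(arena, sizeof(arena), 0);
  return mspace_malloc(arena_ms, n);  /* behaves like malloc(), same space */

}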
@@ -528,198 +529,197 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP /* Version identifier to allow people to support multiple versions */ #ifndef DLMALLOC_VERSION - #define DLMALLOC_VERSION 20806 -#endif /* DLMALLOC_VERSION */ +#define DLMALLOC_VERSION 20806 +#endif /* DLMALLOC_VERSION */ #ifndef DLMALLOC_EXPORT - #define DLMALLOC_EXPORT extern +#define DLMALLOC_EXPORT extern #endif #ifndef WIN32 - #ifdef _WIN32 - #define WIN32 1 - #endif /* _WIN32 */ - #ifdef _WIN32_WCE - #define LACKS_FCNTL_H - #define WIN32 1 - #endif /* _WIN32_WCE */ -#endif /* WIN32 */ +#ifdef _WIN32 +#define WIN32 1 +#endif /* _WIN32 */ +#ifdef _WIN32_WCE +#define LACKS_FCNTL_H +#define WIN32 1 +#endif /* _WIN32_WCE */ +#endif /* WIN32 */ #ifdef WIN32 - #define WIN32_LEAN_AND_MEAN - #include - #include - #define HAVE_MMAP 1 - #define HAVE_MORECORE 0 - #define LACKS_UNISTD_H - #define LACKS_SYS_PARAM_H - #define LACKS_SYS_MMAN_H - #define LACKS_STRING_H - #define LACKS_STRINGS_H - #define LACKS_SYS_TYPES_H - #define LACKS_ERRNO_H - #define LACKS_SCHED_H - #ifndef MALLOC_FAILURE_ACTION - #define MALLOC_FAILURE_ACTION - #endif /* MALLOC_FAILURE_ACTION */ - #ifndef MMAP_CLEARS - #ifdef _WIN32_WCE /* WINCE reportedly does not clear */ - #define MMAP_CLEARS 0 - #else - #define MMAP_CLEARS 1 - #endif /* _WIN32_WCE */ - #endif /*MMAP_CLEARS */ -#endif /* WIN32 */ +#define WIN32_LEAN_AND_MEAN +#include +#include +#define HAVE_MMAP 1 +#define HAVE_MORECORE 0 +#define LACKS_UNISTD_H +#define LACKS_SYS_PARAM_H +#define LACKS_SYS_MMAN_H +#define LACKS_STRING_H +#define LACKS_STRINGS_H +#define LACKS_SYS_TYPES_H +#define LACKS_ERRNO_H +#define LACKS_SCHED_H +#ifndef MALLOC_FAILURE_ACTION +#define MALLOC_FAILURE_ACTION +#endif /* MALLOC_FAILURE_ACTION */ +#ifndef MMAP_CLEARS +#ifdef _WIN32_WCE /* WINCE reportedly does not clear */ +#define MMAP_CLEARS 0 +#else +#define MMAP_CLEARS 1 +#endif /* _WIN32_WCE */ +#endif /*MMAP_CLEARS */ +#endif /* WIN32 */ #if defined(DARWIN) || defined(_DARWIN) - /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ - #ifndef HAVE_MORECORE - #define HAVE_MORECORE 0 - #define HAVE_MMAP 1 - /* OSX allocators provide 16 byte alignment */ - #ifndef MALLOC_ALIGNMENT - #define MALLOC_ALIGNMENT ((size_t)16U) - #endif - #endif /* HAVE_MORECORE */ -#endif /* DARWIN */ +/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ +#ifndef HAVE_MORECORE +#define HAVE_MORECORE 0 +#define HAVE_MMAP 1 +/* OSX allocators provide 16 byte alignment */ +#ifndef MALLOC_ALIGNMENT +#define MALLOC_ALIGNMENT ((size_t)16U) +#endif +#endif /* HAVE_MORECORE */ +#endif /* DARWIN */ #ifndef LACKS_SYS_TYPES_H - #include /* For size_t */ -#endif /* LACKS_SYS_TYPES_H */ +#include /* For size_t */ +#endif /* LACKS_SYS_TYPES_H */ /* The maximum possible size_t value has all bits set */ -#define MAX_SIZE_T (~(size_t)0) - -#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */ - #define USE_LOCKS \ - ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \ - (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0)) -#endif /* USE_LOCKS */ - -#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */ - #if ((defined(__GNUC__) && \ - ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \ - defined(__i386__) || defined(__x86_64__))) || \ - (defined(_MSC_VER) && _MSC_VER >= 1310)) - #ifndef USE_SPIN_LOCKS - #define USE_SPIN_LOCKS 1 - #endif /* USE_SPIN_LOCKS */ - #elif USE_SPIN_LOCKS - #error "USE_SPIN_LOCKS defined without implementation" - #endif /* ... 
locks available... */ +#define MAX_SIZE_T (~(size_t)0) + +#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */ +#define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \ + (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0)) +#endif /* USE_LOCKS */ + +#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */ +#if ((defined(__GNUC__) && \ + ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \ + defined(__i386__) || defined(__x86_64__))) || \ + (defined(_MSC_VER) && _MSC_VER>=1310)) +#ifndef USE_SPIN_LOCKS +#define USE_SPIN_LOCKS 1 +#endif /* USE_SPIN_LOCKS */ +#elif USE_SPIN_LOCKS +#error "USE_SPIN_LOCKS defined without implementation" +#endif /* ... locks available... */ #elif !defined(USE_SPIN_LOCKS) - #define USE_SPIN_LOCKS 0 -#endif /* USE_LOCKS */ +#define USE_SPIN_LOCKS 0 +#endif /* USE_LOCKS */ #ifndef ONLY_MSPACES - #define ONLY_MSPACES 0 -#endif /* ONLY_MSPACES */ +#define ONLY_MSPACES 0 +#endif /* ONLY_MSPACES */ #ifndef MSPACES - #if ONLY_MSPACES - #define MSPACES 1 - #else /* ONLY_MSPACES */ - #define MSPACES 0 - #endif /* ONLY_MSPACES */ -#endif /* MSPACES */ +#if ONLY_MSPACES +#define MSPACES 1 +#else /* ONLY_MSPACES */ +#define MSPACES 0 +#endif /* ONLY_MSPACES */ +#endif /* MSPACES */ #ifndef MALLOC_ALIGNMENT - #define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) -#endif /* MALLOC_ALIGNMENT */ +#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) +#endif /* MALLOC_ALIGNMENT */ #ifndef FOOTERS - #define FOOTERS 0 -#endif /* FOOTERS */ +#define FOOTERS 0 +#endif /* FOOTERS */ #ifndef ABORT - #define ABORT abort() -#endif /* ABORT */ +#define ABORT abort() +#endif /* ABORT */ #ifndef ABORT_ON_ASSERT_FAILURE - #define ABORT_ON_ASSERT_FAILURE 1 -#endif /* ABORT_ON_ASSERT_FAILURE */ +#define ABORT_ON_ASSERT_FAILURE 1 +#endif /* ABORT_ON_ASSERT_FAILURE */ #ifndef PROCEED_ON_ERROR - #define PROCEED_ON_ERROR 0 -#endif /* PROCEED_ON_ERROR */ +#define PROCEED_ON_ERROR 0 +#endif /* PROCEED_ON_ERROR */ #ifndef INSECURE - #define INSECURE 0 -#endif /* INSECURE */ +#define INSECURE 0 +#endif /* INSECURE */ #ifndef MALLOC_INSPECT_ALL - #define MALLOC_INSPECT_ALL 0 -#endif /* MALLOC_INSPECT_ALL */ +#define MALLOC_INSPECT_ALL 0 +#endif /* MALLOC_INSPECT_ALL */ #ifndef HAVE_MMAP - #define HAVE_MMAP 1 -#endif /* HAVE_MMAP */ +#define HAVE_MMAP 1 +#endif /* HAVE_MMAP */ #ifndef MMAP_CLEARS - #define MMAP_CLEARS 1 -#endif /* MMAP_CLEARS */ +#define MMAP_CLEARS 1 +#endif /* MMAP_CLEARS */ #ifndef HAVE_MREMAP - #ifdef linux - #define HAVE_MREMAP 1 - #define _GNU_SOURCE /* Turns on mremap() definition */ - #else /* linux */ - #define HAVE_MREMAP 0 - #endif /* linux */ -#endif /* HAVE_MREMAP */ +#ifdef linux +#define HAVE_MREMAP 1 +#define _GNU_SOURCE /* Turns on mremap() definition */ +#else /* linux */ +#define HAVE_MREMAP 0 +#endif /* linux */ +#endif /* HAVE_MREMAP */ #ifndef MALLOC_FAILURE_ACTION - #define MALLOC_FAILURE_ACTION errno = ENOMEM; -#endif /* MALLOC_FAILURE_ACTION */ +#define MALLOC_FAILURE_ACTION errno = ENOMEM; +#endif /* MALLOC_FAILURE_ACTION */ #ifndef HAVE_MORECORE - #if ONLY_MSPACES - #define HAVE_MORECORE 0 - #else /* ONLY_MSPACES */ - #define HAVE_MORECORE 1 - #endif /* ONLY_MSPACES */ -#endif /* HAVE_MORECORE */ +#if ONLY_MSPACES +#define HAVE_MORECORE 0 +#else /* ONLY_MSPACES */ +#define HAVE_MORECORE 1 +#endif /* ONLY_MSPACES */ +#endif /* HAVE_MORECORE */ #if !HAVE_MORECORE - #define MORECORE_CONTIGUOUS 0 -#else /* !HAVE_MORECORE */ - #define MORECORE_DEFAULT sbrk - #ifndef MORECORE_CONTIGUOUS - 
#define MORECORE_CONTIGUOUS 1 - #endif /* MORECORE_CONTIGUOUS */ -#endif /* HAVE_MORECORE */ +#define MORECORE_CONTIGUOUS 0 +#else /* !HAVE_MORECORE */ +#define MORECORE_DEFAULT sbrk +#ifndef MORECORE_CONTIGUOUS +#define MORECORE_CONTIGUOUS 1 +#endif /* MORECORE_CONTIGUOUS */ +#endif /* HAVE_MORECORE */ #ifndef DEFAULT_GRANULARITY - #if (MORECORE_CONTIGUOUS || defined(WIN32)) - #define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ - #else /* MORECORE_CONTIGUOUS */ - #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) - #endif /* MORECORE_CONTIGUOUS */ -#endif /* DEFAULT_GRANULARITY */ +#if (MORECORE_CONTIGUOUS || defined(WIN32)) +#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ +#else /* MORECORE_CONTIGUOUS */ +#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) +#endif /* MORECORE_CONTIGUOUS */ +#endif /* DEFAULT_GRANULARITY */ #ifndef DEFAULT_TRIM_THRESHOLD - #ifndef MORECORE_CANNOT_TRIM - #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) - #else /* MORECORE_CANNOT_TRIM */ - #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T - #endif /* MORECORE_CANNOT_TRIM */ -#endif /* DEFAULT_TRIM_THRESHOLD */ +#ifndef MORECORE_CANNOT_TRIM +#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) +#else /* MORECORE_CANNOT_TRIM */ +#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T +#endif /* MORECORE_CANNOT_TRIM */ +#endif /* DEFAULT_TRIM_THRESHOLD */ #ifndef DEFAULT_MMAP_THRESHOLD - #if HAVE_MMAP - #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) - #else /* HAVE_MMAP */ - #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T - #endif /* HAVE_MMAP */ -#endif /* DEFAULT_MMAP_THRESHOLD */ +#if HAVE_MMAP +#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) +#else /* HAVE_MMAP */ +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T +#endif /* HAVE_MMAP */ +#endif /* DEFAULT_MMAP_THRESHOLD */ #ifndef MAX_RELEASE_CHECK_RATE - #if HAVE_MMAP - #define MAX_RELEASE_CHECK_RATE 4095 - #else - #define MAX_RELEASE_CHECK_RATE MAX_SIZE_T - #endif /* HAVE_MMAP */ -#endif /* MAX_RELEASE_CHECK_RATE */ +#if HAVE_MMAP +#define MAX_RELEASE_CHECK_RATE 4095 +#else +#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T +#endif /* HAVE_MMAP */ +#endif /* MAX_RELEASE_CHECK_RATE */ #ifndef USE_BUILTIN_FFS - #define USE_BUILTIN_FFS 0 -#endif /* USE_BUILTIN_FFS */ +#define USE_BUILTIN_FFS 0 +#endif /* USE_BUILTIN_FFS */ #ifndef USE_DEV_RANDOM - #define USE_DEV_RANDOM 0 -#endif /* USE_DEV_RANDOM */ +#define USE_DEV_RANDOM 0 +#endif /* USE_DEV_RANDOM */ #ifndef NO_MALLINFO - #define NO_MALLINFO 0 -#endif /* NO_MALLINFO */ +#define NO_MALLINFO 0 +#endif /* NO_MALLINFO */ #ifndef MALLINFO_FIELD_TYPE - #define MALLINFO_FIELD_TYPE size_t -#endif /* MALLINFO_FIELD_TYPE */ +#define MALLINFO_FIELD_TYPE size_t +#endif /* MALLINFO_FIELD_TYPE */ #ifndef NO_MALLOC_STATS - #define NO_MALLOC_STATS 0 -#endif /* NO_MALLOC_STATS */ +#define NO_MALLOC_STATS 0 +#endif /* NO_MALLOC_STATS */ #ifndef NO_SEGMENT_TRAVERSAL - #define NO_SEGMENT_TRAVERSAL 0 -#endif /* NO_SEGMENT_TRAVERSAL */ +#define NO_SEGMENT_TRAVERSAL 0 +#endif /* NO_SEGMENT_TRAVERSAL */ /* mallopt tuning options. 
SVID/XPG defines four standard parameter @@ -731,9 +731,9 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP #undef M_TRIM_THRESHOLD #undef M_GRANULARITY #undef M_MMAP_THRESHOLD -#define M_TRIM_THRESHOLD (-1) -#define M_GRANULARITY (-2) -#define M_MMAP_THRESHOLD (-3) +#define M_TRIM_THRESHOLD (-1) +#define M_GRANULARITY (-2) +#define M_MMAP_THRESHOLD (-3) /* ------------------------ Mallinfo declarations ------------------------ */ @@ -762,32 +762,28 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP /* #define HAVE_USR_INCLUDE_MALLOC_H */ - #ifdef HAVE_USR_INCLUDE_MALLOC_H - #include "/usr/include/malloc.h" - #else /* HAVE_USR_INCLUDE_MALLOC_H */ - #ifndef STRUCT_MALLINFO_DECLARED - /* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is - * defined */ - #define _STRUCT_MALLINFO - #define STRUCT_MALLINFO_DECLARED 1 +#ifdef HAVE_USR_INCLUDE_MALLOC_H +#include "/usr/include/malloc.h" +#else /* HAVE_USR_INCLUDE_MALLOC_H */ +#ifndef STRUCT_MALLINFO_DECLARED +/* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */ +#define _STRUCT_MALLINFO +#define STRUCT_MALLINFO_DECLARED 1 struct mallinfo { - - MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ - MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ - MALLINFO_FIELD_TYPE smblks; /* always 0 */ - MALLINFO_FIELD_TYPE hblks; /* always 0 */ - MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ - MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ - MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ - MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ - MALLINFO_FIELD_TYPE fordblks; /* total free space */ - MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ - + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ + MALLINFO_FIELD_TYPE smblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ + MALLINFO_FIELD_TYPE fordblks; /* total free space */ + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ }; - - #endif /* STRUCT_MALLINFO_DECLARED */ - #endif /* HAVE_USR_INCLUDE_MALLOC_H */ -#endif /* NO_MALLINFO */ +#endif /* STRUCT_MALLINFO_DECLARED */ +#endif /* HAVE_USR_INCLUDE_MALLOC_H */ +#endif /* NO_MALLINFO */ /* Try to persuade compilers to inline. 
The most critical functions for @@ -796,14 +792,14 @@ struct mallinfo { #ifndef FORCEINLINE #if defined(__GNUC__) - #define FORCEINLINE __inline __attribute__((always_inline)) +#define FORCEINLINE __inline __attribute__ ((always_inline)) #elif defined(_MSC_VER) #define FORCEINLINE __forceinline #endif #endif #ifndef NOINLINE #if defined(__GNUC__) - #define NOINLINE __attribute__((noinline)) + #define NOINLINE __attribute__ ((noinline)) #elif defined(_MSC_VER) #define NOINLINE __declspec(noinline) #else @@ -813,43 +809,42 @@ struct mallinfo { #ifdef __cplusplus extern "C" { - - #ifndef FORCEINLINE - #define FORCEINLINE inline - #endif -#endif /* __cplusplus */ #ifndef FORCEINLINE - #define FORCEINLINE + #define FORCEINLINE inline +#endif +#endif /* __cplusplus */ +#ifndef FORCEINLINE + #define FORCEINLINE #endif #if !ONLY_MSPACES /* ------------------- Declarations of public routines ------------------- */ - #ifndef USE_DL_PREFIX - #define dlcalloc calloc - #define dlfree free - #define dlmalloc malloc - #define dlmemalign memalign - #define dlposix_memalign posix_memalign - #define dlrealloc realloc - #define dlrealloc_in_place realloc_in_place - #define dlvalloc valloc - #define dlpvalloc pvalloc - #define dlmallinfo mallinfo - #define dlmallopt mallopt - #define dlmalloc_trim malloc_trim - #define dlmalloc_stats malloc_stats - #define dlmalloc_usable_size malloc_usable_size - #define dlmalloc_footprint malloc_footprint - #define dlmalloc_max_footprint malloc_max_footprint - #define dlmalloc_footprint_limit malloc_footprint_limit - #define dlmalloc_set_footprint_limit malloc_set_footprint_limit - #define dlmalloc_inspect_all malloc_inspect_all - #define dlindependent_calloc independent_calloc - #define dlindependent_comalloc independent_comalloc - #define dlbulk_free bulk_free - #endif /* USE_DL_PREFIX */ +#ifndef USE_DL_PREFIX +#define dlcalloc calloc +#define dlfree free +#define dlmalloc malloc +#define dlmemalign memalign +#define dlposix_memalign posix_memalign +#define dlrealloc realloc +#define dlrealloc_in_place realloc_in_place +#define dlvalloc valloc +#define dlpvalloc pvalloc +#define dlmallinfo mallinfo +#define dlmallopt mallopt +#define dlmalloc_trim malloc_trim +#define dlmalloc_stats malloc_stats +#define dlmalloc_usable_size malloc_usable_size +#define dlmalloc_footprint malloc_footprint +#define dlmalloc_max_footprint malloc_max_footprint +#define dlmalloc_footprint_limit malloc_footprint_limit +#define dlmalloc_set_footprint_limit malloc_set_footprint_limit +#define dlmalloc_inspect_all malloc_inspect_all +#define dlindependent_calloc independent_calloc +#define dlindependent_comalloc independent_comalloc +#define dlbulk_free bulk_free +#endif /* USE_DL_PREFIX */ /* malloc(size_t n) @@ -865,7 +860,7 @@ extern "C" { maximum supported value of n differs across systems, but is in all cases less than the maximum representable value of a size_t. */ -DLMALLOC_EXPORT void *dlmalloc(size_t); +DLMALLOC_EXPORT void* dlmalloc(size_t); /* free(void* p) @@ -874,14 +869,14 @@ DLMALLOC_EXPORT void *dlmalloc(size_t); It has no effect if p is null. If p was not malloced or already freed, free(p) will by default cause the current program to abort. */ -DLMALLOC_EXPORT void dlfree(void *); +DLMALLOC_EXPORT void dlfree(void*); /* calloc(size_t n_elements, size_t element_size); Returns a pointer to n_elements * element_size bytes, with all locations set to zero. 
*/ -DLMALLOC_EXPORT void *dlcalloc(size_t, size_t); +DLMALLOC_EXPORT void* dlcalloc(size_t, size_t); /* realloc(void* p, size_t n) @@ -905,7 +900,7 @@ DLMALLOC_EXPORT void *dlcalloc(size_t, size_t); The old unix realloc convention of allowing the last-free'd chunk to be used as an argument to realloc is not supported. */ -DLMALLOC_EXPORT void *dlrealloc(void *, size_t); +DLMALLOC_EXPORT void* dlrealloc(void*, size_t); /* realloc_in_place(void* p, size_t n) @@ -920,7 +915,7 @@ DLMALLOC_EXPORT void *dlrealloc(void *, size_t); Returns p if successful; otherwise null. */ -DLMALLOC_EXPORT void *dlrealloc_in_place(void *, size_t); +DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t); /* memalign(size_t alignment, size_t n); @@ -934,7 +929,7 @@ DLMALLOC_EXPORT void *dlrealloc_in_place(void *, size_t); Overreliance on memalign is a sure way to fragment space. */ -DLMALLOC_EXPORT void *dlmemalign(size_t, size_t); +DLMALLOC_EXPORT void* dlmemalign(size_t, size_t); /* int posix_memalign(void** pp, size_t alignment, size_t n); @@ -944,14 +939,14 @@ DLMALLOC_EXPORT void *dlmemalign(size_t, size_t); returns EINVAL if the alignment is not a power of two (3) fails and returns ENOMEM if memory cannot be allocated. */ -DLMALLOC_EXPORT int dlposix_memalign(void **, size_t, size_t); +DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t); /* valloc(size_t n); Equivalent to memalign(pagesize, n), where pagesize is the page size of the system. If the pagesize is unknown, 4096 is used. */ -DLMALLOC_EXPORT void *dlvalloc(size_t); +DLMALLOC_EXPORT void* dlvalloc(size_t); /* mallopt(int parameter_number, int parameter_value) @@ -1026,7 +1021,7 @@ DLMALLOC_EXPORT size_t dlmalloc_footprint_limit(); */ DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes); - #if MALLOC_INSPECT_ALL +#if MALLOC_INSPECT_ALL /* malloc_inspect_all(void(*handler)(void *start, void *end, @@ -1048,23 +1043,19 @@ DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes); than 1000, you could write: static int count = 0; void count_chunks(void* start, void* end, size_t used, void* arg) { - if (used >= 1000) ++count; - } - then: malloc_inspect_all(count_chunks, NULL); malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined. */ -DLMALLOC_EXPORT void dlmalloc_inspect_all(void (*handler)(void *, void *, - size_t, void *), - void *arg); +DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), + void* arg); - #endif /* MALLOC_INSPECT_ALL */ +#endif /* MALLOC_INSPECT_ALL */ - #if !NO_MALLINFO +#if !NO_MALLINFO /* mallinfo() Returns (by copy) a struct containing various summary statistics: @@ -1088,7 +1079,7 @@ DLMALLOC_EXPORT void dlmalloc_inspect_all(void (*handler)(void *, void *, thus be inaccurate. 
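To make the memalign/posix_memalign semantics documented above concrete, a hypothetical caller could look like the following; it assumes dlmalloc was built with USE_DL_PREFIX defined so that the dl-prefixed symbols exist alongside the system allocator.

#include <stdint.h>
#include <stdio.h>

/* Illustration only; prototypes repeated from the declarations above. */
extern int  dlposix_memalign(void **pp, size_t alignment, size_t n);
extern void dlfree(void *p);

int demo_posix_memalign(void) {

  void *p = NULL;

  /* 0 on success; EINVAL for a non-power-of-two alignment, ENOMEM on OOM */
  if (dlposix_memalign(&p, 64, 1000) != 0) return 1;
  printf("64-byte aligned: %s\n", ((uintptr_t)p % 64) == 0 ? "yes" : "no");
  dlfree(p);
  return 0;

}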
*/ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); - #endif /* NO_MALLINFO */ +#endif /* NO_MALLINFO */ /* independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); @@ -1126,7 +1117,6 @@ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); struct Node { int item; struct Node* next; }; struct Node* build_list() { - struct Node** pool; int n = read_number_of_nodes_needed(); if (n <= 0) return 0; @@ -1138,11 +1128,9 @@ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); pool[i]->next = pool[i+1]; free(pool); // Can now free the array (or not, if it is needed later) return first; - } - */ -DLMALLOC_EXPORT void **dlindependent_calloc(size_t, size_t, void **); +DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); /* independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); @@ -1181,7 +1169,6 @@ DLMALLOC_EXPORT void **dlindependent_calloc(size_t, size_t, void **); struct Foot { ... } void send_message(char* msg) { - int msglen = strlen(msg); size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; void* chunks[3]; @@ -1191,7 +1178,6 @@ DLMALLOC_EXPORT void **dlindependent_calloc(size_t, size_t, void **); char* body = (char*)(chunks[1]); struct Foot* foot = (struct Foot*)(chunks[2]); // ... - } In general though, independent_comalloc is worth using only for @@ -1202,7 +1188,7 @@ DLMALLOC_EXPORT void **dlindependent_calloc(size_t, size_t, void **); since it cannot reuse existing noncontiguous small chunks that might be available for some of the elements. */ -DLMALLOC_EXPORT void **dlindependent_comalloc(size_t, size_t *, void **); +DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**); /* bulk_free(void* array[], size_t n_elements) @@ -1213,14 +1199,14 @@ DLMALLOC_EXPORT void **dlindependent_comalloc(size_t, size_t *, void **); is returned. For large arrays of pointers with poor locality, it may be worthwhile to sort this array before calling bulk_free. */ -DLMALLOC_EXPORT size_t dlbulk_free(void **, size_t n_elements); +DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements); /* pvalloc(size_t n); Equivalent to valloc(minimum-page-that-holds(n)), that is, round up n to nearest pagesize. */ -DLMALLOC_EXPORT void *dlpvalloc(size_t); +DLMALLOC_EXPORT void* dlpvalloc(size_t); /* malloc_trim(size_t pad); @@ -1243,7 +1229,7 @@ DLMALLOC_EXPORT void *dlpvalloc(size_t); Malloc_trim returns 1 if it actually released any memory, else 0. */ -DLMALLOC_EXPORT int dlmalloc_trim(size_t); +DLMALLOC_EXPORT int dlmalloc_trim(size_t); /* malloc_stats(); @@ -1264,7 +1250,7 @@ DLMALLOC_EXPORT int dlmalloc_trim(size_t); malloc_stats prints only the most commonly interesting statistics. More information can be obtained by calling mallinfo. */ -DLMALLOC_EXPORT void dlmalloc_stats(void); +DLMALLOC_EXPORT void dlmalloc_stats(void); /* malloc_usable_size(void* p); @@ -1280,9 +1266,9 @@ DLMALLOC_EXPORT void dlmalloc_stats(void); p = malloc(n); assert(malloc_usable_size(p) >= 256); */ -size_t dlmalloc_usable_size(void *); +size_t dlmalloc_usable_size(void*); -#endif /* ONLY_MSPACES */ +#endif /* ONLY_MSPACES */ #if MSPACES @@ -1290,7 +1276,7 @@ size_t dlmalloc_usable_size(void *); mspace is an opaque type representing an independent region of space that supports mspace_malloc, etc. 
*/ -typedef void *mspace; +typedef void* mspace; /* create_mspace creates and returns a new independent space with the @@ -1322,8 +1308,7 @@ DLMALLOC_EXPORT size_t destroy_mspace(mspace msp); Destroying this space will deallocate all additionally allocated space (if possible) but not the initial base. */ -DLMALLOC_EXPORT mspace create_mspace_with_base(void *base, size_t capacity, - int locked); +DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked); /* mspace_track_large_chunks controls whether requests for large chunks @@ -1338,11 +1323,12 @@ DLMALLOC_EXPORT mspace create_mspace_with_base(void *base, size_t capacity, */ DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable); + /* mspace_malloc behaves as malloc, but operates within the given space. */ -DLMALLOC_EXPORT void *mspace_malloc(mspace msp, size_t bytes); +DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes); /* mspace_free behaves as free, but operates within @@ -1352,7 +1338,7 @@ DLMALLOC_EXPORT void *mspace_malloc(mspace msp, size_t bytes); free may be called instead of mspace_free because freed chunks from any space are handled by their originating spaces. */ -DLMALLOC_EXPORT void mspace_free(mspace msp, void *mem); +DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem); /* mspace_realloc behaves as realloc, but operates within @@ -1363,38 +1349,33 @@ DLMALLOC_EXPORT void mspace_free(mspace msp, void *mem); realloced chunks from any space are handled by their originating spaces. */ -DLMALLOC_EXPORT void *mspace_realloc(mspace msp, void *mem, size_t newsize); +DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize); /* mspace_calloc behaves as calloc, but operates within the given space. */ -DLMALLOC_EXPORT void *mspace_calloc(mspace msp, size_t n_elements, - size_t elem_size); +DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); /* mspace_memalign behaves as memalign, but operates within the given space. */ -DLMALLOC_EXPORT void *mspace_memalign(mspace msp, size_t alignment, - size_t bytes); +DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); /* mspace_independent_calloc behaves as independent_calloc, but operates within the given space. */ -DLMALLOC_EXPORT void **mspace_independent_calloc(mspace msp, size_t n_elements, - size_t elem_size, - void * chunks[]); +DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]); /* mspace_independent_comalloc behaves as independent_comalloc, but operates within the given space. */ -DLMALLOC_EXPORT void **mspace_independent_comalloc(mspace msp, - size_t n_elements, - size_t sizes[], - void * chunks[]); +DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]); /* mspace_footprint() returns the number of bytes obtained from the @@ -1408,18 +1389,19 @@ DLMALLOC_EXPORT size_t mspace_footprint(mspace msp); */ DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp); - #if !NO_MALLINFO + +#if !NO_MALLINFO /* mspace_mallinfo behaves as mallinfo, but reports properties of the given space. 
*/ DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp); - #endif /* NO_MALLINFO */ +#endif /* NO_MALLINFO */ /* malloc_usable_size(void* p) behaves the same as malloc_usable_size; */ -DLMALLOC_EXPORT size_t mspace_usable_size(const void *mem); +DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem); /* mspace_malloc_stats behaves as malloc_stats, but reports @@ -1438,13 +1420,11 @@ DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad); */ DLMALLOC_EXPORT int mspace_mallopt(int, int); -#endif /* MSPACES */ +#endif /* MSPACES */ #ifdef __cplusplus - -} /* end of extern "C" */ - -#endif /* __cplusplus */ +} /* end of extern "C" */ +#endif /* __cplusplus */ /* ======================================================================== @@ -1459,207 +1439,195 @@ DLMALLOC_EXPORT int mspace_mallopt(int, int); /*------------------------------ internal #includes ---------------------- */ #ifdef _MSC_VER - #pragma warning(disable : 4146) /* no "unsigned" warnings */ -#endif /* _MSC_VER */ +#pragma warning( disable : 4146 ) /* no "unsigned" warnings */ +#endif /* _MSC_VER */ #if !NO_MALLOC_STATS - #include /* for printing in malloc_stats */ -#endif /* NO_MALLOC_STATS */ +#include /* for printing in malloc_stats */ +#endif /* NO_MALLOC_STATS */ #ifndef LACKS_ERRNO_H - #include /* for MALLOC_FAILURE_ACTION */ -#endif /* LACKS_ERRNO_H */ +#include /* for MALLOC_FAILURE_ACTION */ +#endif /* LACKS_ERRNO_H */ #ifdef DEBUG - #if ABORT_ON_ASSERT_FAILURE - #undef assert - #define assert(x) \ - if (!(x)) ABORT - #else /* ABORT_ON_ASSERT_FAILURE */ - #include - #endif /* ABORT_ON_ASSERT_FAILURE */ -#else /* DEBUG */ - #ifndef assert - #define assert(x) - #endif - #define DEBUG 0 -#endif /* DEBUG */ +#if ABORT_ON_ASSERT_FAILURE +#undef assert +#define assert(x) if(!(x)) ABORT +#else /* ABORT_ON_ASSERT_FAILURE */ +#include +#endif /* ABORT_ON_ASSERT_FAILURE */ +#else /* DEBUG */ +#ifndef assert +#define assert(x) +#endif +#define DEBUG 0 +#endif /* DEBUG */ #if !defined(WIN32) && !defined(LACKS_TIME_H) - #include /* for magic initialization */ -#endif /* WIN32 */ +#include /* for magic initialization */ +#endif /* WIN32 */ #ifndef LACKS_STDLIB_H - #include /* for abort() */ -#endif /* LACKS_STDLIB_H */ +#include /* for abort() */ +#endif /* LACKS_STDLIB_H */ #ifndef LACKS_STRING_H - #include /* for memset etc */ -#endif /* LACKS_STRING_H */ +#include /* for memset etc */ +#endif /* LACKS_STRING_H */ #if USE_BUILTIN_FFS - #ifndef LACKS_STRINGS_H - #include /* for ffs */ - #endif /* LACKS_STRINGS_H */ -#endif /* USE_BUILTIN_FFS */ +#ifndef LACKS_STRINGS_H +#include /* for ffs */ +#endif /* LACKS_STRINGS_H */ +#endif /* USE_BUILTIN_FFS */ #if HAVE_MMAP - #ifndef LACKS_SYS_MMAN_H - /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */ - #if (defined(linux) && !defined(__USE_GNU)) - #define __USE_GNU 1 - #include /* for mmap */ - #undef __USE_GNU - #else - #include /* for mmap */ - #endif /* linux */ - #endif /* LACKS_SYS_MMAN_H */ - #ifndef LACKS_FCNTL_H - #include - #endif /* LACKS_FCNTL_H */ -#endif /* HAVE_MMAP */ +#ifndef LACKS_SYS_MMAN_H +/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */ +#if (defined(linux) && !defined(__USE_GNU)) +#define __USE_GNU 1 +#include /* for mmap */ +#undef __USE_GNU +#else +#include /* for mmap */ +#endif /* linux */ +#endif /* LACKS_SYS_MMAN_H */ +#ifndef LACKS_FCNTL_H +#include +#endif /* LACKS_FCNTL_H */ +#endif /* HAVE_MMAP */ #ifndef LACKS_UNISTD_H - #include /* for sbrk, sysconf */ -#else /* LACKS_UNISTD_H */ - #if 
!defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) -extern void *sbrk(ptrdiff_t); - #endif /* FreeBSD etc */ -#endif /* LACKS_UNISTD_H */ +#include /* for sbrk, sysconf */ +#else /* LACKS_UNISTD_H */ +#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) +extern void* sbrk(ptrdiff_t); +#endif /* FreeBSD etc */ +#endif /* LACKS_UNISTD_H */ /* Declarations for locking */ #if USE_LOCKS - #ifndef WIN32 - #if defined(__SVR4) && defined(__sun) /* solaris */ - #include - #elif !defined(LACKS_SCHED_H) - #include - #endif /* solaris or LACKS_SCHED_H */ - #if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || \ - !USE_SPIN_LOCKS - #include - #endif /* USE_RECURSIVE_LOCKS ... */ - #elif defined(_MSC_VER) - #ifndef _M_AMD64 - /* These are already defined on AMD64 builds */ - #ifdef __cplusplus +#ifndef WIN32 +#if defined (__SVR4) && defined (__sun) /* solaris */ +#include +#elif !defined(LACKS_SCHED_H) +#include +#endif /* solaris or LACKS_SCHED_H */ +#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS +#include +#endif /* USE_RECURSIVE_LOCKS ... */ +#elif defined(_MSC_VER) +#ifndef _M_AMD64 +/* These are already defined on AMD64 builds */ +#ifdef __cplusplus extern "C" { - - #endif /* __cplusplus */ -LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, - LONG Comp); +#endif /* __cplusplus */ +LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp); LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value); - #ifdef __cplusplus - +#ifdef __cplusplus } - - #endif /* __cplusplus */ - #endif /* _M_AMD64 */ - #pragma intrinsic(_InterlockedCompareExchange) - #pragma intrinsic(_InterlockedExchange) - #define interlockedcompareexchange _InterlockedCompareExchange - #define interlockedexchange _InterlockedExchange - #elif defined(WIN32) && defined(__GNUC__) - #define interlockedcompareexchange(a, b, c) \ - __sync_val_compare_and_swap(a, c, b) - #define interlockedexchange __sync_lock_test_and_set - #endif /* Win32 */ -#else /* USE_LOCKS */ -#endif /* USE_LOCKS */ +#endif /* __cplusplus */ +#endif /* _M_AMD64 */ +#pragma intrinsic (_InterlockedCompareExchange) +#pragma intrinsic (_InterlockedExchange) +#define interlockedcompareexchange _InterlockedCompareExchange +#define interlockedexchange _InterlockedExchange +#elif defined(WIN32) && defined(__GNUC__) +#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b) +#define interlockedexchange __sync_lock_test_and_set +#endif /* Win32 */ +#else /* USE_LOCKS */ +#endif /* USE_LOCKS */ #ifndef LOCK_AT_FORK - #define LOCK_AT_FORK 0 +#define LOCK_AT_FORK 0 #endif /* Declarations for bit scanning on win32 */ -#if defined(_MSC_VER) && _MSC_VER >= 1300 - #ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ - #ifdef __cplusplus +#if defined(_MSC_VER) && _MSC_VER>=1300 +#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ +#ifdef __cplusplus extern "C" { - - #endif /* __cplusplus */ +#endif /* __cplusplus */ unsigned char _BitScanForward(unsigned long *index, unsigned long mask); unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); - #ifdef __cplusplus - +#ifdef __cplusplus } +#endif /* __cplusplus */ - #endif /* __cplusplus */ - - #define BitScanForward _BitScanForward - #define BitScanReverse _BitScanReverse - #pragma intrinsic(_BitScanForward) - #pragma intrinsic(_BitScanReverse) - #endif /* BitScanForward */ -#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ 
+#define BitScanForward _BitScanForward +#define BitScanReverse _BitScanReverse +#pragma intrinsic(_BitScanForward) +#pragma intrinsic(_BitScanReverse) +#endif /* BitScanForward */ +#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ #ifndef WIN32 - #ifndef malloc_getpagesize - #ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ - #ifndef _SC_PAGE_SIZE - #define _SC_PAGE_SIZE _SC_PAGESIZE - #endif - #endif - #ifdef _SC_PAGE_SIZE - #define malloc_getpagesize sysconf(_SC_PAGE_SIZE) - #else - #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) -extern size_t getpagesize(); - #define malloc_getpagesize getpagesize() - #else - #ifdef WIN32 /* use supplied emulation of getpagesize */ - #define malloc_getpagesize getpagesize() - #else - #ifndef LACKS_SYS_PARAM_H - #include - #endif - #ifdef EXEC_PAGESIZE - #define malloc_getpagesize EXEC_PAGESIZE - #else - #ifdef NBPG - #ifndef CLSIZE - #define malloc_getpagesize NBPG - #else - #define malloc_getpagesize (NBPG * CLSIZE) - #endif - #else - #ifdef NBPC - #define malloc_getpagesize NBPC - #else - #ifdef PAGESIZE - #define malloc_getpagesize PAGESIZE - #else /* just guess */ - #define malloc_getpagesize ((size_t)4096U) - #endif - #endif - #endif - #endif - #endif - #endif - #endif - #endif +#ifndef malloc_getpagesize +# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ +# ifndef _SC_PAGE_SIZE +# define _SC_PAGE_SIZE _SC_PAGESIZE +# endif +# endif +# ifdef _SC_PAGE_SIZE +# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) +# else +# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) + extern size_t getpagesize(); +# define malloc_getpagesize getpagesize() +# else +# ifdef WIN32 /* use supplied emulation of getpagesize */ +# define malloc_getpagesize getpagesize() +# else +# ifndef LACKS_SYS_PARAM_H +# include +# endif +# ifdef EXEC_PAGESIZE +# define malloc_getpagesize EXEC_PAGESIZE +# else +# ifdef NBPG +# ifndef CLSIZE +# define malloc_getpagesize NBPG +# else +# define malloc_getpagesize (NBPG * CLSIZE) +# endif +# else +# ifdef NBPC +# define malloc_getpagesize NBPC +# else +# ifdef PAGESIZE +# define malloc_getpagesize PAGESIZE +# else /* just guess */ +# define malloc_getpagesize ((size_t)4096U) +# endif +# endif +# endif +# endif +# endif +# endif +# endif +#endif #endif /* ------------------- size_t and alignment properties -------------------- */ /* The byte and bit size of a size_t */ -#define SIZE_T_SIZE (sizeof(size_t)) -#define SIZE_T_BITSIZE (sizeof(size_t) << 3) +#define SIZE_T_SIZE (sizeof(size_t)) +#define SIZE_T_BITSIZE (sizeof(size_t) << 3) /* Some constants coerced to size_t */ /* Annoying but necessary to avoid errors on some platforms */ -#define SIZE_T_ZERO ((size_t)0) -#define SIZE_T_ONE ((size_t)1) -#define SIZE_T_TWO ((size_t)2) -#define SIZE_T_FOUR ((size_t)4) -#define TWO_SIZE_T_SIZES (SIZE_T_SIZE << 1) -#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE << 2) -#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES + TWO_SIZE_T_SIZES) -#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) +#define SIZE_T_ZERO ((size_t)0) +#define SIZE_T_ONE ((size_t)1) +#define SIZE_T_TWO ((size_t)2) +#define SIZE_T_FOUR ((size_t)4) +#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) +#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) +#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) +#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) /* The bit mask value corresponding to MALLOC_ALIGNMENT */ -#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) +#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) /* True if address a has acceptable 
alignment */ -#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) +#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) /* the number of bytes to offset an address to align it */ -#define align_offset(A) \ - ((((size_t)(A)&CHUNK_ALIGN_MASK) == 0) \ - ? 0 \ - : ((MALLOC_ALIGNMENT - ((size_t)(A)&CHUNK_ALIGN_MASK)) & \ - CHUNK_ALIGN_MASK)) +#define align_offset(A)\ + ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ + ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) /* -------------------------- MMAP preliminaries ------------------------- */ @@ -1669,202 +1637,193 @@ extern size_t getpagesize(); using so many "#if"s. */ + /* MORECORE and MMAP must return MFAIL on failure */ -#define MFAIL ((void *)(MAX_SIZE_T)) -#define CMFAIL ((char *)(MFAIL)) /* defined for convenience */ +#define MFAIL ((void*)(MAX_SIZE_T)) +#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ #if HAVE_MMAP - #ifndef WIN32 - #define MMAP_PROT (PROT_READ | PROT_WRITE) - #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) - #define MAP_ANONYMOUS MAP_ANON - #endif /* MAP_ANON */ - #ifdef MAP_ANONYMOUS - - #define MMAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS) +#ifndef WIN32 +#define MMAP_PROT (PROT_READ|PROT_WRITE) +#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) +#define MAP_ANONYMOUS MAP_ANON +#endif /* MAP_ANON */ +#ifdef MAP_ANONYMOUS -static FORCEINLINE void *unixmmap(size_t size) { +#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) - void *result; +static FORCEINLINE void* unixmmap(size_t size) { + void* result; result = mmap(0, size, MMAP_PROT, MMAP_FLAGS, -1, 0); - if (result == MFAIL) return MFAIL; + if (result == MFAIL) + return MFAIL; return result; - } -static FORCEINLINE int unixmunmap(void *ptr, size_t size) { - +static FORCEINLINE int unixmunmap(void* ptr, size_t size) { int result; result = munmap(ptr, size); - if (result != 0) return result; + if (result != 0) + return result; return result; - } - #define MMAP_DEFAULT(s) unixmmap(s) - #define MUNMAP_DEFAULT(a, s) unixmunmap((a), (s)) +#define MMAP_DEFAULT(s) unixmmap(s) +#define MUNMAP_DEFAULT(a, s) unixmunmap((a), (s)) - #else /* MAP_ANONYMOUS */ - /* - Nearly all versions of mmap support MAP_ANONYMOUS, so the following - is unlikely to be needed, but is supplied just in case. - */ - #define MMAP_FLAGS (MAP_PRIVATE) -static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ - #define MMAP_DEFAULT(s) \ - ((dev_zero_fd < 0) \ - ? (dev_zero_fd = open("/dev/zero", O_RDWR), \ - mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) \ - : mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) - #define MUNMAP_DEFAULT(a, s) munmap((a), (s)) - #endif /* MAP_ANONYMOUS */ +#else /* MAP_ANONYMOUS */ +/* + Nearly all versions of mmap support MAP_ANONYMOUS, so the following + is unlikely to be needed, but is supplied just in case. +*/ +#define MMAP_FLAGS (MAP_PRIVATE) +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ +#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? 
\ + (dev_zero_fd = open("/dev/zero", O_RDWR), \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) +#define MUNMAP_DEFAULT(a, s) munmap((a), (s)) +#endif /* MAP_ANONYMOUS */ - #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) +#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) - #else /* WIN32 */ +#else /* WIN32 */ /* Win32 MMAP via VirtualAlloc */ -static FORCEINLINE void *win32mmap(size_t size) { - - void *ptr; +static FORCEINLINE void* win32mmap(size_t size) { + void* ptr; - ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); - if (ptr == 0) return MFAIL; + ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); + if (ptr == 0) + return MFAIL; return ptr; - } /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ -static FORCEINLINE void *win32direct_mmap(size_t size) { - - void *ptr; +static FORCEINLINE void* win32direct_mmap(size_t size) { + void* ptr; - ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, - PAGE_READWRITE); - if (ptr == 0) return MFAIL; + ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, + PAGE_READWRITE); + if (ptr == 0) + return MFAIL; return ptr; - } /* This function supports releasing coalesed segments */ -static FORCEINLINE int win32munmap(void *ptr, size_t size) { - +static FORCEINLINE int win32munmap(void* ptr, size_t size) { MEMORY_BASIC_INFORMATION minfo; - char *cptr = (char *)ptr; + char* cptr = (char*)ptr; while (size) { - - if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) return -1; + if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) + return -1; if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || minfo.State != MEM_COMMIT || minfo.RegionSize > size) return -1; - if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) return -1; + if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) + return -1; cptr += minfo.RegionSize; size -= minfo.RegionSize; - } return 0; - } - #define MMAP_DEFAULT(s) win32mmap(s) - #define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) - #define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) - #endif /* WIN32 */ -#endif /* HAVE_MMAP */ +#define MMAP_DEFAULT(s) win32mmap(s) +#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) +#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) +#endif /* WIN32 */ +#endif /* HAVE_MMAP */ #if HAVE_MREMAP - #ifndef WIN32 - -static FORCEINLINE void *dlmremap(void *old_address, size_t old_size, - size_t new_size, int flags) { +#ifndef WIN32 - void *result; +static FORCEINLINE void* dlmremap(void* old_address, size_t old_size, size_t new_size, int flags) { + void* result; result = mremap(old_address, old_size, new_size, flags); - if (result == MFAIL) return MFAIL; + if (result == MFAIL) + return MFAIL; return result; - } - #define MREMAP_DEFAULT(addr, osz, nsz, mv) \ - dlmremap((addr), (osz), (nsz), (mv)) - #endif /* WIN32 */ -#endif /* HAVE_MREMAP */ +#define MREMAP_DEFAULT(addr, osz, nsz, mv) dlmremap((addr), (osz), (nsz), (mv)) +#endif /* WIN32 */ +#endif /* HAVE_MREMAP */ /** * Define CALL_MORECORE */ #if HAVE_MORECORE - #ifdef MORECORE - #define CALL_MORECORE(S) MORECORE(S) - #else /* MORECORE */ - #define CALL_MORECORE(S) MORECORE_DEFAULT(S) - #endif /* MORECORE */ -#else /* HAVE_MORECORE */ - #define CALL_MORECORE(S) MFAIL -#endif /* HAVE_MORECORE */ + #ifdef MORECORE + #define CALL_MORECORE(S) MORECORE(S) + #else /* MORECORE */ + #define CALL_MORECORE(S) MORECORE_DEFAULT(S) + #endif /* MORECORE */ +#else /* HAVE_MORECORE */ + #define CALL_MORECORE(S) MFAIL +#endif /* HAVE_MORECORE 
*/ /** * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP */ #if HAVE_MMAP - #define USE_MMAP_BIT (SIZE_T_ONE) - - #ifdef MMAP - #define CALL_MMAP(s) MMAP(s) - #else /* MMAP */ - #define CALL_MMAP(s) MMAP_DEFAULT(s) - #endif /* MMAP */ - #ifdef MUNMAP - #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) - #else /* MUNMAP */ - #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) - #endif /* MUNMAP */ - #ifdef DIRECT_MMAP - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) - #else /* DIRECT_MMAP */ - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) - #endif /* DIRECT_MMAP */ -#else /* HAVE_MMAP */ - #define USE_MMAP_BIT (SIZE_T_ZERO) - - #define MMAP(s) MFAIL - #define MUNMAP(a, s) (-1) - #define DIRECT_MMAP(s) MFAIL - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) - #define CALL_MMAP(s) MMAP(s) - #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) -#endif /* HAVE_MMAP */ + #define USE_MMAP_BIT (SIZE_T_ONE) + + #ifdef MMAP + #define CALL_MMAP(s) MMAP(s) + #else /* MMAP */ + #define CALL_MMAP(s) MMAP_DEFAULT(s) + #endif /* MMAP */ + #ifdef MUNMAP + #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) + #else /* MUNMAP */ + #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) + #endif /* MUNMAP */ + #ifdef DIRECT_MMAP + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) + #else /* DIRECT_MMAP */ + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) + #endif /* DIRECT_MMAP */ +#else /* HAVE_MMAP */ + #define USE_MMAP_BIT (SIZE_T_ZERO) + + #define MMAP(s) MFAIL + #define MUNMAP(a, s) (-1) + #define DIRECT_MMAP(s) MFAIL + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) + #define CALL_MMAP(s) MMAP(s) + #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) +#endif /* HAVE_MMAP */ /** * Define CALL_MREMAP */ #if HAVE_MMAP && HAVE_MREMAP - #ifdef MREMAP - #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) - #else /* MREMAP */ - #define CALL_MREMAP(addr, osz, nsz, mv) \ - MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) - #endif /* MREMAP */ -#else /* HAVE_MMAP && HAVE_MREMAP */ - #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL -#endif /* HAVE_MMAP && HAVE_MREMAP */ + #ifdef MREMAP + #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) + #else /* MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) + #endif /* MREMAP */ +#else /* HAVE_MMAP && HAVE_MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL +#endif /* HAVE_MMAP && HAVE_MREMAP */ /* mstate bit set if continguous morecore disabled or failed */ #define USE_NONCONTIGUOUS_BIT (4U) /* segment bit set in create_mspace_with_base */ -#define EXTERN_BIT (8U) +#define EXTERN_BIT (8U) + /* --------------------------- Lock preliminaries ------------------------ */ @@ -1897,284 +1856,247 @@ static FORCEINLINE void *dlmremap(void *old_address, size_t old_size, */ #if !USE_LOCKS - #define USE_LOCK_BIT (0U) - #define INITIAL_LOCK(l) (0) - #define DESTROY_LOCK(l) (0) - #define ACQUIRE_MALLOC_GLOBAL_LOCK() - #define RELEASE_MALLOC_GLOBAL_LOCK() +#define USE_LOCK_BIT (0U) +#define INITIAL_LOCK(l) (0) +#define DESTROY_LOCK(l) (0) +#define ACQUIRE_MALLOC_GLOBAL_LOCK() +#define RELEASE_MALLOC_GLOBAL_LOCK() #else - #if USE_LOCKS > 1 - /* ----------------------- User-defined locks ------------------------ */ - /* Define your own lock implementation here */ - /* #define INITIAL_LOCK(lk) ... */ - /* #define DESTROY_LOCK(lk) ... */ - /* #define ACQUIRE_LOCK(lk) ... */ - /* #define RELEASE_LOCK(lk) ... */ - /* #define TRY_LOCK(lk) ... */ - /* static MLOCK_T malloc_global_mutex = ... 
*/ - - #elif USE_SPIN_LOCKS - - /* First, define CAS_LOCK and CLEAR_LOCK on ints */ - /* Note CAS_LOCK defined to return 0 on success */ - - #if defined(__GNUC__) && \ - (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) - #define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) - #define CLEAR_LOCK(sl) __sync_lock_release(sl) - - #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) +#if USE_LOCKS > 1 +/* ----------------------- User-defined locks ------------------------ */ +/* Define your own lock implementation here */ +/* #define INITIAL_LOCK(lk) ... */ +/* #define DESTROY_LOCK(lk) ... */ +/* #define ACQUIRE_LOCK(lk) ... */ +/* #define RELEASE_LOCK(lk) ... */ +/* #define TRY_LOCK(lk) ... */ +/* static MLOCK_T malloc_global_mutex = ... */ + +#elif USE_SPIN_LOCKS + +/* First, define CAS_LOCK and CLEAR_LOCK on ints */ +/* Note CAS_LOCK defined to return 0 on success */ + +#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) +#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) +#define CLEAR_LOCK(sl) __sync_lock_release(sl) + +#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) /* Custom spin locks for older gcc on x86 */ static FORCEINLINE int x86_cas_lock(int *sl) { - int ret; int val = 1; int cmp = 0; - __asm__ __volatile__("lock; cmpxchgl %1, %2" - : "=a"(ret) - : "r"(val), "m"(*(sl)), "0"(cmp) - : "memory", "cc"); + __asm__ __volatile__ ("lock; cmpxchgl %1, %2" + : "=a" (ret) + : "r" (val), "m" (*(sl)), "0"(cmp) + : "memory", "cc"); return ret; - } -static FORCEINLINE void x86_clear_lock(int *sl) { - +static FORCEINLINE void x86_clear_lock(int* sl) { assert(*sl != 0); int prev = 0; int ret; - __asm__ __volatile__("lock; xchgl %0, %1" - : "=r"(ret) - : "m"(*(sl)), "0"(prev) - : "memory"); - + __asm__ __volatile__ ("lock; xchgl %0, %1" + : "=r" (ret) + : "m" (*(sl)), "0"(prev) + : "memory"); } - #define CAS_LOCK(sl) x86_cas_lock(sl) - #define CLEAR_LOCK(sl) x86_clear_lock(sl) - - #else /* Win32 MSC */ - #define CAS_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)1) - #define CLEAR_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)0) - - #endif /* ... gcc spins locks ... */ - - /* How to yield for a spin lock */ - #define SPINS_PER_YIELD 63 - #if defined(_MSC_VER) - #define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ - #define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) - #elif defined(__SVR4) && defined(__sun) /* solaris */ - #define SPIN_LOCK_YIELD thr_yield(); - #elif !defined(LACKS_SCHED_H) - #define SPIN_LOCK_YIELD sched_yield(); - #else - #define SPIN_LOCK_YIELD - #endif /* ... yield ... */ - - #if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 +#define CAS_LOCK(sl) x86_cas_lock(sl) +#define CLEAR_LOCK(sl) x86_clear_lock(sl) + +#else /* Win32 MSC */ +#define CAS_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)1) +#define CLEAR_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)0) + +#endif /* ... gcc spins locks ... */ + +/* How to yield for a spin lock */ +#define SPINS_PER_YIELD 63 +#if defined(_MSC_VER) +#define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ +#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) +#elif defined (__SVR4) && defined (__sun) /* solaris */ +#define SPIN_LOCK_YIELD thr_yield(); +#elif !defined(LACKS_SCHED_H) +#define SPIN_LOCK_YIELD sched_yield(); +#else +#define SPIN_LOCK_YIELD +#endif /* ... yield ... 
*/ + +#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 /* Plain spin locks use single word (embedded in malloc_states) */ static int spin_acquire_lock(int *sl) { - int spins = 0; while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) { - - if ((++spins & SPINS_PER_YIELD) == 0) { SPIN_LOCK_YIELD; } - + if ((++spins & SPINS_PER_YIELD) == 0) { + SPIN_LOCK_YIELD; + } } - return 0; - } - #define MLOCK_T int - #define TRY_LOCK(sl) !CAS_LOCK(sl) - #define RELEASE_LOCK(sl) CLEAR_LOCK(sl) - #define ACQUIRE_LOCK(sl) (CAS_LOCK(sl) ? spin_acquire_lock(sl) : 0) - #define INITIAL_LOCK(sl) (*sl = 0) - #define DESTROY_LOCK(sl) (0) +#define MLOCK_T int +#define TRY_LOCK(sl) !CAS_LOCK(sl) +#define RELEASE_LOCK(sl) CLEAR_LOCK(sl) +#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0) +#define INITIAL_LOCK(sl) (*sl = 0) +#define DESTROY_LOCK(sl) (0) static MLOCK_T malloc_global_mutex = 0; - #else /* USE_RECURSIVE_LOCKS */ - /* types for lock owners */ - #ifdef WIN32 - #define THREAD_ID_T DWORD - #define CURRENT_THREAD GetCurrentThreadId() - #define EQ_OWNER(X, Y) ((X) == (Y)) - #else - /* - Note: the following assume that pthread_t is a type that can be - initialized to (casted) zero. If this is not the case, you will need - to somehow redefine these or not use spin locks. - */ - #define THREAD_ID_T pthread_t - #define CURRENT_THREAD pthread_self() - #define EQ_OWNER(X, Y) pthread_equal(X, Y) - #endif +#else /* USE_RECURSIVE_LOCKS */ +/* types for lock owners */ +#ifdef WIN32 +#define THREAD_ID_T DWORD +#define CURRENT_THREAD GetCurrentThreadId() +#define EQ_OWNER(X,Y) ((X) == (Y)) +#else +/* + Note: the following assume that pthread_t is a type that can be + initialized to (casted) zero. If this is not the case, you will need to + somehow redefine these or not use spin locks. 
+*/ +#define THREAD_ID_T pthread_t +#define CURRENT_THREAD pthread_self() +#define EQ_OWNER(X,Y) pthread_equal(X, Y) +#endif struct malloc_recursive_lock { - - int sl; + int sl; unsigned int c; - THREAD_ID_T threadid; - + THREAD_ID_T threadid; }; - #define MLOCK_T struct malloc_recursive_lock -static MLOCK_T malloc_global_mutex = {0, 0, (THREAD_ID_T)0}; +#define MLOCK_T struct malloc_recursive_lock +static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0}; static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) { - assert(lk->sl != 0); - if (--lk->c == 0) { CLEAR_LOCK(&lk->sl); } - + if (--lk->c == 0) { + CLEAR_LOCK(&lk->sl); + } } static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) { - THREAD_ID_T mythreadid = CURRENT_THREAD; - int spins = 0; + int spins = 0; for (;;) { - if (*((volatile int *)(&lk->sl)) == 0) { - if (!CAS_LOCK(&lk->sl)) { - lk->threadid = mythreadid; lk->c = 1; return 0; - } - - } else if (EQ_OWNER(lk->threadid, mythreadid)) { - + } + else if (EQ_OWNER(lk->threadid, mythreadid)) { ++lk->c; return 0; - } - - if ((++spins & SPINS_PER_YIELD) == 0) { SPIN_LOCK_YIELD; } - + if ((++spins & SPINS_PER_YIELD) == 0) { + SPIN_LOCK_YIELD; + } } - } static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) { - THREAD_ID_T mythreadid = CURRENT_THREAD; if (*((volatile int *)(&lk->sl)) == 0) { - if (!CAS_LOCK(&lk->sl)) { - lk->threadid = mythreadid; lk->c = 1; return 1; - } - - } else if (EQ_OWNER(lk->threadid, mythreadid)) { - + } + else if (EQ_OWNER(lk->threadid, mythreadid)) { ++lk->c; return 1; - } - return 0; - } - #define RELEASE_LOCK(lk) recursive_release_lock(lk) - #define TRY_LOCK(lk) recursive_try_lock(lk) - #define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) - #define INITIAL_LOCK(lk) \ - ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) - #define DESTROY_LOCK(lk) (0) - #endif /* USE_RECURSIVE_LOCKS */ - - #elif defined(WIN32) /* Win32 critical sections */ - #define MLOCK_T CRITICAL_SECTION - #define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) - #define RELEASE_LOCK(lk) LeaveCriticalSection(lk) - #define TRY_LOCK(lk) TryEnterCriticalSection(lk) - #define INITIAL_LOCK(lk) \ - (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000 | 4000)) - #define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) - #define NEED_GLOBAL_LOCK_INIT - -static MLOCK_T malloc_global_mutex; +#define RELEASE_LOCK(lk) recursive_release_lock(lk) +#define TRY_LOCK(lk) recursive_try_lock(lk) +#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) +#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) +#define DESTROY_LOCK(lk) (0) +#endif /* USE_RECURSIVE_LOCKS */ + +#elif defined(WIN32) /* Win32 critical sections */ +#define MLOCK_T CRITICAL_SECTION +#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) +#define RELEASE_LOCK(lk) LeaveCriticalSection(lk) +#define TRY_LOCK(lk) TryEnterCriticalSection(lk) +#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000)) +#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) +#define NEED_GLOBAL_LOCK_INIT + +static MLOCK_T malloc_global_mutex; static volatile LONG malloc_global_mutex_status; /* Use spin loop to initialize global lock */ static void init_malloc_global_mutex() { - for (;;) { - long stat = malloc_global_mutex_status; - if (stat > 0) return; + if (stat > 0) + return; /* transition to < 0 while initializing, then to > 0) */ - if (stat == 0 && interlockedcompareexchange(&malloc_global_mutex_status, - (LONG)-1, (LONG)0) == 0) { - + if (stat == 0 && + 
interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) { InitializeCriticalSection(&malloc_global_mutex); interlockedexchange(&malloc_global_mutex_status, (LONG)1); return; - } - SleepEx(0, FALSE); - } - } - #else /* pthreads-based locks */ - #define MLOCK_T pthread_mutex_t - #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) - #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) - #define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) - #define INITIAL_LOCK(lk) pthread_init_lock(lk) - #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) +#else /* pthreads-based locks */ +#define MLOCK_T pthread_mutex_t +#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) +#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) +#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) +#define INITIAL_LOCK(lk) pthread_init_lock(lk) +#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) - #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && \ - defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) +#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) /* Cope with old-style linux recursive lock initialization by adding */ /* skipped internal declaration from pthread.h */ -extern int pthread_mutexattr_setkind_np __P((pthread_mutexattr_t * __attr, - int __kind)); - #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP - #define pthread_mutexattr_settype(x, y) pthread_mutexattr_setkind_np(x, y) - #endif /* USE_RECURSIVE_LOCKS ... */ +extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr, + int __kind)); +#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP +#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y) +#endif /* USE_RECURSIVE_LOCKS ... */ static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER; -static int pthread_init_lock(MLOCK_T *lk) { - +static int pthread_init_lock (MLOCK_T *lk) { pthread_mutexattr_t attr; if (pthread_mutexattr_init(&attr)) return 1; - #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 +#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1; - #endif +#endif if (pthread_mutex_init(lk, &attr)) return 1; if (pthread_mutexattr_destroy(&attr)) return 1; return 0; - } - #endif /* ... lock types ... */ +#endif /* ... lock types ... */ - /* Common code for all lock types */ - #define USE_LOCK_BIT (2U) +/* Common code for all lock types */ +#define USE_LOCK_BIT (2U) - #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK - #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); - #endif +#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK +#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); +#endif - #ifndef RELEASE_MALLOC_GLOBAL_LOCK - #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); - #endif +#ifndef RELEASE_MALLOC_GLOBAL_LOCK +#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); +#endif -#endif /* USE_LOCKS */ +#endif /* USE_LOCKS */ /* ----------------------- Chunk representations ------------------------ */ @@ -2314,56 +2236,56 @@ static int pthread_init_lock(MLOCK_T *lk) { */ struct malloc_chunk { - - size_t prev_foot; /* Size of previous chunk (if free). */ - size_t head; /* Size and inuse bits. */ - struct malloc_chunk *fd; /* double links -- used only if free. */ - struct malloc_chunk *bk; - + size_t prev_foot; /* Size of previous chunk (if free). */ + size_t head; /* Size and inuse bits. 
*/ + struct malloc_chunk* fd; /* double links -- used only if free. */ + struct malloc_chunk* bk; }; typedef struct malloc_chunk mchunk; -typedef struct malloc_chunk *mchunkptr; -typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */ -typedef unsigned int bindex_t; /* Described below */ -typedef unsigned int binmap_t; /* Described below */ -typedef unsigned int flag_t; /* The type of various bit flag sets */ +typedef struct malloc_chunk* mchunkptr; +typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ +typedef unsigned int bindex_t; /* Described below */ +typedef unsigned int binmap_t; /* Described below */ +typedef unsigned int flag_t; /* The type of various bit flag sets */ /* ------------------- Chunks sizes and alignments ----------------------- */ -#define MCHUNK_SIZE (sizeof(mchunk)) +#define MCHUNK_SIZE (sizeof(mchunk)) #if FOOTERS - #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) -#else /* FOOTERS */ - #define CHUNK_OVERHEAD (SIZE_T_SIZE) -#endif /* FOOTERS */ +#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +#else /* FOOTERS */ +#define CHUNK_OVERHEAD (SIZE_T_SIZE) +#endif /* FOOTERS */ /* MMapped chunks need a second word of overhead ... */ #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) /* ... and additional padding for fake next-chunk at foot */ -#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) +#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) /* The smallest size we can malloc is an aligned minimal chunk */ -#define MIN_CHUNK_SIZE ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) +#define MIN_CHUNK_SIZE\ + ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) /* conversion from malloc headers to user pointers, and back */ -#define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES)) -#define mem2chunk(mem) ((mchunkptr)((char *)(mem)-TWO_SIZE_T_SIZES)) +#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) +#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) /* chunk associated with aligned address A */ -#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) +#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) /* Bounds on request (not chunk) sizes. */ -#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) -#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) +#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) +#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) /* pad request bytes into a usable size */ #define pad_request(req) \ - (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) /* pad request, checking for minimum (but not maximum) */ #define request2size(req) \ - (((req) < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(req)) + (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) + /* ------------------ Operations on head and foot fields ----------------- */ @@ -2375,60 +2297,61 @@ typedef unsigned int flag_t; /* The type of various bit flag sets */ FLAG4_BIT is not used by this malloc, but might be useful in extensions. 
*/ -#define PINUSE_BIT (SIZE_T_ONE) -#define CINUSE_BIT (SIZE_T_TWO) -#define FLAG4_BIT (SIZE_T_FOUR) -#define INUSE_BITS (PINUSE_BIT | CINUSE_BIT) -#define FLAG_BITS (PINUSE_BIT | CINUSE_BIT | FLAG4_BIT) +#define PINUSE_BIT (SIZE_T_ONE) +#define CINUSE_BIT (SIZE_T_TWO) +#define FLAG4_BIT (SIZE_T_FOUR) +#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) +#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT) /* Head value for fenceposts */ -#define FENCEPOST_HEAD (INUSE_BITS | SIZE_T_SIZE) +#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) /* extraction of fields from head words */ -#define cinuse(p) ((p)->head & CINUSE_BIT) -#define pinuse(p) ((p)->head & PINUSE_BIT) -#define flag4inuse(p) ((p)->head & FLAG4_BIT) -#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) -#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) +#define cinuse(p) ((p)->head & CINUSE_BIT) +#define pinuse(p) ((p)->head & PINUSE_BIT) +#define flag4inuse(p) ((p)->head & FLAG4_BIT) +#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) +#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) -#define chunksize(p) ((p)->head & ~(FLAG_BITS)) +#define chunksize(p) ((p)->head & ~(FLAG_BITS)) -#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) -#define set_flag4(p) ((p)->head |= FLAG4_BIT) -#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) +#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) +#define set_flag4(p) ((p)->head |= FLAG4_BIT) +#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) /* Treat space at ptr +/- offset as a chunk */ -#define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s))) -#define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s))) +#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) +#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) /* Ptr to next or previous physical malloc_chunk. */ -#define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~FLAG_BITS))) -#define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot))) +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS))) +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) /* extract next chunk's pinuse bit */ -#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) +#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) /* Get/set size at footer */ -#define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot) -#define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s)) +#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) +#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) /* Set size, pinuse bit, and foot */ -#define set_size_and_pinuse_of_free_chunk(p, s) \ - ((p)->head = (s | PINUSE_BIT), set_foot(p, s)) +#define set_size_and_pinuse_of_free_chunk(p, s)\ + ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) /* Set size, pinuse bit, foot, and clear next pinuse */ -#define set_free_with_pinuse(p, s, n) \ +#define set_free_with_pinuse(p, s, n)\ (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) /* Get the internal overhead associated with chunk p */ -#define overhead_for(p) (is_mmapped(p) ? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) +#define overhead_for(p)\ + (is_mmapped(p)? 
MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) /* Return true if malloced space is not necessarily cleared */ #if MMAP_CLEARS - #define calloc_must_clear(p) (!is_mmapped(p)) -#else /* MMAP_CLEARS */ - #define calloc_must_clear(p) (1) -#endif /* MMAP_CLEARS */ +#define calloc_must_clear(p) (!is_mmapped(p)) +#else /* MMAP_CLEARS */ +#define calloc_must_clear(p) (1) +#endif /* MMAP_CLEARS */ /* ---------------------- Overlaid data structures ----------------------- */ @@ -2522,25 +2445,23 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct malloc_tree_chunk { - /* The first four fields must be compatible with malloc_chunk */ size_t prev_foot; size_t head; - struct malloc_tree_chunk *fd; - struct malloc_tree_chunk *bk; + struct malloc_tree_chunk* fd; + struct malloc_tree_chunk* bk; - struct malloc_tree_chunk *child[2]; - struct malloc_tree_chunk *parent; + struct malloc_tree_chunk* child[2]; + struct malloc_tree_chunk* parent; bindex_t index; - }; typedef struct malloc_tree_chunk tchunk; -typedef struct malloc_tree_chunk *tchunkptr; -typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */ +typedef struct malloc_tree_chunk* tchunkptr; +typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ /* A little helper macro for trees */ -#define leftmost_child(t) ((t)->child[0] != 0 ? (t)->child[0] : (t)->child[1]) +#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) /* ----------------------------- Segments -------------------------------- */ @@ -2600,19 +2521,17 @@ typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */ */ struct malloc_segment { - - char * base; /* base address */ - size_t size; /* allocated size */ - struct malloc_segment *next; /* ptr to next segment */ - flag_t sflags; /* mmap and extern flag */ - + char* base; /* base address */ + size_t size; /* allocated size */ + struct malloc_segment* next; /* ptr to next segment */ + flag_t sflags; /* mmap and extern flag */ }; -#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) -#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) +#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) +#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) typedef struct malloc_segment msegment; -typedef struct malloc_segment *msegmentptr; +typedef struct malloc_segment* msegmentptr; /* ---------------------------- malloc_state ----------------------------- */ @@ -2702,43 +2621,41 @@ typedef struct malloc_segment *msegmentptr; */ /* Bin types, widths and sizes */ -#define NSMALLBINS (32U) -#define NTREEBINS (32U) -#define SMALLBIN_SHIFT (3U) -#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) -#define TREEBIN_SHIFT (8U) -#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) -#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) +#define NSMALLBINS (32U) +#define NTREEBINS (32U) +#define SMALLBIN_SHIFT (3U) +#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) +#define TREEBIN_SHIFT (8U) +#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) +#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) struct malloc_state { - - binmap_t smallmap; - binmap_t treemap; - size_t dvsize; - size_t topsize; - char * least_addr; - mchunkptr dv; - mchunkptr top; - size_t trim_check; - size_t release_checks; - size_t magic; - mchunkptr smallbins[(NSMALLBINS + 1) * 2]; - tbinptr treebins[NTREEBINS]; - size_t footprint; - size_t max_footprint; - size_t footprint_limit; /* 
zero means no limit */ - flag_t mflags; + binmap_t smallmap; + binmap_t treemap; + size_t dvsize; + size_t topsize; + char* least_addr; + mchunkptr dv; + mchunkptr top; + size_t trim_check; + size_t release_checks; + size_t magic; + mchunkptr smallbins[(NSMALLBINS+1)*2]; + tbinptr treebins[NTREEBINS]; + size_t footprint; + size_t max_footprint; + size_t footprint_limit; /* zero means no limit */ + flag_t mflags; #if USE_LOCKS - MLOCK_T mutex; /* locate lock among fields that rarely change */ -#endif /* USE_LOCKS */ - msegment seg; - void * extp; /* Unused but available for extensions */ - size_t exts; - + MLOCK_T mutex; /* locate lock among fields that rarely change */ +#endif /* USE_LOCKS */ + msegment seg; + void* extp; /* Unused but available for extensions */ + size_t exts; }; -typedef struct malloc_state *mstate; +typedef struct malloc_state* mstate; /* ------------- Global malloc_state and malloc_params ------------------- */ @@ -2750,14 +2667,12 @@ typedef struct malloc_state *mstate; */ struct malloc_params { - size_t magic; size_t page_size; size_t granularity; size_t mmap_threshold; size_t trim_threshold; flag_t default_mflags; - }; static struct malloc_params mparams; @@ -2769,108 +2684,106 @@ static struct malloc_params mparams; /* The global malloc_state used for all non-"mspace" calls */ static struct malloc_state _gm_; - #define gm (&_gm_) - #define is_global(M) ((M) == &_gm_) +#define gm (&_gm_) +#define is_global(M) ((M) == &_gm_) -#endif /* !ONLY_MSPACES */ +#endif /* !ONLY_MSPACES */ -#define is_initialized(M) ((M)->top != 0) +#define is_initialized(M) ((M)->top != 0) /* -------------------------- system alloc setup ------------------------- */ /* Operations on mflags */ -#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) -#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) +#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) +#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) #if USE_LOCKS - #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) +#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) #else - #define disable_lock(M) +#define disable_lock(M) #endif -#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) -#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) +#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) +#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) #if HAVE_MMAP - #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) +#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) #else - #define disable_mmap(M) +#define disable_mmap(M) #endif -#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) -#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) +#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) +#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) -#define set_lock(M, L) \ - ((M)->mflags = \ - (L) ? 
((M)->mflags | USE_LOCK_BIT) : ((M)->mflags & ~USE_LOCK_BIT)) +#define set_lock(M,L)\ + ((M)->mflags = (L)?\ + ((M)->mflags | USE_LOCK_BIT) :\ + ((M)->mflags & ~USE_LOCK_BIT)) /* page-align a size */ -#define page_align(S) \ - (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE)) +#define page_align(S)\ + (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE)) /* granularity-align a size */ -#define granularity_align(S) \ - (((S) + (mparams.granularity - SIZE_T_ONE)) & \ - ~(mparams.granularity - SIZE_T_ONE)) +#define granularity_align(S)\ + (((S) + (mparams.granularity - SIZE_T_ONE))\ + & ~(mparams.granularity - SIZE_T_ONE)) + /* For mmap, use granularity alignment on windows, else page-align */ #ifdef WIN32 - #define mmap_align(S) granularity_align(S) +#define mmap_align(S) granularity_align(S) #else - #define mmap_align(S) page_align(S) +#define mmap_align(S) page_align(S) #endif /* For sys_alloc, enough padding to ensure can malloc request on success */ #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) -#define is_page_aligned(S) \ - (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) -#define is_granularity_aligned(S) \ - (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) +#define is_page_aligned(S)\ + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) +#define is_granularity_aligned(S)\ + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) /* True if segment S holds address A */ -#define segment_holds(S, A) \ - ((char *)(A) >= S->base && (char *)(A) < S->base + S->size) +#define segment_holds(S, A)\ + ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) /* Return segment holding given address */ -static msegmentptr segment_holding(mstate m, char *addr) { - +static msegmentptr segment_holding(mstate m, char* addr) { msegmentptr sp = &m->seg; for (;;) { - - if (addr >= sp->base && addr < sp->base + sp->size) return sp; - if ((sp = sp->next) == 0) return 0; - + if (addr >= sp->base && addr < sp->base + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; } - } /* Return true if segment contains a segment link */ static int has_segment_link(mstate m, msegmentptr ss) { - msegmentptr sp = &m->seg; for (;;) { - - if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size) return 1; - if ((sp = sp->next) == 0) return 0; - + if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) + return 1; + if ((sp = sp->next) == 0) + return 0; } - } #ifndef MORECORE_CANNOT_TRIM - #define should_trim(M, s) ((s) > (M)->trim_check) -#else /* MORECORE_CANNOT_TRIM */ - #define should_trim(M, s) (0) -#endif /* MORECORE_CANNOT_TRIM */ +#define should_trim(M,s) ((s) > (M)->trim_check) +#else /* MORECORE_CANNOT_TRIM */ +#define should_trim(M,s) (0) +#endif /* MORECORE_CANNOT_TRIM */ /* TOP_FOOT_SIZE is padding at the end of a segment, including space that may be needed to place segment records and fenceposts when new noncontiguous segments are added. */ -#define TOP_FOOT_SIZE \ - (align_offset(chunk2mem(0)) + pad_request(sizeof(struct malloc_segment)) + \ - MIN_CHUNK_SIZE) +#define TOP_FOOT_SIZE\ + (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + /* ------------------------------- Hooks -------------------------------- */ @@ -2881,24 +2794,19 @@ static int has_segment_link(mstate m, msegmentptr ss) { */ #if USE_LOCKS - #define PREACTION(M) ((use_lock(M)) ? 
ACQUIRE_LOCK(&(M)->mutex) : 0) - #define POSTACTION(M) \ - { \ - \ - if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); \ - \ - } -#else /* USE_LOCKS */ +#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) +#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } +#else /* USE_LOCKS */ - #ifndef PREACTION - #define PREACTION(M) (0) - #endif /* PREACTION */ +#ifndef PREACTION +#define PREACTION(M) (0) +#endif /* PREACTION */ - #ifndef POSTACTION - #define POSTACTION(M) - #endif /* POSTACTION */ +#ifndef POSTACTION +#define POSTACTION(M) +#endif /* POSTACTION */ -#endif /* USE_LOCKS */ +#endif /* USE_LOCKS */ /* CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. @@ -2916,180 +2824,164 @@ int malloc_corruption_error_count; /* default corruption action */ static void reset_on_error(mstate m); - #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) - #define USAGE_ERROR_ACTION(m, p) +#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) +#define USAGE_ERROR_ACTION(m, p) -#else /* PROCEED_ON_ERROR */ +#else /* PROCEED_ON_ERROR */ - #ifndef CORRUPTION_ERROR_ACTION - #define CORRUPTION_ERROR_ACTION(m) ABORT - #endif /* CORRUPTION_ERROR_ACTION */ +#ifndef CORRUPTION_ERROR_ACTION +#define CORRUPTION_ERROR_ACTION(m) ABORT +#endif /* CORRUPTION_ERROR_ACTION */ - #ifndef USAGE_ERROR_ACTION - #define USAGE_ERROR_ACTION(m, p) ABORT - #endif /* USAGE_ERROR_ACTION */ +#ifndef USAGE_ERROR_ACTION +#define USAGE_ERROR_ACTION(m,p) ABORT +#endif /* USAGE_ERROR_ACTION */ + +#endif /* PROCEED_ON_ERROR */ -#endif /* PROCEED_ON_ERROR */ /* -------------------------- Debugging setup ---------------------------- */ -#if !DEBUG +#if ! DEBUG - #define check_free_chunk(M, P) - #define check_inuse_chunk(M, P) - #define check_malloced_chunk(M, P, N) - #define check_mmapped_chunk(M, P) - #define check_malloc_state(M) - #define check_top_chunk(M, P) +#define check_free_chunk(M,P) +#define check_inuse_chunk(M,P) +#define check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) +#define check_malloc_state(M) +#define check_top_chunk(M,P) -#else /* DEBUG */ - #define check_free_chunk(M, P) do_check_free_chunk(M, P) - #define check_inuse_chunk(M, P) do_check_inuse_chunk(M, P) - #define check_top_chunk(M, P) do_check_top_chunk(M, P) - #define check_malloced_chunk(M, P, N) do_check_malloced_chunk(M, P, N) - #define check_mmapped_chunk(M, P) do_check_mmapped_chunk(M, P) - #define check_malloc_state(M) do_check_malloc_state(M) +#else /* DEBUG */ +#define check_free_chunk(M,P) do_check_free_chunk(M,P) +#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) +#define check_top_chunk(M,P) do_check_top_chunk(M,P) +#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) +#define check_malloc_state(M) do_check_malloc_state(M) static void do_check_any_chunk(mstate m, mchunkptr p); static void do_check_top_chunk(mstate m, mchunkptr p); static void do_check_mmapped_chunk(mstate m, mchunkptr p); static void do_check_inuse_chunk(mstate m, mchunkptr p); static void do_check_free_chunk(mstate m, mchunkptr p); -static void do_check_malloced_chunk(mstate m, void *mem, size_t s); +static void do_check_malloced_chunk(mstate m, void* mem, size_t s); static void do_check_tree(mstate m, tchunkptr t); static void do_check_treebin(mstate m, bindex_t i); static void do_check_smallbin(mstate m, bindex_t i); static void do_check_malloc_state(mstate m); static int bin_find(mstate m, mchunkptr x); static size_t traverse_and_check(mstate m); 
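[Editor's note -- simplified sketch, not part of the patch.] PREACTION/POSTACTION above are the per-call lock hooks: the public routines later in this file wrap their bodies in "if (!PREACTION(M)) { ... POSTACTION(M); }", so locking costs nothing when USE_LOCKS is 0 or the state's lock bit is clear, and the DEBUG-only check_* macros slot into the same guarded region. The skeleton below shows only that shape; dspace_op() and its body are placeholders, not dlmalloc code.

/* Editorial sketch of the entry-point pattern; dspace_op() is a
   placeholder name, not a dlmalloc function. */
void* dspace_op(mstate m, size_t bytes) {
  void* mem = 0;
  (void)bytes;                /* placeholder parameter                     */
  if (!PREACTION(m)) {        /* returns 0 once the lock (if any) is held  */
    check_malloc_state(m);    /* expands to nothing unless DEBUG is set    */
    /* ... allocate or release chunks while the lock is held ... */
    POSTACTION(m);            /* releases the lock; empty when !USE_LOCKS  */
  }
  return mem;
}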
-#endif /* DEBUG */ +#endif /* DEBUG */ /* ---------------------------- Indexing Bins ---------------------------- */ -#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) -#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) -#define small_index2size(i) ((i) << SMALLBIN_SHIFT) -#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) +#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) +#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) +#define small_index2size(i) ((i) << SMALLBIN_SHIFT) +#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) /* addressing by index. See above about smallbin repositioning */ -#define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i) << 1]))) -#define treebin_at(M, i) (&((M)->treebins[i])) +#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) +#define treebin_at(M,i) (&((M)->treebins[i])) /* assign tree index for size S to variable I. Use x86 asm if possible */ #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) - #define compute_tree_index(S, I) \ - { \ - \ - unsigned int X = S >> TREEBIN_SHIFT; \ - if (X == 0) \ - I = 0; \ - else if (X > 0xFFFF) \ - I = NTREEBINS - 1; \ - else { \ - \ - unsigned int K = (unsigned)sizeof(X) * __CHAR_BIT__ - 1 - \ - (unsigned)__builtin_clz(X); \ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ - \ - } \ - \ - } +#define compute_tree_index(S, I)\ +{\ + unsigned int X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} -#elif defined(__INTEL_COMPILER) - #define compute_tree_index(S, I) \ - { \ - \ - size_t X = S >> TREEBIN_SHIFT; \ - if (X == 0) \ - I = 0; \ - else if (X > 0xFFFF) \ - I = NTREEBINS - 1; \ - else { \ - \ - unsigned int K = _bit_scan_reverse(X); \ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ - \ - } \ - \ - } +#elif defined (__INTEL_COMPILER) +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K = _bit_scan_reverse (X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} -#elif defined(_MSC_VER) && _MSC_VER >= 1300 - #define compute_tree_index(S, I) \ - { \ - \ - size_t X = S >> TREEBIN_SHIFT; \ - if (X == 0) \ - I = 0; \ - else if (X > 0xFFFF) \ - I = NTREEBINS - 1; \ - else { \ - \ - unsigned int K; \ - _BitScanReverse((DWORD *)&K, (DWORD)X); \ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ - \ - } \ - \ - } +#elif defined(_MSC_VER) && _MSC_VER>=1300 +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K;\ + _BitScanReverse((DWORD *) &K, (DWORD) X);\ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} -#else /* GNUC */ - #define compute_tree_index(S, I) \ - { \ - \ - size_t X = S >> TREEBIN_SHIFT; \ - if (X == 0) \ - I = 0; \ - else if (X > 0xFFFF) \ - I = NTREEBINS - 1; \ - else { \ - \ - unsigned int Y = (unsigned int)X; \ - unsigned int N = ((Y - 0x100) >> 16) & 8; \ - unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4; \ - N += K; \ - N += K = (((Y <<= K) - 0x4000) >> 16) & 2; \ - K = 14 - N + ((Y <<= K) >> 15); \ - I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1)); \ - \ - } \ - \ - 
} -#endif /* GNUC */ +#else /* GNUC */ +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int Y = (unsigned int)X;\ + unsigned int N = ((Y - 0x100) >> 16) & 8;\ + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ + N += K;\ + N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ + K = 14 - N + ((Y <<= K) >> 15);\ + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ + }\ +} +#endif /* GNUC */ /* Bit representing maximum resolved size in a treebin at i */ #define bit_for_tree_index(i) \ - (i == NTREEBINS - 1) ? (SIZE_T_BITSIZE - 1) : (((i) >> 1) + TREEBIN_SHIFT - 2) + (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) /* Shift placing maximum resolved bit in a treebin at i as sign bit */ #define leftshift_for_tree_index(i) \ - ((i == NTREEBINS - 1) \ - ? 0 \ - : ((SIZE_T_BITSIZE - SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + ((i == NTREEBINS-1)? 0 : \ + ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) /* The size of the smallest chunk held in bin with index i */ -#define minsize_for_tree_index(i) \ - ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ - (((size_t)((i)&SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) +#define minsize_for_tree_index(i) \ + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ + (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) + /* ------------------------ Operations on bin maps ----------------------- */ /* bit corresponding to given index */ -#define idx2bit(i) ((binmap_t)(1) << (i)) +#define idx2bit(i) ((binmap_t)(1) << (i)) /* Mark/Clear bits with given index */ -#define mark_smallmap(M, i) ((M)->smallmap |= idx2bit(i)) -#define clear_smallmap(M, i) ((M)->smallmap &= ~idx2bit(i)) -#define smallmap_is_marked(M, i) ((M)->smallmap & idx2bit(i)) +#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) +#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) +#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) -#define mark_treemap(M, i) ((M)->treemap |= idx2bit(i)) -#define clear_treemap(M, i) ((M)->treemap &= ~idx2bit(i)) -#define treemap_is_marked(M, i) ((M)->treemap & idx2bit(i)) +#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) +#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) +#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) /* isolate the least set bit of a bitmap */ -#define least_bit(x) ((x) & -(x)) +#define least_bit(x) ((x) & -(x)) /* mask with all bits to left of least bit of x on */ -#define left_bits(x) ((x << 1) | -(x << 1)) +#define left_bits(x) ((x<<1) | -(x<<1)) /* mask with all bits to left of or equal to least bit of x on */ #define same_or_left_bits(x) ((x) | -(x)) @@ -3097,58 +2989,46 @@ static size_t traverse_and_check(mstate m); /* index corresponding to given bit. 
Use x86 asm if possible */ #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) - #define compute_bit2idx(X, I) \ - { \ - \ - unsigned int J; \ - J = __builtin_ctz(X); \ - I = (bindex_t)J; \ - \ - } +#define compute_bit2idx(X, I)\ +{\ + unsigned int J;\ + J = __builtin_ctz(X); \ + I = (bindex_t)J;\ +} -#elif defined(__INTEL_COMPILER) - #define compute_bit2idx(X, I) \ - { \ - \ - unsigned int J; \ - J = _bit_scan_forward(X); \ - I = (bindex_t)J; \ - \ - } +#elif defined (__INTEL_COMPILER) +#define compute_bit2idx(X, I)\ +{\ + unsigned int J;\ + J = _bit_scan_forward (X); \ + I = (bindex_t)J;\ +} -#elif defined(_MSC_VER) && _MSC_VER >= 1300 - #define compute_bit2idx(X, I) \ - { \ - \ - unsigned int J; \ - _BitScanForward((DWORD *)&J, X); \ - I = (bindex_t)J; \ - \ - } +#elif defined(_MSC_VER) && _MSC_VER>=1300 +#define compute_bit2idx(X, I)\ +{\ + unsigned int J;\ + _BitScanForward((DWORD *) &J, X);\ + I = (bindex_t)J;\ +} #elif USE_BUILTIN_FFS - #define compute_bit2idx(X, I) I = ffs(X) - 1 +#define compute_bit2idx(X, I) I = ffs(X)-1 #else - #define compute_bit2idx(X, I) \ - { \ - \ - unsigned int Y = X - 1; \ - unsigned int K = Y >> (16 - 4) & 16; \ - unsigned int N = K; \ - Y >>= K; \ - N += K = Y >> (8 - 3) & 8; \ - Y >>= K; \ - N += K = Y >> (4 - 2) & 4; \ - Y >>= K; \ - N += K = Y >> (2 - 1) & 2; \ - Y >>= K; \ - N += K = Y >> (1 - 0) & 1; \ - Y >>= K; \ - I = (bindex_t)(N + Y); \ - \ - } -#endif /* GNUC */ +#define compute_bit2idx(X, I)\ +{\ + unsigned int Y = X - 1;\ + unsigned int K = Y >> (16-4) & 16;\ + unsigned int N = K; Y >>= K;\ + N += K = Y >> (8-3) & 8; Y >>= K;\ + N += K = Y >> (4-2) & 4; Y >>= K;\ + N += K = Y >> (2-1) & 2; Y >>= K;\ + N += K = Y >> (1-0) & 1; Y >>= K;\ + I = (bindex_t)(N + Y);\ +} +#endif /* GNUC */ + /* ----------------------- Runtime Check Support ------------------------- */ @@ -3179,141 +3059,121 @@ static size_t traverse_and_check(mstate m); */ #if !INSECURE - /* Check if address a is at least as high as any from MORECORE or MMAP */ - #define ok_address(M, a) ((char *)(a) >= (M)->least_addr) - /* Check if address of next chunk n is higher than base chunk p */ - #define ok_next(p, n) ((char *)(p) < (char *)(n)) - /* Check if p has inuse status */ - #define ok_inuse(p) is_inuse(p) - /* Check if p has its pinuse bit on */ - #define ok_pinuse(p) pinuse(p) - -#else /* !INSECURE */ - #define ok_address(M, a) (1) - #define ok_next(b, n) (1) - #define ok_inuse(p) (1) - #define ok_pinuse(p) (1) -#endif /* !INSECURE */ +/* Check if address a is at least as high as any from MORECORE or MMAP */ +#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) +/* Check if address of next chunk n is higher than base chunk p */ +#define ok_next(p, n) ((char*)(p) < (char*)(n)) +/* Check if p has inuse status */ +#define ok_inuse(p) is_inuse(p) +/* Check if p has its pinuse bit on */ +#define ok_pinuse(p) pinuse(p) + +#else /* !INSECURE */ +#define ok_address(M, a) (1) +#define ok_next(b, n) (1) +#define ok_inuse(p) (1) +#define ok_pinuse(p) (1) +#endif /* !INSECURE */ #if (FOOTERS && !INSECURE) - /* Check if (alleged) mstate m has expected magic field */ - #define ok_magic(M) ((M)->magic == mparams.magic) -#else /* (FOOTERS && !INSECURE) */ - #define ok_magic(M) (1) -#endif /* (FOOTERS && !INSECURE) */ +/* Check if (alleged) mstate m has expected magic field */ +#define ok_magic(M) ((M)->magic == mparams.magic) +#else /* (FOOTERS && !INSECURE) */ +#define ok_magic(M) (1) +#endif /* (FOOTERS && !INSECURE) */ /* In gcc, use __builtin_expect to minimize impact 
of checks */ #if !INSECURE - #if defined(__GNUC__) && __GNUC__ >= 3 - #define RTCHECK(e) __builtin_expect(e, 1) - #else /* GNUC */ - #define RTCHECK(e) (e) - #endif /* GNUC */ -#else /* !INSECURE */ - #define RTCHECK(e) (1) -#endif /* !INSECURE */ +#if defined(__GNUC__) && __GNUC__ >= 3 +#define RTCHECK(e) __builtin_expect(e, 1) +#else /* GNUC */ +#define RTCHECK(e) (e) +#endif /* GNUC */ +#else /* !INSECURE */ +#define RTCHECK(e) (1) +#endif /* !INSECURE */ /* macros to set up inuse chunks with or without footers */ #if !FOOTERS - #define mark_inuse_foot(M, p, s) +#define mark_inuse_foot(M,p,s) - /* Macros for setting head/foot of non-mmapped chunks */ +/* Macros for setting head/foot of non-mmapped chunks */ - /* Set cinuse bit and pinuse bit of next chunk */ - #define set_inuse(M, p, s) \ - ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \ - ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) +/* Set cinuse bit and pinuse bit of next chunk */ +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) - /* Set cinuse and pinuse of this chunk and pinuse of next chunk */ - #define set_inuse_and_pinuse(M, p, s) \ - ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \ - ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) +/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) - /* Set size, cinuse and pinuse bit of this chunk */ - #define set_size_and_pinuse_of_inuse_chunk(M, p, s) \ - ((p)->head = (s | PINUSE_BIT | CINUSE_BIT)) +/* Set size, cinuse and pinuse bit of this chunk */ +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) -#else /* FOOTERS */ +#else /* FOOTERS */ - /* Set foot of inuse chunk to be xor of mstate and seed */ - #define mark_inuse_foot(M, p, s) \ - (((mchunkptr)((char *)(p) + (s)))->prev_foot = \ - ((size_t)(M) ^ mparams.magic)) +/* Set foot of inuse chunk to be xor of mstate and seed */ +#define mark_inuse_foot(M,p,s)\ + (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) - #define get_mstate_for(p) \ - ((mstate)(((mchunkptr)((char *)(p) + (chunksize(p))))->prev_foot ^ \ - mparams.magic)) +#define get_mstate_for(p)\ + ((mstate)(((mchunkptr)((char*)(p) +\ + (chunksize(p))))->prev_foot ^ mparams.magic)) - #define set_inuse(M, p, s) \ - ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \ - (((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT), \ - mark_inuse_foot(M, p, s)) +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M,p,s)) - #define set_inuse_and_pinuse(M, p, s) \ - ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \ - (((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT), \ - mark_inuse_foot(M, p, s)) +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ + mark_inuse_foot(M,p,s)) - #define set_size_and_pinuse_of_inuse_chunk(M, p, s) \ - ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), mark_inuse_foot(M, p, s)) +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + mark_inuse_foot(M, p, s)) -#endif /* !FOOTERS */ +#endif /* !FOOTERS */ /* ---------------------------- setting mparams -------------------------- */ #if 
LOCK_AT_FORK -static void pre_fork(void) { - - ACQUIRE_LOCK(&(gm)->mutex); - -} - -static void post_fork_parent(void) { - - RELEASE_LOCK(&(gm)->mutex); - -} - -static void post_fork_child(void) { - - INITIAL_LOCK(&(gm)->mutex); - -} - -#endif /* LOCK_AT_FORK */ +static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); } +static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); } +static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); } +#endif /* LOCK_AT_FORK */ /* Initialize mparams */ static int init_mparams(void) { - #ifdef NEED_GLOBAL_LOCK_INIT - if (malloc_global_mutex_status <= 0) init_malloc_global_mutex(); + if (malloc_global_mutex_status <= 0) + init_malloc_global_mutex(); #endif ACQUIRE_MALLOC_GLOBAL_LOCK(); if (mparams.magic == 0) { - size_t magic; size_t psize; size_t gsize; #ifndef WIN32 psize = malloc_getpagesize; - gsize = ((DEFAULT_GRANULARITY != 0) ? DEFAULT_GRANULARITY : psize); -#else /* WIN32 */ + gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize); +#else /* WIN32 */ { - SYSTEM_INFO system_info; GetSystemInfo(&system_info); psize = system_info.dwPageSize; - gsize = - ((DEFAULT_GRANULARITY != 0) ? DEFAULT_GRANULARITY - : system_info.dwAllocationGranularity); - + gsize = ((DEFAULT_GRANULARITY != 0)? + DEFAULT_GRANULARITY : system_info.dwAllocationGranularity); } - -#endif /* WIN32 */ +#endif /* WIN32 */ /* Sanity-check configuration: size_t must be unsigned and as wide as pointer type. @@ -3321,23 +3181,24 @@ static int init_mparams(void) { alignment must be at least 8. Alignment, min chunk size, and page size must all be powers of 2. */ - if ((sizeof(size_t) != sizeof(char *)) || (MAX_SIZE_T < MIN_CHUNK_SIZE) || - (sizeof(int) < 4) || (MALLOC_ALIGNMENT < (size_t)8U) || - ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) || - ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) || - ((gsize & (gsize - SIZE_T_ONE)) != 0) || - ((psize & (psize - SIZE_T_ONE)) != 0)) + if ((sizeof(size_t) != sizeof(char*)) || + (MAX_SIZE_T < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || + (MALLOC_ALIGNMENT < (size_t)8U) || + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || + ((gsize & (gsize-SIZE_T_ONE)) != 0) || + ((psize & (psize-SIZE_T_ONE)) != 0)) ABORT; mparams.granularity = gsize; mparams.page_size = psize; mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; #if MORECORE_CONTIGUOUS - mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT; -#else /* MORECORE_CONTIGUOUS */ - mparams.default_mflags = - USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT; -#endif /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; +#else /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; +#endif /* MORECORE_CONTIGUOUS */ #if !ONLY_MSPACES /* Set up lock for main malloc area */ @@ -3349,69 +3210,57 @@ static int init_mparams(void) { #endif { - #if USE_DEV_RANDOM - int fd; + int fd; unsigned char buf[sizeof(size_t)]; /* Try to use /dev/urandom, else fall back on using time */ if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && read(fd, buf, sizeof(buf)) == sizeof(buf)) { - - magic = *((size_t *)buf); + magic = *((size_t *) buf); close(fd); - - } else - -#endif /* USE_DEV_RANDOM */ + } + else +#endif /* USE_DEV_RANDOM */ #ifdef WIN32 - magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); + magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); #elif defined(LACKS_TIME_H) magic = 
(size_t)&magic ^ (size_t)0x55555555U; #else magic = (size_t)(time(0) ^ (size_t)0x55555555U); #endif - magic |= (size_t)8U; /* ensure nonzero */ - magic &= ~(size_t)7U; /* improve chances of fault for bad values */ + magic |= (size_t)8U; /* ensure nonzero */ + magic &= ~(size_t)7U; /* improve chances of fault for bad values */ /* Until memory modes commonly available, use volatile-write */ (*(volatile size_t *)(&(mparams.magic))) = magic; - } - } RELEASE_MALLOC_GLOBAL_LOCK(); return 1; - } /* support for mallopt */ static int change_mparam(int param_number, int value) { - size_t val; ensure_initialization(); - val = (value == -1) ? MAX_SIZE_T : (size_t)value; - switch (param_number) { - - case M_TRIM_THRESHOLD: - mparams.trim_threshold = val; - return 1; - case M_GRANULARITY: - if (val >= mparams.page_size && ((val & (val - 1)) == 0)) { - - mparams.granularity = val; - return 1; - - } else - - return 0; - case M_MMAP_THRESHOLD: - mparams.mmap_threshold = val; + val = (value == -1)? MAX_SIZE_T : (size_t)value; + switch(param_number) { + case M_TRIM_THRESHOLD: + mparams.trim_threshold = val; + return 1; + case M_GRANULARITY: + if (val >= mparams.page_size && ((val & (val-1)) == 0)) { + mparams.granularity = val; return 1; - default: + } + else return 0; - + case M_MMAP_THRESHOLD: + mparams.mmap_threshold = val; + return 1; + default: + return 0; } - } #if DEBUG @@ -3419,118 +3268,100 @@ static int change_mparam(int param_number, int value) { /* Check properties of any chunk, whether free, inuse, mmapped etc */ static void do_check_any_chunk(mstate m, mchunkptr p) { - assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); - } /* Check properties of top chunk */ static void do_check_top_chunk(mstate m, mchunkptr p) { - - msegmentptr sp = segment_holding(m, (char *)p); - size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */ + msegmentptr sp = segment_holding(m, (char*)p); + size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! 
*/ assert(sp != 0); assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); assert(sz == m->topsize); assert(sz > 0); - assert(sz == ((sp->base + sp->size) - (char *)p) - TOP_FOOT_SIZE); + assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); assert(pinuse(p)); assert(!pinuse(chunk_plus_offset(p, sz))); - } /* Check properties of (inuse) mmapped chunks */ static void do_check_mmapped_chunk(mstate m, mchunkptr p) { - - size_t sz = chunksize(p); + size_t sz = chunksize(p); size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD); assert(is_mmapped(p)); assert(use_mmap(m)); assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); assert(!is_small(sz)); - assert((len & (mparams.page_size - SIZE_T_ONE)) == 0); + assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); - assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0); - + assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); } /* Check properties of inuse chunks */ static void do_check_inuse_chunk(mstate m, mchunkptr p) { - do_check_any_chunk(m, p); assert(is_inuse(p)); assert(next_pinuse(p)); /* If not pinuse and not mmapped, previous chunk has OK offset */ assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); - if (is_mmapped(p)) do_check_mmapped_chunk(m, p); - + if (is_mmapped(p)) + do_check_mmapped_chunk(m, p); } /* Check properties of free chunks */ static void do_check_free_chunk(mstate m, mchunkptr p) { - - size_t sz = chunksize(p); + size_t sz = chunksize(p); mchunkptr next = chunk_plus_offset(p, sz); do_check_any_chunk(m, p); assert(!is_inuse(p)); assert(!next_pinuse(p)); - assert(!is_mmapped(p)); + assert (!is_mmapped(p)); if (p != m->dv && p != m->top) { - if (sz >= MIN_CHUNK_SIZE) { - assert((sz & CHUNK_ALIGN_MASK) == 0); assert(is_aligned(chunk2mem(p))); assert(next->prev_foot == sz); assert(pinuse(p)); - assert(next == m->top || is_inuse(next)); + assert (next == m->top || is_inuse(next)); assert(p->fd->bk == p); assert(p->bk->fd == p); - - } else /* markers are always of size SIZE_T_SIZE */ - + } + else /* markers are always of size SIZE_T_SIZE */ assert(sz == SIZE_T_SIZE); - } - } /* Check properties of malloced chunks at the point they are malloced */ -static void do_check_malloced_chunk(mstate m, void *mem, size_t s) { - +static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { if (mem != 0) { - mchunkptr p = mem2chunk(mem); - size_t sz = p->head & ~INUSE_BITS; + size_t sz = p->head & ~INUSE_BITS; do_check_inuse_chunk(m, p); assert((sz & CHUNK_ALIGN_MASK) == 0); assert(sz >= MIN_CHUNK_SIZE); assert(sz >= s); /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); - } - } /* Check a tree and its subtrees. 
*/ static void do_check_tree(mstate m, tchunkptr t) { - tchunkptr head = 0; tchunkptr u = t; - bindex_t tindex = t->index; - size_t tsize = chunksize(t); - bindex_t idx; + bindex_t tindex = t->index; + size_t tsize = chunksize(t); + bindex_t idx; compute_tree_index(tsize, idx); assert(tindex == idx); assert(tsize >= MIN_LARGE_SIZE); assert(tsize >= minsize_for_tree_index(idx)); - assert((idx == NTREEBINS - 1) || (tsize < minsize_for_tree_index((idx + 1)))); + assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); - do { /* traverse through chain of same-sized nodes */ + do { /* traverse through chain of same-sized nodes */ do_check_any_chunk(m, ((mchunkptr)u)); assert(u->index == tindex); assert(chunksize(u) == tsize); @@ -3539,72 +3370,56 @@ static void do_check_tree(mstate m, tchunkptr t) { assert(u->fd->bk == u); assert(u->bk->fd == u); if (u->parent == 0) { - assert(u->child[0] == 0); assert(u->child[1] == 0); - - } else { - - assert(head == 0); /* only one node on chain has parent */ + } + else { + assert(head == 0); /* only one node on chain has parent */ head = u; assert(u->parent != u); - assert(u->parent->child[0] == u || u->parent->child[1] == u || - *((tbinptr *)(u->parent)) == u); + assert (u->parent->child[0] == u || + u->parent->child[1] == u || + *((tbinptr*)(u->parent)) == u); if (u->child[0] != 0) { - assert(u->child[0]->parent == u); assert(u->child[0] != u); do_check_tree(m, u->child[0]); - } - if (u->child[1] != 0) { - assert(u->child[1]->parent == u); assert(u->child[1] != u); do_check_tree(m, u->child[1]); - } - if (u->child[0] != 0 && u->child[1] != 0) { - assert(chunksize(u->child[0]) < chunksize(u->child[1])); - } - } - u = u->fd; - } while (u != t); - assert(head != 0); - } /* Check all the chunks in a treebin. */ static void do_check_treebin(mstate m, bindex_t i) { - - tbinptr * tb = treebin_at(m, i); + tbinptr* tb = treebin_at(m, i); tchunkptr t = *tb; - int empty = (m->treemap & (1U << i)) == 0; - if (t == 0) assert(empty); - if (!empty) do_check_tree(m, t); - + int empty = (m->treemap & (1U << i)) == 0; + if (t == 0) + assert(empty); + if (!empty) + do_check_tree(m, t); } /* Check all the chunks in a smallbin. */ static void do_check_smallbin(mstate m, bindex_t i) { - - sbinptr b = smallbin_at(m, i); - mchunkptr p = b->bk; + sbinptr b = smallbin_at(m, i); + mchunkptr p = b->bk; unsigned int empty = (m->smallmap & (1U << i)) == 0; - if (p == b) assert(empty); + if (p == b) + assert(empty); if (!empty) { - for (; p != b; p = p->bk) { - - size_t size = chunksize(p); + size_t size = chunksize(p); mchunkptr q; /* each chunk claims to be free */ do_check_free_chunk(m, p); @@ -3613,249 +3428,185 @@ static void do_check_smallbin(mstate m, bindex_t i) { assert(p->bk == b || chunksize(p->bk) == chunksize(p)); /* chunk is followed by an inuse chunk */ q = next_chunk(p); - if (q->head != FENCEPOST_HEAD) do_check_inuse_chunk(m, q); - + if (q->head != FENCEPOST_HEAD) + do_check_inuse_chunk(m, q); } - } - } /* Find x in a bin. Used in other check functions. 
*/ static int bin_find(mstate m, mchunkptr x) { - size_t size = chunksize(x); if (is_small(size)) { - bindex_t sidx = small_index(size); - sbinptr b = smallbin_at(m, sidx); + sbinptr b = smallbin_at(m, sidx); if (smallmap_is_marked(m, sidx)) { - mchunkptr p = b; do { - - if (p == x) return 1; - + if (p == x) + return 1; } while ((p = p->fd) != b); - } - - } else { - + } + else { bindex_t tidx; compute_tree_index(size, tidx); if (treemap_is_marked(m, tidx)) { - tchunkptr t = *treebin_at(m, tidx); - size_t sizebits = size << leftshift_for_tree_index(tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); while (t != 0 && chunksize(t) != size) { - - t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]; + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; sizebits <<= 1; - } - if (t != 0) { - tchunkptr u = t; do { - - if (u == (tchunkptr)x) return 1; - + if (u == (tchunkptr)x) + return 1; } while ((u = u->fd) != t); - } - } - } - return 0; - } /* Traverse each chunk and check it; return total */ static size_t traverse_and_check(mstate m) { - size_t sum = 0; if (is_initialized(m)) { - msegmentptr s = &m->seg; sum += m->topsize + TOP_FOOT_SIZE; while (s != 0) { - mchunkptr q = align_as_chunk(s->base); mchunkptr lastq = 0; assert(pinuse(q)); - while (segment_holds(s, q) && q != m->top && q->head != FENCEPOST_HEAD) { - + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { sum += chunksize(q); if (is_inuse(q)) { - assert(!bin_find(m, q)); do_check_inuse_chunk(m, q); - - } else { - + } + else { assert(q == m->dv || bin_find(m, q)); - assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ + assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ do_check_free_chunk(m, q); - } - lastq = q; q = next_chunk(q); - } - s = s->next; - } - } - return sum; - } + /* Check all properties of malloc_state. 
*/ static void do_check_malloc_state(mstate m) { - bindex_t i; - size_t total; + size_t total; /* check bins */ for (i = 0; i < NSMALLBINS; ++i) do_check_smallbin(m, i); for (i = 0; i < NTREEBINS; ++i) do_check_treebin(m, i); - if (m->dvsize != 0) { /* check dv chunk */ + if (m->dvsize != 0) { /* check dv chunk */ do_check_any_chunk(m, m->dv); assert(m->dvsize == chunksize(m->dv)); assert(m->dvsize >= MIN_CHUNK_SIZE); assert(bin_find(m, m->dv) == 0); - } - if (m->top != 0) { /* check top chunk */ + if (m->top != 0) { /* check top chunk */ do_check_top_chunk(m, m->top); /*assert(m->topsize == chunksize(m->top)); redundant */ assert(m->topsize > 0); assert(bin_find(m, m->top) == 0); - } total = traverse_and_check(m); assert(total <= m->footprint); assert(m->footprint <= m->max_footprint); - } - -#endif /* DEBUG */ +#endif /* DEBUG */ /* ----------------------------- statistics ------------------------------ */ #if !NO_MALLINFO static struct mallinfo internal_mallinfo(mstate m) { - - struct mallinfo nm = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; ensure_initialization(); if (!PREACTION(m)) { - check_malloc_state(m); if (is_initialized(m)) { - - size_t nfree = SIZE_T_ONE; /* top always free */ - size_t mfree = m->topsize + TOP_FOOT_SIZE; - size_t sum = mfree; + size_t nfree = SIZE_T_ONE; /* top always free */ + size_t mfree = m->topsize + TOP_FOOT_SIZE; + size_t sum = mfree; msegmentptr s = &m->seg; while (s != 0) { - mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && q != m->top && - q->head != FENCEPOST_HEAD) { - + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { size_t sz = chunksize(q); sum += sz; if (!is_inuse(q)) { - mfree += sz; ++nfree; - } - q = next_chunk(q); - } - s = s->next; - } - nm.arena = sum; - nm.ordblks = nfree; - nm.hblkhd = m->footprint - sum; - nm.usmblks = m->max_footprint; + nm.arena = sum; + nm.ordblks = nfree; + nm.hblkhd = m->footprint - sum; + nm.usmblks = m->max_footprint; nm.uordblks = m->footprint - mfree; nm.fordblks = mfree; nm.keepcost = m->topsize; - } POSTACTION(m); - } - return nm; - } - -#endif /* !NO_MALLINFO */ +#endif /* !NO_MALLINFO */ #if !NO_MALLOC_STATS static void internal_malloc_stats(mstate m) { - ensure_initialization(); if (!PREACTION(m)) { - size_t maxfp = 0; size_t fp = 0; size_t used = 0; check_malloc_state(m); if (is_initialized(m)) { - msegmentptr s = &m->seg; maxfp = m->max_footprint; fp = m->footprint; used = fp - (m->topsize + TOP_FOOT_SIZE); while (s != 0) { - mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && q != m->top && - q->head != FENCEPOST_HEAD) { - - if (!is_inuse(q)) used -= chunksize(q); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + if (!is_inuse(q)) + used -= chunksize(q); q = next_chunk(q); - } - s = s->next; - } - } - - POSTACTION(m); /* drop lock */ + POSTACTION(m); /* drop lock */ fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); - } - } - -#endif /* NO_MALLOC_STATS */ +#endif /* NO_MALLOC_STATS */ /* ----------------------- Operations on smallbins ----------------------- */ @@ -3867,181 +3618,134 @@ static void internal_malloc_stats(mstate m) { */ /* Link a free chunk into a smallbin */ -#define insert_small_chunk(M, P, S) \ - { \ - \ - bindex_t I = small_index(S); \ - mchunkptr B = smallbin_at(M, I); \ - mchunkptr F = B; \ - 
assert(S >= MIN_CHUNK_SIZE); \ - if (!smallmap_is_marked(M, I)) \ - mark_smallmap(M, I); \ - else if (RTCHECK(ok_address(M, B->fd))) \ - F = B->fd; \ - else { \ - \ - CORRUPTION_ERROR_ACTION(M); \ - \ - } \ - B->fd = P; \ - F->bk = P; \ - P->fd = F; \ - P->bk = B; \ - \ - } +#define insert_small_chunk(M, P, S) {\ + bindex_t I = small_index(S);\ + mchunkptr B = smallbin_at(M, I);\ + mchunkptr F = B;\ + assert(S >= MIN_CHUNK_SIZE);\ + if (!smallmap_is_marked(M, I))\ + mark_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, B->fd)))\ + F = B->fd;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + B->fd = P;\ + F->bk = P;\ + P->fd = F;\ + P->bk = B;\ +} /* Unlink a chunk from a smallbin */ -#define unlink_small_chunk(M, P, S) \ - { \ - \ - mchunkptr F = P->fd; \ - mchunkptr B = P->bk; \ - bindex_t I = small_index(S); \ - assert(P != B); \ - assert(P != F); \ - assert(chunksize(P) == small_index2size(I)); \ - if (RTCHECK(F == smallbin_at(M, I) || (ok_address(M, F) && F->bk == P))) { \ - \ - if (B == F) { \ - \ - clear_smallmap(M, I); \ - \ - } else if (RTCHECK(B == smallbin_at(M, I) || \ - \ - \ - (ok_address(M, B) && B->fd == P))) { \ - \ - F->bk = B; \ - B->fd = F; \ - \ - } else { \ - \ - CORRUPTION_ERROR_ACTION(M); \ - \ - } \ - \ - } else { \ - \ - CORRUPTION_ERROR_ACTION(M); \ - \ - } \ - \ - } +#define unlink_small_chunk(M, P, S) {\ + mchunkptr F = P->fd;\ + mchunkptr B = P->bk;\ + bindex_t I = small_index(S);\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \ + if (B == F) {\ + clear_smallmap(M, I);\ + }\ + else if (RTCHECK(B == smallbin_at(M,I) ||\ + (ok_address(M, B) && B->fd == P))) {\ + F->bk = B;\ + B->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} /* Unlink the first chunk from a smallbin */ -#define unlink_first_small_chunk(M, B, P, I) \ - { \ - \ - mchunkptr F = P->fd; \ - assert(P != B); \ - assert(P != F); \ - assert(chunksize(P) == small_index2size(I)); \ - if (B == F) { \ - \ - clear_smallmap(M, I); \ - \ - } else if (RTCHECK(ok_address(M, F) && F->bk == P)) { \ - \ - F->bk = B; \ - B->fd = F; \ - \ - } else { \ - \ - CORRUPTION_ERROR_ACTION(M); \ - \ - } \ - \ - } +#define unlink_first_small_chunk(M, B, P, I) {\ + mchunkptr F = P->fd;\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (B == F) {\ + clear_smallmap(M, I);\ + }\ + else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\ + F->bk = B;\ + B->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} /* Replace dv node, binning the old one */ /* Used only when dvsize known to be small */ -#define replace_dv(M, P, S) \ - { \ - \ - size_t DVS = M->dvsize; \ - assert(is_small(DVS)); \ - if (DVS != 0) { \ - \ - mchunkptr DV = M->dv; \ - insert_small_chunk(M, DV, DVS); \ - \ - } \ - M->dvsize = S; \ - M->dv = P; \ - \ - } +#define replace_dv(M, P, S) {\ + size_t DVS = M->dvsize;\ + assert(is_small(DVS));\ + if (DVS != 0) {\ + mchunkptr DV = M->dv;\ + insert_small_chunk(M, DV, DVS);\ + }\ + M->dvsize = S;\ + M->dv = P;\ +} /* ------------------------- Operations on trees ------------------------- */ /* Insert chunk into tree */ -#define insert_large_chunk(M, X, S) \ - { \ - \ - tbinptr *H; \ - bindex_t I; \ - compute_tree_index(S, I); \ - H = treebin_at(M, I); \ - X->index = I; \ - X->child[0] = X->child[1] = 0; \ - if (!treemap_is_marked(M, I)) { \ - \ - mark_treemap(M, I); \ - *H = X; \ - X->parent = 
(tchunkptr)H; \ - X->fd = X->bk = X; \ - \ - } else { \ - \ - tchunkptr T = *H; \ - size_t K = S << leftshift_for_tree_index(I); \ - for (;;) { \ - \ - if (chunksize(T) != S) { \ - \ - tchunkptr *C = \ - &(T->child[(K >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]); \ - K <<= 1; \ - if (*C != 0) \ - T = *C; \ - else if (RTCHECK(ok_address(M, C))) { \ - \ - *C = X; \ - X->parent = T; \ - X->fd = X->bk = X; \ - break; \ - \ - } else { \ - \ - CORRUPTION_ERROR_ACTION(M); \ - break; \ - \ - } \ - \ - } else { \ - \ - tchunkptr F = T->fd; \ - if (RTCHECK(ok_address(M, T) && ok_address(M, F))) { \ - \ - T->fd = F->bk = X; \ - X->fd = F; \ - X->bk = T; \ - X->parent = 0; \ - break; \ - \ - } else { \ - \ - CORRUPTION_ERROR_ACTION(M); \ - break; \ - \ - } \ - \ - } \ - \ - } \ - \ - } \ - \ - } +#define insert_large_chunk(M, X, S) {\ + tbinptr* H;\ + bindex_t I;\ + compute_tree_index(S, I);\ + H = treebin_at(M, I);\ + X->index = I;\ + X->child[0] = X->child[1] = 0;\ + if (!treemap_is_marked(M, I)) {\ + mark_treemap(M, I);\ + *H = X;\ + X->parent = (tchunkptr)H;\ + X->fd = X->bk = X;\ + }\ + else {\ + tchunkptr T = *H;\ + size_t K = S << leftshift_for_tree_index(I);\ + for (;;) {\ + if (chunksize(T) != S) {\ + tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ + K <<= 1;\ + if (*C != 0)\ + T = *C;\ + else if (RTCHECK(ok_address(M, C))) {\ + *C = X;\ + X->parent = T;\ + X->fd = X->bk = X;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + else {\ + tchunkptr F = T->fd;\ + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ + T->fd = F->bk = X;\ + X->fd = F;\ + X->bk = T;\ + X->parent = 0;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + }\ + }\ +} /* Unlink steps: @@ -4060,149 +3764,104 @@ static void internal_malloc_stats(mstate m) { x's parent and children to x's replacement (or null if none). 
*/ -#define unlink_large_chunk(M, X) \ - { \ - \ - tchunkptr XP = X->parent; \ - tchunkptr R; \ - if (X->bk != X) { \ - \ - tchunkptr F = X->fd; \ - R = X->bk; \ - if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) { \ - \ - F->bk = R; \ - R->fd = F; \ - \ - } else { \ - \ - CORRUPTION_ERROR_ACTION(M); \ - \ - } \ - \ - } else { \ - \ - tchunkptr *RP; \ - if (((R = *(RP = &(X->child[1]))) != 0) || \ - ((R = *(RP = &(X->child[0]))) != 0)) { \ - \ - tchunkptr *CP; \ - while ((*(CP = &(R->child[1])) != 0) || \ - (*(CP = &(R->child[0])) != 0)) { \ - \ - R = *(RP = CP); \ - \ - } \ - if (RTCHECK(ok_address(M, RP))) \ - *RP = 0; \ - else { \ - \ - CORRUPTION_ERROR_ACTION(M); \ - \ - } \ - \ - } \ - \ - } \ - if (XP != 0) { \ - \ - tbinptr *H = treebin_at(M, X->index); \ - if (X == *H) { \ - \ - if ((*H = R) == 0) clear_treemap(M, X->index); \ - \ - } else if (RTCHECK(ok_address(M, XP))) { \ - \ - if (XP->child[0] == X) \ - XP->child[0] = R; \ - else \ - XP->child[1] = R; \ - \ - } else \ - \ - \ - CORRUPTION_ERROR_ACTION(M); \ - if (R != 0) { \ - \ - if (RTCHECK(ok_address(M, R))) { \ - \ - tchunkptr C0, C1; \ - R->parent = XP; \ - if ((C0 = X->child[0]) != 0) { \ - \ - if (RTCHECK(ok_address(M, C0))) { \ - \ - R->child[0] = C0; \ - C0->parent = R; \ - \ - } else \ - \ - \ - CORRUPTION_ERROR_ACTION(M); \ - \ - } \ - if ((C1 = X->child[1]) != 0) { \ - \ - if (RTCHECK(ok_address(M, C1))) { \ - \ - R->child[1] = C1; \ - C1->parent = R; \ - \ - } else \ - \ - \ - CORRUPTION_ERROR_ACTION(M); \ - \ - } \ - \ - } else \ - \ - \ - CORRUPTION_ERROR_ACTION(M); \ - \ - } \ - \ - } \ - \ - } +#define unlink_large_chunk(M, X) {\ + tchunkptr XP = X->parent;\ + tchunkptr R;\ + if (X->bk != X) {\ + tchunkptr F = X->fd;\ + R = X->bk;\ + if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\ + F->bk = R;\ + R->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else {\ + tchunkptr* RP;\ + if (((R = *(RP = &(X->child[1]))) != 0) ||\ + ((R = *(RP = &(X->child[0]))) != 0)) {\ + tchunkptr* CP;\ + while ((*(CP = &(R->child[1])) != 0) ||\ + (*(CP = &(R->child[0])) != 0)) {\ + R = *(RP = CP);\ + }\ + if (RTCHECK(ok_address(M, RP)))\ + *RP = 0;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + }\ + if (XP != 0) {\ + tbinptr* H = treebin_at(M, X->index);\ + if (X == *H) {\ + if ((*H = R) == 0) \ + clear_treemap(M, X->index);\ + }\ + else if (RTCHECK(ok_address(M, XP))) {\ + if (XP->child[0] == X) \ + XP->child[0] = R;\ + else \ + XP->child[1] = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + if (R != 0) {\ + if (RTCHECK(ok_address(M, R))) {\ + tchunkptr C0, C1;\ + R->parent = XP;\ + if ((C0 = X->child[0]) != 0) {\ + if (RTCHECK(ok_address(M, C0))) {\ + R->child[0] = C0;\ + C0->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + if ((C1 = X->child[1]) != 0) {\ + if (RTCHECK(ok_address(M, C1))) {\ + R->child[1] = C1;\ + C1->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ +} /* Relays to large vs small bin operations */ -#define insert_chunk(M, P, S) \ - if (is_small(S)) insert_small_chunk(M, P, S) else { \ - \ - tchunkptr TP = (tchunkptr)(P); \ - insert_large_chunk(M, TP, S); \ - \ - } +#define insert_chunk(M, P, S)\ + if (is_small(S)) insert_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } + +#define unlink_chunk(M, P, S)\ + if (is_small(S)) unlink_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } -#define unlink_chunk(M, P, 
S) \ - if (is_small(S)) unlink_small_chunk(M, P, S) else { \ - \ - tchunkptr TP = (tchunkptr)(P); \ - unlink_large_chunk(M, TP); \ - \ - } /* Relays to internal calls to malloc/free from realloc, memalign etc */ #if ONLY_MSPACES - #define internal_malloc(m, b) mspace_malloc(m, b) - #define internal_free(m, mem) mspace_free(m, mem); -#else /* ONLY_MSPACES */ - #if MSPACES - #define internal_malloc(m, b) \ - ((m == gm) ? dlmalloc(b) : mspace_malloc(m, b)) - #define internal_free(m, mem) \ - if (m == gm) \ - dlfree(mem); \ - else \ - mspace_free(m, mem); - #else /* MSPACES */ - #define internal_malloc(m, b) dlmalloc(b) - #define internal_free(m, mem) dlfree(mem) - #endif /* MSPACES */ -#endif /* ONLY_MSPACES */ +#define internal_malloc(m, b) mspace_malloc(m, b) +#define internal_free(m, mem) mspace_free(m,mem); +#else /* ONLY_MSPACES */ +#if MSPACES +#define internal_malloc(m, b)\ + ((m == gm)? dlmalloc(b) : mspace_malloc(m, b)) +#define internal_free(m, mem)\ + if (m == gm) dlfree(mem); else mspace_free(m,mem); +#else /* MSPACES */ +#define internal_malloc(m, b) dlmalloc(b) +#define internal_free(m, mem) dlfree(mem) +#endif /* MSPACES */ +#endif /* ONLY_MSPACES */ /* ----------------------- Direct-mmapping chunks ----------------------- */ @@ -4215,93 +3874,80 @@ static void internal_malloc_stats(mstate m) { */ /* Malloc using mmap */ -static void *mmap_alloc(mstate m, size_t nb) { - +static void* mmap_alloc(mstate m, size_t nb) { size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); if (m->footprint_limit != 0) { - size_t fp = m->footprint + mmsize; - if (fp <= m->footprint || fp > m->footprint_limit) return 0; - + if (fp <= m->footprint || fp > m->footprint_limit) + return 0; } - - if (mmsize > nb) { /* Check for wrap around 0 */ - char *mm = (char *)(CALL_DIRECT_MMAP(mmsize)); + if (mmsize > nb) { /* Check for wrap around 0 */ + char* mm = (char*)(CALL_DIRECT_MMAP(mmsize)); if (mm != CMFAIL) { - - size_t offset = align_offset(chunk2mem(mm)); - size_t psize = mmsize - offset - MMAP_FOOT_PAD; + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - MMAP_FOOT_PAD; mchunkptr p = (mchunkptr)(mm + offset); p->prev_foot = offset; p->head = psize; mark_inuse_foot(m, p, psize); chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0; + chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; - if (m->least_addr == 0 || mm < m->least_addr) m->least_addr = mm; + if (m->least_addr == 0 || mm < m->least_addr) + m->least_addr = mm; if ((m->footprint += mmsize) > m->max_footprint) m->max_footprint = m->footprint; assert(is_aligned(chunk2mem(p))); check_mmapped_chunk(m, p); return chunk2mem(p); - } - } - return 0; - } /* Realloc using mmap */ static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) { - size_t oldsize = chunksize(oldp); - (void)flags; /* placate people compiling -Wunused */ - if (is_small(nb)) /* Can't shrink mmap regions below small size */ + (void)flags; /* placate people compiling -Wunused */ + if (is_small(nb)) /* Can't shrink mmap regions below small size */ return 0; /* Keep old chunk if big enough but not too big */ if (oldsize >= nb + SIZE_T_SIZE && (oldsize - nb) <= (mparams.granularity << 1)) return oldp; else { - size_t offset = oldp->prev_foot; size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - char * cp = - (char *)CALL_MREMAP((char *)oldp - offset, oldmmsize, newmmsize, flags); + char* cp = 
(char*)CALL_MREMAP((char*)oldp - offset, + oldmmsize, newmmsize, flags); if (cp != CMFAIL) { - mchunkptr newp = (mchunkptr)(cp + offset); - size_t psize = newmmsize - offset - MMAP_FOOT_PAD; + size_t psize = newmmsize - offset - MMAP_FOOT_PAD; newp->head = psize; mark_inuse_foot(m, newp, psize); chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0; + chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; - if (cp < m->least_addr) m->least_addr = cp; + if (cp < m->least_addr) + m->least_addr = cp; if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) m->max_footprint = m->footprint; check_mmapped_chunk(m, newp); return newp; - } - } - return 0; - } + /* -------------------------- mspace management -------------------------- */ /* Initialize top chunk and its size */ static void init_top(mstate m, mchunkptr p, size_t psize) { - /* Ensure alignment */ size_t offset = align_offset(chunk2mem(p)); - p = (mchunkptr)((char *)p + offset); + p = (mchunkptr)((char*)p + offset); psize -= offset; m->top = p; @@ -4309,29 +3955,23 @@ static void init_top(mstate m, mchunkptr p, size_t psize) { p->head = psize | PINUSE_BIT; /* set size of fake trailing chunk holding overhead space only once */ chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; - m->trim_check = mparams.trim_threshold; /* reset on each update */ - + m->trim_check = mparams.trim_threshold; /* reset on each update */ } /* Initialize bins for a new mstate that is otherwise zeroed out */ static void init_bins(mstate m) { - /* Establish circular links for smallbins */ bindex_t i; for (i = 0; i < NSMALLBINS; ++i) { - - sbinptr bin = smallbin_at(m, i); + sbinptr bin = smallbin_at(m,i); bin->fd = bin->bk = bin; - } - } #if PROCEED_ON_ERROR /* default corruption action */ static void reset_on_error(mstate m) { - int i; ++malloc_corruption_error_count; /* Reinitialize fields to forget about all memory */ @@ -4344,78 +3984,67 @@ static void reset_on_error(mstate m) { for (i = 0; i < NTREEBINS; ++i) *treebin_at(m, i) = 0; init_bins(m); - } - -#endif /* PROCEED_ON_ERROR */ +#endif /* PROCEED_ON_ERROR */ /* Allocate chunk and prepend remainder with chunk in successor base. 
*/ -static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb) { - +static void* prepend_alloc(mstate m, char* newbase, char* oldbase, + size_t nb) { mchunkptr p = align_as_chunk(newbase); mchunkptr oldfirst = align_as_chunk(oldbase); - size_t psize = (char *)oldfirst - (char *)p; + size_t psize = (char*)oldfirst - (char*)p; mchunkptr q = chunk_plus_offset(p, nb); - size_t qsize = psize - nb; + size_t qsize = psize - nb; set_size_and_pinuse_of_inuse_chunk(m, p, nb); - assert((char *)oldfirst > (char *)q); + assert((char*)oldfirst > (char*)q); assert(pinuse(oldfirst)); assert(qsize >= MIN_CHUNK_SIZE); /* consolidate remainder with first chunk of old base */ if (oldfirst == m->top) { - size_t tsize = m->topsize += qsize; m->top = q; q->head = tsize | PINUSE_BIT; check_top_chunk(m, q); - - } else if (oldfirst == m->dv) { - + } + else if (oldfirst == m->dv) { size_t dsize = m->dvsize += qsize; m->dv = q; set_size_and_pinuse_of_free_chunk(q, dsize); - - } else { - + } + else { if (!is_inuse(oldfirst)) { - size_t nsize = chunksize(oldfirst); unlink_chunk(m, oldfirst, nsize); oldfirst = chunk_plus_offset(oldfirst, nsize); qsize += nsize; - } - set_free_with_pinuse(q, qsize, oldfirst); insert_chunk(m, q, qsize); check_free_chunk(m, q); - } check_malloced_chunk(m, chunk2mem(p), nb); return chunk2mem(p); - } /* Add a segment to hold a new noncontiguous region */ -static void add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped) { - +static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { /* Determine locations and sizes of segment, fenceposts, old top */ - char * old_top = (char *)m->top; + char* old_top = (char*)m->top; msegmentptr oldsp = segment_holding(m, old_top); - char * old_end = oldsp->base + oldsp->size; - size_t ssize = pad_request(sizeof(struct malloc_segment)); - char * rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - size_t offset = align_offset(chunk2mem(rawsp)); - char * asp = rawsp + offset; - char * csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp; - mchunkptr sp = (mchunkptr)csp; + char* old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char* asp = rawsp + offset; + char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? 
old_top : asp; + mchunkptr sp = (mchunkptr)csp; msegmentptr ss = (msegmentptr)(chunk2mem(sp)); - mchunkptr tnext = chunk_plus_offset(sp, ssize); - mchunkptr p = tnext; - int nfences = 0; + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + int nfences = 0; /* reset top to new space */ init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); @@ -4423,7 +4052,7 @@ static void add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped) { /* Set up segment record */ assert(is_aligned(ss)); set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); - *ss = m->seg; /* Push current record */ + *ss = m->seg; /* Push current record */ m->seg.base = tbase; m->seg.size = tsize; m->seg.sflags = mmapped; @@ -4431,61 +4060,53 @@ static void add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped) { /* Insert trailing fenceposts */ for (;;) { - mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); p->head = FENCEPOST_HEAD; ++nfences; - if ((char *)(&(nextp->head)) < old_end) + if ((char*)(&(nextp->head)) < old_end) p = nextp; else break; - } - assert(nfences >= 2); /* Insert the rest of old top into a bin as an ordinary free chunk */ if (csp != old_top) { - mchunkptr q = (mchunkptr)old_top; - size_t psize = csp - old_top; + size_t psize = csp - old_top; mchunkptr tn = chunk_plus_offset(q, psize); set_free_with_pinuse(q, psize, tn); insert_chunk(m, q, psize); - } check_top_chunk(m, m->top); - } /* -------------------------- System allocation -------------------------- */ /* Get memory from system using MORECORE or MMAP */ -static void *sys_alloc(mstate m, size_t nb) { - - char * tbase = CMFAIL; +static void* sys_alloc(mstate m, size_t nb) { + char* tbase = CMFAIL; size_t tsize = 0; flag_t mmap_flag = 0; - size_t asize; /* allocation size */ + size_t asize; /* allocation size */ ensure_initialization(); /* Directly map large chunks, but only if already initialized */ if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) { - - void *mem = mmap_alloc(m, nb); - if (mem != 0) return mem; - + void* mem = mmap_alloc(m, nb); + if (mem != 0) + return mem; } asize = granularity_align(nb + SYS_ALLOC_PADDING); - if (asize <= nb) return 0; /* wraparound */ + if (asize <= nb) + return 0; /* wraparound */ if (m->footprint_limit != 0) { - size_t fp = m->footprint + asize; - if (fp <= m->footprint || fp > m->footprint_limit) return 0; - + if (fp <= m->footprint || fp > m->footprint_limit) + return 0; } /* @@ -4511,119 +4132,91 @@ static void *sys_alloc(mstate m, size_t nb) { */ if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { - - char * br = CMFAIL; - size_t ssize = asize; /* sbrk call size */ - msegmentptr ss = (m->top == 0) ? 0 : segment_holding(m, (char *)m->top); + char* br = CMFAIL; + size_t ssize = asize; /* sbrk call size */ + msegmentptr ss = (m->top == 0)? 
0 : segment_holding(m, (char*)m->top); ACQUIRE_MALLOC_GLOBAL_LOCK(); - if (ss == 0) { /* First time through or recovery */ - char *base = (char *)CALL_MORECORE(0); + if (ss == 0) { /* First time through or recovery */ + char* base = (char*)CALL_MORECORE(0); if (base != CMFAIL) { - size_t fp; /* Adjust to end on a page boundary */ if (!is_page_aligned(base)) ssize += (page_align((size_t)base) - (size_t)base); - fp = m->footprint + ssize; /* recheck limits */ + fp = m->footprint + ssize; /* recheck limits */ if (ssize > nb && ssize < HALF_MAX_SIZE_T && (m->footprint_limit == 0 || (fp > m->footprint && fp <= m->footprint_limit)) && - (br = (char *)(CALL_MORECORE(ssize))) == base) { - + (br = (char*)(CALL_MORECORE(ssize))) == base) { tbase = base; tsize = ssize; - } - } - - } else { - + } + else { /* Subtract out existing available top space from MORECORE request. */ ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING); /* Use mem here only if it did continuously extend old space */ if (ssize < HALF_MAX_SIZE_T && - (br = (char *)(CALL_MORECORE(ssize))) == ss->base + ss->size) { - + (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) { tbase = br; tsize = ssize; - } - } - if (tbase == CMFAIL) { /* Cope with partial failure */ - if (br != CMFAIL) { /* Try to use/extend the space we did get */ - if (ssize < HALF_MAX_SIZE_T && ssize < nb + SYS_ALLOC_PADDING) { - + if (tbase == CMFAIL) { /* Cope with partial failure */ + if (br != CMFAIL) { /* Try to use/extend the space we did get */ + if (ssize < HALF_MAX_SIZE_T && + ssize < nb + SYS_ALLOC_PADDING) { size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize); if (esize < HALF_MAX_SIZE_T) { - - char *end = (char *)CALL_MORECORE(esize); + char* end = (char*)CALL_MORECORE(esize); if (end != CMFAIL) ssize += esize; - else { /* Can't use; try to release */ - (void)CALL_MORECORE(-ssize); + else { /* Can't use; try to release */ + (void) CALL_MORECORE(-ssize); br = CMFAIL; - } - } - } - } - - if (br != CMFAIL) { /* Use the space we did get */ + if (br != CMFAIL) { /* Use the space we did get */ tbase = br; tsize = ssize; - - } else - - disable_contiguous(m); /* Don't try contiguous path in the future */ - + } + else + disable_contiguous(m); /* Don't try contiguous path in the future */ } RELEASE_MALLOC_GLOBAL_LOCK(); - } - if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ - char *mp = (char *)(CALL_MMAP(asize)); + if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ + char* mp = (char*)(CALL_MMAP(asize)); if (mp != CMFAIL) { - tbase = mp; tsize = asize; mmap_flag = USE_MMAP_BIT; - } - } - if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ + if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ if (asize < HALF_MAX_SIZE_T) { - - char *br = CMFAIL; - char *end = CMFAIL; + char* br = CMFAIL; + char* end = CMFAIL; ACQUIRE_MALLOC_GLOBAL_LOCK(); - br = (char *)(CALL_MORECORE(asize)); - end = (char *)(CALL_MORECORE(0)); + br = (char*)(CALL_MORECORE(asize)); + end = (char*)(CALL_MORECORE(0)); RELEASE_MALLOC_GLOBAL_LOCK(); if (br != CMFAIL && end != CMFAIL && br < end) { - size_t ssize = end - br; if (ssize > nb + TOP_FOOT_SIZE) { - tbase = br; tsize = ssize; - } - } - } - } if (tbase != CMFAIL) { @@ -4631,8 +4224,9 @@ static void *sys_alloc(mstate m, size_t nb) { if ((m->footprint += tsize) > m->max_footprint) m->max_footprint = m->footprint; - if (!is_initialized(m)) { /* first-time initialization */ - if (m->least_addr == 0 || tbase < m->least_addr) m->least_addr = tbase; + if (!is_initialized(m)) 
{ /* first-time initialization */ + if (m->least_addr == 0 || tbase < m->least_addr) + m->least_addr = tbase; m->seg.base = tbase; m->seg.size = tsize; m->seg.sflags = mmap_flag; @@ -4645,52 +4239,46 @@ static void *sys_alloc(mstate m, size_t nb) { else #endif { - /* Offset top by embedded malloc_state */ mchunkptr mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE); - + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); } - } else { - /* Try to merge with an existing segment */ msegmentptr sp = &m->seg; /* Only consider most recent segment if traversal suppressed */ while (sp != 0 && tbase != sp->base + sp->size) sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; - if (sp != 0 && !is_extern_segment(sp) && + if (sp != 0 && + !is_extern_segment(sp) && (sp->sflags & USE_MMAP_BIT) == mmap_flag && - segment_holds(sp, m->top)) { /* append */ + segment_holds(sp, m->top)) { /* append */ sp->size += tsize; init_top(m, m->top, m->topsize + tsize); - - } else { - - if (tbase < m->least_addr) m->least_addr = tbase; + } + else { + if (tbase < m->least_addr) + m->least_addr = tbase; sp = &m->seg; while (sp != 0 && sp->base != tbase + tsize) sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; - if (sp != 0 && !is_extern_segment(sp) && + if (sp != 0 && + !is_extern_segment(sp) && (sp->sflags & USE_MMAP_BIT) == mmap_flag) { - - char *oldbase = sp->base; + char* oldbase = sp->base; sp->base = tbase; sp->size += tsize; return prepend_alloc(m, tbase, oldbase, nb); - - } else - + } + else add_segment(m, tbase, tsize, mmap_flag); - } - } - if (nb < m->topsize) { /* Allocate from new or extended top space */ - size_t rsize = m->topsize -= nb; + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; mchunkptr p = m->top; mchunkptr r = m->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -4698,421 +4286,313 @@ static void *sys_alloc(mstate m, size_t nb) { check_top_chunk(m, m->top); check_malloced_chunk(m, chunk2mem(p), nb); return chunk2mem(p); - } - } MALLOC_FAILURE_ACTION; return 0; - } /* ----------------------- system deallocation -------------------------- */ /* Unmap and unlink any mmapped segments that don't contain used chunks */ static size_t release_unused_segments(mstate m) { - - size_t released = 0; - int nsegs = 0; + size_t released = 0; + int nsegs = 0; msegmentptr pred = &m->seg; msegmentptr sp = pred->next; while (sp != 0) { - - char * base = sp->base; - size_t size = sp->size; + char* base = sp->base; + size_t size = sp->size; msegmentptr next = sp->next; ++nsegs; if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { - mchunkptr p = align_as_chunk(base); - size_t psize = chunksize(p); + size_t psize = chunksize(p); /* Can unmap if first chunk holds entire segment and not pinned */ - if (!is_inuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) { - + if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { tchunkptr tp = (tchunkptr)p; - assert(segment_holds(sp, (char *)sp)); + assert(segment_holds(sp, (char*)sp)); if (p == m->dv) { - m->dv = 0; m->dvsize = 0; - - } else { - + } + else { unlink_large_chunk(m, tp); - } - if (CALL_MUNMAP(base, size) == 0) { - released += size; m->footprint -= size; /* unlink obsoleted record */ sp = pred; sp->next = next; - - } else { /* back out if cannot unmap */ - + } + else { /* back out if cannot unmap */ insert_large_chunk(m, tp, psize); - } - } - } - - if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ + if 
(NO_SEGMENT_TRAVERSAL) /* scan only first segment */ break; pred = sp; sp = next; - } - /* Reset check counter */ - m->release_checks = (((size_t)nsegs > (size_t)MAX_RELEASE_CHECK_RATE) - ? (size_t)nsegs - : (size_t)MAX_RELEASE_CHECK_RATE); + m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)? + (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE); return released; - } static int sys_trim(mstate m, size_t pad) { - size_t released = 0; ensure_initialization(); if (pad < MAX_REQUEST && is_initialized(m)) { - - pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ if (m->topsize > pad) { - /* Shrink top space in granularity-size units, keeping at least one */ size_t unit = mparams.granularity; - size_t extra = - ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - SIZE_T_ONE) * unit; - msegmentptr sp = segment_holding(m, (char *)m->top); + size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - + SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char*)m->top); if (!is_extern_segment(sp)) { - if (is_mmapped_segment(sp)) { - - if (HAVE_MMAP && sp->size >= extra && - !has_segment_link(m, sp)) { /* can't shrink if pinned */ + if (HAVE_MMAP && + sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ size_t newsize = sp->size - extra; - (void)newsize; /* placate people compiling -Wunused-variable */ + (void)newsize; /* placate people compiling -Wunused-variable */ /* Prefer mremap, fall back to munmap */ if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { - released = extra; - } - } - - } else if (HAVE_MORECORE) { - - if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ + } + else if (HAVE_MORECORE) { + if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; ACQUIRE_MALLOC_GLOBAL_LOCK(); { - /* Make sure end of memory is where we last set it. */ - char *old_br = (char *)(CALL_MORECORE(0)); + char* old_br = (char*)(CALL_MORECORE(0)); if (old_br == sp->base + sp->size) { - - char *rel_br = (char *)(CALL_MORECORE(-extra)); - char *new_br = (char *)(CALL_MORECORE(0)); + char* rel_br = (char*)(CALL_MORECORE(-extra)); + char* new_br = (char*)(CALL_MORECORE(0)); if (rel_br != CMFAIL && new_br < old_br) released = old_br - new_br; - } - } - RELEASE_MALLOC_GLOBAL_LOCK(); - } - } if (released != 0) { - sp->size -= released; m->footprint -= released; init_top(m, m->top, m->topsize - released); check_top_chunk(m, m->top); - } - } /* Unmap any unused mmapped segments */ - if (HAVE_MMAP) released += release_unused_segments(m); + if (HAVE_MMAP) + released += release_unused_segments(m); /* On failure, disable autotrim to avoid repeated failed future calls */ - if (released == 0 && m->topsize > m->trim_check) m->trim_check = MAX_SIZE_T; - + if (released == 0 && m->topsize > m->trim_check) + m->trim_check = MAX_SIZE_T; } - return (released != 0) ? 1 : 0; - + return (released != 0)? 1 : 0; } /* Consolidate and bin a chunk. Differs from exported versions of free mainly in that the chunk need not be marked as inuse. 
*/ static void dispose_chunk(mstate m, mchunkptr p, size_t psize) { - mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { - mchunkptr prev; - size_t prevsize = p->prev_foot; + size_t prevsize = p->prev_foot; if (is_mmapped(p)) { - psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) m->footprint -= psize; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + m->footprint -= psize; return; - } - prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ if (p != m->dv) { - unlink_chunk(m, p, prevsize); - - } else if ((next->head & INUSE_BITS) == INUSE_BITS) { - + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { m->dvsize = psize; set_free_with_pinuse(p, psize, next); return; - } - - } else { - + } + else { CORRUPTION_ERROR_ACTION(m); return; - } - } - if (RTCHECK(ok_address(m, next))) { - - if (!cinuse(next)) { /* consolidate forward */ + if (!cinuse(next)) { /* consolidate forward */ if (next == m->top) { - size_t tsize = m->topsize += psize; m->top = p; p->head = tsize | PINUSE_BIT; if (p == m->dv) { - m->dv = 0; m->dvsize = 0; - } - return; - - } else if (next == m->dv) { - + } + else if (next == m->dv) { size_t dsize = m->dvsize += psize; m->dv = p; set_size_and_pinuse_of_free_chunk(p, dsize); return; - - } else { - + } + else { size_t nsize = chunksize(next); psize += nsize; unlink_chunk(m, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == m->dv) { - m->dvsize = psize; return; - } - } - - } else { - + } + else { set_free_with_pinuse(p, psize, next); - } - insert_chunk(m, p, psize); - - } else { - + } + else { CORRUPTION_ERROR_ACTION(m); - } - } /* ---------------------------- malloc --------------------------- */ /* allocate a large request from the best fitting chunk in a treebin */ -static void *tmalloc_large(mstate m, size_t nb) { - +static void* tmalloc_large(mstate m, size_t nb) { tchunkptr v = 0; - size_t rsize = -nb; /* Unsigned negation */ + size_t rsize = -nb; /* Unsigned negation */ tchunkptr t; - bindex_t idx; + bindex_t idx; compute_tree_index(nb, idx); if ((t = *treebin_at(m, idx)) != 0) { - /* Traverse tree for this bin looking for node with size == nb */ - size_t sizebits = nb << leftshift_for_tree_index(idx); - tchunkptr rst = 0; /* The deepest untaken right subtree */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ for (;;) { - tchunkptr rt; - size_t trem = chunksize(t) - nb; + size_t trem = chunksize(t) - nb; if (trem < rsize) { - v = t; - if ((rsize = trem) == 0) break; - + if ((rsize = trem) == 0) + break; } - rt = t->child[1]; - t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]; - if (rt != 0 && rt != t) rst = rt; + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) + rst = rt; if (t == 0) { - - t = rst; /* set t to least subtree holding sizes > nb */ + t = rst; /* set t to least subtree holding sizes > nb */ break; - } - sizebits <<= 1; - } - } - - if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; if (leftbits != 0) { - bindex_t i; binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); t = *treebin_at(m, i); - } - } - while (t != 0) { /* find smallest of tree or subtree */ + 
while (t != 0) { /* find smallest of tree or subtree */ size_t trem = chunksize(t) - nb; if (trem < rsize) { - rsize = trem; v = t; - } - t = leftmost_child(t); - } /* If dv is a better fit, return 0 so malloc will use it */ if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { - - if (RTCHECK(ok_address(m, v))) { /* split */ + if (RTCHECK(ok_address(m, v))) { /* split */ mchunkptr r = chunk_plus_offset(v, nb); assert(chunksize(v) == rsize + nb); if (RTCHECK(ok_next(v, r))) { - unlink_large_chunk(m, v); if (rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(m, v, (rsize + nb)); else { - set_size_and_pinuse_of_inuse_chunk(m, v, nb); set_size_and_pinuse_of_free_chunk(r, rsize); insert_chunk(m, r, rsize); - } - return chunk2mem(v); - } - } - CORRUPTION_ERROR_ACTION(m); - } - return 0; - } /* allocate a small request from the best fitting chunk in a treebin */ -static void *tmalloc_small(mstate m, size_t nb) { - +static void* tmalloc_small(mstate m, size_t nb) { tchunkptr t, v; - size_t rsize; - bindex_t i; - binmap_t leastbit = least_bit(m->treemap); + size_t rsize; + bindex_t i; + binmap_t leastbit = least_bit(m->treemap); compute_bit2idx(leastbit, i); v = t = *treebin_at(m, i); rsize = chunksize(t) - nb; while ((t = leftmost_child(t)) != 0) { - size_t trem = chunksize(t) - nb; if (trem < rsize) { - rsize = trem; v = t; - } - } if (RTCHECK(ok_address(m, v))) { - mchunkptr r = chunk_plus_offset(v, nb); assert(chunksize(v) == rsize + nb); if (RTCHECK(ok_next(v, r))) { - unlink_large_chunk(m, v); if (rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(m, v, (rsize + nb)); else { - set_size_and_pinuse_of_inuse_chunk(m, v, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(m, r, rsize); - } - return chunk2mem(v); - } - } CORRUPTION_ERROR_ACTION(m); return 0; - } #if !ONLY_MSPACES -void *dlmalloc(size_t bytes) { - +void* dlmalloc(size_t bytes) { /* Basic algorithm: If a small request (< 256 bytes minus per-chunk overhead): @@ -5136,25 +4616,23 @@ void *dlmalloc(size_t bytes) { The ugly goto's here ensure that postaction occurs along all paths. */ - #if USE_LOCKS - ensure_initialization(); /* initialize in sys_alloc if not using locks */ - #endif +#if USE_LOCKS + ensure_initialization(); /* initialize in sys_alloc if not using locks */ +#endif if (!PREACTION(gm)) { - - void * mem; + void* mem; size_t nb; if (bytes <= MAX_SMALL_REQUEST) { - bindex_t idx; binmap_t smallbits; - nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes); + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); idx = small_index(nb); smallbits = gm->smallmap >> idx; - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ + idx += ~smallbits & 1; /* Uses next bin if idx empty */ b = smallbin_at(gm, idx); p = b->fd; assert(chunksize(p) == small_index2size(idx)); @@ -5163,17 +4641,15 @@ void *dlmalloc(size_t bytes) { mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; - } else if (nb > gm->dvsize) { - - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ mchunkptr b, p, r; - size_t rsize; - bindex_t i; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - binmap_t leastbit = least_bit(leftbits); + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); b = smallbin_at(gm, i); p = b->fd; @@ -5184,71 +4660,54 @@ void *dlmalloc(size_t bytes) { if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(gm, p, small_index2size(i)); else { - set_size_and_pinuse_of_inuse_chunk(gm, p, nb); r = chunk_plus_offset(p, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(gm, r, rsize); - } - mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; - } else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { - check_malloced_chunk(gm, mem, nb); goto postaction; - } - } - - } else if (bytes >= MAX_REQUEST) - + } + else if (bytes >= MAX_REQUEST) nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ else { - nb = pad_request(bytes); if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { - check_malloced_chunk(gm, mem, nb); goto postaction; - } - } if (nb <= gm->dvsize) { - - size_t rsize = gm->dvsize - nb; + size_t rsize = gm->dvsize - nb; mchunkptr p = gm->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ mchunkptr r = gm->dv = chunk_plus_offset(p, nb); gm->dvsize = rsize; set_size_and_pinuse_of_free_chunk(r, rsize); set_size_and_pinuse_of_inuse_chunk(gm, p, nb); - - } else { /* exhaust dv */ - + } + else { /* exhaust dv */ size_t dvs = gm->dvsize; gm->dvsize = 0; gm->dv = 0; set_inuse_and_pinuse(gm, p, dvs); - } - mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; - } - else if (nb < gm->topsize) { /* Split top */ - size_t rsize = gm->topsize -= nb; + else if (nb < gm->topsize) { /* Split top */ + size_t rsize = gm->topsize -= nb; mchunkptr p = gm->top; mchunkptr r = gm->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -5257,7 +4716,6 @@ void *dlmalloc(size_t bytes) { check_top_chunk(gm, gm->top); check_malloced_chunk(gm, mem, nb); goto postaction; - } mem = sys_alloc(gm, nb); @@ -5265,17 +4723,14 @@ void *dlmalloc(size_t bytes) { postaction: POSTACTION(gm); return mem; - } return 0; - } /* ---------------------------- free --------------------------- */ -void dlfree(void *mem) { - +void dlfree(void* mem) { /* Consolidate freed chunks with preceeding or succeeding bordering free chunks, if they exist, and then place in a bin. 
Intermixed @@ -5283,216 +4738,164 @@ void dlfree(void *mem) { */ if (mem != 0) { - - mchunkptr p = mem2chunk(mem); - #if FOOTERS + mchunkptr p = mem2chunk(mem); +#if FOOTERS mstate fm = get_mstate_for(p); if (!ok_magic(fm)) { - USAGE_ERROR_ACTION(fm, p); return; - } - - #else /* FOOTERS */ - #define fm gm - #endif /* FOOTERS */ +#else /* FOOTERS */ +#define fm gm +#endif /* FOOTERS */ if (!PREACTION(fm)) { - check_inuse_chunk(fm, p); if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { - - size_t psize = chunksize(p); + size_t psize = chunksize(p); mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { - size_t prevsize = p->prev_foot; if (is_mmapped(p)) { - psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) fm->footprint -= psize; goto postaction; - - } else { - + } + else { mchunkptr prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ if (p != fm->dv) { - unlink_chunk(fm, p, prevsize); - - } else if ((next->head & INUSE_BITS) == INUSE_BITS) { - + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { fm->dvsize = psize; set_free_with_pinuse(p, psize, next); goto postaction; - } - - } else - + } + else goto erroraction; - } - } if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { - - if (!cinuse(next)) { /* consolidate forward */ + if (!cinuse(next)) { /* consolidate forward */ if (next == fm->top) { - size_t tsize = fm->topsize += psize; fm->top = p; p->head = tsize | PINUSE_BIT; if (p == fm->dv) { - fm->dv = 0; fm->dvsize = 0; - } - - if (should_trim(fm, tsize)) sys_trim(fm, 0); + if (should_trim(fm, tsize)) + sys_trim(fm, 0); goto postaction; - - } else if (next == fm->dv) { - + } + else if (next == fm->dv) { size_t dsize = fm->dvsize += psize; fm->dv = p; set_size_and_pinuse_of_free_chunk(p, dsize); goto postaction; - - } else { - + } + else { size_t nsize = chunksize(next); psize += nsize; unlink_chunk(fm, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == fm->dv) { - fm->dvsize = psize; goto postaction; - } - } - - } else - + } + else set_free_with_pinuse(p, psize, next); if (is_small(psize)) { - insert_small_chunk(fm, p, psize); check_free_chunk(fm, p); - - } else { - + } + else { tchunkptr tp = (tchunkptr)p; insert_large_chunk(fm, tp, psize); check_free_chunk(fm, p); - if (--fm->release_checks == 0) release_unused_segments(fm); - + if (--fm->release_checks == 0) + release_unused_segments(fm); } - goto postaction; - } - } - erroraction: USAGE_ERROR_ACTION(fm, p); postaction: POSTACTION(fm); - } - } - - #if !FOOTERS - #undef fm - #endif /* FOOTERS */ - +#if !FOOTERS +#undef fm +#endif /* FOOTERS */ } -void *dlcalloc(size_t n_elements, size_t elem_size) { - - void * mem; +void* dlcalloc(size_t n_elements, size_t elem_size) { + void* mem; size_t req = 0; if (n_elements != 0) { - req = n_elements * elem_size; if (((n_elements | elem_size) & ~(size_t)0xffff) && (req / n_elements != elem_size)) - req = MAX_SIZE_T; /* force downstream failure on overflow */ - + req = MAX_SIZE_T; /* force downstream failure on overflow */ } - mem = dlmalloc(req); if (mem != 0 && calloc_must_clear(mem2chunk(mem))) __builtin_memset(mem, 0, req); return mem; - } -#endif /* !ONLY_MSPACES */ +#endif /* !ONLY_MSPACES */ /* ------------ Internal support for realloc, memalign, etc -------------- */ /* Try to realloc; only in-place unless can_move true */ 
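/* In outline: try_realloc_chunk() below resizes in place where possible and
   returns the resized chunk, or 0 so the caller can fall back to
   allocate-copy-free. Mmapped chunks are handed to mmap_resize() (can_move is
   only forwarded there); otherwise the chunk is kept if already large enough,
   splitting off any remainder of at least MIN_CHUNK_SIZE via dispose_chunk(),
   or it is extended by absorbing the adjacent top chunk, the designated
   victim (dv), or a free successor chunk. */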
static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, int can_move) { - mchunkptr newp = 0; - size_t oldsize = chunksize(p); + size_t oldsize = chunksize(p); mchunkptr next = chunk_plus_offset(p, oldsize); - if (RTCHECK(ok_address(m, p) && ok_inuse(p) && ok_next(p, next) && - ok_pinuse(next))) { - + if (RTCHECK(ok_address(m, p) && ok_inuse(p) && + ok_next(p, next) && ok_pinuse(next))) { if (is_mmapped(p)) { - newp = mmap_resize(m, p, nb, can_move); - - } else if (oldsize >= nb) { /* already big enough */ - + } + else if (oldsize >= nb) { /* already big enough */ size_t rsize = oldsize - nb; - if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ + if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ mchunkptr r = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, r, rsize); dispose_chunk(m, r, rsize); - } - newp = p; - - } else if (next == m->top) { /* extend into top */ - + } + else if (next == m->top) { /* extend into top */ if (oldsize + m->topsize > nb) { - - size_t newsize = oldsize + m->topsize; - size_t newtopsize = newsize - nb; + size_t newsize = oldsize + m->topsize; + size_t newtopsize = newsize - nb; mchunkptr newtop = chunk_plus_offset(p, nb); set_inuse(m, p, nb); - newtop->head = newtopsize | PINUSE_BIT; + newtop->head = newtopsize |PINUSE_BIT; m->top = newtop; m->topsize = newtopsize; newp = p; - } - - } else if (next == m->dv) { /* extend into dv */ - + } + else if (next == m->dv) { /* extend into dv */ size_t dvs = m->dvsize; if (oldsize + dvs >= nb) { - size_t dsize = oldsize + dvs - nb; if (dsize >= MIN_CHUNK_SIZE) { - mchunkptr r = chunk_plus_offset(p, nb); mchunkptr n = chunk_plus_offset(r, dsize); set_inuse(m, p, nb); @@ -5500,87 +4903,64 @@ static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, clear_pinuse(n); m->dvsize = dsize; m->dv = r; - - } else { /* exhaust dv */ - + } + else { /* exhaust dv */ size_t newsize = oldsize + dvs; set_inuse(m, p, newsize); m->dvsize = 0; m->dv = 0; - } - newp = p; - } - - } else if (!cinuse(next)) { /* extend into next free chunk */ - + } + else if (!cinuse(next)) { /* extend into next free chunk */ size_t nextsize = chunksize(next); if (oldsize + nextsize >= nb) { - size_t rsize = oldsize + nextsize - nb; unlink_chunk(m, next, nextsize); if (rsize < MIN_CHUNK_SIZE) { - size_t newsize = oldsize + nextsize; set_inuse(m, p, newsize); - - } else { - + } + else { mchunkptr r = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, r, rsize); dispose_chunk(m, r, rsize); - } - newp = p; - } - } - - } else { - + } + else { USAGE_ERROR_ACTION(m, chunk2mem(p)); - } - return newp; - } -static void *internal_memalign(mstate m, size_t alignment, size_t bytes) { - - void *mem = 0; - if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ +static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { + void* mem = 0; + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ alignment = MIN_CHUNK_SIZE; - if ((alignment & (alignment - SIZE_T_ONE)) != 0) { /* Ensure a power of 2 */ + if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ size_t a = MALLOC_ALIGNMENT << 1; - while (a < alignment) - a <<= 1; + while (a < alignment) a <<= 1; alignment = a; - } - if (bytes >= MAX_REQUEST - alignment) { - - if (m != 0) { /* Test isn't needed but avoids compiler warning */ + if (m != 0) { /* Test isn't needed but avoids compiler warning */ MALLOC_FAILURE_ACTION; - } - - } else { - + } + else { size_t nb = request2size(bytes); size_t 
req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; mem = internal_malloc(m, req); if (mem != 0) { - mchunkptr p = mem2chunk(mem); - if (PREACTION(m)) return 0; - if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ + if (PREACTION(m)) + return 0; + if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ /* Find an aligned spot inside chunk. Since we need to give back leading space in a chunk of at least MIN_CHUNK_SIZE, if @@ -5589,59 +4969,47 @@ static void *internal_memalign(mstate m, size_t alignment, size_t bytes) { We've allocated enough total room so that this is always possible. */ - char * br = (char *)mem2chunk((size_t)( - ((size_t)((char *)mem + alignment - SIZE_T_ONE)) & -alignment)); - char * pos = ((size_t)(br - (char *)(p)) >= MIN_CHUNK_SIZE) - ? br - : br + alignment; + char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment - + SIZE_T_ONE)) & + -alignment)); + char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? + br : br+alignment; mchunkptr newp = (mchunkptr)pos; - size_t leadsize = pos - (char *)(p); - size_t newsize = chunksize(p) - leadsize; + size_t leadsize = pos - (char*)(p); + size_t newsize = chunksize(p) - leadsize; - if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ + if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ newp->prev_foot = p->prev_foot + leadsize; newp->head = newsize; - - } else { /* Otherwise, give back leader, use the rest */ - + } + else { /* Otherwise, give back leader, use the rest */ set_inuse(m, newp, newsize); set_inuse(m, p, leadsize); dispose_chunk(m, p, leadsize); - } - p = newp; - } /* Give back spare room at the end */ if (!is_mmapped(p)) { - size_t size = chunksize(p); if (size > nb + MIN_CHUNK_SIZE) { - - size_t remainder_size = size - nb; + size_t remainder_size = size - nb; mchunkptr remainder = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, remainder, remainder_size); dispose_chunk(m, remainder, remainder_size); - } - } mem = chunk2mem(p); - assert(chunksize(p) >= nb); + assert (chunksize(p) >= nb); assert(((size_t)mem & (alignment - 1)) == 0); check_inuse_chunk(m, p); POSTACTION(m); - } - } - return mem; - } /* @@ -5651,50 +5019,50 @@ static void *internal_memalign(mstate m, size_t alignment, size_t bytes) { bit 0 set if all elements are same size (using sizes[0]) bit 1 set if elements should be zeroed */ -static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts, - void *chunks[]) { - - size_t element_size; /* chunksize of each element, if all same */ - size_t contents_size; /* total size of elements */ - size_t array_size; /* request size of pointer array */ - void * mem; /* malloced aggregate space */ - mchunkptr p; /* corresponding chunk */ - size_t remainder_size; /* remaining bytes while splitting */ - void ** marray; /* either "chunks" or malloced ptr array */ - mchunkptr array_chunk; /* chunk for malloced ptr array */ - flag_t was_enabled; /* to disable mmap */ +static void** ialloc(mstate m, + size_t n_elements, + size_t* sizes, + int opts, + void* chunks[]) { + + size_t element_size; /* chunksize of each element, if all same */ + size_t contents_size; /* total size of elements */ + size_t array_size; /* request size of pointer array */ + void* mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + size_t remainder_size; /* remaining bytes while splitting */ + void** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + flag_t 
was_enabled; /* to disable mmap */ size_t size; size_t i; ensure_initialization(); /* compute array length, if needed */ if (chunks != 0) { - - if (n_elements == 0) return chunks; /* nothing to do */ + if (n_elements == 0) + return chunks; /* nothing to do */ marray = chunks; array_size = 0; - - } else { - + } + else { /* if empty req, must still return chunk representing empty array */ - if (n_elements == 0) return (void **)internal_malloc(m, 0); + if (n_elements == 0) + return (void**)internal_malloc(m, 0); marray = 0; - array_size = request2size(n_elements * (sizeof(void *))); - + array_size = request2size(n_elements * (sizeof(void*))); } /* compute total element size */ - if (opts & 0x1) { /* all-same-size */ + if (opts & 0x1) { /* all-same-size */ element_size = request2size(*sizes); contents_size = n_elements * element_size; - - } else { /* add up all the sizes */ - + } + else { /* add up all the sizes */ element_size = 0; contents_size = 0; for (i = 0; i != n_elements; ++i) contents_size += request2size(sizes[i]); - } size = contents_size + array_size; @@ -5707,8 +5075,10 @@ static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts, was_enabled = use_mmap(m); disable_mmap(m); mem = internal_malloc(m, size - CHUNK_OVERHEAD); - if (was_enabled) enable_mmap(m); - if (mem == 0) return 0; + if (was_enabled) + enable_mmap(m); + if (mem == 0) + return 0; if (PREACTION(m)) return 0; p = mem2chunk(mem); @@ -5716,30 +5086,24 @@ static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts, assert(!is_mmapped(p)); - if (opts & 0x2) { /* optionally clear the elements */ - __builtin_memset((size_t *)mem, 0, - remainder_size - SIZE_T_SIZE - array_size); - + if (opts & 0x2) { /* optionally clear the elements */ + __builtin_memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); } /* If not provided, allocate the pointer array as final part of chunk */ if (marray == 0) { - - size_t array_chunk_size; + size_t array_chunk_size; array_chunk = chunk_plus_offset(p, contents_size); array_chunk_size = remainder_size - contents_size; - marray = (void **)(chunk2mem(array_chunk)); + marray = (void**) (chunk2mem(array_chunk)); set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); remainder_size = contents_size; - } /* split out elements */ - for (i = 0;; ++i) { - + for (i = 0; ; ++i) { marray[i] = chunk2mem(p); - if (i != n_elements - 1) { - + if (i != n_elements-1) { if (element_size != 0) size = element_size; else @@ -5747,42 +5111,31 @@ static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts, remainder_size -= size; set_size_and_pinuse_of_inuse_chunk(m, p, size); p = chunk_plus_offset(p, size); - - } else { /* the final element absorbs any overallocation slop */ - + } + else { /* the final element absorbs any overallocation slop */ set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); break; - } - } #if DEBUG if (marray != chunks) { - /* final element must have exactly exhausted chunk */ if (element_size != 0) { - assert(remainder_size == element_size); - - } else { - + } + else { assert(remainder_size == request2size(sizes[i])); - } - check_inuse_chunk(m, mem2chunk(marray)); - } - for (i = 0; i != n_elements; ++i) check_inuse_chunk(m, mem2chunk(marray[i])); -#endif /* DEBUG */ +#endif /* DEBUG */ POSTACTION(m); return marray; - } /* Try to free all pointers in the given array. 
@@ -5792,431 +5145,316 @@ static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts, chunks before freeing, which will occur often if allocated with ialloc or the array is sorted. */ -static size_t internal_bulk_free(mstate m, void *array[], size_t nelem) { - +static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) { size_t unfreed = 0; if (!PREACTION(m)) { - - void **a; - void **fence = &(array[nelem]); + void** a; + void** fence = &(array[nelem]); for (a = array; a != fence; ++a) { - - void *mem = *a; + void* mem = *a; if (mem != 0) { - mchunkptr p = mem2chunk(mem); - size_t psize = chunksize(p); + size_t psize = chunksize(p); #if FOOTERS if (get_mstate_for(p) != m) { - ++unfreed; continue; - } - #endif check_inuse_chunk(m, p); *a = 0; if (RTCHECK(ok_address(m, p) && ok_inuse(p))) { - - void ** b = a + 1; /* try to merge with next chunk */ + void ** b = a + 1; /* try to merge with next chunk */ mchunkptr next = next_chunk(p); if (b != fence && *b == chunk2mem(next)) { - size_t newsize = chunksize(next) + psize; set_inuse(m, p, newsize); *b = chunk2mem(p); - - } else - + } + else dispose_chunk(m, p, psize); - - } else { - + } + else { CORRUPTION_ERROR_ACTION(m); break; - } - } - } - - if (should_trim(m, m->topsize)) sys_trim(m, 0); + if (should_trim(m, m->topsize)) + sys_trim(m, 0); POSTACTION(m); - } - return unfreed; - } /* Traversal */ #if MALLOC_INSPECT_ALL static void internal_inspect_all(mstate m, - void (*handler)(void *start, void *end, - size_t used_bytes, - void * callback_arg), - void *arg) { - + void(*handler)(void *start, + void *end, + size_t used_bytes, + void* callback_arg), + void* arg) { if (is_initialized(m)) { - - mchunkptr top = m->top; + mchunkptr top = m->top; msegmentptr s; for (s = &m->seg; s != 0; s = s->next) { - mchunkptr q = align_as_chunk(s->base); while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) { - mchunkptr next = next_chunk(q); - size_t sz = chunksize(q); - size_t used; - void * start; + size_t sz = chunksize(q); + size_t used; + void* start; if (is_inuse(q)) { - - used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ + used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ start = chunk2mem(q); - - } else { - + } + else { used = 0; - if (is_small(sz)) { /* offset by possible bookkeeping */ - start = (void *)((char *)q + sizeof(struct malloc_chunk)); - - } else { - - start = (void *)((char *)q + sizeof(struct malloc_tree_chunk)); - + if (is_small(sz)) { /* offset by possible bookkeeping */ + start = (void*)((char*)q + sizeof(struct malloc_chunk)); + } + else { + start = (void*)((char*)q + sizeof(struct malloc_tree_chunk)); } - } - - if (start < (void *)next) /* skip if all space is bookkeeping */ + if (start < (void*)next) /* skip if all space is bookkeeping */ handler(start, next, used, arg); - if (q == top) break; + if (q == top) + break; q = next; - } - } - } - } - -#endif /* MALLOC_INSPECT_ALL */ +#endif /* MALLOC_INSPECT_ALL */ /* ------------------ Exported realloc, memalign, etc -------------------- */ #if !ONLY_MSPACES -void *dlrealloc(void *oldmem, size_t bytes) { - - void *mem = 0; +void* dlrealloc(void* oldmem, size_t bytes) { + void* mem = 0; if (oldmem == 0) { - mem = dlmalloc(bytes); - - } else if (bytes >= MAX_REQUEST) { - + } + else if (bytes >= MAX_REQUEST) { MALLOC_FAILURE_ACTION; - } - - #ifdef REALLOC_ZERO_BYTES_FREES +#ifdef REALLOC_ZERO_BYTES_FREES else if (bytes == 0) { - dlfree(oldmem); - } - - #endif /* REALLOC_ZERO_BYTES_FREES */ +#endif /* REALLOC_ZERO_BYTES_FREES */ else { - - size_t nb = 
request2size(bytes); + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); - #if !FOOTERS +#if ! FOOTERS mstate m = gm; - #else /* FOOTERS */ +#else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { - USAGE_ERROR_ACTION(m, oldmem); return 0; - } - - #endif /* FOOTERS */ +#endif /* FOOTERS */ if (!PREACTION(m)) { - mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); POSTACTION(m); if (newp != 0) { - check_inuse_chunk(m, newp); mem = chunk2mem(newp); - - } else { - + } + else { mem = internal_malloc(m, bytes); if (mem != 0) { - size_t oc = chunksize(oldp) - overhead_for(oldp); - __builtin_memcpy(mem, oldmem, (oc < bytes) ? oc : bytes); + __builtin_memcpy(mem, oldmem, (oc < bytes)? oc : bytes); internal_free(m, oldmem); - } - } - } - } - return mem; - } -void *dlrealloc_in_place(void *oldmem, size_t bytes) { - - void *mem = 0; +void* dlrealloc_in_place(void* oldmem, size_t bytes) { + void* mem = 0; if (oldmem != 0) { - if (bytes >= MAX_REQUEST) { - MALLOC_FAILURE_ACTION; - - } else { - - size_t nb = request2size(bytes); + } + else { + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); - #if !FOOTERS +#if ! FOOTERS mstate m = gm; - #else /* FOOTERS */ +#else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { - USAGE_ERROR_ACTION(m, oldmem); return 0; - } - - #endif /* FOOTERS */ +#endif /* FOOTERS */ if (!PREACTION(m)) { - mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); POSTACTION(m); if (newp == oldp) { - check_inuse_chunk(m, newp); mem = oldmem; - } - } - } - } - return mem; - } -void *dlmemalign(size_t alignment, size_t bytes) { - - if (alignment <= MALLOC_ALIGNMENT) { return dlmalloc(bytes); } +void* dlmemalign(size_t alignment, size_t bytes) { + if (alignment <= MALLOC_ALIGNMENT) { + return dlmalloc(bytes); + } return internal_memalign(gm, alignment, bytes); - } -int dlposix_memalign(void **pp, size_t alignment, size_t bytes) { - - void *mem = 0; +int dlposix_memalign(void** pp, size_t alignment, size_t bytes) { + void* mem = 0; if (alignment == MALLOC_ALIGNMENT) mem = dlmalloc(bytes); else { - - size_t d = alignment / sizeof(void *); - size_t r = alignment % sizeof(void *); - if (r != 0 || d == 0 || (d & (d - SIZE_T_ONE)) != 0) + size_t d = alignment / sizeof(void*); + size_t r = alignment % sizeof(void*); + if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0) return EINVAL; else if (bytes <= MAX_REQUEST - alignment) { - - if (alignment < MIN_CHUNK_SIZE) alignment = MIN_CHUNK_SIZE; + if (alignment < MIN_CHUNK_SIZE) + alignment = MIN_CHUNK_SIZE; mem = internal_memalign(gm, alignment, bytes); - } - } - if (mem == 0) return ENOMEM; else { - *pp = mem; return 0; - } - } -void *dlvalloc(size_t bytes) { - +void* dlvalloc(size_t bytes) { size_t pagesz; ensure_initialization(); pagesz = mparams.page_size; return dlmemalign(pagesz, bytes); - } -void *dlpvalloc(size_t bytes) { - +void* dlpvalloc(size_t bytes) { size_t pagesz; ensure_initialization(); pagesz = mparams.page_size; - return dlmemalign(pagesz, - (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); - + return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); } -void **dlindependent_calloc(size_t n_elements, size_t elem_size, - void *chunks[]) { - - size_t sz = elem_size; /* serves as 1-element array */ +void** dlindependent_calloc(size_t n_elements, size_t elem_size, + void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ return ialloc(gm, n_elements, &sz, 3, chunks); - } -void **dlindependent_comalloc(size_t 
n_elements, size_t sizes[], - void *chunks[]) { - +void** dlindependent_comalloc(size_t n_elements, size_t sizes[], + void* chunks[]) { return ialloc(gm, n_elements, sizes, 0, chunks); - } -size_t dlbulk_free(void *array[], size_t nelem) { - +size_t dlbulk_free(void* array[], size_t nelem) { return internal_bulk_free(gm, array, nelem); - } - #if MALLOC_INSPECT_ALL -void dlmalloc_inspect_all(void (*handler)(void *start, void *end, - size_t used_bytes, - void * callback_arg), - void *arg) { - +#if MALLOC_INSPECT_ALL +void dlmalloc_inspect_all(void(*handler)(void *start, + void *end, + size_t used_bytes, + void* callback_arg), + void* arg) { ensure_initialization(); if (!PREACTION(gm)) { - internal_inspect_all(gm, handler, arg); POSTACTION(gm); - } - } - - #endif /* MALLOC_INSPECT_ALL */ +#endif /* MALLOC_INSPECT_ALL */ int dlmalloc_trim(size_t pad) { - int result = 0; ensure_initialization(); if (!PREACTION(gm)) { - result = sys_trim(gm, pad); POSTACTION(gm); - } - return result; - } size_t dlmalloc_footprint(void) { - return gm->footprint; - } size_t dlmalloc_max_footprint(void) { - return gm->max_footprint; - } size_t dlmalloc_footprint_limit(void) { - size_t maf = gm->footprint_limit; return maf == 0 ? MAX_SIZE_T : maf; - } size_t dlmalloc_set_footprint_limit(size_t bytes) { - - size_t result; /* invert sense of 0 */ - if (bytes == 0) result = granularity_align(1); /* Use minimal size */ + size_t result; /* invert sense of 0 */ + if (bytes == 0) + result = granularity_align(1); /* Use minimal size */ if (bytes == MAX_SIZE_T) - result = 0; /* disable */ + result = 0; /* disable */ else result = granularity_align(bytes); return gm->footprint_limit = result; - } - #if !NO_MALLINFO +#if !NO_MALLINFO struct mallinfo dlmallinfo(void) { - return internal_mallinfo(gm); - } +#endif /* NO_MALLINFO */ - #endif /* NO_MALLINFO */ - - #if !NO_MALLOC_STATS +#if !NO_MALLOC_STATS void dlmalloc_stats() { - internal_malloc_stats(gm); - } - - #endif /* NO_MALLOC_STATS */ +#endif /* NO_MALLOC_STATS */ int dlmallopt(int param_number, int value) { - return change_mparam(param_number, value); - } -size_t dlmalloc_usable_size(void *mem) { - +size_t dlmalloc_usable_size(void* mem) { if (mem != 0) { - mchunkptr p = mem2chunk(mem); - if (is_inuse(p)) return chunksize(p) - overhead_for(p); - + if (is_inuse(p)) + return chunksize(p) - overhead_for(p); } - return 0; - } -#endif /* !ONLY_MSPACES */ +#endif /* !ONLY_MSPACES */ /* ----------------------------- user mspaces ---------------------------- */ #if MSPACES -static mstate init_user_mstate(char *tbase, size_t tsize) { - - size_t msize = pad_request(sizeof(struct malloc_state)); +static mstate init_user_mstate(char* tbase, size_t tsize) { + size_t msize = pad_request(sizeof(struct malloc_state)); mchunkptr mn; mchunkptr msp = align_as_chunk(tbase); - mstate m = (mstate)(chunk2mem(msp)); + mstate m = (mstate)(chunk2mem(msp)); __builtin_memset(m, 0, msize); (void)INITIAL_LOCK(&m->mutex); - msp->head = (msize | INUSE_BITS); + msp->head = (msize|INUSE_BITS); m->seg.base = m->least_addr = tbase; m->seg.size = m->footprint = m->max_footprint = tsize; m->magic = mparams.magic; @@ -6227,111 +5465,82 @@ static mstate init_user_mstate(char *tbase, size_t tsize) { disable_contiguous(m); init_bins(m); mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); check_top_chunk(m, m->top); return m; - } mspace create_mspace(size_t capacity, int locked) { - 
mstate m = 0; size_t msize; ensure_initialization(); msize = pad_request(sizeof(struct malloc_state)); - if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) { - - size_t rs = ((capacity == 0) ? mparams.granularity - : (capacity + TOP_FOOT_SIZE + msize)); + if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + size_t rs = ((capacity == 0)? mparams.granularity : + (capacity + TOP_FOOT_SIZE + msize)); size_t tsize = granularity_align(rs); - char * tbase = (char *)(CALL_MMAP(tsize)); + char* tbase = (char*)(CALL_MMAP(tsize)); if (tbase != CMFAIL) { - m = init_user_mstate(tbase, tsize); m->seg.sflags = USE_MMAP_BIT; set_lock(m, locked); - } - } - return (mspace)m; - } -mspace create_mspace_with_base(void *base, size_t capacity, int locked) { - +mspace create_mspace_with_base(void* base, size_t capacity, int locked) { mstate m = 0; size_t msize; ensure_initialization(); msize = pad_request(sizeof(struct malloc_state)); if (capacity > msize + TOP_FOOT_SIZE && - capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) { - - m = init_user_mstate((char *)base, capacity); + capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + m = init_user_mstate((char*)base, capacity); m->seg.sflags = EXTERN_BIT; set_lock(m, locked); - } - return (mspace)m; - } int mspace_track_large_chunks(mspace msp, int enable) { - - int ret = 0; + int ret = 0; mstate ms = (mstate)msp; if (!PREACTION(ms)) { - - if (!use_mmap(ms)) { ret = 1; } + if (!use_mmap(ms)) { + ret = 1; + } if (!enable) { - enable_mmap(ms); - } else { - disable_mmap(ms); - } - POSTACTION(ms); - } - return ret; - } size_t destroy_mspace(mspace msp) { - size_t freed = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { - msegmentptr sp = &ms->seg; - (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ + (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ while (sp != 0) { - - char * base = sp->base; + char* base = sp->base; size_t size = sp->size; flag_t flag = sp->sflags; - (void)base; /* placate people compiling -Wunused-variable */ + (void)base; /* placate people compiling -Wunused-variable */ sp = sp->next; if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) && CALL_MUNMAP(base, size) == 0) freed += size; - } - - } else { - - USAGE_ERROR_ACTION(ms, ms); - } - + else { + USAGE_ERROR_ACTION(ms,ms); + } return freed; - } /* @@ -6339,31 +5548,25 @@ size_t destroy_mspace(mspace msp) { versions. This is not so nice but better than the alternatives. */ -void *mspace_malloc(mspace msp, size_t bytes) { - +void* mspace_malloc(mspace msp, size_t bytes) { mstate ms = (mstate)msp; if (!ok_magic(ms)) { - - USAGE_ERROR_ACTION(ms, ms); + USAGE_ERROR_ACTION(ms,ms); return 0; - } - if (!PREACTION(ms)) { - - void * mem; + void* mem; size_t nb; if (bytes <= MAX_SMALL_REQUEST) { - bindex_t idx; binmap_t smallbits; - nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes); + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); idx = small_index(nb); smallbits = ms->smallmap >> idx; - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ + idx += ~smallbits & 1; /* Uses next bin if idx empty */ b = smallbin_at(ms, idx); p = b->fd; assert(chunksize(p) == small_index2size(idx)); @@ -6372,17 +5575,15 @@ void *mspace_malloc(mspace msp, size_t bytes) { mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; - } else if (nb > ms->dvsize) { - - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ mchunkptr b, p, r; - size_t rsize; - bindex_t i; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - binmap_t leastbit = least_bit(leftbits); + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); b = smallbin_at(ms, i); p = b->fd; @@ -6393,71 +5594,54 @@ void *mspace_malloc(mspace msp, size_t bytes) { if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(ms, p, small_index2size(i)); else { - set_size_and_pinuse_of_inuse_chunk(ms, p, nb); r = chunk_plus_offset(p, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(ms, r, rsize); - } - mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; - } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { - check_malloced_chunk(ms, mem, nb); goto postaction; - } - } - - } else if (bytes >= MAX_REQUEST) - + } + else if (bytes >= MAX_REQUEST) nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ else { - nb = pad_request(bytes); if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { - check_malloced_chunk(ms, mem, nb); goto postaction; - } - } if (nb <= ms->dvsize) { - - size_t rsize = ms->dvsize - nb; + size_t rsize = ms->dvsize - nb; mchunkptr p = ms->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ mchunkptr r = ms->dv = chunk_plus_offset(p, nb); ms->dvsize = rsize; set_size_and_pinuse_of_free_chunk(r, rsize); set_size_and_pinuse_of_inuse_chunk(ms, p, nb); - - } else { /* exhaust dv */ - + } + else { /* exhaust dv */ size_t dvs = ms->dvsize; ms->dvsize = 0; ms->dv = 0; set_inuse_and_pinuse(ms, p, dvs); - } - mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; - } - else if (nb < ms->topsize) { /* Split top */ - size_t rsize = ms->topsize -= nb; + else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; mchunkptr p = ms->top; mchunkptr r = ms->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -6466,7 +5650,6 @@ void *mspace_malloc(mspace msp, size_t bytes) { check_top_chunk(ms, ms->top); check_malloced_chunk(ms, mem, nb); goto postaction; - } mem = sys_alloc(ms, nb); @@ -6474,519 +5657,372 @@ void *mspace_malloc(mspace msp, size_t bytes) { postaction: POSTACTION(ms); return mem; - } return 0; - } -void mspace_free(mspace msp, void *mem) { - +void mspace_free(mspace msp, void* mem) { if (mem != 0) { - - mchunkptr p = mem2chunk(mem); - #if FOOTERS + mchunkptr p = mem2chunk(mem); +#if FOOTERS mstate fm = get_mstate_for(p); - (void)msp; /* placate people compiling -Wunused */ - #else /* FOOTERS */ + (void)msp; /* placate people compiling -Wunused */ +#else /* FOOTERS */ mstate fm = (mstate)msp; - #endif /* FOOTERS */ +#endif /* FOOTERS */ if (!ok_magic(fm)) { - USAGE_ERROR_ACTION(fm, p); return; - } - if (!PREACTION(fm)) { - check_inuse_chunk(fm, p); if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { - - size_t psize = 
chunksize(p); + size_t psize = chunksize(p); mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { - size_t prevsize = p->prev_foot; if (is_mmapped(p)) { - psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) fm->footprint -= psize; goto postaction; - - } else { - + } + else { mchunkptr prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ if (p != fm->dv) { - unlink_chunk(fm, p, prevsize); - - } else if ((next->head & INUSE_BITS) == INUSE_BITS) { - + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { fm->dvsize = psize; set_free_with_pinuse(p, psize, next); goto postaction; - } - - } else - + } + else goto erroraction; - } - } if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { - - if (!cinuse(next)) { /* consolidate forward */ + if (!cinuse(next)) { /* consolidate forward */ if (next == fm->top) { - size_t tsize = fm->topsize += psize; fm->top = p; p->head = tsize | PINUSE_BIT; if (p == fm->dv) { - fm->dv = 0; fm->dvsize = 0; - } - - if (should_trim(fm, tsize)) sys_trim(fm, 0); + if (should_trim(fm, tsize)) + sys_trim(fm, 0); goto postaction; - - } else if (next == fm->dv) { - + } + else if (next == fm->dv) { size_t dsize = fm->dvsize += psize; fm->dv = p; set_size_and_pinuse_of_free_chunk(p, dsize); goto postaction; - - } else { - + } + else { size_t nsize = chunksize(next); psize += nsize; unlink_chunk(fm, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == fm->dv) { - fm->dvsize = psize; goto postaction; - } - } - - } else - + } + else set_free_with_pinuse(p, psize, next); if (is_small(psize)) { - insert_small_chunk(fm, p, psize); check_free_chunk(fm, p); - - } else { - + } + else { tchunkptr tp = (tchunkptr)p; insert_large_chunk(fm, tp, psize); check_free_chunk(fm, p); - if (--fm->release_checks == 0) release_unused_segments(fm); - + if (--fm->release_checks == 0) + release_unused_segments(fm); } - goto postaction; - } - } - erroraction: USAGE_ERROR_ACTION(fm, p); postaction: POSTACTION(fm); - } - } - } -void *mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { - - void * mem; +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { + void* mem; size_t req = 0; mstate ms = (mstate)msp; if (!ok_magic(ms)) { - - USAGE_ERROR_ACTION(ms, ms); + USAGE_ERROR_ACTION(ms,ms); return 0; - } - if (n_elements != 0) { - req = n_elements * elem_size; if (((n_elements | elem_size) & ~(size_t)0xffff) && (req / n_elements != elem_size)) - req = MAX_SIZE_T; /* force downstream failure on overflow */ - + req = MAX_SIZE_T; /* force downstream failure on overflow */ } - mem = internal_malloc(ms, req); if (mem != 0 && calloc_must_clear(mem2chunk(mem))) __builtin_memset(mem, 0, req); return mem; - } -void *mspace_realloc(mspace msp, void *oldmem, size_t bytes) { - - void *mem = 0; +void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { + void* mem = 0; if (oldmem == 0) { - mem = mspace_malloc(msp, bytes); - - } else if (bytes >= MAX_REQUEST) { - + } + else if (bytes >= MAX_REQUEST) { MALLOC_FAILURE_ACTION; - } - - #ifdef REALLOC_ZERO_BYTES_FREES +#ifdef REALLOC_ZERO_BYTES_FREES else if (bytes == 0) { - mspace_free(msp, oldmem); - } - - #endif /* REALLOC_ZERO_BYTES_FREES */ +#endif /* REALLOC_ZERO_BYTES_FREES */ else { - - size_t nb = request2size(bytes); + size_t nb = request2size(bytes); mchunkptr oldp = 
mem2chunk(oldmem); - #if !FOOTERS +#if ! FOOTERS mstate m = (mstate)msp; - #else /* FOOTERS */ +#else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { - USAGE_ERROR_ACTION(m, oldmem); return 0; - } - - #endif /* FOOTERS */ +#endif /* FOOTERS */ if (!PREACTION(m)) { - mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); POSTACTION(m); if (newp != 0) { - check_inuse_chunk(m, newp); mem = chunk2mem(newp); - - } else { - + } + else { mem = mspace_malloc(m, bytes); if (mem != 0) { - size_t oc = chunksize(oldp) - overhead_for(oldp); - __builtin_memcpy(mem, oldmem, (oc < bytes) ? oc : bytes); + __builtin_memcpy(mem, oldmem, (oc < bytes)? oc : bytes); mspace_free(m, oldmem); - } - } - } - } - return mem; - } -void *mspace_realloc_in_place(mspace msp, void *oldmem, size_t bytes) { - - void *mem = 0; +void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) { + void* mem = 0; if (oldmem != 0) { - if (bytes >= MAX_REQUEST) { - MALLOC_FAILURE_ACTION; - - } else { - - size_t nb = request2size(bytes); + } + else { + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); - #if !FOOTERS +#if ! FOOTERS mstate m = (mstate)msp; - #else /* FOOTERS */ +#else /* FOOTERS */ mstate m = get_mstate_for(oldp); - (void)msp; /* placate people compiling -Wunused */ + (void)msp; /* placate people compiling -Wunused */ if (!ok_magic(m)) { - USAGE_ERROR_ACTION(m, oldmem); return 0; - } - - #endif /* FOOTERS */ +#endif /* FOOTERS */ if (!PREACTION(m)) { - mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); POSTACTION(m); if (newp == oldp) { - check_inuse_chunk(m, newp); mem = oldmem; - } - } - } - } - return mem; - } -void *mspace_memalign(mspace msp, size_t alignment, size_t bytes) { - +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { mstate ms = (mstate)msp; if (!ok_magic(ms)) { - - USAGE_ERROR_ACTION(ms, ms); + USAGE_ERROR_ACTION(ms,ms); return 0; - } - - if (alignment <= MALLOC_ALIGNMENT) return mspace_malloc(msp, bytes); + if (alignment <= MALLOC_ALIGNMENT) + return mspace_malloc(msp, bytes); return internal_memalign(ms, alignment, bytes); - } -void **mspace_independent_calloc(mspace msp, size_t n_elements, - size_t elem_size, void *chunks[]) { - - size_t sz = elem_size; /* serves as 1-element array */ +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ mstate ms = (mstate)msp; if (!ok_magic(ms)) { - - USAGE_ERROR_ACTION(ms, ms); + USAGE_ERROR_ACTION(ms,ms); return 0; - } - return ialloc(ms, n_elements, &sz, 3, chunks); - } -void **mspace_independent_comalloc(mspace msp, size_t n_elements, - size_t sizes[], void *chunks[]) { - +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]) { mstate ms = (mstate)msp; if (!ok_magic(ms)) { - - USAGE_ERROR_ACTION(ms, ms); + USAGE_ERROR_ACTION(ms,ms); return 0; - } - return ialloc(ms, n_elements, sizes, 0, chunks); - } -size_t mspace_bulk_free(mspace msp, void *array[], size_t nelem) { - +size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) { return internal_bulk_free((mstate)msp, array, nelem); - } - #if MALLOC_INSPECT_ALL +#if MALLOC_INSPECT_ALL void mspace_inspect_all(mspace msp, - void (*handler)(void *start, void *end, - size_t used_bytes, void *callback_arg), - void *arg) { - + void(*handler)(void *start, + void *end, + size_t used_bytes, + void* callback_arg), + void* arg) { mstate ms = (mstate)msp; if (ok_magic(ms)) { - if (!PREACTION(ms)) { - 
internal_inspect_all(ms, handler, arg); POSTACTION(ms); - } - - } else { - - USAGE_ERROR_ACTION(ms, ms); - } - + else { + USAGE_ERROR_ACTION(ms,ms); + } } - - #endif /* MALLOC_INSPECT_ALL */ +#endif /* MALLOC_INSPECT_ALL */ int mspace_trim(mspace msp, size_t pad) { - - int result = 0; + int result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { - if (!PREACTION(ms)) { - result = sys_trim(ms, pad); POSTACTION(ms); - } - - } else { - - USAGE_ERROR_ACTION(ms, ms); - } - + else { + USAGE_ERROR_ACTION(ms,ms); + } return result; - } - #if !NO_MALLOC_STATS +#if !NO_MALLOC_STATS void mspace_malloc_stats(mspace msp) { - mstate ms = (mstate)msp; if (ok_magic(ms)) { - internal_malloc_stats(ms); - - } else { - - USAGE_ERROR_ACTION(ms, ms); - } - + else { + USAGE_ERROR_ACTION(ms,ms); + } } - - #endif /* NO_MALLOC_STATS */ +#endif /* NO_MALLOC_STATS */ size_t mspace_footprint(mspace msp) { - size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { - result = ms->footprint; - - } else { - - USAGE_ERROR_ACTION(ms, ms); - } - + else { + USAGE_ERROR_ACTION(ms,ms); + } return result; - } size_t mspace_max_footprint(mspace msp) { - size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { - result = ms->max_footprint; - - } else { - - USAGE_ERROR_ACTION(ms, ms); - } - + else { + USAGE_ERROR_ACTION(ms,ms); + } return result; - } size_t mspace_footprint_limit(mspace msp) { - size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { - size_t maf = ms->footprint_limit; result = (maf == 0) ? MAX_SIZE_T : maf; - - } else { - - USAGE_ERROR_ACTION(ms, ms); - } - + else { + USAGE_ERROR_ACTION(ms,ms); + } return result; - } size_t mspace_set_footprint_limit(mspace msp, size_t bytes) { - size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { - - if (bytes == 0) result = granularity_align(1); /* Use minimal size */ + if (bytes == 0) + result = granularity_align(1); /* Use minimal size */ if (bytes == MAX_SIZE_T) - result = 0; /* disable */ + result = 0; /* disable */ else result = granularity_align(bytes); ms->footprint_limit = result; - - } else { - - USAGE_ERROR_ACTION(ms, ms); - } - + else { + USAGE_ERROR_ACTION(ms,ms); + } return result; - } - #if !NO_MALLINFO +#if !NO_MALLINFO struct mallinfo mspace_mallinfo(mspace msp) { - mstate ms = (mstate)msp; - if (!ok_magic(ms)) { USAGE_ERROR_ACTION(ms, ms); } + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + } return internal_mallinfo(ms); - } +#endif /* NO_MALLINFO */ - #endif /* NO_MALLINFO */ - -size_t mspace_usable_size(const void *mem) { - +size_t mspace_usable_size(const void* mem) { if (mem != 0) { - mchunkptr p = mem2chunk(mem); - if (is_inuse(p)) return chunksize(p) - overhead_for(p); - + if (is_inuse(p)) + return chunksize(p) - overhead_for(p); } - return 0; - } int mspace_mallopt(int param_number, int value) { - return change_mparam(param_number, value); - } -#endif /* MSPACES */ +#endif /* MSPACES */ + /* -------------------- Alternative MORECORE functions ------------------- */ @@ -7031,48 +6067,35 @@ int mspace_mallopt(int param_number, int value) { void *osMoreCore(int size) { - void *ptr = 0; static void *sbrk_top = 0; if (size > 0) { - if (size < MINIMUM_MORECORE_SIZE) size = MINIMUM_MORECORE_SIZE; if (CurrentExecutionLevel() == kTaskLevel) ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); if (ptr == 0) { - return (void *) MFAIL; - } - // save ptrs so they can be freed during cleanup our_os_pools[next_os_pool] = ptr; next_os_pool++; ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); sbrk_top = (char *) ptr + 
size; return ptr; - } - else if (size < 0) { - // we don't currently support shrink behavior return (void *) MFAIL; - } - else { - return sbrk_top; - } - } // cleanup any allocated memory pools @@ -7080,22 +6103,19 @@ int mspace_mallopt(int param_number, int value) { void osCleanupMem(void) { - void **ptr; for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) if (*ptr) { - PoolDeallocate(*ptr); *ptr = 0; - } - } */ + /* ----------------------------------------------------------------------- History: v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea @@ -7315,3 +6335,4 @@ History: */ +#endif // __GLIBC__ diff --git a/qemu_mode/libqasan/malloc.c b/qemu_mode/libqasan/malloc.c index f8237826..54c1096a 100644 --- a/qemu_mode/libqasan/malloc.c +++ b/qemu_mode/libqasan/malloc.c @@ -24,6 +24,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *******************************************************************************/ #include "libqasan.h" +#include #include #include #include @@ -50,9 +51,9 @@ typedef struct { struct chunk_begin { size_t requested_size; - void * aligned_orig; // NULL if not aligned - struct chunk_begin *next; - struct chunk_begin *prev; + void* aligned_orig; // NULL if not aligned + struct chunk_begin* next; + struct chunk_begin* prev; char redzone[REDZONE_SIZE]; }; @@ -65,30 +66,47 @@ struct chunk_struct { }; +#ifdef __GLIBC__ + +void* (*__lq_libc_malloc)(size_t); +void (*__lq_libc_free)(void*); +#define backend_malloc __lq_libc_malloc +#define backend_free __lq_libc_free + +#define TMP_ZONE_SIZE 4096 +static int __tmp_alloc_zone_idx; +static unsigned char __tmp_alloc_zone[TMP_ZONE_SIZE]; + +#else + // From dlmalloc.c -void *dlmalloc(size_t); -void dlfree(void *); +void* dlmalloc(size_t); +void dlfree(void*); +#define backend_malloc dlmalloc +#define backend_free dlfree + +#endif int __libqasan_malloc_initialized; -static struct chunk_begin *quarantine_top; -static struct chunk_begin *quarantine_end; +static struct chunk_begin* quarantine_top; +static struct chunk_begin* quarantine_end; static size_t quarantine_bytes; #ifdef __BIONIC__ -static pthread_mutex_t quarantine_lock; - #define LOCK_TRY pthread_mutex_trylock - #define LOCK_INIT pthread_mutex_init - #define LOCK_UNLOCK pthread_mutex_unlock +static pthread_mutex_t quarantine_lock; +#define LOCK_TRY pthread_mutex_trylock +#define LOCK_INIT pthread_mutex_init +#define LOCK_UNLOCK pthread_mutex_unlock #else -static pthread_spinlock_t quarantine_lock; - #define LOCK_TRY pthread_spin_trylock - #define LOCK_INIT pthread_spin_init - #define LOCK_UNLOCK pthread_spin_unlock +static pthread_spinlock_t quarantine_lock; +#define LOCK_TRY pthread_spin_trylock +#define LOCK_INIT pthread_spin_init +#define LOCK_UNLOCK pthread_spin_unlock #endif // need qasan disabled -static int quanratine_push(struct chunk_begin *ck) { +static int quanratine_push(struct chunk_begin* ck) { if (ck->requested_size >= QUARANTINE_MAX_BYTES) return 0; @@ -96,15 +114,15 @@ static int quanratine_push(struct chunk_begin *ck) { while (ck->requested_size + quarantine_bytes >= QUARANTINE_MAX_BYTES) { - struct chunk_begin *tmp = quarantine_end; + struct chunk_begin* tmp = quarantine_end; quarantine_end = tmp->prev; quarantine_bytes -= tmp->requested_size; if (tmp->aligned_orig) - dlfree(tmp->aligned_orig); + backend_free(tmp->aligned_orig); else - dlfree(tmp); + backend_free(tmp); } @@ -122,6 +140,11 @@ void __libqasan_init_malloc(void) { if (__libqasan_malloc_initialized) return; +#ifdef __GLIBC__ + __lq_libc_malloc = dlsym(RTLD_NEXT, 
"malloc"); + __lq_libc_free = dlsym(RTLD_NEXT, "free"); +#endif + LOCK_INIT(&quarantine_lock, PTHREAD_PROCESS_PRIVATE); __libqasan_malloc_initialized = 1; @@ -131,24 +154,38 @@ void __libqasan_init_malloc(void) { } -size_t __libqasan_malloc_usable_size(void *ptr) { +size_t __libqasan_malloc_usable_size(void* ptr) { - char *p = ptr; + char* p = ptr; p -= sizeof(struct chunk_begin); - return ((struct chunk_begin *)p)->requested_size; + return ((struct chunk_begin*)p)->requested_size; } -void *__libqasan_malloc(size_t size) { +void* __libqasan_malloc(size_t size) { - if (!__libqasan_malloc_initialized) { __libqasan_init_malloc(); } + if (!__libqasan_malloc_initialized) { + + __libqasan_init_malloc(); - if (!__libqasan_malloc_initialized) __libqasan_init_malloc(); +#ifdef __GLIBC__ + void* r = &__tmp_alloc_zone[__tmp_alloc_zone_idx]; + + if (size & (ALLOC_ALIGN_SIZE - 1)) + __tmp_alloc_zone_idx += + (size & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE; + else + __tmp_alloc_zone_idx += size; + + return r; +#endif + + } int state = QASAN_SWAP(QASAN_DISABLED); // disable qasan for this thread - struct chunk_begin *p = dlmalloc(sizeof(struct chunk_struct) + size); + struct chunk_begin* p = backend_malloc(sizeof(struct chunk_struct) + size); QASAN_SWAP(state); @@ -160,14 +197,14 @@ void *__libqasan_malloc(size_t size) { p->aligned_orig = NULL; p->next = p->prev = NULL; - QASAN_ALLOC(&p[1], (char *)&p[1] + size); + QASAN_ALLOC(&p[1], (char*)&p[1] + size); QASAN_POISON(p->redzone, REDZONE_SIZE, ASAN_HEAP_LEFT_RZ); if (size & (ALLOC_ALIGN_SIZE - 1)) - QASAN_POISON((char *)&p[1] + size, + QASAN_POISON((char*)&p[1] + size, (size & ~(ALLOC_ALIGN_SIZE - 1)) + 8 - size + REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ); else - QASAN_POISON((char *)&p[1] + size, REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ); + QASAN_POISON((char*)&p[1] + size, REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ); __builtin_memset(&p[1], 0xff, size); @@ -175,11 +212,17 @@ void *__libqasan_malloc(size_t size) { } -void __libqasan_free(void *ptr) { +void __libqasan_free(void* ptr) { if (!ptr) return; + +#ifdef __GLIBC__ + if (ptr >= (void*)__tmp_alloc_zone && + ptr < ((void*)__tmp_alloc_zone + TMP_ZONE_SIZE)) + return; +#endif - struct chunk_begin *p = ptr; + struct chunk_begin* p = ptr; p -= 1; size_t n = p->requested_size; @@ -190,9 +233,9 @@ void __libqasan_free(void *ptr) { if (!quanratine_push(p)) { if (p->aligned_orig) - dlfree(p->aligned_orig); + backend_free(p->aligned_orig); else - dlfree(p); + backend_free(p); } @@ -206,11 +249,21 @@ void __libqasan_free(void *ptr) { } -void *__libqasan_calloc(size_t nmemb, size_t size) { +void* __libqasan_calloc(size_t nmemb, size_t size) { size *= nmemb; - char *p = __libqasan_malloc(size); +#ifdef __GLIBC__ + if (!__libqasan_malloc_initialized) { + + void* r = &__tmp_alloc_zone[__tmp_alloc_zone_idx]; + __tmp_alloc_zone_idx += size; + return r; + + } +#endif + + char* p = __libqasan_malloc(size); if (!p) return NULL; __builtin_memset(p, 0, size); @@ -219,14 +272,14 @@ void *__libqasan_calloc(size_t nmemb, size_t size) { } -void *__libqasan_realloc(void *ptr, size_t size) { +void* __libqasan_realloc(void* ptr, size_t size) { - char *p = __libqasan_malloc(size); + char* p = __libqasan_malloc(size); if (!p) return NULL; if (!ptr) return p; - size_t n = ((struct chunk_begin *)ptr)[-1].requested_size; + size_t n = ((struct chunk_begin*)ptr)[-1].requested_size; if (size < n) n = size; __builtin_memcpy(p, ptr, n); @@ -236,9 +289,9 @@ void *__libqasan_realloc(void *ptr, size_t size) { } -int __libqasan_posix_memalign(void **ptr, size_t 
align, size_t len) { +int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) { - if ((align % 2) || (align % sizeof(void *))) return EINVAL; + if ((align % 2) || (align % sizeof(void*))) return EINVAL; if (len == 0) { *ptr = NULL; @@ -252,7 +305,7 @@ int __libqasan_posix_memalign(void **ptr, size_t align, size_t len) { int state = QASAN_SWAP(QASAN_DISABLED); // disable qasan for this thread - char *orig = dlmalloc(sizeof(struct chunk_struct) + size); + char* orig = backend_malloc(sizeof(struct chunk_struct) + size); QASAN_SWAP(state); @@ -260,10 +313,10 @@ int __libqasan_posix_memalign(void **ptr, size_t align, size_t len) { QASAN_UNPOISON(orig, sizeof(struct chunk_struct) + size); - char *data = orig + sizeof(struct chunk_begin); + char* data = orig + sizeof(struct chunk_begin); data += align - ((uintptr_t)data % align); - struct chunk_begin *p = (struct chunk_begin *)data - 1; + struct chunk_begin* p = (struct chunk_begin*)data - 1; p->requested_size = len; p->aligned_orig = orig; @@ -286,9 +339,9 @@ int __libqasan_posix_memalign(void **ptr, size_t align, size_t len) { } -void *__libqasan_memalign(size_t align, size_t len) { +void* __libqasan_memalign(size_t align, size_t len) { - void *ret = NULL; + void* ret = NULL; __libqasan_posix_memalign(&ret, align, len); @@ -296,9 +349,9 @@ void *__libqasan_memalign(size_t align, size_t len) { } -void *__libqasan_aligned_alloc(size_t align, size_t len) { +void* __libqasan_aligned_alloc(size_t align, size_t len) { - void *ret = NULL; + void* ret = NULL; if ((len % align)) return NULL; diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl index 6ab6bf28..47722f64 160000 --- a/qemu_mode/qemuafl +++ b/qemu_mode/qemuafl @@ -1 +1 @@ -Subproject commit 6ab6bf28decb3e36eee43ffbd4a3bfd052dbbb50 +Subproject commit 47722f64e4c1662bad97dc25f3e4cc63959ff5f3 -- cgit 1.4.1 From 22a3c7f7d043d9dbf39c847061d88a4577537031 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Fri, 12 Feb 2021 09:42:22 +0100 Subject: fix #736 (ty b1gr3db) --- qemu_mode/QEMUAFL_VERSION | 2 +- qemu_mode/libqasan/dlmalloc.c | 5553 ++++++++++++++++++++++++----------------- qemu_mode/libqasan/hooks.c | 2 + qemu_mode/libqasan/libqasan.c | 2 +- qemu_mode/libqasan/malloc.c | 109 +- qemu_mode/libqasan/string.c | 2 +- qemu_mode/qemuafl | 2 +- src/afl-fuzz-redqueen.c | 3 +- 8 files changed, 3332 insertions(+), 2343 deletions(-) (limited to 'qemu_mode/libqasan/dlmalloc.c') diff --git a/qemu_mode/QEMUAFL_VERSION b/qemu_mode/QEMUAFL_VERSION index d9f0ec33..e73a9588 100644 --- a/qemu_mode/QEMUAFL_VERSION +++ b/qemu_mode/QEMUAFL_VERSION @@ -1 +1 @@ -47722f64e4 +9a258d5b7a diff --git a/qemu_mode/libqasan/dlmalloc.c b/qemu_mode/libqasan/dlmalloc.c index 7e3cb159..bace0ff6 100644 --- a/qemu_mode/libqasan/dlmalloc.c +++ b/qemu_mode/libqasan/dlmalloc.c @@ -207,9 +207,12 @@ mspaces as thread-locals. For example: static __thread mspace tlms = 0; void* tlmalloc(size_t bytes) { + if (tlms == 0) tlms = create_mspace(0, 0); return mspace_malloc(tlms, bytes); + } + void tlfree(void* mem) { mspace_free(tlms, mem); } Unless FOOTERS is defined, each mspace is completely independent. @@ -525,201 +528,203 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP improvement at the expense of carrying around more memory. 
*/ -#define USE_DL_PREFIX - -/* Version identifier to allow people to support multiple versions */ -#ifndef DLMALLOC_VERSION -#define DLMALLOC_VERSION 20806 -#endif /* DLMALLOC_VERSION */ - -#ifndef DLMALLOC_EXPORT -#define DLMALLOC_EXPORT extern -#endif - -#ifndef WIN32 -#ifdef _WIN32 -#define WIN32 1 -#endif /* _WIN32 */ -#ifdef _WIN32_WCE -#define LACKS_FCNTL_H -#define WIN32 1 -#endif /* _WIN32_WCE */ -#endif /* WIN32 */ -#ifdef WIN32 -#define WIN32_LEAN_AND_MEAN -#include -#include -#define HAVE_MMAP 1 -#define HAVE_MORECORE 0 -#define LACKS_UNISTD_H -#define LACKS_SYS_PARAM_H -#define LACKS_SYS_MMAN_H -#define LACKS_STRING_H -#define LACKS_STRINGS_H -#define LACKS_SYS_TYPES_H -#define LACKS_ERRNO_H -#define LACKS_SCHED_H -#ifndef MALLOC_FAILURE_ACTION -#define MALLOC_FAILURE_ACTION -#endif /* MALLOC_FAILURE_ACTION */ -#ifndef MMAP_CLEARS -#ifdef _WIN32_WCE /* WINCE reportedly does not clear */ -#define MMAP_CLEARS 0 -#else -#define MMAP_CLEARS 1 -#endif /* _WIN32_WCE */ -#endif /*MMAP_CLEARS */ -#endif /* WIN32 */ - -#if defined(DARWIN) || defined(_DARWIN) -/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ -#ifndef HAVE_MORECORE -#define HAVE_MORECORE 0 -#define HAVE_MMAP 1 -/* OSX allocators provide 16 byte alignment */ -#ifndef MALLOC_ALIGNMENT -#define MALLOC_ALIGNMENT ((size_t)16U) -#endif -#endif /* HAVE_MORECORE */ -#endif /* DARWIN */ - -#ifndef LACKS_SYS_TYPES_H -#include /* For size_t */ -#endif /* LACKS_SYS_TYPES_H */ - -/* The maximum possible size_t value has all bits set */ -#define MAX_SIZE_T (~(size_t)0) - -#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */ -#define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \ - (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0)) -#endif /* USE_LOCKS */ - -#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */ -#if ((defined(__GNUC__) && \ - ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \ - defined(__i386__) || defined(__x86_64__))) || \ - (defined(_MSC_VER) && _MSC_VER>=1310)) -#ifndef USE_SPIN_LOCKS -#define USE_SPIN_LOCKS 1 -#endif /* USE_SPIN_LOCKS */ -#elif USE_SPIN_LOCKS -#error "USE_SPIN_LOCKS defined without implementation" -#endif /* ... locks available... 
*/ -#elif !defined(USE_SPIN_LOCKS) -#define USE_SPIN_LOCKS 0 -#endif /* USE_LOCKS */ - -#ifndef ONLY_MSPACES -#define ONLY_MSPACES 0 -#endif /* ONLY_MSPACES */ -#ifndef MSPACES -#if ONLY_MSPACES -#define MSPACES 1 -#else /* ONLY_MSPACES */ -#define MSPACES 0 -#endif /* ONLY_MSPACES */ -#endif /* MSPACES */ -#ifndef MALLOC_ALIGNMENT -#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) -#endif /* MALLOC_ALIGNMENT */ -#ifndef FOOTERS -#define FOOTERS 0 -#endif /* FOOTERS */ -#ifndef ABORT -#define ABORT abort() -#endif /* ABORT */ -#ifndef ABORT_ON_ASSERT_FAILURE -#define ABORT_ON_ASSERT_FAILURE 1 -#endif /* ABORT_ON_ASSERT_FAILURE */ -#ifndef PROCEED_ON_ERROR -#define PROCEED_ON_ERROR 0 -#endif /* PROCEED_ON_ERROR */ - -#ifndef INSECURE -#define INSECURE 0 -#endif /* INSECURE */ -#ifndef MALLOC_INSPECT_ALL -#define MALLOC_INSPECT_ALL 0 -#endif /* MALLOC_INSPECT_ALL */ -#ifndef HAVE_MMAP -#define HAVE_MMAP 1 -#endif /* HAVE_MMAP */ -#ifndef MMAP_CLEARS -#define MMAP_CLEARS 1 -#endif /* MMAP_CLEARS */ -#ifndef HAVE_MREMAP -#ifdef linux -#define HAVE_MREMAP 1 -#define _GNU_SOURCE /* Turns on mremap() definition */ -#else /* linux */ -#define HAVE_MREMAP 0 -#endif /* linux */ -#endif /* HAVE_MREMAP */ -#ifndef MALLOC_FAILURE_ACTION -#define MALLOC_FAILURE_ACTION errno = ENOMEM; -#endif /* MALLOC_FAILURE_ACTION */ -#ifndef HAVE_MORECORE -#if ONLY_MSPACES -#define HAVE_MORECORE 0 -#else /* ONLY_MSPACES */ -#define HAVE_MORECORE 1 -#endif /* ONLY_MSPACES */ -#endif /* HAVE_MORECORE */ -#if !HAVE_MORECORE -#define MORECORE_CONTIGUOUS 0 -#else /* !HAVE_MORECORE */ -#define MORECORE_DEFAULT sbrk -#ifndef MORECORE_CONTIGUOUS -#define MORECORE_CONTIGUOUS 1 -#endif /* MORECORE_CONTIGUOUS */ -#endif /* HAVE_MORECORE */ -#ifndef DEFAULT_GRANULARITY -#if (MORECORE_CONTIGUOUS || defined(WIN32)) -#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ -#else /* MORECORE_CONTIGUOUS */ -#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) -#endif /* MORECORE_CONTIGUOUS */ -#endif /* DEFAULT_GRANULARITY */ -#ifndef DEFAULT_TRIM_THRESHOLD -#ifndef MORECORE_CANNOT_TRIM -#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) -#else /* MORECORE_CANNOT_TRIM */ -#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T -#endif /* MORECORE_CANNOT_TRIM */ -#endif /* DEFAULT_TRIM_THRESHOLD */ -#ifndef DEFAULT_MMAP_THRESHOLD -#if HAVE_MMAP -#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) -#else /* HAVE_MMAP */ -#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T -#endif /* HAVE_MMAP */ -#endif /* DEFAULT_MMAP_THRESHOLD */ -#ifndef MAX_RELEASE_CHECK_RATE -#if HAVE_MMAP -#define MAX_RELEASE_CHECK_RATE 4095 -#else -#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T -#endif /* HAVE_MMAP */ -#endif /* MAX_RELEASE_CHECK_RATE */ -#ifndef USE_BUILTIN_FFS -#define USE_BUILTIN_FFS 0 -#endif /* USE_BUILTIN_FFS */ -#ifndef USE_DEV_RANDOM -#define USE_DEV_RANDOM 0 -#endif /* USE_DEV_RANDOM */ -#ifndef NO_MALLINFO -#define NO_MALLINFO 0 -#endif /* NO_MALLINFO */ -#ifndef MALLINFO_FIELD_TYPE -#define MALLINFO_FIELD_TYPE size_t -#endif /* MALLINFO_FIELD_TYPE */ -#ifndef NO_MALLOC_STATS -#define NO_MALLOC_STATS 0 -#endif /* NO_MALLOC_STATS */ -#ifndef NO_SEGMENT_TRAVERSAL -#define NO_SEGMENT_TRAVERSAL 0 -#endif /* NO_SEGMENT_TRAVERSAL */ + #define USE_DL_PREFIX + + /* Version identifier to allow people to support multiple versions */ + #ifndef DLMALLOC_VERSION + #define DLMALLOC_VERSION 20806 + #endif /* DLMALLOC_VERSION */ + + #ifndef DLMALLOC_EXPORT + #define DLMALLOC_EXPORT extern + #endif + + #ifndef WIN32 + 
#ifdef _WIN32 + #define WIN32 1 + #endif /* _WIN32 */ + #ifdef _WIN32_WCE + #define LACKS_FCNTL_H + #define WIN32 1 + #endif /* _WIN32_WCE */ + #endif /* WIN32 */ + #ifdef WIN32 + #define WIN32_LEAN_AND_MEAN + #include + #include + #define HAVE_MMAP 1 + #define HAVE_MORECORE 0 + #define LACKS_UNISTD_H + #define LACKS_SYS_PARAM_H + #define LACKS_SYS_MMAN_H + #define LACKS_STRING_H + #define LACKS_STRINGS_H + #define LACKS_SYS_TYPES_H + #define LACKS_ERRNO_H + #define LACKS_SCHED_H + #ifndef MALLOC_FAILURE_ACTION + #define MALLOC_FAILURE_ACTION + #endif /* MALLOC_FAILURE_ACTION */ + #ifndef MMAP_CLEARS + #ifdef _WIN32_WCE /* WINCE reportedly does not clear */ + #define MMAP_CLEARS 0 + #else + #define MMAP_CLEARS 1 + #endif /* _WIN32_WCE */ + #endif /*MMAP_CLEARS */ + #endif /* WIN32 */ + + #if defined(DARWIN) || defined(_DARWIN) + /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ + #ifndef HAVE_MORECORE + #define HAVE_MORECORE 0 + #define HAVE_MMAP 1 + /* OSX allocators provide 16 byte alignment */ + #ifndef MALLOC_ALIGNMENT + #define MALLOC_ALIGNMENT ((size_t)16U) + #endif + #endif /* HAVE_MORECORE */ + #endif /* DARWIN */ + + #ifndef LACKS_SYS_TYPES_H + #include /* For size_t */ + #endif /* LACKS_SYS_TYPES_H */ + + /* The maximum possible size_t value has all bits set */ + #define MAX_SIZE_T (~(size_t)0) + + #ifndef USE_LOCKS /* ensure true if spin or recursive locks set */ + #define USE_LOCKS \ + ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \ + (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0)) + #endif /* USE_LOCKS */ + + #if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */ + #if ((defined(__GNUC__) && \ + ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \ + defined(__i386__) || defined(__x86_64__))) || \ + (defined(_MSC_VER) && _MSC_VER >= 1310)) + #ifndef USE_SPIN_LOCKS + #define USE_SPIN_LOCKS 1 + #endif /* USE_SPIN_LOCKS */ + #elif USE_SPIN_LOCKS + #error "USE_SPIN_LOCKS defined without implementation" + #endif /* ... locks available... 
*/ + #elif !defined(USE_SPIN_LOCKS) + #define USE_SPIN_LOCKS 0 + #endif /* USE_LOCKS */ + + #ifndef ONLY_MSPACES + #define ONLY_MSPACES 0 + #endif /* ONLY_MSPACES */ + #ifndef MSPACES + #if ONLY_MSPACES + #define MSPACES 1 + #else /* ONLY_MSPACES */ + #define MSPACES 0 + #endif /* ONLY_MSPACES */ + #endif /* MSPACES */ + #ifndef MALLOC_ALIGNMENT + #define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) + #endif /* MALLOC_ALIGNMENT */ + #ifndef FOOTERS + #define FOOTERS 0 + #endif /* FOOTERS */ + #ifndef ABORT + #define ABORT abort() + #endif /* ABORT */ + #ifndef ABORT_ON_ASSERT_FAILURE + #define ABORT_ON_ASSERT_FAILURE 1 + #endif /* ABORT_ON_ASSERT_FAILURE */ + #ifndef PROCEED_ON_ERROR + #define PROCEED_ON_ERROR 0 + #endif /* PROCEED_ON_ERROR */ + + #ifndef INSECURE + #define INSECURE 0 + #endif /* INSECURE */ + #ifndef MALLOC_INSPECT_ALL + #define MALLOC_INSPECT_ALL 0 + #endif /* MALLOC_INSPECT_ALL */ + #ifndef HAVE_MMAP + #define HAVE_MMAP 1 + #endif /* HAVE_MMAP */ + #ifndef MMAP_CLEARS + #define MMAP_CLEARS 1 + #endif /* MMAP_CLEARS */ + #ifndef HAVE_MREMAP + #ifdef linux + #define HAVE_MREMAP 1 + #define _GNU_SOURCE /* Turns on mremap() definition */ + #else /* linux */ + #define HAVE_MREMAP 0 + #endif /* linux */ + #endif /* HAVE_MREMAP */ + #ifndef MALLOC_FAILURE_ACTION + #define MALLOC_FAILURE_ACTION errno = ENOMEM; + #endif /* MALLOC_FAILURE_ACTION */ + #ifndef HAVE_MORECORE + #if ONLY_MSPACES + #define HAVE_MORECORE 0 + #else /* ONLY_MSPACES */ + #define HAVE_MORECORE 1 + #endif /* ONLY_MSPACES */ + #endif /* HAVE_MORECORE */ + #if !HAVE_MORECORE + #define MORECORE_CONTIGUOUS 0 + #else /* !HAVE_MORECORE */ + #define MORECORE_DEFAULT sbrk + #ifndef MORECORE_CONTIGUOUS + #define MORECORE_CONTIGUOUS 1 + #endif /* MORECORE_CONTIGUOUS */ + #endif /* HAVE_MORECORE */ + #ifndef DEFAULT_GRANULARITY + #if (MORECORE_CONTIGUOUS || defined(WIN32)) + #define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ + #else /* MORECORE_CONTIGUOUS */ + #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) + #endif /* MORECORE_CONTIGUOUS */ + #endif /* DEFAULT_GRANULARITY */ + #ifndef DEFAULT_TRIM_THRESHOLD + #ifndef MORECORE_CANNOT_TRIM + #define DEFAULT_TRIM_THRESHOLD \ + ((size_t)2U * (size_t)1024U * (size_t)1024U) + #else /* MORECORE_CANNOT_TRIM */ + #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T + #endif /* MORECORE_CANNOT_TRIM */ + #endif /* DEFAULT_TRIM_THRESHOLD */ + #ifndef DEFAULT_MMAP_THRESHOLD + #if HAVE_MMAP + #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) + #else /* HAVE_MMAP */ + #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T + #endif /* HAVE_MMAP */ + #endif /* DEFAULT_MMAP_THRESHOLD */ + #ifndef MAX_RELEASE_CHECK_RATE + #if HAVE_MMAP + #define MAX_RELEASE_CHECK_RATE 4095 + #else + #define MAX_RELEASE_CHECK_RATE MAX_SIZE_T + #endif /* HAVE_MMAP */ + #endif /* MAX_RELEASE_CHECK_RATE */ + #ifndef USE_BUILTIN_FFS + #define USE_BUILTIN_FFS 0 + #endif /* USE_BUILTIN_FFS */ + #ifndef USE_DEV_RANDOM + #define USE_DEV_RANDOM 0 + #endif /* USE_DEV_RANDOM */ + #ifndef NO_MALLINFO + #define NO_MALLINFO 0 + #endif /* NO_MALLINFO */ + #ifndef MALLINFO_FIELD_TYPE + #define MALLINFO_FIELD_TYPE size_t + #endif /* MALLINFO_FIELD_TYPE */ + #ifndef NO_MALLOC_STATS + #define NO_MALLOC_STATS 0 + #endif /* NO_MALLOC_STATS */ + #ifndef NO_SEGMENT_TRAVERSAL + #define NO_SEGMENT_TRAVERSAL 0 + #endif /* NO_SEGMENT_TRAVERSAL */ /* mallopt tuning options. 
SVID/XPG defines four standard parameter @@ -728,123 +733,128 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP malloc does support the following options. */ -#undef M_TRIM_THRESHOLD -#undef M_GRANULARITY -#undef M_MMAP_THRESHOLD -#define M_TRIM_THRESHOLD (-1) -#define M_GRANULARITY (-2) -#define M_MMAP_THRESHOLD (-3) + #undef M_TRIM_THRESHOLD + #undef M_GRANULARITY + #undef M_MMAP_THRESHOLD + #define M_TRIM_THRESHOLD (-1) + #define M_GRANULARITY (-2) + #define M_MMAP_THRESHOLD (-3) /* ------------------------ Mallinfo declarations ------------------------ */ -#if !NO_MALLINFO -/* - This version of malloc supports the standard SVID/XPG mallinfo - routine that returns a struct containing usage properties and - statistics. It should work on any system that has a - /usr/include/malloc.h defining struct mallinfo. The main - declaration needed is the mallinfo struct that is returned (by-copy) - by mallinfo(). The malloinfo struct contains a bunch of fields that - are not even meaningful in this version of malloc. These fields are - are instead filled by mallinfo() with other numbers that might be of - interest. - - HAVE_USR_INCLUDE_MALLOC_H should be set if you have a - /usr/include/malloc.h file that includes a declaration of struct - mallinfo. If so, it is included; else a compliant version is - declared below. These must be precisely the same for mallinfo() to - work. The original SVID version of this struct, defined on most - systems with mallinfo, declares all fields as ints. But some others - define as unsigned long. If your system defines the fields using a - type of different width than listed here, you MUST #include your - system version and #define HAVE_USR_INCLUDE_MALLOC_H. -*/ + #if !NO_MALLINFO + /* + This version of malloc supports the standard SVID/XPG mallinfo + routine that returns a struct containing usage properties and + statistics. It should work on any system that has a + /usr/include/malloc.h defining struct mallinfo. The main + declaration needed is the mallinfo struct that is returned (by-copy) + by mallinfo(). The malloinfo struct contains a bunch of fields that + are not even meaningful in this version of malloc. These fields are + are instead filled by mallinfo() with other numbers that might be of + interest. + + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a + /usr/include/malloc.h file that includes a declaration of struct + mallinfo. If so, it is included; else a compliant version is + declared below. These must be precisely the same for mallinfo() to + work. The original SVID version of this struct, defined on most + systems with mallinfo, declares all fields as ints. But some others + define as unsigned long. If your system defines the fields using a + type of different width than listed here, you MUST #include your + system version and #define HAVE_USR_INCLUDE_MALLOC_H. + */ -/* #define HAVE_USR_INCLUDE_MALLOC_H */ + /* #define HAVE_USR_INCLUDE_MALLOC_H */ -#ifdef HAVE_USR_INCLUDE_MALLOC_H -#include "/usr/include/malloc.h" -#else /* HAVE_USR_INCLUDE_MALLOC_H */ -#ifndef STRUCT_MALLINFO_DECLARED -/* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */ -#define _STRUCT_MALLINFO -#define STRUCT_MALLINFO_DECLARED 1 + #ifdef HAVE_USR_INCLUDE_MALLOC_H + #include "/usr/include/malloc.h" + #else /* HAVE_USR_INCLUDE_MALLOC_H */ + #ifndef STRUCT_MALLINFO_DECLARED + /* HP-UX (and others?) 
redefines mallinfo unless _STRUCT_MALLINFO is + * defined */ + #define _STRUCT_MALLINFO + #define STRUCT_MALLINFO_DECLARED 1 struct mallinfo { - MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ - MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ - MALLINFO_FIELD_TYPE smblks; /* always 0 */ - MALLINFO_FIELD_TYPE hblks; /* always 0 */ - MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ - MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ - MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ - MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ - MALLINFO_FIELD_TYPE fordblks; /* total free space */ - MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ + + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ + MALLINFO_FIELD_TYPE smblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ + MALLINFO_FIELD_TYPE fordblks; /* total free space */ + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ + }; -#endif /* STRUCT_MALLINFO_DECLARED */ -#endif /* HAVE_USR_INCLUDE_MALLOC_H */ -#endif /* NO_MALLINFO */ + + #endif /* STRUCT_MALLINFO_DECLARED */ + #endif /* HAVE_USR_INCLUDE_MALLOC_H */ + #endif /* NO_MALLINFO */ /* Try to persuade compilers to inline. The most critical functions for inlining are defined as macros, so these aren't used for them. */ -#ifndef FORCEINLINE - #if defined(__GNUC__) -#define FORCEINLINE __inline __attribute__ ((always_inline)) - #elif defined(_MSC_VER) - #define FORCEINLINE __forceinline + #ifndef FORCEINLINE + #if defined(__GNUC__) + #define FORCEINLINE __inline __attribute__((always_inline)) + #elif defined(_MSC_VER) + #define FORCEINLINE __forceinline + #endif #endif -#endif -#ifndef NOINLINE - #if defined(__GNUC__) - #define NOINLINE __attribute__ ((noinline)) - #elif defined(_MSC_VER) - #define NOINLINE __declspec(noinline) - #else - #define NOINLINE + #ifndef NOINLINE + #if defined(__GNUC__) + #define NOINLINE __attribute__((noinline)) + #elif defined(_MSC_VER) + #define NOINLINE __declspec(noinline) + #else + #define NOINLINE + #endif #endif -#endif -#ifdef __cplusplus + #ifdef __cplusplus extern "C" { -#ifndef FORCEINLINE - #define FORCEINLINE inline -#endif -#endif /* __cplusplus */ -#ifndef FORCEINLINE - #define FORCEINLINE -#endif - -#if !ONLY_MSPACES - -/* ------------------- Declarations of public routines ------------------- */ - -#ifndef USE_DL_PREFIX -#define dlcalloc calloc -#define dlfree free -#define dlmalloc malloc -#define dlmemalign memalign -#define dlposix_memalign posix_memalign -#define dlrealloc realloc -#define dlrealloc_in_place realloc_in_place -#define dlvalloc valloc -#define dlpvalloc pvalloc -#define dlmallinfo mallinfo -#define dlmallopt mallopt -#define dlmalloc_trim malloc_trim -#define dlmalloc_stats malloc_stats -#define dlmalloc_usable_size malloc_usable_size -#define dlmalloc_footprint malloc_footprint -#define dlmalloc_max_footprint malloc_max_footprint -#define dlmalloc_footprint_limit malloc_footprint_limit -#define dlmalloc_set_footprint_limit malloc_set_footprint_limit -#define dlmalloc_inspect_all malloc_inspect_all -#define dlindependent_calloc independent_calloc -#define dlindependent_comalloc independent_comalloc 
-#define dlbulk_free bulk_free -#endif /* USE_DL_PREFIX */ + + #ifndef FORCEINLINE + #define FORCEINLINE inline + #endif + #endif /* __cplusplus */ + #ifndef FORCEINLINE + #define FORCEINLINE + #endif + + #if !ONLY_MSPACES + + /* ------------------- Declarations of public routines ------------------- */ + + #ifndef USE_DL_PREFIX + #define dlcalloc calloc + #define dlfree free + #define dlmalloc malloc + #define dlmemalign memalign + #define dlposix_memalign posix_memalign + #define dlrealloc realloc + #define dlrealloc_in_place realloc_in_place + #define dlvalloc valloc + #define dlpvalloc pvalloc + #define dlmallinfo mallinfo + #define dlmallopt mallopt + #define dlmalloc_trim malloc_trim + #define dlmalloc_stats malloc_stats + #define dlmalloc_usable_size malloc_usable_size + #define dlmalloc_footprint malloc_footprint + #define dlmalloc_max_footprint malloc_max_footprint + #define dlmalloc_footprint_limit malloc_footprint_limit + #define dlmalloc_set_footprint_limit malloc_set_footprint_limit + #define dlmalloc_inspect_all malloc_inspect_all + #define dlindependent_calloc independent_calloc + #define dlindependent_comalloc independent_comalloc + #define dlbulk_free bulk_free + #endif /* USE_DL_PREFIX */ /* malloc(size_t n) @@ -860,7 +870,7 @@ extern "C" { maximum supported value of n differs across systems, but is in all cases less than the maximum representable value of a size_t. */ -DLMALLOC_EXPORT void* dlmalloc(size_t); +DLMALLOC_EXPORT void *dlmalloc(size_t); /* free(void* p) @@ -869,14 +879,14 @@ DLMALLOC_EXPORT void* dlmalloc(size_t); It has no effect if p is null. If p was not malloced or already freed, free(p) will by default cause the current program to abort. */ -DLMALLOC_EXPORT void dlfree(void*); +DLMALLOC_EXPORT void dlfree(void *); /* calloc(size_t n_elements, size_t element_size); Returns a pointer to n_elements * element_size bytes, with all locations set to zero. */ -DLMALLOC_EXPORT void* dlcalloc(size_t, size_t); +DLMALLOC_EXPORT void *dlcalloc(size_t, size_t); /* realloc(void* p, size_t n) @@ -900,7 +910,7 @@ DLMALLOC_EXPORT void* dlcalloc(size_t, size_t); The old unix realloc convention of allowing the last-free'd chunk to be used as an argument to realloc is not supported. */ -DLMALLOC_EXPORT void* dlrealloc(void*, size_t); +DLMALLOC_EXPORT void *dlrealloc(void *, size_t); /* realloc_in_place(void* p, size_t n) @@ -915,7 +925,7 @@ DLMALLOC_EXPORT void* dlrealloc(void*, size_t); Returns p if successful; otherwise null. */ -DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t); +DLMALLOC_EXPORT void *dlrealloc_in_place(void *, size_t); /* memalign(size_t alignment, size_t n); @@ -929,7 +939,7 @@ DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t); Overreliance on memalign is a sure way to fragment space. */ -DLMALLOC_EXPORT void* dlmemalign(size_t, size_t); +DLMALLOC_EXPORT void *dlmemalign(size_t, size_t); /* int posix_memalign(void** pp, size_t alignment, size_t n); @@ -939,14 +949,14 @@ DLMALLOC_EXPORT void* dlmemalign(size_t, size_t); returns EINVAL if the alignment is not a power of two (3) fails and returns ENOMEM if memory cannot be allocated. */ -DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t); +DLMALLOC_EXPORT int dlposix_memalign(void **, size_t, size_t); /* valloc(size_t n); Equivalent to memalign(pagesize, n), where pagesize is the page size of the system. If the pagesize is unknown, 4096 is used. 
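+
+  A minimal usage sketch (illustrative only: the 64-byte alignment and
+  the request sizes are arbitrary, and all such blocks are released
+  with the ordinary free routine):
+
+    void *aligned = 0;
+    void *page = dlvalloc(1);                       // page-aligned
+    if (dlposix_memalign(&aligned, 64, 1000) == 0)  // 0 means success
+      dlfree(aligned);
+    if (page != 0) dlfree(page);
+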
*/ -DLMALLOC_EXPORT void* dlvalloc(size_t); +DLMALLOC_EXPORT void *dlvalloc(size_t); /* mallopt(int parameter_number, int parameter_value) @@ -1021,7 +1031,7 @@ DLMALLOC_EXPORT size_t dlmalloc_footprint_limit(); */ DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes); -#if MALLOC_INSPECT_ALL + #if MALLOC_INSPECT_ALL /* malloc_inspect_all(void(*handler)(void *start, void *end, @@ -1043,19 +1053,23 @@ DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes); than 1000, you could write: static int count = 0; void count_chunks(void* start, void* end, size_t used, void* arg) { + if (used >= 1000) ++count; + } + then: malloc_inspect_all(count_chunks, NULL); malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined. */ -DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), - void* arg); +DLMALLOC_EXPORT void dlmalloc_inspect_all(void (*handler)(void *, void *, + size_t, void *), + void *arg); -#endif /* MALLOC_INSPECT_ALL */ + #endif /* MALLOC_INSPECT_ALL */ -#if !NO_MALLINFO + #if !NO_MALLINFO /* mallinfo() Returns (by copy) a struct containing various summary statistics: @@ -1079,7 +1093,7 @@ DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, thus be inaccurate. */ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); -#endif /* NO_MALLINFO */ + #endif /* NO_MALLINFO */ /* independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); @@ -1117,6 +1131,7 @@ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); struct Node { int item; struct Node* next; }; struct Node* build_list() { + struct Node** pool; int n = read_number_of_nodes_needed(); if (n <= 0) return 0; @@ -1128,9 +1143,11 @@ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); pool[i]->next = pool[i+1]; free(pool); // Can now free the array (or not, if it is needed later) return first; + } + */ -DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); +DLMALLOC_EXPORT void **dlindependent_calloc(size_t, size_t, void **); /* independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); @@ -1169,6 +1186,7 @@ DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); struct Foot { ... } void send_message(char* msg) { + int msglen = strlen(msg); size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; void* chunks[3]; @@ -1178,6 +1196,7 @@ DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); char* body = (char*)(chunks[1]); struct Foot* foot = (struct Foot*)(chunks[2]); // ... + } In general though, independent_comalloc is worth using only for @@ -1188,7 +1207,7 @@ DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); since it cannot reuse existing noncontiguous small chunks that might be available for some of the elements. */ -DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**); +DLMALLOC_EXPORT void **dlindependent_comalloc(size_t, size_t *, void **); /* bulk_free(void* array[], size_t n_elements) @@ -1199,14 +1218,14 @@ DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**); is returned. For large arrays of pointers with poor locality, it may be worthwhile to sort this array before calling bulk_free. */ -DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements); +DLMALLOC_EXPORT size_t dlbulk_free(void **, size_t n_elements); /* pvalloc(size_t n); Equivalent to valloc(minimum-page-that-holds(n)), that is, round up n to nearest pagesize. 
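+
+  For instance (a sketch; the request size and the 4096-byte page size
+  are only examples):
+
+    char *buf = (char *)dlpvalloc(5000);  // with 4096-byte pages this is
+                                          // rounded up to 8192 usable bytes
+    if (buf != 0) dlfree(buf);
+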
*/ -DLMALLOC_EXPORT void* dlpvalloc(size_t); +DLMALLOC_EXPORT void *dlpvalloc(size_t); /* malloc_trim(size_t pad); @@ -1229,7 +1248,7 @@ DLMALLOC_EXPORT void* dlpvalloc(size_t); Malloc_trim returns 1 if it actually released any memory, else 0. */ -DLMALLOC_EXPORT int dlmalloc_trim(size_t); +DLMALLOC_EXPORT int dlmalloc_trim(size_t); /* malloc_stats(); @@ -1250,7 +1269,7 @@ DLMALLOC_EXPORT int dlmalloc_trim(size_t); malloc_stats prints only the most commonly interesting statistics. More information can be obtained by calling mallinfo. */ -DLMALLOC_EXPORT void dlmalloc_stats(void); +DLMALLOC_EXPORT void dlmalloc_stats(void); /* malloc_usable_size(void* p); @@ -1266,17 +1285,17 @@ DLMALLOC_EXPORT void dlmalloc_stats(void); p = malloc(n); assert(malloc_usable_size(p) >= 256); */ -size_t dlmalloc_usable_size(void*); +size_t dlmalloc_usable_size(void *); -#endif /* ONLY_MSPACES */ + #endif /* ONLY_MSPACES */ -#if MSPACES + #if MSPACES /* mspace is an opaque type representing an independent region of space that supports mspace_malloc, etc. */ -typedef void* mspace; +typedef void *mspace; /* create_mspace creates and returns a new independent space with the @@ -1308,7 +1327,8 @@ DLMALLOC_EXPORT size_t destroy_mspace(mspace msp); Destroying this space will deallocate all additionally allocated space (if possible) but not the initial base. */ -DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked); +DLMALLOC_EXPORT mspace create_mspace_with_base(void *base, size_t capacity, + int locked); /* mspace_track_large_chunks controls whether requests for large chunks @@ -1323,12 +1343,11 @@ DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int */ DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable); - /* mspace_malloc behaves as malloc, but operates within the given space. */ -DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes); +DLMALLOC_EXPORT void *mspace_malloc(mspace msp, size_t bytes); /* mspace_free behaves as free, but operates within @@ -1338,7 +1357,7 @@ DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes); free may be called instead of mspace_free because freed chunks from any space are handled by their originating spaces. */ -DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem); +DLMALLOC_EXPORT void mspace_free(mspace msp, void *mem); /* mspace_realloc behaves as realloc, but operates within @@ -1349,33 +1368,38 @@ DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem); realloced chunks from any space are handled by their originating spaces. */ -DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize); +DLMALLOC_EXPORT void *mspace_realloc(mspace msp, void *mem, size_t newsize); /* mspace_calloc behaves as calloc, but operates within the given space. */ -DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); +DLMALLOC_EXPORT void *mspace_calloc(mspace msp, size_t n_elements, + size_t elem_size); /* mspace_memalign behaves as memalign, but operates within the given space. */ -DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); +DLMALLOC_EXPORT void *mspace_memalign(mspace msp, size_t alignment, + size_t bytes); /* mspace_independent_calloc behaves as independent_calloc, but operates within the given space. 
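+
+  Taken together, the mspace entry points are typically used along
+  these lines (a sketch: capacity 0 requests a default-sized initial
+  region, the second 0 leaves locking off, and the request sizes are
+  arbitrary):
+
+    mspace ms = create_mspace(0, 0);
+    if (ms != 0) {
+      void *a = mspace_malloc(ms, 128);
+      void *b = mspace_calloc(ms, 16, sizeof(int));
+      mspace_free(ms, a);
+      mspace_free(ms, b);
+      destroy_mspace(ms);  // releases the whole space
+    }
+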
*/ -DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements, - size_t elem_size, void* chunks[]); +DLMALLOC_EXPORT void **mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, + void * chunks[]); /* mspace_independent_comalloc behaves as independent_comalloc, but operates within the given space. */ -DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements, - size_t sizes[], void* chunks[]); +DLMALLOC_EXPORT void **mspace_independent_comalloc(mspace msp, + size_t n_elements, + size_t sizes[], + void * chunks[]); /* mspace_footprint() returns the number of bytes obtained from the @@ -1389,19 +1413,18 @@ DLMALLOC_EXPORT size_t mspace_footprint(mspace msp); */ DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp); - -#if !NO_MALLINFO + #if !NO_MALLINFO /* mspace_mallinfo behaves as mallinfo, but reports properties of the given space. */ DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp); -#endif /* NO_MALLINFO */ + #endif /* NO_MALLINFO */ /* malloc_usable_size(void* p) behaves the same as malloc_usable_size; */ -DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem); +DLMALLOC_EXPORT size_t mspace_usable_size(const void *mem); /* mspace_malloc_stats behaves as malloc_stats, but reports @@ -1420,11 +1443,13 @@ DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad); */ DLMALLOC_EXPORT int mspace_mallopt(int, int); -#endif /* MSPACES */ + #endif /* MSPACES */ -#ifdef __cplusplus -} /* end of extern "C" */ -#endif /* __cplusplus */ + #ifdef __cplusplus + +} /* end of extern "C" */ + + #endif /* __cplusplus */ /* ======================================================================== @@ -1438,392 +1463,413 @@ DLMALLOC_EXPORT int mspace_mallopt(int, int); /*------------------------------ internal #includes ---------------------- */ -#ifdef _MSC_VER -#pragma warning( disable : 4146 ) /* no "unsigned" warnings */ -#endif /* _MSC_VER */ -#if !NO_MALLOC_STATS -#include /* for printing in malloc_stats */ -#endif /* NO_MALLOC_STATS */ -#ifndef LACKS_ERRNO_H -#include /* for MALLOC_FAILURE_ACTION */ -#endif /* LACKS_ERRNO_H */ -#ifdef DEBUG -#if ABORT_ON_ASSERT_FAILURE -#undef assert -#define assert(x) if(!(x)) ABORT -#else /* ABORT_ON_ASSERT_FAILURE */ -#include -#endif /* ABORT_ON_ASSERT_FAILURE */ -#else /* DEBUG */ -#ifndef assert -#define assert(x) -#endif -#define DEBUG 0 -#endif /* DEBUG */ -#if !defined(WIN32) && !defined(LACKS_TIME_H) -#include /* for magic initialization */ -#endif /* WIN32 */ -#ifndef LACKS_STDLIB_H -#include /* for abort() */ -#endif /* LACKS_STDLIB_H */ -#ifndef LACKS_STRING_H -#include /* for memset etc */ -#endif /* LACKS_STRING_H */ -#if USE_BUILTIN_FFS -#ifndef LACKS_STRINGS_H -#include /* for ffs */ -#endif /* LACKS_STRINGS_H */ -#endif /* USE_BUILTIN_FFS */ -#if HAVE_MMAP -#ifndef LACKS_SYS_MMAN_H -/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */ -#if (defined(linux) && !defined(__USE_GNU)) -#define __USE_GNU 1 -#include /* for mmap */ -#undef __USE_GNU -#else -#include /* for mmap */ -#endif /* linux */ -#endif /* LACKS_SYS_MMAN_H */ -#ifndef LACKS_FCNTL_H -#include -#endif /* LACKS_FCNTL_H */ -#endif /* HAVE_MMAP */ -#ifndef LACKS_UNISTD_H -#include /* for sbrk, sysconf */ -#else /* LACKS_UNISTD_H */ -#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) -extern void* sbrk(ptrdiff_t); -#endif /* FreeBSD etc */ -#endif /* LACKS_UNISTD_H */ - -/* Declarations for locking */ -#if USE_LOCKS -#ifndef WIN32 -#if defined (__SVR4) && defined 
(__sun) /* solaris */ -#include -#elif !defined(LACKS_SCHED_H) -#include -#endif /* solaris or LACKS_SCHED_H */ -#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS -#include -#endif /* USE_RECURSIVE_LOCKS ... */ -#elif defined(_MSC_VER) -#ifndef _M_AMD64 -/* These are already defined on AMD64 builds */ -#ifdef __cplusplus + #ifdef _MSC_VER + #pragma warning(disable : 4146) /* no "unsigned" warnings */ + #endif /* _MSC_VER */ + #if !NO_MALLOC_STATS + #include /* for printing in malloc_stats */ + #endif /* NO_MALLOC_STATS */ + #ifndef LACKS_ERRNO_H + #include /* for MALLOC_FAILURE_ACTION */ + #endif /* LACKS_ERRNO_H */ + #ifdef DEBUG + #if ABORT_ON_ASSERT_FAILURE + #undef assert + #define assert(x) \ + if (!(x)) ABORT + #else /* ABORT_ON_ASSERT_FAILURE */ + #include + #endif /* ABORT_ON_ASSERT_FAILURE */ + #else /* DEBUG */ + #ifndef assert + #define assert(x) + #endif + #define DEBUG 0 + #endif /* DEBUG */ + #if !defined(WIN32) && !defined(LACKS_TIME_H) + #include /* for magic initialization */ + #endif /* WIN32 */ + #ifndef LACKS_STDLIB_H + #include /* for abort() */ + #endif /* LACKS_STDLIB_H */ + #ifndef LACKS_STRING_H + #include /* for memset etc */ + #endif /* LACKS_STRING_H */ + #if USE_BUILTIN_FFS + #ifndef LACKS_STRINGS_H + #include /* for ffs */ + #endif /* LACKS_STRINGS_H */ + #endif /* USE_BUILTIN_FFS */ + #if HAVE_MMAP + #ifndef LACKS_SYS_MMAN_H + /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */ + #if (defined(linux) && !defined(__USE_GNU)) + #define __USE_GNU 1 + #include /* for mmap */ + #undef __USE_GNU + #else + #include /* for mmap */ + #endif /* linux */ + #endif /* LACKS_SYS_MMAN_H */ + #ifndef LACKS_FCNTL_H + #include + #endif /* LACKS_FCNTL_H */ + #endif /* HAVE_MMAP */ + #ifndef LACKS_UNISTD_H + #include /* for sbrk, sysconf */ + #else /* LACKS_UNISTD_H */ + #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) +extern void *sbrk(ptrdiff_t); + #endif /* FreeBSD etc */ + #endif /* LACKS_UNISTD_H */ + + /* Declarations for locking */ + #if USE_LOCKS + #ifndef WIN32 + #if defined(__SVR4) && defined(__sun) /* solaris */ + #include + #elif !defined(LACKS_SCHED_H) + #include + #endif /* solaris or LACKS_SCHED_H */ + #if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || \ + !USE_SPIN_LOCKS + #include + #endif /* USE_RECURSIVE_LOCKS ... 
*/ + #elif defined(_MSC_VER) + #ifndef _M_AMD64 + /* These are already defined on AMD64 builds */ + #ifdef __cplusplus extern "C" { -#endif /* __cplusplus */ -LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp); + + #endif /* __cplusplus */ +LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, + LONG Comp); LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value); -#ifdef __cplusplus + #ifdef __cplusplus + } -#endif /* __cplusplus */ -#endif /* _M_AMD64 */ -#pragma intrinsic (_InterlockedCompareExchange) -#pragma intrinsic (_InterlockedExchange) -#define interlockedcompareexchange _InterlockedCompareExchange -#define interlockedexchange _InterlockedExchange -#elif defined(WIN32) && defined(__GNUC__) -#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b) -#define interlockedexchange __sync_lock_test_and_set -#endif /* Win32 */ -#else /* USE_LOCKS */ -#endif /* USE_LOCKS */ - -#ifndef LOCK_AT_FORK -#define LOCK_AT_FORK 0 -#endif - -/* Declarations for bit scanning on win32 */ -#if defined(_MSC_VER) && _MSC_VER>=1300 -#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ -#ifdef __cplusplus + + #endif /* __cplusplus */ + #endif /* _M_AMD64 */ + #pragma intrinsic(_InterlockedCompareExchange) + #pragma intrinsic(_InterlockedExchange) + #define interlockedcompareexchange _InterlockedCompareExchange + #define interlockedexchange _InterlockedExchange + #elif defined(WIN32) && defined(__GNUC__) + #define interlockedcompareexchange(a, b, c) \ + __sync_val_compare_and_swap(a, c, b) + #define interlockedexchange __sync_lock_test_and_set + #endif /* Win32 */ + #else /* USE_LOCKS */ + #endif /* USE_LOCKS */ + + #ifndef LOCK_AT_FORK + #define LOCK_AT_FORK 0 + #endif + + /* Declarations for bit scanning on win32 */ + #if defined(_MSC_VER) && _MSC_VER >= 1300 + #ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ + #ifdef __cplusplus extern "C" { -#endif /* __cplusplus */ + + #endif /* __cplusplus */ unsigned char _BitScanForward(unsigned long *index, unsigned long mask); unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); -#ifdef __cplusplus + #ifdef __cplusplus + } -#endif /* __cplusplus */ - -#define BitScanForward _BitScanForward -#define BitScanReverse _BitScanReverse -#pragma intrinsic(_BitScanForward) -#pragma intrinsic(_BitScanReverse) -#endif /* BitScanForward */ -#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ - -#ifndef WIN32 -#ifndef malloc_getpagesize -# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ -# ifndef _SC_PAGE_SIZE -# define _SC_PAGE_SIZE _SC_PAGESIZE -# endif -# endif -# ifdef _SC_PAGE_SIZE -# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) -# else -# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) - extern size_t getpagesize(); -# define malloc_getpagesize getpagesize() -# else -# ifdef WIN32 /* use supplied emulation of getpagesize */ -# define malloc_getpagesize getpagesize() -# else -# ifndef LACKS_SYS_PARAM_H -# include -# endif -# ifdef EXEC_PAGESIZE -# define malloc_getpagesize EXEC_PAGESIZE -# else -# ifdef NBPG -# ifndef CLSIZE -# define malloc_getpagesize NBPG -# else -# define malloc_getpagesize (NBPG * CLSIZE) -# endif -# else -# ifdef NBPC -# define malloc_getpagesize NBPC -# else -# ifdef PAGESIZE -# define malloc_getpagesize PAGESIZE -# else /* just guess */ -# define malloc_getpagesize ((size_t)4096U) -# endif -# endif -# endif -# endif -# endif -# endif -# endif -#endif -#endif - -/* ------------------- 
size_t and alignment properties -------------------- */ - -/* The byte and bit size of a size_t */ -#define SIZE_T_SIZE (sizeof(size_t)) -#define SIZE_T_BITSIZE (sizeof(size_t) << 3) - -/* Some constants coerced to size_t */ -/* Annoying but necessary to avoid errors on some platforms */ -#define SIZE_T_ZERO ((size_t)0) -#define SIZE_T_ONE ((size_t)1) -#define SIZE_T_TWO ((size_t)2) -#define SIZE_T_FOUR ((size_t)4) -#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) -#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) -#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) -#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) - -/* The bit mask value corresponding to MALLOC_ALIGNMENT */ -#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) - -/* True if address a has acceptable alignment */ -#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) - -/* the number of bytes to offset an address to align it */ -#define align_offset(A)\ - ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ - ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) - -/* -------------------------- MMAP preliminaries ------------------------- */ -/* - If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and - checks to fail so compiler optimizer can delete code rather than - using so many "#if"s. -*/ + #endif /* __cplusplus */ + + #define BitScanForward _BitScanForward + #define BitScanReverse _BitScanReverse + #pragma intrinsic(_BitScanForward) + #pragma intrinsic(_BitScanReverse) + #endif /* BitScanForward */ + #endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ + + #ifndef WIN32 + #ifndef malloc_getpagesize + #ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ + #ifndef _SC_PAGE_SIZE + #define _SC_PAGE_SIZE _SC_PAGESIZE + #endif + #endif + #ifdef _SC_PAGE_SIZE + #define malloc_getpagesize sysconf(_SC_PAGE_SIZE) + #else + #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) +extern size_t getpagesize(); + #define malloc_getpagesize getpagesize() + #else + #ifdef WIN32 /* use supplied emulation of getpagesize */ + #define malloc_getpagesize getpagesize() + #else + #ifndef LACKS_SYS_PARAM_H + #include + #endif + #ifdef EXEC_PAGESIZE + #define malloc_getpagesize EXEC_PAGESIZE + #else + #ifdef NBPG + #ifndef CLSIZE + #define malloc_getpagesize NBPG + #else + #define malloc_getpagesize (NBPG * CLSIZE) + #endif + #else + #ifdef NBPC + #define malloc_getpagesize NBPC + #else + #ifdef PAGESIZE + #define malloc_getpagesize PAGESIZE + #else /* just guess */ + #define malloc_getpagesize ((size_t)4096U) + #endif + #endif + #endif + #endif + #endif + #endif + #endif + #endif + #endif + + /* ------------------- size_t and alignment properties -------------------- */ + + /* The byte and bit size of a size_t */ + #define SIZE_T_SIZE (sizeof(size_t)) + #define SIZE_T_BITSIZE (sizeof(size_t) << 3) + + /* Some constants coerced to size_t */ + /* Annoying but necessary to avoid errors on some platforms */ + #define SIZE_T_ZERO ((size_t)0) + #define SIZE_T_ONE ((size_t)1) + #define SIZE_T_TWO ((size_t)2) + #define SIZE_T_FOUR ((size_t)4) + #define TWO_SIZE_T_SIZES (SIZE_T_SIZE << 1) + #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE << 2) + #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES + TWO_SIZE_T_SIZES) + #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) + + /* The bit mask value corresponding to MALLOC_ALIGNMENT */ + #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) + + /* True if address a has acceptable alignment */ + #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) + + /* the number of 
bytes to offset an address to align it */ + #define align_offset(A) \ + ((((size_t)(A)&CHUNK_ALIGN_MASK) == 0) \ + ? 0 \ + : ((MALLOC_ALIGNMENT - ((size_t)(A)&CHUNK_ALIGN_MASK)) & \ + CHUNK_ALIGN_MASK)) + + /* -------------------------- MMAP preliminaries ------------------------- */ + + /* + If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and + checks to fail so compiler optimizer can delete code rather than + using so many "#if"s. + */ + /* MORECORE and MMAP must return MFAIL on failure */ + #define MFAIL ((void *)(MAX_SIZE_T)) + #define CMFAIL ((char *)(MFAIL)) /* defined for convenience */ -/* MORECORE and MMAP must return MFAIL on failure */ -#define MFAIL ((void*)(MAX_SIZE_T)) -#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ + #if HAVE_MMAP -#if HAVE_MMAP + #ifndef WIN32 + #define MMAP_PROT (PROT_READ | PROT_WRITE) + #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) + #define MAP_ANONYMOUS MAP_ANON + #endif /* MAP_ANON */ + #ifdef MAP_ANONYMOUS -#ifndef WIN32 -#define MMAP_PROT (PROT_READ|PROT_WRITE) -#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) -#define MAP_ANONYMOUS MAP_ANON -#endif /* MAP_ANON */ -#ifdef MAP_ANONYMOUS + #define MMAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS) -#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) +static FORCEINLINE void *unixmmap(size_t size) { -static FORCEINLINE void* unixmmap(size_t size) { - void* result; + void *result; result = mmap(0, size, MMAP_PROT, MMAP_FLAGS, -1, 0); - if (result == MFAIL) - return MFAIL; + if (result == MFAIL) return MFAIL; return result; + } -static FORCEINLINE int unixmunmap(void* ptr, size_t size) { +static FORCEINLINE int unixmunmap(void *ptr, size_t size) { + int result; result = munmap(ptr, size); - if (result != 0) - return result; + if (result != 0) return result; return result; + } -#define MMAP_DEFAULT(s) unixmmap(s) -#define MUNMAP_DEFAULT(a, s) unixmunmap((a), (s)) + #define MMAP_DEFAULT(s) unixmmap(s) + #define MUNMAP_DEFAULT(a, s) unixmunmap((a), (s)) -#else /* MAP_ANONYMOUS */ -/* - Nearly all versions of mmap support MAP_ANONYMOUS, so the following - is unlikely to be needed, but is supplied just in case. -*/ -#define MMAP_FLAGS (MAP_PRIVATE) -static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ -#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \ - (dev_zero_fd = open("/dev/zero", O_RDWR), \ - mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ - mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) -#define MUNMAP_DEFAULT(a, s) munmap((a), (s)) -#endif /* MAP_ANONYMOUS */ + #else /* MAP_ANONYMOUS */ + /* + Nearly all versions of mmap support MAP_ANONYMOUS, so the following + is unlikely to be needed, but is supplied just in case. + */ + #define MMAP_FLAGS (MAP_PRIVATE) +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ + #define MMAP_DEFAULT(s) \ + ((dev_zero_fd < 0) \ + ? 
(dev_zero_fd = open("/dev/zero", O_RDWR), \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) \ + : mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) + #define MUNMAP_DEFAULT(a, s) munmap((a), (s)) + #endif /* MAP_ANONYMOUS */ -#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) + #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) -#else /* WIN32 */ + #else /* WIN32 */ /* Win32 MMAP via VirtualAlloc */ -static FORCEINLINE void* win32mmap(size_t size) { - void* ptr; +static FORCEINLINE void *win32mmap(size_t size) { + + void *ptr; - ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); - if (ptr == 0) - return MFAIL; + ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); + if (ptr == 0) return MFAIL; return ptr; + } /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ -static FORCEINLINE void* win32direct_mmap(size_t size) { - void* ptr; +static FORCEINLINE void *win32direct_mmap(size_t size) { + + void *ptr; - ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, - PAGE_READWRITE); - if (ptr == 0) - return MFAIL; + ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, + PAGE_READWRITE); + if (ptr == 0) return MFAIL; return ptr; + } /* This function supports releasing coalesed segments */ -static FORCEINLINE int win32munmap(void* ptr, size_t size) { +static FORCEINLINE int win32munmap(void *ptr, size_t size) { + MEMORY_BASIC_INFORMATION minfo; - char* cptr = (char*)ptr; + char *cptr = (char *)ptr; while (size) { - if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) - return -1; + + if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) return -1; if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || minfo.State != MEM_COMMIT || minfo.RegionSize > size) return -1; - if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) - return -1; + if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) return -1; cptr += minfo.RegionSize; size -= minfo.RegionSize; + } return 0; + } -#define MMAP_DEFAULT(s) win32mmap(s) -#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) -#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) -#endif /* WIN32 */ -#endif /* HAVE_MMAP */ + #define MMAP_DEFAULT(s) win32mmap(s) + #define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) + #define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) + #endif /* WIN32 */ + #endif /* HAVE_MMAP */ + + #if HAVE_MREMAP + #ifndef WIN32 -#if HAVE_MREMAP -#ifndef WIN32 +static FORCEINLINE void *dlmremap(void *old_address, size_t old_size, + size_t new_size, int flags) { -static FORCEINLINE void* dlmremap(void* old_address, size_t old_size, size_t new_size, int flags) { - void* result; + void *result; result = mremap(old_address, old_size, new_size, flags); - if (result == MFAIL) - return MFAIL; + if (result == MFAIL) return MFAIL; return result; + } -#define MREMAP_DEFAULT(addr, osz, nsz, mv) dlmremap((addr), (osz), (nsz), (mv)) -#endif /* WIN32 */ -#endif /* HAVE_MREMAP */ + #define MREMAP_DEFAULT(addr, osz, nsz, mv) \ + dlmremap((addr), (osz), (nsz), (mv)) + #endif /* WIN32 */ + #endif /* HAVE_MREMAP */ -/** - * Define CALL_MORECORE - */ -#if HAVE_MORECORE + /** + * Define CALL_MORECORE + */ + #if HAVE_MORECORE #ifdef MORECORE - #define CALL_MORECORE(S) MORECORE(S) - #else /* MORECORE */ - #define CALL_MORECORE(S) MORECORE_DEFAULT(S) - #endif /* MORECORE */ -#else /* HAVE_MORECORE */ - #define CALL_MORECORE(S) MFAIL -#endif /* HAVE_MORECORE */ - -/** - * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP - */ -#if HAVE_MMAP - #define USE_MMAP_BIT (SIZE_T_ONE) + #define CALL_MORECORE(S) MORECORE(S) + 
#else /* MORECORE */ + #define CALL_MORECORE(S) MORECORE_DEFAULT(S) + #endif /* MORECORE */ + #else /* HAVE_MORECORE */ + #define CALL_MORECORE(S) MFAIL + #endif /* HAVE_MORECORE */ + + /** + * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP + */ + #if HAVE_MMAP + #define USE_MMAP_BIT (SIZE_T_ONE) #ifdef MMAP - #define CALL_MMAP(s) MMAP(s) - #else /* MMAP */ - #define CALL_MMAP(s) MMAP_DEFAULT(s) - #endif /* MMAP */ + #define CALL_MMAP(s) MMAP(s) + #else /* MMAP */ + #define CALL_MMAP(s) MMAP_DEFAULT(s) + #endif /* MMAP */ #ifdef MUNMAP - #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) - #else /* MUNMAP */ - #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) - #endif /* MUNMAP */ + #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) + #else /* MUNMAP */ + #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) + #endif /* MUNMAP */ #ifdef DIRECT_MMAP - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) - #else /* DIRECT_MMAP */ - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) - #endif /* DIRECT_MMAP */ -#else /* HAVE_MMAP */ - #define USE_MMAP_BIT (SIZE_T_ZERO) - - #define MMAP(s) MFAIL - #define MUNMAP(a, s) (-1) - #define DIRECT_MMAP(s) MFAIL - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) - #define CALL_MMAP(s) MMAP(s) - #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) -#endif /* HAVE_MMAP */ - -/** - * Define CALL_MREMAP - */ -#if HAVE_MMAP && HAVE_MREMAP + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) + #else /* DIRECT_MMAP */ + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) + #endif /* DIRECT_MMAP */ + #else /* HAVE_MMAP */ + #define USE_MMAP_BIT (SIZE_T_ZERO) + + #define MMAP(s) MFAIL + #define MUNMAP(a, s) (-1) + #define DIRECT_MMAP(s) MFAIL + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) + #define CALL_MMAP(s) MMAP(s) + #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) + #endif /* HAVE_MMAP */ + + /** + * Define CALL_MREMAP + */ + #if HAVE_MMAP && HAVE_MREMAP #ifdef MREMAP - #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) - #else /* MREMAP */ - #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) - #endif /* MREMAP */ -#else /* HAVE_MMAP && HAVE_MREMAP */ - #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL -#endif /* HAVE_MMAP && HAVE_MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) + #else /* MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) \ + MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) + #endif /* MREMAP */ + #else /* HAVE_MMAP && HAVE_MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL + #endif /* HAVE_MMAP && HAVE_MREMAP */ -/* mstate bit set if continguous morecore disabled or failed */ -#define USE_NONCONTIGUOUS_BIT (4U) - -/* segment bit set in create_mspace_with_base */ -#define EXTERN_BIT (8U) + /* mstate bit set if continguous morecore disabled or failed */ + #define USE_NONCONTIGUOUS_BIT (4U) + /* segment bit set in create_mspace_with_base */ + #define EXTERN_BIT (8U) /* --------------------------- Lock preliminaries ------------------------ */ @@ -1855,248 +1901,286 @@ static FORCEINLINE void* dlmremap(void* old_address, size_t old_size, size_t new */ -#if !USE_LOCKS -#define USE_LOCK_BIT (0U) -#define INITIAL_LOCK(l) (0) -#define DESTROY_LOCK(l) (0) -#define ACQUIRE_MALLOC_GLOBAL_LOCK() -#define RELEASE_MALLOC_GLOBAL_LOCK() - -#else -#if USE_LOCKS > 1 -/* ----------------------- User-defined locks ------------------------ */ -/* Define your own lock implementation here */ -/* #define INITIAL_LOCK(lk) ... */ -/* #define DESTROY_LOCK(lk) ... */ -/* #define ACQUIRE_LOCK(lk) ... */ -/* #define RELEASE_LOCK(lk) ... 
*/ -/* #define TRY_LOCK(lk) ... */ -/* static MLOCK_T malloc_global_mutex = ... */ - -#elif USE_SPIN_LOCKS - -/* First, define CAS_LOCK and CLEAR_LOCK on ints */ -/* Note CAS_LOCK defined to return 0 on success */ - -#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) -#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) -#define CLEAR_LOCK(sl) __sync_lock_release(sl) - -#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) + #if !USE_LOCKS + #define USE_LOCK_BIT (0U) + #define INITIAL_LOCK(l) (0) + #define DESTROY_LOCK(l) (0) + #define ACQUIRE_MALLOC_GLOBAL_LOCK() + #define RELEASE_MALLOC_GLOBAL_LOCK() + + #else + #if USE_LOCKS > 1 + /* ----------------------- User-defined locks ------------------------ */ + /* Define your own lock implementation here */ + /* #define INITIAL_LOCK(lk) ... */ + /* #define DESTROY_LOCK(lk) ... */ + /* #define ACQUIRE_LOCK(lk) ... */ + /* #define RELEASE_LOCK(lk) ... */ + /* #define TRY_LOCK(lk) ... */ + /* static MLOCK_T malloc_global_mutex = ... */ + + #elif USE_SPIN_LOCKS + + /* First, define CAS_LOCK and CLEAR_LOCK on ints */ + /* Note CAS_LOCK defined to return 0 on success */ + + #if defined(__GNUC__) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) + #define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) + #define CLEAR_LOCK(sl) __sync_lock_release(sl) + + #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) /* Custom spin locks for older gcc on x86 */ static FORCEINLINE int x86_cas_lock(int *sl) { + int ret; int val = 1; int cmp = 0; - __asm__ __volatile__ ("lock; cmpxchgl %1, %2" - : "=a" (ret) - : "r" (val), "m" (*(sl)), "0"(cmp) - : "memory", "cc"); + __asm__ __volatile__("lock; cmpxchgl %1, %2" + : "=a"(ret) + : "r"(val), "m"(*(sl)), "0"(cmp) + : "memory", "cc"); return ret; + } -static FORCEINLINE void x86_clear_lock(int* sl) { +static FORCEINLINE void x86_clear_lock(int *sl) { + assert(*sl != 0); int prev = 0; int ret; - __asm__ __volatile__ ("lock; xchgl %0, %1" - : "=r" (ret) - : "m" (*(sl)), "0"(prev) - : "memory"); + __asm__ __volatile__("lock; xchgl %0, %1" + : "=r"(ret) + : "m"(*(sl)), "0"(prev) + : "memory"); + } -#define CAS_LOCK(sl) x86_cas_lock(sl) -#define CLEAR_LOCK(sl) x86_clear_lock(sl) - -#else /* Win32 MSC */ -#define CAS_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)1) -#define CLEAR_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)0) - -#endif /* ... gcc spins locks ... */ - -/* How to yield for a spin lock */ -#define SPINS_PER_YIELD 63 -#if defined(_MSC_VER) -#define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ -#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) -#elif defined (__SVR4) && defined (__sun) /* solaris */ -#define SPIN_LOCK_YIELD thr_yield(); -#elif !defined(LACKS_SCHED_H) -#define SPIN_LOCK_YIELD sched_yield(); -#else -#define SPIN_LOCK_YIELD -#endif /* ... yield ... */ - -#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 + #define CAS_LOCK(sl) x86_cas_lock(sl) + #define CLEAR_LOCK(sl) x86_clear_lock(sl) + + #else /* Win32 MSC */ + #define CAS_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)1) + #define CLEAR_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)0) + + #endif /* ... gcc spins locks ... 
*/ + + /* How to yield for a spin lock */ + #define SPINS_PER_YIELD 63 + #if defined(_MSC_VER) + #define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ + #define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) + #elif defined(__SVR4) && defined(__sun) /* solaris */ + #define SPIN_LOCK_YIELD thr_yield(); + #elif !defined(LACKS_SCHED_H) + #define SPIN_LOCK_YIELD sched_yield(); + #else + #define SPIN_LOCK_YIELD + #endif /* ... yield ... */ + + #if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 /* Plain spin locks use single word (embedded in malloc_states) */ static int spin_acquire_lock(int *sl) { + int spins = 0; while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) { - if ((++spins & SPINS_PER_YIELD) == 0) { - SPIN_LOCK_YIELD; - } + + if ((++spins & SPINS_PER_YIELD) == 0) { SPIN_LOCK_YIELD; } + } + return 0; + } -#define MLOCK_T int -#define TRY_LOCK(sl) !CAS_LOCK(sl) -#define RELEASE_LOCK(sl) CLEAR_LOCK(sl) -#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0) -#define INITIAL_LOCK(sl) (*sl = 0) -#define DESTROY_LOCK(sl) (0) + #define MLOCK_T int + #define TRY_LOCK(sl) !CAS_LOCK(sl) + #define RELEASE_LOCK(sl) CLEAR_LOCK(sl) + #define ACQUIRE_LOCK(sl) (CAS_LOCK(sl) ? spin_acquire_lock(sl) : 0) + #define INITIAL_LOCK(sl) (*sl = 0) + #define DESTROY_LOCK(sl) (0) static MLOCK_T malloc_global_mutex = 0; -#else /* USE_RECURSIVE_LOCKS */ -/* types for lock owners */ -#ifdef WIN32 -#define THREAD_ID_T DWORD -#define CURRENT_THREAD GetCurrentThreadId() -#define EQ_OWNER(X,Y) ((X) == (Y)) -#else -/* - Note: the following assume that pthread_t is a type that can be - initialized to (casted) zero. If this is not the case, you will need to - somehow redefine these or not use spin locks. -*/ -#define THREAD_ID_T pthread_t -#define CURRENT_THREAD pthread_self() -#define EQ_OWNER(X,Y) pthread_equal(X, Y) -#endif + #else /* USE_RECURSIVE_LOCKS */ + /* types for lock owners */ + #ifdef WIN32 + #define THREAD_ID_T DWORD + #define CURRENT_THREAD GetCurrentThreadId() + #define EQ_OWNER(X, Y) ((X) == (Y)) + #else + /* + Note: the following assume that pthread_t is a type that can be + initialized to (casted) zero. If this is not the case, you will need + to somehow redefine these or not use spin locks. 
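+
+        The owner id recorded through these macros is what lets a
+        thread re-enter a lock it already holds; schematically (a
+        sketch of the recursive_* routines defined below):
+
+          MLOCK_T lk;           // struct malloc_recursive_lock
+          INITIAL_LOCK(&lk);    // owner cleared, sl == 0, c == 0
+          ACQUIRE_LOCK(&lk);    // first acquisition: owner recorded, c == 1
+          ACQUIRE_LOCK(&lk);    // same owner: no spinning, c becomes 2
+          RELEASE_LOCK(&lk);    // c drops to 1, lock still held
+          RELEASE_LOCK(&lk);    // c reaches 0, lock actually released
+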
+ */ + #define THREAD_ID_T pthread_t + #define CURRENT_THREAD pthread_self() + #define EQ_OWNER(X, Y) pthread_equal(X, Y) + #endif struct malloc_recursive_lock { - int sl; + + int sl; unsigned int c; - THREAD_ID_T threadid; + THREAD_ID_T threadid; + }; -#define MLOCK_T struct malloc_recursive_lock -static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0}; + #define MLOCK_T struct malloc_recursive_lock +static MLOCK_T malloc_global_mutex = {0, 0, (THREAD_ID_T)0}; static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) { + assert(lk->sl != 0); - if (--lk->c == 0) { - CLEAR_LOCK(&lk->sl); - } + if (--lk->c == 0) { CLEAR_LOCK(&lk->sl); } + } static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) { + THREAD_ID_T mythreadid = CURRENT_THREAD; - int spins = 0; + int spins = 0; for (;;) { + if (*((volatile int *)(&lk->sl)) == 0) { + if (!CAS_LOCK(&lk->sl)) { + lk->threadid = mythreadid; lk->c = 1; return 0; + } - } - else if (EQ_OWNER(lk->threadid, mythreadid)) { + + } else if (EQ_OWNER(lk->threadid, mythreadid)) { + ++lk->c; return 0; + } - if ((++spins & SPINS_PER_YIELD) == 0) { - SPIN_LOCK_YIELD; - } + + if ((++spins & SPINS_PER_YIELD) == 0) { SPIN_LOCK_YIELD; } + } + } static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) { + THREAD_ID_T mythreadid = CURRENT_THREAD; if (*((volatile int *)(&lk->sl)) == 0) { + if (!CAS_LOCK(&lk->sl)) { + lk->threadid = mythreadid; lk->c = 1; return 1; + } - } - else if (EQ_OWNER(lk->threadid, mythreadid)) { + + } else if (EQ_OWNER(lk->threadid, mythreadid)) { + ++lk->c; return 1; + } + return 0; + } -#define RELEASE_LOCK(lk) recursive_release_lock(lk) -#define TRY_LOCK(lk) recursive_try_lock(lk) -#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) -#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) -#define DESTROY_LOCK(lk) (0) -#endif /* USE_RECURSIVE_LOCKS */ - -#elif defined(WIN32) /* Win32 critical sections */ -#define MLOCK_T CRITICAL_SECTION -#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) -#define RELEASE_LOCK(lk) LeaveCriticalSection(lk) -#define TRY_LOCK(lk) TryEnterCriticalSection(lk) -#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000)) -#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) -#define NEED_GLOBAL_LOCK_INIT - -static MLOCK_T malloc_global_mutex; + #define RELEASE_LOCK(lk) recursive_release_lock(lk) + #define TRY_LOCK(lk) recursive_try_lock(lk) + #define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) + #define INITIAL_LOCK(lk) \ + ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) + #define DESTROY_LOCK(lk) (0) + #endif /* USE_RECURSIVE_LOCKS */ + + #elif defined(WIN32) /* Win32 critical sections */ + #define MLOCK_T CRITICAL_SECTION + #define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) + #define RELEASE_LOCK(lk) LeaveCriticalSection(lk) + #define TRY_LOCK(lk) TryEnterCriticalSection(lk) + #define INITIAL_LOCK(lk) \ + (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000 | 4000)) + #define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) + #define NEED_GLOBAL_LOCK_INIT + +static MLOCK_T malloc_global_mutex; static volatile LONG malloc_global_mutex_status; /* Use spin loop to initialize global lock */ static void init_malloc_global_mutex() { + for (;;) { + long stat = malloc_global_mutex_status; - if (stat > 0) - return; + if (stat > 0) return; /* transition to < 0 while initializing, then to > 0) */ - if (stat == 0 && - interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) { + if (stat == 0 && 
interlockedcompareexchange(&malloc_global_mutex_status, + (LONG)-1, (LONG)0) == 0) { + InitializeCriticalSection(&malloc_global_mutex); interlockedexchange(&malloc_global_mutex_status, (LONG)1); return; + } + SleepEx(0, FALSE); + } + } -#else /* pthreads-based locks */ -#define MLOCK_T pthread_mutex_t -#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) -#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) -#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) -#define INITIAL_LOCK(lk) pthread_init_lock(lk) -#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) + #else /* pthreads-based locks */ + #define MLOCK_T pthread_mutex_t + #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) + #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) + #define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) + #define INITIAL_LOCK(lk) pthread_init_lock(lk) + #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) -#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) + #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && \ + defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) /* Cope with old-style linux recursive lock initialization by adding */ /* skipped internal declaration from pthread.h */ -extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr, - int __kind)); -#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP -#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y) -#endif /* USE_RECURSIVE_LOCKS ... */ +extern int pthread_mutexattr_setkind_np __P((pthread_mutexattr_t * __attr, + int __kind)); + #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP + #define pthread_mutexattr_settype(x, y) \ + pthread_mutexattr_setkind_np(x, y) + #endif /* USE_RECURSIVE_LOCKS ... */ static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER; -static int pthread_init_lock (MLOCK_T *lk) { +static int pthread_init_lock(MLOCK_T *lk) { + pthread_mutexattr_t attr; if (pthread_mutexattr_init(&attr)) return 1; -#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 + #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1; -#endif + #endif if (pthread_mutex_init(lk, &attr)) return 1; if (pthread_mutexattr_destroy(&attr)) return 1; return 0; + } -#endif /* ... lock types ... */ + #endif /* ... lock types ... */ -/* Common code for all lock types */ -#define USE_LOCK_BIT (2U) + /* Common code for all lock types */ + #define USE_LOCK_BIT (2U) -#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK -#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); -#endif + #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK + #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); + #endif -#ifndef RELEASE_MALLOC_GLOBAL_LOCK -#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); -#endif + #ifndef RELEASE_MALLOC_GLOBAL_LOCK + #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); + #endif -#endif /* USE_LOCKS */ + #endif /* USE_LOCKS */ /* ----------------------- Chunk representations ------------------------ */ @@ -2236,56 +2320,56 @@ static int pthread_init_lock (MLOCK_T *lk) { */ struct malloc_chunk { - size_t prev_foot; /* Size of previous chunk (if free). */ - size_t head; /* Size and inuse bits. */ - struct malloc_chunk* fd; /* double links -- used only if free. */ - struct malloc_chunk* bk; + + size_t prev_foot; /* Size of previous chunk (if free). */ + size_t head; /* Size and inuse bits. 
*/ + struct malloc_chunk *fd; /* double links -- used only if free. */ + struct malloc_chunk *bk; + }; typedef struct malloc_chunk mchunk; -typedef struct malloc_chunk* mchunkptr; -typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ -typedef unsigned int bindex_t; /* Described below */ -typedef unsigned int binmap_t; /* Described below */ -typedef unsigned int flag_t; /* The type of various bit flag sets */ +typedef struct malloc_chunk *mchunkptr; +typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */ +typedef unsigned int bindex_t; /* Described below */ +typedef unsigned int binmap_t; /* Described below */ +typedef unsigned int flag_t; /* The type of various bit flag sets */ /* ------------------- Chunks sizes and alignments ----------------------- */ -#define MCHUNK_SIZE (sizeof(mchunk)) + #define MCHUNK_SIZE (sizeof(mchunk)) -#if FOOTERS -#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) -#else /* FOOTERS */ -#define CHUNK_OVERHEAD (SIZE_T_SIZE) -#endif /* FOOTERS */ + #if FOOTERS + #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) + #else /* FOOTERS */ + #define CHUNK_OVERHEAD (SIZE_T_SIZE) + #endif /* FOOTERS */ -/* MMapped chunks need a second word of overhead ... */ -#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) -/* ... and additional padding for fake next-chunk at foot */ -#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) + /* MMapped chunks need a second word of overhead ... */ + #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) + /* ... and additional padding for fake next-chunk at foot */ + #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) -/* The smallest size we can malloc is an aligned minimal chunk */ -#define MIN_CHUNK_SIZE\ - ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + /* The smallest size we can malloc is an aligned minimal chunk */ + #define MIN_CHUNK_SIZE ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) -/* conversion from malloc headers to user pointers, and back */ -#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) -#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) -/* chunk associated with aligned address A */ -#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) + /* conversion from malloc headers to user pointers, and back */ + #define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES)) + #define mem2chunk(mem) ((mchunkptr)((char *)(mem)-TWO_SIZE_T_SIZES)) + /* chunk associated with aligned address A */ + #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) -/* Bounds on request (not chunk) sizes. */ -#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) -#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) + /* Bounds on request (not chunk) sizes. */ + #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) + #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) -/* pad request bytes into a usable size */ -#define pad_request(req) \ - (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) - -/* pad request, checking for minimum (but not maximum) */ -#define request2size(req) \ - (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) + /* pad request bytes into a usable size */ + #define pad_request(req) \ + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + /* pad request, checking for minimum (but not maximum) */ + #define request2size(req) \ + (((req) < MIN_REQUEST) ? 
MIN_CHUNK_SIZE : pad_request(req)) /* ------------------ Operations on head and foot fields ----------------- */ @@ -2297,61 +2381,60 @@ typedef unsigned int flag_t; /* The type of various bit flag sets */ FLAG4_BIT is not used by this malloc, but might be useful in extensions. */ -#define PINUSE_BIT (SIZE_T_ONE) -#define CINUSE_BIT (SIZE_T_TWO) -#define FLAG4_BIT (SIZE_T_FOUR) -#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) -#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT) + #define PINUSE_BIT (SIZE_T_ONE) + #define CINUSE_BIT (SIZE_T_TWO) + #define FLAG4_BIT (SIZE_T_FOUR) + #define INUSE_BITS (PINUSE_BIT | CINUSE_BIT) + #define FLAG_BITS (PINUSE_BIT | CINUSE_BIT | FLAG4_BIT) -/* Head value for fenceposts */ -#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) + /* Head value for fenceposts */ + #define FENCEPOST_HEAD (INUSE_BITS | SIZE_T_SIZE) -/* extraction of fields from head words */ -#define cinuse(p) ((p)->head & CINUSE_BIT) -#define pinuse(p) ((p)->head & PINUSE_BIT) -#define flag4inuse(p) ((p)->head & FLAG4_BIT) -#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) -#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) + /* extraction of fields from head words */ + #define cinuse(p) ((p)->head & CINUSE_BIT) + #define pinuse(p) ((p)->head & PINUSE_BIT) + #define flag4inuse(p) ((p)->head & FLAG4_BIT) + #define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) + #define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) -#define chunksize(p) ((p)->head & ~(FLAG_BITS)) + #define chunksize(p) ((p)->head & ~(FLAG_BITS)) -#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) -#define set_flag4(p) ((p)->head |= FLAG4_BIT) -#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) + #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) + #define set_flag4(p) ((p)->head |= FLAG4_BIT) + #define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) -/* Treat space at ptr +/- offset as a chunk */ -#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) -#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) + /* Treat space at ptr +/- offset as a chunk */ + #define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s))) + #define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s))) -/* Ptr to next or previous physical malloc_chunk. */ -#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS))) -#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) + /* Ptr to next or previous physical malloc_chunk. 
*/ + #define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~FLAG_BITS))) + #define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot))) -/* extract next chunk's pinuse bit */ -#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) + /* extract next chunk's pinuse bit */ + #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) -/* Get/set size at footer */ -#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) -#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) + /* Get/set size at footer */ + #define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot) + #define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s)) -/* Set size, pinuse bit, and foot */ -#define set_size_and_pinuse_of_free_chunk(p, s)\ - ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) + /* Set size, pinuse bit, and foot */ + #define set_size_and_pinuse_of_free_chunk(p, s) \ + ((p)->head = (s | PINUSE_BIT), set_foot(p, s)) -/* Set size, pinuse bit, foot, and clear next pinuse */ -#define set_free_with_pinuse(p, s, n)\ - (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) + /* Set size, pinuse bit, foot, and clear next pinuse */ + #define set_free_with_pinuse(p, s, n) \ + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) -/* Get the internal overhead associated with chunk p */ -#define overhead_for(p)\ - (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) + /* Get the internal overhead associated with chunk p */ + #define overhead_for(p) (is_mmapped(p) ? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) -/* Return true if malloced space is not necessarily cleared */ -#if MMAP_CLEARS -#define calloc_must_clear(p) (!is_mmapped(p)) -#else /* MMAP_CLEARS */ -#define calloc_must_clear(p) (1) -#endif /* MMAP_CLEARS */ + /* Return true if malloced space is not necessarily cleared */ + #if MMAP_CLEARS + #define calloc_must_clear(p) (!is_mmapped(p)) + #else /* MMAP_CLEARS */ + #define calloc_must_clear(p) (1) + #endif /* MMAP_CLEARS */ /* ---------------------- Overlaid data structures ----------------------- */ @@ -2445,23 +2528,25 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ size_t prev_foot; size_t head; - struct malloc_tree_chunk* fd; - struct malloc_tree_chunk* bk; + struct malloc_tree_chunk *fd; + struct malloc_tree_chunk *bk; - struct malloc_tree_chunk* child[2]; - struct malloc_tree_chunk* parent; + struct malloc_tree_chunk *child[2]; + struct malloc_tree_chunk *parent; bindex_t index; + }; typedef struct malloc_tree_chunk tchunk; -typedef struct malloc_tree_chunk* tchunkptr; -typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ +typedef struct malloc_tree_chunk *tchunkptr; +typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */ -/* A little helper macro for trees */ -#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) + /* A little helper macro for trees */ + #define leftmost_child(t) ((t)->child[0] != 0 ? 
(t)->child[0] : (t)->child[1]) /* ----------------------------- Segments -------------------------------- */ @@ -2521,141 +2606,145 @@ typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ */ struct malloc_segment { - char* base; /* base address */ - size_t size; /* allocated size */ - struct malloc_segment* next; /* ptr to next segment */ - flag_t sflags; /* mmap and extern flag */ + + char * base; /* base address */ + size_t size; /* allocated size */ + struct malloc_segment *next; /* ptr to next segment */ + flag_t sflags; /* mmap and extern flag */ + }; -#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) -#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) + #define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) + #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) typedef struct malloc_segment msegment; -typedef struct malloc_segment* msegmentptr; +typedef struct malloc_segment *msegmentptr; -/* ---------------------------- malloc_state ----------------------------- */ + /* ---------------------------- malloc_state ----------------------------- */ -/* - A malloc_state holds all of the bookkeeping for a space. - The main fields are: - - Top - The topmost chunk of the currently active segment. Its size is - cached in topsize. The actual size of topmost space is - topsize+TOP_FOOT_SIZE, which includes space reserved for adding - fenceposts and segment records if necessary when getting more - space from the system. The size at which to autotrim top is - cached from mparams in trim_check, except that it is disabled if - an autotrim fails. - - Designated victim (dv) - This is the preferred chunk for servicing small requests that - don't have exact fits. It is normally the chunk split off most - recently to service another small request. Its size is cached in - dvsize. The link fields of this chunk are not maintained since it - is not kept in a bin. - - SmallBins - An array of bin headers for free chunks. These bins hold chunks - with sizes less than MIN_LARGE_SIZE bytes. Each bin contains - chunks of all the same size, spaced 8 bytes apart. To simplify - use in double-linked lists, each bin header acts as a malloc_chunk - pointing to the real first node, if it exists (else pointing to - itself). This avoids special-casing for headers. But to avoid - waste, we allocate only the fd/bk pointers of bins, and then use - repositioning tricks to treat these as the fields of a chunk. - - TreeBins - Treebins are pointers to the roots of trees holding a range of - sizes. There are 2 equally spaced treebins for each power of two - from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything - larger. - - Bin maps - There is one bit map for small bins ("smallmap") and one for - treebins ("treemap). Each bin sets its bit when non-empty, and - clears the bit when empty. Bit operations are then used to avoid - bin-by-bin searching -- nearly all "search" is done without ever - looking at bins that won't be selected. The bit maps - conservatively use 32 bits per map word, even if on 64bit system. - For a good description of some of the bit-based techniques used - here, see Henry S. Warren Jr's book "Hacker's Delight" (and - supplement at http://hackersdelight.org/). Many of these are - intended to reduce the branchiness of paths through malloc etc, as - well as to reduce the number of memory locations read or written. - - Segments - A list of segments headed by an embedded malloc_segment record - representing the initial space. 
- - Address check support - The least_addr field is the least address ever obtained from - MORECORE or MMAP. Attempted frees and reallocs of any address less - than this are trapped (unless INSECURE is defined). - - Magic tag - A cross-check field that should always hold same value as mparams.magic. - - Max allowed footprint - The maximum allowed bytes to allocate from system (zero means no limit) - - Flags - Bits recording whether to use MMAP, locks, or contiguous MORECORE - - Statistics - Each space keeps track of current and maximum system memory - obtained via MORECORE or MMAP. - - Trim support - Fields holding the amount of unused topmost memory that should trigger - trimming, and a counter to force periodic scanning to release unused - non-topmost segments. - - Locking - If USE_LOCKS is defined, the "mutex" lock is acquired and released - around every public call using this mspace. - - Extension support - A void* pointer and a size_t field that can be used to help implement - extensions to this malloc. -*/ + /* + A malloc_state holds all of the bookkeeping for a space. + The main fields are: + + Top + The topmost chunk of the currently active segment. Its size is + cached in topsize. The actual size of topmost space is + topsize+TOP_FOOT_SIZE, which includes space reserved for adding + fenceposts and segment records if necessary when getting more + space from the system. The size at which to autotrim top is + cached from mparams in trim_check, except that it is disabled if + an autotrim fails. + + Designated victim (dv) + This is the preferred chunk for servicing small requests that + don't have exact fits. It is normally the chunk split off most + recently to service another small request. Its size is cached in + dvsize. The link fields of this chunk are not maintained since it + is not kept in a bin. + + SmallBins + An array of bin headers for free chunks. These bins hold chunks + with sizes less than MIN_LARGE_SIZE bytes. Each bin contains + chunks of all the same size, spaced 8 bytes apart. To simplify + use in double-linked lists, each bin header acts as a malloc_chunk + pointing to the real first node, if it exists (else pointing to + itself). This avoids special-casing for headers. But to avoid + waste, we allocate only the fd/bk pointers of bins, and then use + repositioning tricks to treat these as the fields of a chunk. + + TreeBins + Treebins are pointers to the roots of trees holding a range of + sizes. There are 2 equally spaced treebins for each power of two + from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything + larger. + + Bin maps + There is one bit map for small bins ("smallmap") and one for + treebins ("treemap). Each bin sets its bit when non-empty, and + clears the bit when empty. Bit operations are then used to avoid + bin-by-bin searching -- nearly all "search" is done without ever + looking at bins that won't be selected. The bit maps + conservatively use 32 bits per map word, even if on 64bit system. + For a good description of some of the bit-based techniques used + here, see Henry S. Warren Jr's book "Hacker's Delight" (and + supplement at http://hackersdelight.org/). Many of these are + intended to reduce the branchiness of paths through malloc etc, as + well as to reduce the number of memory locations read or written. + + Segments + A list of segments headed by an embedded malloc_segment record + representing the initial space. + + Address check support + The least_addr field is the least address ever obtained from + MORECORE or MMAP. 
Attempted frees and reallocs of any address less + than this are trapped (unless INSECURE is defined). + + Magic tag + A cross-check field that should always hold same value as mparams.magic. + + Max allowed footprint + The maximum allowed bytes to allocate from system (zero means no limit) + + Flags + Bits recording whether to use MMAP, locks, or contiguous MORECORE + + Statistics + Each space keeps track of current and maximum system memory + obtained via MORECORE or MMAP. + + Trim support + Fields holding the amount of unused topmost memory that should trigger + trimming, and a counter to force periodic scanning to release unused + non-topmost segments. + + Locking + If USE_LOCKS is defined, the "mutex" lock is acquired and released + around every public call using this mspace. + + Extension support + A void* pointer and a size_t field that can be used to help implement + extensions to this malloc. + */ -/* Bin types, widths and sizes */ -#define NSMALLBINS (32U) -#define NTREEBINS (32U) -#define SMALLBIN_SHIFT (3U) -#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) -#define TREEBIN_SHIFT (8U) -#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) -#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) -#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) + /* Bin types, widths and sizes */ + #define NSMALLBINS (32U) + #define NTREEBINS (32U) + #define SMALLBIN_SHIFT (3U) + #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) + #define TREEBIN_SHIFT (8U) + #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) + #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) + #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) struct malloc_state { - binmap_t smallmap; - binmap_t treemap; - size_t dvsize; - size_t topsize; - char* least_addr; - mchunkptr dv; - mchunkptr top; - size_t trim_check; - size_t release_checks; - size_t magic; - mchunkptr smallbins[(NSMALLBINS+1)*2]; - tbinptr treebins[NTREEBINS]; - size_t footprint; - size_t max_footprint; - size_t footprint_limit; /* zero means no limit */ - flag_t mflags; -#if USE_LOCKS - MLOCK_T mutex; /* locate lock among fields that rarely change */ -#endif /* USE_LOCKS */ - msegment seg; - void* extp; /* Unused but available for extensions */ - size_t exts; + + binmap_t smallmap; + binmap_t treemap; + size_t dvsize; + size_t topsize; + char * least_addr; + mchunkptr dv; + mchunkptr top; + size_t trim_check; + size_t release_checks; + size_t magic; + mchunkptr smallbins[(NSMALLBINS + 1) * 2]; + tbinptr treebins[NTREEBINS]; + size_t footprint; + size_t max_footprint; + size_t footprint_limit; /* zero means no limit */ + flag_t mflags; + #if USE_LOCKS + MLOCK_T mutex; /* locate lock among fields that rarely change */ + #endif /* USE_LOCKS */ + msegment seg; + void * extp; /* Unused but available for extensions */ + size_t exts; + }; -typedef struct malloc_state* mstate; +typedef struct malloc_state *mstate; /* ------------- Global malloc_state and malloc_params ------------------- */ @@ -2667,123 +2756,128 @@ typedef struct malloc_state* mstate; */ struct malloc_params { + size_t magic; size_t page_size; size_t granularity; size_t mmap_threshold; size_t trim_threshold; flag_t default_mflags; + }; static struct malloc_params mparams; -/* Ensure mparams initialized */ -#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams()) + /* Ensure mparams initialized */ + #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams()) -#if !ONLY_MSPACES + #if !ONLY_MSPACES /* 
The global malloc_state used for all non-"mspace" calls */ static struct malloc_state _gm_; -#define gm (&_gm_) -#define is_global(M) ((M) == &_gm_) + #define gm (&_gm_) + #define is_global(M) ((M) == &_gm_) -#endif /* !ONLY_MSPACES */ + #endif /* !ONLY_MSPACES */ -#define is_initialized(M) ((M)->top != 0) + #define is_initialized(M) ((M)->top != 0) /* -------------------------- system alloc setup ------------------------- */ /* Operations on mflags */ -#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) -#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) -#if USE_LOCKS -#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) -#else -#define disable_lock(M) -#endif - -#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) -#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) -#if HAVE_MMAP -#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) -#else -#define disable_mmap(M) -#endif - -#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) -#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) - -#define set_lock(M,L)\ - ((M)->mflags = (L)?\ - ((M)->mflags | USE_LOCK_BIT) :\ - ((M)->mflags & ~USE_LOCK_BIT)) - -/* page-align a size */ -#define page_align(S)\ - (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE)) - -/* granularity-align a size */ -#define granularity_align(S)\ - (((S) + (mparams.granularity - SIZE_T_ONE))\ - & ~(mparams.granularity - SIZE_T_ONE)) - - -/* For mmap, use granularity alignment on windows, else page-align */ -#ifdef WIN32 -#define mmap_align(S) granularity_align(S) -#else -#define mmap_align(S) page_align(S) -#endif - -/* For sys_alloc, enough padding to ensure can malloc request on success */ -#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) - -#define is_page_aligned(S)\ - (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) -#define is_granularity_aligned(S)\ - (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) - -/* True if segment S holds address A */ -#define segment_holds(S, A)\ - ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) + #define use_lock(M) ((M)->mflags & USE_LOCK_BIT) + #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) + #if USE_LOCKS + #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) + #else + #define disable_lock(M) + #endif + + #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) + #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) + #if HAVE_MMAP + #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) + #else + #define disable_mmap(M) + #endif + + #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) + #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) + + #define set_lock(M, L) \ + ((M)->mflags = \ + (L) ? 
((M)->mflags | USE_LOCK_BIT) : ((M)->mflags & ~USE_LOCK_BIT)) + + /* page-align a size */ + #define page_align(S) \ + (((S) + (mparams.page_size - SIZE_T_ONE)) & \ + ~(mparams.page_size - SIZE_T_ONE)) + + /* granularity-align a size */ + #define granularity_align(S) \ + (((S) + (mparams.granularity - SIZE_T_ONE)) & \ + ~(mparams.granularity - SIZE_T_ONE)) + + /* For mmap, use granularity alignment on windows, else page-align */ + #ifdef WIN32 + #define mmap_align(S) granularity_align(S) + #else + #define mmap_align(S) page_align(S) + #endif + + /* For sys_alloc, enough padding to ensure can malloc request on success */ + #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) + + #define is_page_aligned(S) \ + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) + #define is_granularity_aligned(S) \ + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) + + /* True if segment S holds address A */ + #define segment_holds(S, A) \ + ((char *)(A) >= S->base && (char *)(A) < S->base + S->size) /* Return segment holding given address */ -static msegmentptr segment_holding(mstate m, char* addr) { +static msegmentptr segment_holding(mstate m, char *addr) { + msegmentptr sp = &m->seg; for (;;) { - if (addr >= sp->base && addr < sp->base + sp->size) - return sp; - if ((sp = sp->next) == 0) - return 0; + + if (addr >= sp->base && addr < sp->base + sp->size) return sp; + if ((sp = sp->next) == 0) return 0; + } + } /* Return true if segment contains a segment link */ static int has_segment_link(mstate m, msegmentptr ss) { + msegmentptr sp = &m->seg; for (;;) { - if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) - return 1; - if ((sp = sp->next) == 0) - return 0; + + if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size) return 1; + if ((sp = sp->next) == 0) return 0; + } -} -#ifndef MORECORE_CANNOT_TRIM -#define should_trim(M,s) ((s) > (M)->trim_check) -#else /* MORECORE_CANNOT_TRIM */ -#define should_trim(M,s) (0) -#endif /* MORECORE_CANNOT_TRIM */ +} -/* - TOP_FOOT_SIZE is padding at the end of a segment, including space - that may be needed to place segment records and fenceposts when new - noncontiguous segments are added. -*/ -#define TOP_FOOT_SIZE\ - (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + #ifndef MORECORE_CANNOT_TRIM + #define should_trim(M, s) ((s) > (M)->trim_check) + #else /* MORECORE_CANNOT_TRIM */ + #define should_trim(M, s) (0) + #endif /* MORECORE_CANNOT_TRIM */ + /* + TOP_FOOT_SIZE is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. + */ + #define TOP_FOOT_SIZE \ + (align_offset(chunk2mem(0)) + pad_request(sizeof(struct malloc_segment)) + \ + MIN_CHUNK_SIZE) /* ------------------------------- Hooks -------------------------------- */ @@ -2793,20 +2887,25 @@ static int has_segment_link(mstate m, msegmentptr ss) { anything you like. */ -#if USE_LOCKS -#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) -#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } -#else /* USE_LOCKS */ + #if USE_LOCKS + #define PREACTION(M) ((use_lock(M)) ? 
ACQUIRE_LOCK(&(M)->mutex) : 0) + #define POSTACTION(M) \ + { \ + \ + if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); \ + \ + } + #else /* USE_LOCKS */ -#ifndef PREACTION -#define PREACTION(M) (0) -#endif /* PREACTION */ + #ifndef PREACTION + #define PREACTION(M) (0) + #endif /* PREACTION */ -#ifndef POSTACTION -#define POSTACTION(M) -#endif /* POSTACTION */ + #ifndef POSTACTION + #define POSTACTION(M) + #endif /* POSTACTION */ -#endif /* USE_LOCKS */ + #endif /* USE_LOCKS */ /* CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. @@ -2816,7 +2915,7 @@ static int has_segment_link(mstate m, msegmentptr ss) { useful in custom actions that try to help diagnose errors. */ -#if PROCEED_ON_ERROR + #if PROCEED_ON_ERROR /* A count of the number of corruption errors causing resets */ int malloc_corruption_error_count; @@ -2824,211 +2923,240 @@ int malloc_corruption_error_count; /* default corruption action */ static void reset_on_error(mstate m); -#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) -#define USAGE_ERROR_ACTION(m, p) - -#else /* PROCEED_ON_ERROR */ + #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) + #define USAGE_ERROR_ACTION(m, p) -#ifndef CORRUPTION_ERROR_ACTION -#define CORRUPTION_ERROR_ACTION(m) ABORT -#endif /* CORRUPTION_ERROR_ACTION */ + #else /* PROCEED_ON_ERROR */ -#ifndef USAGE_ERROR_ACTION -#define USAGE_ERROR_ACTION(m,p) ABORT -#endif /* USAGE_ERROR_ACTION */ + #ifndef CORRUPTION_ERROR_ACTION + #define CORRUPTION_ERROR_ACTION(m) ABORT + #endif /* CORRUPTION_ERROR_ACTION */ -#endif /* PROCEED_ON_ERROR */ + #ifndef USAGE_ERROR_ACTION + #define USAGE_ERROR_ACTION(m, p) ABORT + #endif /* USAGE_ERROR_ACTION */ + #endif /* PROCEED_ON_ERROR */ /* -------------------------- Debugging setup ---------------------------- */ -#if ! 
DEBUG + #if !DEBUG -#define check_free_chunk(M,P) -#define check_inuse_chunk(M,P) -#define check_malloced_chunk(M,P,N) -#define check_mmapped_chunk(M,P) -#define check_malloc_state(M) -#define check_top_chunk(M,P) + #define check_free_chunk(M, P) + #define check_inuse_chunk(M, P) + #define check_malloced_chunk(M, P, N) + #define check_mmapped_chunk(M, P) + #define check_malloc_state(M) + #define check_top_chunk(M, P) -#else /* DEBUG */ -#define check_free_chunk(M,P) do_check_free_chunk(M,P) -#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) -#define check_top_chunk(M,P) do_check_top_chunk(M,P) -#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) -#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) -#define check_malloc_state(M) do_check_malloc_state(M) + #else /* DEBUG */ + #define check_free_chunk(M, P) do_check_free_chunk(M, P) + #define check_inuse_chunk(M, P) do_check_inuse_chunk(M, P) + #define check_top_chunk(M, P) do_check_top_chunk(M, P) + #define check_malloced_chunk(M, P, N) do_check_malloced_chunk(M, P, N) + #define check_mmapped_chunk(M, P) do_check_mmapped_chunk(M, P) + #define check_malloc_state(M) do_check_malloc_state(M) static void do_check_any_chunk(mstate m, mchunkptr p); static void do_check_top_chunk(mstate m, mchunkptr p); static void do_check_mmapped_chunk(mstate m, mchunkptr p); static void do_check_inuse_chunk(mstate m, mchunkptr p); static void do_check_free_chunk(mstate m, mchunkptr p); -static void do_check_malloced_chunk(mstate m, void* mem, size_t s); +static void do_check_malloced_chunk(mstate m, void *mem, size_t s); static void do_check_tree(mstate m, tchunkptr t); static void do_check_treebin(mstate m, bindex_t i); static void do_check_smallbin(mstate m, bindex_t i); static void do_check_malloc_state(mstate m); static int bin_find(mstate m, mchunkptr x); static size_t traverse_and_check(mstate m); -#endif /* DEBUG */ + #endif /* DEBUG */ /* ---------------------------- Indexing Bins ---------------------------- */ -#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) -#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) -#define small_index2size(i) ((i) << SMALLBIN_SHIFT) -#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) - -/* addressing by index. See above about smallbin repositioning */ -#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) -#define treebin_at(M,i) (&((M)->treebins[i])) - -/* assign tree index for size S to variable I. Use x86 asm if possible */ -#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) -#define compute_tree_index(S, I)\ -{\ - unsigned int X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} - -#elif defined (__INTEL_COMPILER) -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K = _bit_scan_reverse (X); \ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} + #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) + #define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) + #define small_index2size(i) ((i) << SMALLBIN_SHIFT) + #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) + + /* addressing by index. 
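   (Editorial worked example, not from the upstream source: with
   SMALLBIN_SHIFT == 3, a 24-byte chunk gives small_index(24) == 3 and
   small_index2size(3) == 24, and smallbin_at(M, 3) reinterprets the address
   of M->smallbins[6] as a chunk header, so only the header's fd/bk fields
   need real storage in the smallbins array.)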
See above about smallbin repositioning */ + #define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i) << 1]))) + #define treebin_at(M, i) (&((M)->treebins[i])) + + /* assign tree index for size S to variable I. Use x86 asm if possible */ + #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) + #define compute_tree_index(S, I) \ + { \ + \ + unsigned int X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int K = (unsigned)sizeof(X) * __CHAR_BIT__ - 1 - \ + (unsigned)__builtin_clz(X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ + \ + } \ + \ + } -#elif defined(_MSC_VER) && _MSC_VER>=1300 -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K;\ - _BitScanReverse((DWORD *) &K, (DWORD) X);\ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} + #elif defined(__INTEL_COMPILER) + #define compute_tree_index(S, I) \ + { \ + \ + size_t X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int K = _bit_scan_reverse(X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ + \ + } \ + \ + } -#else /* GNUC */ -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int Y = (unsigned int)X;\ - unsigned int N = ((Y - 0x100) >> 16) & 8;\ - unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ - N += K;\ - N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ - K = 14 - N + ((Y <<= K) >> 15);\ - I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ - }\ -} -#endif /* GNUC */ + #elif defined(_MSC_VER) && _MSC_VER >= 1300 + #define compute_tree_index(S, I) \ + { \ + \ + size_t X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int K; \ + _BitScanReverse((DWORD *)&K, (DWORD)X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ + \ + } \ + \ + } -/* Bit representing maximum resolved size in a treebin at i */ -#define bit_for_tree_index(i) \ - (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) + #else /* GNUC */ + #define compute_tree_index(S, I) \ + { \ + \ + size_t X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int Y = (unsigned int)X; \ + unsigned int N = ((Y - 0x100) >> 16) & 8; \ + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4; \ + N += K; \ + N += K = (((Y <<= K) - 0x4000) >> 16) & 2; \ + K = 14 - N + ((Y <<= K) >> 15); \ + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1)); \ + \ + } \ + \ + } + #endif /* GNUC */ -/* Shift placing maximum resolved bit in a treebin at i as sign bit */ -#define leftshift_for_tree_index(i) \ - ((i == NTREEBINS-1)? 0 : \ - ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + /* Bit representing maximum resolved size in a treebin at i */ + #define bit_for_tree_index(i) \ + (i == NTREEBINS - 1) ? 
(SIZE_T_BITSIZE - 1) \ + : (((i) >> 1) + TREEBIN_SHIFT - 2) -/* The size of the smallest chunk held in bin with index i */ -#define minsize_for_tree_index(i) \ - ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ - (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) + /* Shift placing maximum resolved bit in a treebin at i as sign bit */ + #define leftshift_for_tree_index(i) \ + ((i == NTREEBINS - 1) \ + ? 0 \ + : ((SIZE_T_BITSIZE - SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + /* The size of the smallest chunk held in bin with index i */ + #define minsize_for_tree_index(i) \ + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ + (((size_t)((i)&SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) -/* ------------------------ Operations on bin maps ----------------------- */ + /* ------------------------ Operations on bin maps ----------------------- */ -/* bit corresponding to given index */ -#define idx2bit(i) ((binmap_t)(1) << (i)) + /* bit corresponding to given index */ + #define idx2bit(i) ((binmap_t)(1) << (i)) -/* Mark/Clear bits with given index */ -#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) -#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) -#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) + /* Mark/Clear bits with given index */ + #define mark_smallmap(M, i) ((M)->smallmap |= idx2bit(i)) + #define clear_smallmap(M, i) ((M)->smallmap &= ~idx2bit(i)) + #define smallmap_is_marked(M, i) ((M)->smallmap & idx2bit(i)) -#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) -#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) -#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) + #define mark_treemap(M, i) ((M)->treemap |= idx2bit(i)) + #define clear_treemap(M, i) ((M)->treemap &= ~idx2bit(i)) + #define treemap_is_marked(M, i) ((M)->treemap & idx2bit(i)) -/* isolate the least set bit of a bitmap */ -#define least_bit(x) ((x) & -(x)) + /* isolate the least set bit of a bitmap */ + #define least_bit(x) ((x) & -(x)) -/* mask with all bits to left of least bit of x on */ -#define left_bits(x) ((x<<1) | -(x<<1)) + /* mask with all bits to left of least bit of x on */ + #define left_bits(x) ((x << 1) | -(x << 1)) -/* mask with all bits to left of or equal to least bit of x on */ -#define same_or_left_bits(x) ((x) | -(x)) + /* mask with all bits to left of or equal to least bit of x on */ + #define same_or_left_bits(x) ((x) | -(x)) /* index corresponding to given bit. 
Use x86 asm if possible */ -#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - J = __builtin_ctz(X); \ - I = (bindex_t)J;\ -} + #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int J; \ + J = __builtin_ctz(X); \ + I = (bindex_t)J; \ + \ + } -#elif defined (__INTEL_COMPILER) -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - J = _bit_scan_forward (X); \ - I = (bindex_t)J;\ -} + #elif defined(__INTEL_COMPILER) + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int J; \ + J = _bit_scan_forward(X); \ + I = (bindex_t)J; \ + \ + } -#elif defined(_MSC_VER) && _MSC_VER>=1300 -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - _BitScanForward((DWORD *) &J, X);\ - I = (bindex_t)J;\ -} + #elif defined(_MSC_VER) && _MSC_VER >= 1300 + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int J; \ + _BitScanForward((DWORD *)&J, X); \ + I = (bindex_t)J; \ + \ + } -#elif USE_BUILTIN_FFS -#define compute_bit2idx(X, I) I = ffs(X)-1 - -#else -#define compute_bit2idx(X, I)\ -{\ - unsigned int Y = X - 1;\ - unsigned int K = Y >> (16-4) & 16;\ - unsigned int N = K; Y >>= K;\ - N += K = Y >> (8-3) & 8; Y >>= K;\ - N += K = Y >> (4-2) & 4; Y >>= K;\ - N += K = Y >> (2-1) & 2; Y >>= K;\ - N += K = Y >> (1-0) & 1; Y >>= K;\ - I = (bindex_t)(N + Y);\ -} -#endif /* GNUC */ + #elif USE_BUILTIN_FFS + #define compute_bit2idx(X, I) I = ffs(X) - 1 + #else + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int Y = X - 1; \ + unsigned int K = Y >> (16 - 4) & 16; \ + unsigned int N = K; \ + Y >>= K; \ + N += K = Y >> (8 - 3) & 8; \ + Y >>= K; \ + N += K = Y >> (4 - 2) & 4; \ + Y >>= K; \ + N += K = Y >> (2 - 1) & 2; \ + Y >>= K; \ + N += K = Y >> (1 - 0) & 1; \ + Y >>= K; \ + I = (bindex_t)(N + Y); \ + \ + } + #endif /* GNUC */ /* ----------------------- Runtime Check Support ------------------------- */ @@ -3058,122 +3186,142 @@ static size_t traverse_and_check(mstate m); next, etc). This turns out to be cheaper than relying on hashes. 
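   (Editorial sketch, not part of the upstream source: the intended usage
   pattern of the predicates defined below, hedged because the actual call
   sites live elsewhere in this file, is roughly

     if (RTCHECK(ok_address(M, p) && ok_inuse(p)))
       proceed with the free or realloc;
     else
       USAGE_ERROR_ACTION(M, p);

   i.e. the checks are grouped under RTCHECK so that, with gcc and INSECURE
   unset, __builtin_expect marks the failure path as unlikely, and they all
   collapse to constant 1 when INSECURE is defined.)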
*/ -#if !INSECURE -/* Check if address a is at least as high as any from MORECORE or MMAP */ -#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) -/* Check if address of next chunk n is higher than base chunk p */ -#define ok_next(p, n) ((char*)(p) < (char*)(n)) -/* Check if p has inuse status */ -#define ok_inuse(p) is_inuse(p) -/* Check if p has its pinuse bit on */ -#define ok_pinuse(p) pinuse(p) - -#else /* !INSECURE */ -#define ok_address(M, a) (1) -#define ok_next(b, n) (1) -#define ok_inuse(p) (1) -#define ok_pinuse(p) (1) -#endif /* !INSECURE */ - -#if (FOOTERS && !INSECURE) -/* Check if (alleged) mstate m has expected magic field */ -#define ok_magic(M) ((M)->magic == mparams.magic) -#else /* (FOOTERS && !INSECURE) */ -#define ok_magic(M) (1) -#endif /* (FOOTERS && !INSECURE) */ - -/* In gcc, use __builtin_expect to minimize impact of checks */ -#if !INSECURE -#if defined(__GNUC__) && __GNUC__ >= 3 -#define RTCHECK(e) __builtin_expect(e, 1) -#else /* GNUC */ -#define RTCHECK(e) (e) -#endif /* GNUC */ -#else /* !INSECURE */ -#define RTCHECK(e) (1) -#endif /* !INSECURE */ + #if !INSECURE + /* Check if address a is at least as high as any from MORECORE or MMAP */ + #define ok_address(M, a) ((char *)(a) >= (M)->least_addr) + /* Check if address of next chunk n is higher than base chunk p */ + #define ok_next(p, n) ((char *)(p) < (char *)(n)) + /* Check if p has inuse status */ + #define ok_inuse(p) is_inuse(p) + /* Check if p has its pinuse bit on */ + #define ok_pinuse(p) pinuse(p) + + #else /* !INSECURE */ + #define ok_address(M, a) (1) + #define ok_next(b, n) (1) + #define ok_inuse(p) (1) + #define ok_pinuse(p) (1) + #endif /* !INSECURE */ + + #if (FOOTERS && !INSECURE) + /* Check if (alleged) mstate m has expected magic field */ + #define ok_magic(M) ((M)->magic == mparams.magic) + #else /* (FOOTERS && !INSECURE) */ + #define ok_magic(M) (1) + #endif /* (FOOTERS && !INSECURE) */ + + /* In gcc, use __builtin_expect to minimize impact of checks */ + #if !INSECURE + #if defined(__GNUC__) && __GNUC__ >= 3 + #define RTCHECK(e) __builtin_expect(e, 1) + #else /* GNUC */ + #define RTCHECK(e) (e) + #endif /* GNUC */ + #else /* !INSECURE */ + #define RTCHECK(e) (1) + #endif /* !INSECURE */ /* macros to set up inuse chunks with or without footers */ -#if !FOOTERS + #if !FOOTERS -#define mark_inuse_foot(M,p,s) + #define mark_inuse_foot(M, p, s) -/* Macros for setting head/foot of non-mmapped chunks */ + /* Macros for setting head/foot of non-mmapped chunks */ -/* Set cinuse bit and pinuse bit of next chunk */ -#define set_inuse(M,p,s)\ - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ - ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + /* Set cinuse bit and pinuse bit of next chunk */ + #define set_inuse(M, p, s) \ + ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \ + ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) -/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ -#define set_inuse_and_pinuse(M,p,s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + /* Set cinuse and pinuse of this chunk and pinuse of next chunk */ + #define set_inuse_and_pinuse(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \ + ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) -/* Set size, cinuse and pinuse bit of this chunk */ -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) + /* Set size, cinuse and pinuse bit of this chunk */ + #define 
set_size_and_pinuse_of_inuse_chunk(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT)) -#else /* FOOTERS */ + #else /* FOOTERS */ -/* Set foot of inuse chunk to be xor of mstate and seed */ -#define mark_inuse_foot(M,p,s)\ - (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) + /* Set foot of inuse chunk to be xor of mstate and seed */ + #define mark_inuse_foot(M, p, s) \ + (((mchunkptr)((char *)(p) + (s)))->prev_foot = \ + ((size_t)(M) ^ mparams.magic)) -#define get_mstate_for(p)\ - ((mstate)(((mchunkptr)((char*)(p) +\ - (chunksize(p))))->prev_foot ^ mparams.magic)) + #define get_mstate_for(p) \ + ((mstate)(((mchunkptr)((char *)(p) + (chunksize(p))))->prev_foot ^ \ + mparams.magic)) -#define set_inuse(M,p,s)\ - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ - (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ - mark_inuse_foot(M,p,s)) + #define set_inuse(M, p, s) \ + ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \ + (((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M, p, s)) -#define set_inuse_and_pinuse(M,p,s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ - mark_inuse_foot(M,p,s)) + #define set_inuse_and_pinuse(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \ + (((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M, p, s)) -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - mark_inuse_foot(M, p, s)) + #define set_size_and_pinuse_of_inuse_chunk(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), mark_inuse_foot(M, p, s)) -#endif /* !FOOTERS */ + #endif /* !FOOTERS */ /* ---------------------------- setting mparams -------------------------- */ -#if LOCK_AT_FORK -static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); } -static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); } -static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); } -#endif /* LOCK_AT_FORK */ + #if LOCK_AT_FORK +static void pre_fork(void) { + + ACQUIRE_LOCK(&(gm)->mutex); + +} + +static void post_fork_parent(void) { + + RELEASE_LOCK(&(gm)->mutex); + +} + +static void post_fork_child(void) { + + INITIAL_LOCK(&(gm)->mutex); + +} + + #endif /* LOCK_AT_FORK */ /* Initialize mparams */ static int init_mparams(void) { -#ifdef NEED_GLOBAL_LOCK_INIT - if (malloc_global_mutex_status <= 0) - init_malloc_global_mutex(); -#endif + + #ifdef NEED_GLOBAL_LOCK_INIT + if (malloc_global_mutex_status <= 0) init_malloc_global_mutex(); + #endif ACQUIRE_MALLOC_GLOBAL_LOCK(); if (mparams.magic == 0) { + size_t magic; size_t psize; size_t gsize; -#ifndef WIN32 + #ifndef WIN32 psize = malloc_getpagesize; - gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize); -#else /* WIN32 */ + gsize = ((DEFAULT_GRANULARITY != 0) ? DEFAULT_GRANULARITY : psize); + #else /* WIN32 */ { + SYSTEM_INFO system_info; GetSystemInfo(&system_info); psize = system_info.dwPageSize; - gsize = ((DEFAULT_GRANULARITY != 0)? - DEFAULT_GRANULARITY : system_info.dwAllocationGranularity); + gsize = + ((DEFAULT_GRANULARITY != 0) ? DEFAULT_GRANULARITY + : system_info.dwAllocationGranularity); + } -#endif /* WIN32 */ + + #endif /* WIN32 */ /* Sanity-check configuration: size_t must be unsigned and as wide as pointer type. @@ -3181,187 +3329,216 @@ static int init_mparams(void) { alignment must be at least 8. Alignment, min chunk size, and page size must all be powers of 2. 
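       (Editorial note, not in the upstream source: the power-of-two tests
       below use the (x & (x - 1)) == 0 idiom, e.g. a 4096-byte page passes
       because 4096 & 4095 == 0, while a hypothetical 4100-byte value would
       fail since 4100 & 4099 == 4096.)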
*/ - if ((sizeof(size_t) != sizeof(char*)) || - (MAX_SIZE_T < MIN_CHUNK_SIZE) || - (sizeof(int) < 4) || - (MALLOC_ALIGNMENT < (size_t)8U) || - ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || - ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || - ((gsize & (gsize-SIZE_T_ONE)) != 0) || - ((psize & (psize-SIZE_T_ONE)) != 0)) + if ((sizeof(size_t) != sizeof(char *)) || (MAX_SIZE_T < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || (MALLOC_ALIGNMENT < (size_t)8U) || + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) || + ((gsize & (gsize - SIZE_T_ONE)) != 0) || + ((psize & (psize - SIZE_T_ONE)) != 0)) ABORT; mparams.granularity = gsize; mparams.page_size = psize; mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; -#if MORECORE_CONTIGUOUS - mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; -#else /* MORECORE_CONTIGUOUS */ - mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; -#endif /* MORECORE_CONTIGUOUS */ - -#if !ONLY_MSPACES + #if MORECORE_CONTIGUOUS + mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT; + #else /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = + USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT; + #endif /* MORECORE_CONTIGUOUS */ + + #if !ONLY_MSPACES /* Set up lock for main malloc area */ gm->mflags = mparams.default_mflags; (void)INITIAL_LOCK(&gm->mutex); -#endif -#if LOCK_AT_FORK + #endif + #if LOCK_AT_FORK pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child); -#endif + #endif { -#if USE_DEV_RANDOM - int fd; + + #if USE_DEV_RANDOM + int fd; unsigned char buf[sizeof(size_t)]; /* Try to use /dev/urandom, else fall back on using time */ if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && read(fd, buf, sizeof(buf)) == sizeof(buf)) { - magic = *((size_t *) buf); + + magic = *((size_t *)buf); close(fd); - } - else -#endif /* USE_DEV_RANDOM */ -#ifdef WIN32 - magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); -#elif defined(LACKS_TIME_H) + + } else + + #endif /* USE_DEV_RANDOM */ + #ifdef WIN32 + magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); + #elif defined(LACKS_TIME_H) magic = (size_t)&magic ^ (size_t)0x55555555U; -#else + #else magic = (size_t)(time(0) ^ (size_t)0x55555555U); -#endif - magic |= (size_t)8U; /* ensure nonzero */ - magic &= ~(size_t)7U; /* improve chances of fault for bad values */ + #endif + magic |= (size_t)8U; /* ensure nonzero */ + magic &= ~(size_t)7U; /* improve chances of fault for bad values */ /* Until memory modes commonly available, use volatile-write */ (*(volatile size_t *)(&(mparams.magic))) = magic; + } + } RELEASE_MALLOC_GLOBAL_LOCK(); return 1; + } /* support for mallopt */ static int change_mparam(int param_number, int value) { + size_t val; ensure_initialization(); - val = (value == -1)? MAX_SIZE_T : (size_t)value; - switch(param_number) { - case M_TRIM_THRESHOLD: - mparams.trim_threshold = val; - return 1; - case M_GRANULARITY: - if (val >= mparams.page_size && ((val & (val-1)) == 0)) { - mparams.granularity = val; + val = (value == -1) ? 
MAX_SIZE_T : (size_t)value; + switch (param_number) { + + case M_TRIM_THRESHOLD: + mparams.trim_threshold = val; return 1; - } - else + case M_GRANULARITY: + if (val >= mparams.page_size && ((val & (val - 1)) == 0)) { + + mparams.granularity = val; + return 1; + + } else + + return 0; + case M_MMAP_THRESHOLD: + mparams.mmap_threshold = val; + return 1; + default: return 0; - case M_MMAP_THRESHOLD: - mparams.mmap_threshold = val; - return 1; - default: - return 0; + } + } -#if DEBUG + #if DEBUG /* ------------------------- Debugging Support --------------------------- */ /* Check properties of any chunk, whether free, inuse, mmapped etc */ static void do_check_any_chunk(mstate m, mchunkptr p) { + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); + } /* Check properties of top chunk */ static void do_check_top_chunk(mstate m, mchunkptr p) { - msegmentptr sp = segment_holding(m, (char*)p); - size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */ + + msegmentptr sp = segment_holding(m, (char *)p); + size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */ assert(sp != 0); assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); assert(sz == m->topsize); assert(sz > 0); - assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); + assert(sz == ((sp->base + sp->size) - (char *)p) - TOP_FOOT_SIZE); assert(pinuse(p)); assert(!pinuse(chunk_plus_offset(p, sz))); + } /* Check properties of (inuse) mmapped chunks */ static void do_check_mmapped_chunk(mstate m, mchunkptr p) { - size_t sz = chunksize(p); + + size_t sz = chunksize(p); size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD); assert(is_mmapped(p)); assert(use_mmap(m)); assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); assert(!is_small(sz)); - assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); + assert((len & (mparams.page_size - SIZE_T_ONE)) == 0); assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); - assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); + assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0); + } /* Check properties of inuse chunks */ static void do_check_inuse_chunk(mstate m, mchunkptr p) { + do_check_any_chunk(m, p); assert(is_inuse(p)); assert(next_pinuse(p)); /* If not pinuse and not mmapped, previous chunk has OK offset */ assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); - if (is_mmapped(p)) - do_check_mmapped_chunk(m, p); + if (is_mmapped(p)) do_check_mmapped_chunk(m, p); + } /* Check properties of free chunks */ static void do_check_free_chunk(mstate m, mchunkptr p) { - size_t sz = chunksize(p); + + size_t sz = chunksize(p); mchunkptr next = chunk_plus_offset(p, sz); do_check_any_chunk(m, p); assert(!is_inuse(p)); assert(!next_pinuse(p)); - assert (!is_mmapped(p)); + assert(!is_mmapped(p)); if (p != m->dv && p != m->top) { + if (sz >= MIN_CHUNK_SIZE) { + assert((sz & CHUNK_ALIGN_MASK) == 0); assert(is_aligned(chunk2mem(p))); assert(next->prev_foot == sz); assert(pinuse(p)); - assert (next == m->top || is_inuse(next)); + assert(next == m->top || is_inuse(next)); assert(p->fd->bk == p); assert(p->bk->fd == p); - } - else /* markers are always of size SIZE_T_SIZE */ + + } else /* markers are always of size SIZE_T_SIZE */ + assert(sz == SIZE_T_SIZE); + } + } /* Check properties of malloced chunks at the point they are malloced */ -static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { +static void 
do_check_malloced_chunk(mstate m, void *mem, size_t s) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); - size_t sz = p->head & ~INUSE_BITS; + size_t sz = p->head & ~INUSE_BITS; do_check_inuse_chunk(m, p); assert((sz & CHUNK_ALIGN_MASK) == 0); assert(sz >= MIN_CHUNK_SIZE); assert(sz >= s); /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); + } + } /* Check a tree and its subtrees. */ static void do_check_tree(mstate m, tchunkptr t) { + tchunkptr head = 0; tchunkptr u = t; - bindex_t tindex = t->index; - size_t tsize = chunksize(t); - bindex_t idx; + bindex_t tindex = t->index; + size_t tsize = chunksize(t); + bindex_t idx; compute_tree_index(tsize, idx); assert(tindex == idx); assert(tsize >= MIN_LARGE_SIZE); assert(tsize >= minsize_for_tree_index(idx)); - assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); + assert((idx == NTREEBINS - 1) || (tsize < minsize_for_tree_index((idx + 1)))); - do { /* traverse through chain of same-sized nodes */ + do { /* traverse through chain of same-sized nodes */ do_check_any_chunk(m, ((mchunkptr)u)); assert(u->index == tindex); assert(chunksize(u) == tsize); @@ -3370,56 +3547,72 @@ static void do_check_tree(mstate m, tchunkptr t) { assert(u->fd->bk == u); assert(u->bk->fd == u); if (u->parent == 0) { + assert(u->child[0] == 0); assert(u->child[1] == 0); - } - else { - assert(head == 0); /* only one node on chain has parent */ + + } else { + + assert(head == 0); /* only one node on chain has parent */ head = u; assert(u->parent != u); - assert (u->parent->child[0] == u || - u->parent->child[1] == u || - *((tbinptr*)(u->parent)) == u); + assert(u->parent->child[0] == u || u->parent->child[1] == u || + *((tbinptr *)(u->parent)) == u); if (u->child[0] != 0) { + assert(u->child[0]->parent == u); assert(u->child[0] != u); do_check_tree(m, u->child[0]); + } + if (u->child[1] != 0) { + assert(u->child[1]->parent == u); assert(u->child[1] != u); do_check_tree(m, u->child[1]); + } + if (u->child[0] != 0 && u->child[1] != 0) { + assert(chunksize(u->child[0]) < chunksize(u->child[1])); + } + } + u = u->fd; + } while (u != t); + assert(head != 0); + } /* Check all the chunks in a treebin. */ static void do_check_treebin(mstate m, bindex_t i) { - tbinptr* tb = treebin_at(m, i); + + tbinptr * tb = treebin_at(m, i); tchunkptr t = *tb; - int empty = (m->treemap & (1U << i)) == 0; - if (t == 0) - assert(empty); - if (!empty) - do_check_tree(m, t); + int empty = (m->treemap & (1U << i)) == 0; + if (t == 0) assert(empty); + if (!empty) do_check_tree(m, t); + } /* Check all the chunks in a smallbin. */ static void do_check_smallbin(mstate m, bindex_t i) { - sbinptr b = smallbin_at(m, i); - mchunkptr p = b->bk; + + sbinptr b = smallbin_at(m, i); + mchunkptr p = b->bk; unsigned int empty = (m->smallmap & (1U << i)) == 0; - if (p == b) - assert(empty); + if (p == b) assert(empty); if (!empty) { + for (; p != b; p = p->bk) { - size_t size = chunksize(p); + + size_t size = chunksize(p); mchunkptr q; /* each chunk claims to be free */ do_check_free_chunk(m, p); @@ -3428,324 +3621,435 @@ static void do_check_smallbin(mstate m, bindex_t i) { assert(p->bk == b || chunksize(p->bk) == chunksize(p)); /* chunk is followed by an inuse chunk */ q = next_chunk(p); - if (q->head != FENCEPOST_HEAD) - do_check_inuse_chunk(m, q); + if (q->head != FENCEPOST_HEAD) do_check_inuse_chunk(m, q); + } + } + } /* Find x in a bin. Used in other check functions. 
*/ static int bin_find(mstate m, mchunkptr x) { + size_t size = chunksize(x); if (is_small(size)) { + bindex_t sidx = small_index(size); - sbinptr b = smallbin_at(m, sidx); + sbinptr b = smallbin_at(m, sidx); if (smallmap_is_marked(m, sidx)) { + mchunkptr p = b; do { - if (p == x) - return 1; + + if (p == x) return 1; + } while ((p = p->fd) != b); + } - } - else { + + } else { + bindex_t tidx; compute_tree_index(size, tidx); if (treemap_is_marked(m, tidx)) { + tchunkptr t = *treebin_at(m, tidx); - size_t sizebits = size << leftshift_for_tree_index(tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); while (t != 0 && chunksize(t) != size) { - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + + t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]; sizebits <<= 1; + } + if (t != 0) { + tchunkptr u = t; do { - if (u == (tchunkptr)x) - return 1; + + if (u == (tchunkptr)x) return 1; + } while ((u = u->fd) != t); + } + } + } + return 0; + } /* Traverse each chunk and check it; return total */ static size_t traverse_and_check(mstate m) { + size_t sum = 0; if (is_initialized(m)) { + msegmentptr s = &m->seg; sum += m->topsize + TOP_FOOT_SIZE; while (s != 0) { + mchunkptr q = align_as_chunk(s->base); mchunkptr lastq = 0; assert(pinuse(q)); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { + while (segment_holds(s, q) && q != m->top && q->head != FENCEPOST_HEAD) { + sum += chunksize(q); if (is_inuse(q)) { + assert(!bin_find(m, q)); do_check_inuse_chunk(m, q); - } - else { + + } else { + assert(q == m->dv || bin_find(m, q)); - assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ + assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ do_check_free_chunk(m, q); + } + lastq = q; q = next_chunk(q); + } + s = s->next; + } + } + return sum; -} +} /* Check all properties of malloc_state. 
*/ static void do_check_malloc_state(mstate m) { + bindex_t i; - size_t total; + size_t total; /* check bins */ for (i = 0; i < NSMALLBINS; ++i) do_check_smallbin(m, i); for (i = 0; i < NTREEBINS; ++i) do_check_treebin(m, i); - if (m->dvsize != 0) { /* check dv chunk */ + if (m->dvsize != 0) { /* check dv chunk */ do_check_any_chunk(m, m->dv); assert(m->dvsize == chunksize(m->dv)); assert(m->dvsize >= MIN_CHUNK_SIZE); assert(bin_find(m, m->dv) == 0); + } - if (m->top != 0) { /* check top chunk */ + if (m->top != 0) { /* check top chunk */ do_check_top_chunk(m, m->top); /*assert(m->topsize == chunksize(m->top)); redundant */ assert(m->topsize > 0); assert(bin_find(m, m->top) == 0); + } total = traverse_and_check(m); assert(total <= m->footprint); assert(m->footprint <= m->max_footprint); + } -#endif /* DEBUG */ + + #endif /* DEBUG */ /* ----------------------------- statistics ------------------------------ */ -#if !NO_MALLINFO + #if !NO_MALLINFO static struct mallinfo internal_mallinfo(mstate m) { - struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + struct mallinfo nm = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; ensure_initialization(); if (!PREACTION(m)) { + check_malloc_state(m); if (is_initialized(m)) { - size_t nfree = SIZE_T_ONE; /* top always free */ - size_t mfree = m->topsize + TOP_FOOT_SIZE; - size_t sum = mfree; + + size_t nfree = SIZE_T_ONE; /* top always free */ + size_t mfree = m->topsize + TOP_FOOT_SIZE; + size_t sum = mfree; msegmentptr s = &m->seg; while (s != 0) { + mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { + while (segment_holds(s, q) && q != m->top && + q->head != FENCEPOST_HEAD) { + size_t sz = chunksize(q); sum += sz; if (!is_inuse(q)) { + mfree += sz; ++nfree; + } + q = next_chunk(q); + } + s = s->next; + } - nm.arena = sum; - nm.ordblks = nfree; - nm.hblkhd = m->footprint - sum; - nm.usmblks = m->max_footprint; + nm.arena = sum; + nm.ordblks = nfree; + nm.hblkhd = m->footprint - sum; + nm.usmblks = m->max_footprint; nm.uordblks = m->footprint - mfree; nm.fordblks = mfree; nm.keepcost = m->topsize; + } POSTACTION(m); + } + return nm; + } -#endif /* !NO_MALLINFO */ -#if !NO_MALLOC_STATS + #endif /* !NO_MALLINFO */ + + #if !NO_MALLOC_STATS static void internal_malloc_stats(mstate m) { + ensure_initialization(); if (!PREACTION(m)) { + size_t maxfp = 0; size_t fp = 0; size_t used = 0; check_malloc_state(m); if (is_initialized(m)) { + msegmentptr s = &m->seg; maxfp = m->max_footprint; fp = m->footprint; used = fp - (m->topsize + TOP_FOOT_SIZE); while (s != 0) { + mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { - if (!is_inuse(q)) - used -= chunksize(q); + while (segment_holds(s, q) && q != m->top && + q->head != FENCEPOST_HEAD) { + + if (!is_inuse(q)) used -= chunksize(q); q = next_chunk(q); + } + s = s->next; + } + } - POSTACTION(m); /* drop lock */ + + POSTACTION(m); /* drop lock */ fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); + } + } -#endif /* NO_MALLOC_STATS */ -/* ----------------------- Operations on smallbins ----------------------- */ + #endif /* NO_MALLOC_STATS */ -/* - Various forms of linking and unlinking are defined as macros. Even - the ones for trees, which are very long but have very short typical - paths. 
This is ugly but reduces reliance on inlining support of - compilers. -*/ + /* ----------------------- Operations on smallbins ----------------------- */ -/* Link a free chunk into a smallbin */ -#define insert_small_chunk(M, P, S) {\ - bindex_t I = small_index(S);\ - mchunkptr B = smallbin_at(M, I);\ - mchunkptr F = B;\ - assert(S >= MIN_CHUNK_SIZE);\ - if (!smallmap_is_marked(M, I))\ - mark_smallmap(M, I);\ - else if (RTCHECK(ok_address(M, B->fd)))\ - F = B->fd;\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - B->fd = P;\ - F->bk = P;\ - P->fd = F;\ - P->bk = B;\ -} + /* + Various forms of linking and unlinking are defined as macros. Even + the ones for trees, which are very long but have very short typical + paths. This is ugly but reduces reliance on inlining support of + compilers. + */ -/* Unlink a chunk from a smallbin */ -#define unlink_small_chunk(M, P, S) {\ - mchunkptr F = P->fd;\ - mchunkptr B = P->bk;\ - bindex_t I = small_index(S);\ - assert(P != B);\ - assert(P != F);\ - assert(chunksize(P) == small_index2size(I));\ - if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \ - if (B == F) {\ - clear_smallmap(M, I);\ - }\ - else if (RTCHECK(B == smallbin_at(M,I) ||\ - (ok_address(M, B) && B->fd == P))) {\ - F->bk = B;\ - B->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ -} + /* Link a free chunk into a smallbin */ + #define insert_small_chunk(M, P, S) \ + { \ + \ + bindex_t I = small_index(S); \ + mchunkptr B = smallbin_at(M, I); \ + mchunkptr F = B; \ + assert(S >= MIN_CHUNK_SIZE); \ + if (!smallmap_is_marked(M, I)) \ + mark_smallmap(M, I); \ + else if (RTCHECK(ok_address(M, B->fd))) \ + F = B->fd; \ + else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + B->fd = P; \ + F->bk = P; \ + P->fd = F; \ + P->bk = B; \ + \ + } -/* Unlink the first chunk from a smallbin */ -#define unlink_first_small_chunk(M, B, P, I) {\ - mchunkptr F = P->fd;\ - assert(P != B);\ - assert(P != F);\ - assert(chunksize(P) == small_index2size(I));\ - if (B == F) {\ - clear_smallmap(M, I);\ - }\ - else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\ - F->bk = B;\ - B->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ -} + /* Unlink a chunk from a smallbin */ + #define unlink_small_chunk(M, P, S) \ + { \ + \ + mchunkptr F = P->fd; \ + mchunkptr B = P->bk; \ + bindex_t I = small_index(S); \ + assert(P != B); \ + assert(P != F); \ + assert(chunksize(P) == small_index2size(I)); \ + if (RTCHECK(F == smallbin_at(M, I) || \ + (ok_address(M, F) && F->bk == P))) { \ + \ + if (B == F) { \ + \ + clear_smallmap(M, I); \ + \ + } else if (RTCHECK(B == smallbin_at(M, I) || \ + \ + (ok_address(M, B) && B->fd == P))) { \ + \ + F->bk = B; \ + B->fd = F; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } -/* Replace dv node, binning the old one */ -/* Used only when dvsize known to be small */ -#define replace_dv(M, P, S) {\ - size_t DVS = M->dvsize;\ - assert(is_small(DVS));\ - if (DVS != 0) {\ - mchunkptr DV = M->dv;\ - insert_small_chunk(M, DV, DVS);\ - }\ - M->dvsize = S;\ - M->dv = P;\ -} + /* Unlink the first chunk from a smallbin */ + #define unlink_first_small_chunk(M, B, P, I) \ + { \ + \ + mchunkptr F = P->fd; \ + assert(P != B); \ + assert(P != F); \ + assert(chunksize(P) == small_index2size(I)); \ + if (B == F) { \ + \ + clear_smallmap(M, I); \ + \ + } else if (RTCHECK(ok_address(M, F) && F->bk == P)) { \ + \ + F->bk = B; \ + B->fd = 
F; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } -/* ------------------------- Operations on trees ------------------------- */ - -/* Insert chunk into tree */ -#define insert_large_chunk(M, X, S) {\ - tbinptr* H;\ - bindex_t I;\ - compute_tree_index(S, I);\ - H = treebin_at(M, I);\ - X->index = I;\ - X->child[0] = X->child[1] = 0;\ - if (!treemap_is_marked(M, I)) {\ - mark_treemap(M, I);\ - *H = X;\ - X->parent = (tchunkptr)H;\ - X->fd = X->bk = X;\ - }\ - else {\ - tchunkptr T = *H;\ - size_t K = S << leftshift_for_tree_index(I);\ - for (;;) {\ - if (chunksize(T) != S) {\ - tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ - K <<= 1;\ - if (*C != 0)\ - T = *C;\ - else if (RTCHECK(ok_address(M, C))) {\ - *C = X;\ - X->parent = T;\ - X->fd = X->bk = X;\ - break;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - break;\ - }\ - }\ - else {\ - tchunkptr F = T->fd;\ - if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ - T->fd = F->bk = X;\ - X->fd = F;\ - X->bk = T;\ - X->parent = 0;\ - break;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - break;\ - }\ - }\ - }\ - }\ -} + /* Replace dv node, binning the old one */ + /* Used only when dvsize known to be small */ + #define replace_dv(M, P, S) \ + { \ + \ + size_t DVS = M->dvsize; \ + assert(is_small(DVS)); \ + if (DVS != 0) { \ + \ + mchunkptr DV = M->dv; \ + insert_small_chunk(M, DV, DVS); \ + \ + } \ + M->dvsize = S; \ + M->dv = P; \ + \ + } + + /* ------------------------- Operations on trees ------------------------- */ + + /* Insert chunk into tree */ + #define insert_large_chunk(M, X, S) \ + { \ + \ + tbinptr *H; \ + bindex_t I; \ + compute_tree_index(S, I); \ + H = treebin_at(M, I); \ + X->index = I; \ + X->child[0] = X->child[1] = 0; \ + if (!treemap_is_marked(M, I)) { \ + \ + mark_treemap(M, I); \ + *H = X; \ + X->parent = (tchunkptr)H; \ + X->fd = X->bk = X; \ + \ + } else { \ + \ + tchunkptr T = *H; \ + size_t K = S << leftshift_for_tree_index(I); \ + for (;;) { \ + \ + if (chunksize(T) != S) { \ + \ + tchunkptr *C = \ + &(T->child[(K >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]); \ + K <<= 1; \ + if (*C != 0) \ + T = *C; \ + else if (RTCHECK(ok_address(M, C))) { \ + \ + *C = X; \ + X->parent = T; \ + X->fd = X->bk = X; \ + break; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + break; \ + \ + } \ + \ + } else { \ + \ + tchunkptr F = T->fd; \ + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) { \ + \ + T->fd = F->bk = X; \ + X->fd = F; \ + X->bk = T; \ + X->parent = 0; \ + break; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + break; \ + \ + } \ + \ + } \ + \ + } \ + \ + } \ + \ + } /* Unlink steps: @@ -3764,104 +4068,145 @@ static void internal_malloc_stats(mstate m) { x's parent and children to x's replacement (or null if none). 
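   (Editorial sketch, not part of the upstream source, condensing the macro
   that follows into pseudo-code; names mirror the macro's locals:

     XP = X->parent
     if (X->bk != X)              -- X sits on a same-size chain:
       splice X out (F = X->fd, R = X->bk, F->bk = R, R->fd = F)
     else                         -- X is alone in its node:
       descend from X->child[1] (else child[0]), always preferring
       child[1], until a leaf R is reached, then clear the link to R
     if (XP != 0)                 -- rewire the tree around X:
       point the treebin root or the matching XP->child slot at R, and
       if R != 0 give it X's parent and both children

   the pointers involved are screened with ok_address under RTCHECK, and any
   mismatch triggers CORRUPTION_ERROR_ACTION.)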
*/ -#define unlink_large_chunk(M, X) {\ - tchunkptr XP = X->parent;\ - tchunkptr R;\ - if (X->bk != X) {\ - tchunkptr F = X->fd;\ - R = X->bk;\ - if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\ - F->bk = R;\ - R->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else {\ - tchunkptr* RP;\ - if (((R = *(RP = &(X->child[1]))) != 0) ||\ - ((R = *(RP = &(X->child[0]))) != 0)) {\ - tchunkptr* CP;\ - while ((*(CP = &(R->child[1])) != 0) ||\ - (*(CP = &(R->child[0])) != 0)) {\ - R = *(RP = CP);\ - }\ - if (RTCHECK(ok_address(M, RP)))\ - *RP = 0;\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - }\ - if (XP != 0) {\ - tbinptr* H = treebin_at(M, X->index);\ - if (X == *H) {\ - if ((*H = R) == 0) \ - clear_treemap(M, X->index);\ - }\ - else if (RTCHECK(ok_address(M, XP))) {\ - if (XP->child[0] == X) \ - XP->child[0] = R;\ - else \ - XP->child[1] = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - if (R != 0) {\ - if (RTCHECK(ok_address(M, R))) {\ - tchunkptr C0, C1;\ - R->parent = XP;\ - if ((C0 = X->child[0]) != 0) {\ - if (RTCHECK(ok_address(M, C0))) {\ - R->child[0] = C0;\ - C0->parent = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - if ((C1 = X->child[1]) != 0) {\ - if (RTCHECK(ok_address(M, C1))) {\ - R->child[1] = C1;\ - C1->parent = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ -} + #define unlink_large_chunk(M, X) \ + { \ + \ + tchunkptr XP = X->parent; \ + tchunkptr R; \ + if (X->bk != X) { \ + \ + tchunkptr F = X->fd; \ + R = X->bk; \ + if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) { \ + \ + F->bk = R; \ + R->fd = F; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } else { \ + \ + tchunkptr *RP; \ + if (((R = *(RP = &(X->child[1]))) != 0) || \ + ((R = *(RP = &(X->child[0]))) != 0)) { \ + \ + tchunkptr *CP; \ + while ((*(CP = &(R->child[1])) != 0) || \ + (*(CP = &(R->child[0])) != 0)) { \ + \ + R = *(RP = CP); \ + \ + } \ + if (RTCHECK(ok_address(M, RP))) \ + *RP = 0; \ + else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } \ + \ + } \ + if (XP != 0) { \ + \ + tbinptr *H = treebin_at(M, X->index); \ + if (X == *H) { \ + \ + if ((*H = R) == 0) clear_treemap(M, X->index); \ + \ + } else if (RTCHECK(ok_address(M, XP))) { \ + \ + if (XP->child[0] == X) \ + XP->child[0] = R; \ + else \ + XP->child[1] = R; \ + \ + } else \ + \ + CORRUPTION_ERROR_ACTION(M); \ + if (R != 0) { \ + \ + if (RTCHECK(ok_address(M, R))) { \ + \ + tchunkptr C0, C1; \ + R->parent = XP; \ + if ((C0 = X->child[0]) != 0) { \ + \ + if (RTCHECK(ok_address(M, C0))) { \ + \ + R->child[0] = C0; \ + C0->parent = R; \ + \ + } else \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + if ((C1 = X->child[1]) != 0) { \ + \ + if (RTCHECK(ok_address(M, C1))) { \ + \ + R->child[1] = C1; \ + C1->parent = R; \ + \ + } else \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } else \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } \ + \ + } /* Relays to large vs small bin operations */ -#define insert_chunk(M, P, S)\ - if (is_small(S)) insert_small_chunk(M, P, S)\ - else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } - -#define unlink_chunk(M, P, S)\ - if (is_small(S)) unlink_small_chunk(M, P, S)\ - else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } + #define insert_chunk(M, P, S) \ + if (is_small(S)) insert_small_chunk(M, P, S) else { \ + \ + tchunkptr TP = (tchunkptr)(P); \ + insert_large_chunk(M, TP, S); \ + \ + } + #define unlink_chunk(M, P, S) \ + if 
(is_small(S)) unlink_small_chunk(M, P, S) else { \ + \ + tchunkptr TP = (tchunkptr)(P); \ + unlink_large_chunk(M, TP); \ + \ + } /* Relays to internal calls to malloc/free from realloc, memalign etc */ -#if ONLY_MSPACES -#define internal_malloc(m, b) mspace_malloc(m, b) -#define internal_free(m, mem) mspace_free(m,mem); -#else /* ONLY_MSPACES */ -#if MSPACES -#define internal_malloc(m, b)\ - ((m == gm)? dlmalloc(b) : mspace_malloc(m, b)) -#define internal_free(m, mem)\ - if (m == gm) dlfree(mem); else mspace_free(m,mem); -#else /* MSPACES */ -#define internal_malloc(m, b) dlmalloc(b) -#define internal_free(m, mem) dlfree(mem) -#endif /* MSPACES */ -#endif /* ONLY_MSPACES */ + #if ONLY_MSPACES + #define internal_malloc(m, b) mspace_malloc(m, b) + #define internal_free(m, mem) mspace_free(m, mem); + #else /* ONLY_MSPACES */ + #if MSPACES + #define internal_malloc(m, b) \ + ((m == gm) ? dlmalloc(b) : mspace_malloc(m, b)) + #define internal_free(m, mem) \ + if (m == gm) \ + dlfree(mem); \ + else \ + mspace_free(m, mem); + #else /* MSPACES */ + #define internal_malloc(m, b) dlmalloc(b) + #define internal_free(m, mem) dlfree(mem) + #endif /* MSPACES */ + #endif /* ONLY_MSPACES */ /* ----------------------- Direct-mmapping chunks ----------------------- */ @@ -3874,80 +4219,93 @@ static void internal_malloc_stats(mstate m) { */ /* Malloc using mmap */ -static void* mmap_alloc(mstate m, size_t nb) { +static void *mmap_alloc(mstate m, size_t nb) { + size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); if (m->footprint_limit != 0) { + size_t fp = m->footprint + mmsize; - if (fp <= m->footprint || fp > m->footprint_limit) - return 0; + if (fp <= m->footprint || fp > m->footprint_limit) return 0; + } - if (mmsize > nb) { /* Check for wrap around 0 */ - char* mm = (char*)(CALL_DIRECT_MMAP(mmsize)); + + if (mmsize > nb) { /* Check for wrap around 0 */ + char *mm = (char *)(CALL_DIRECT_MMAP(mmsize)); if (mm != CMFAIL) { - size_t offset = align_offset(chunk2mem(mm)); - size_t psize = mmsize - offset - MMAP_FOOT_PAD; + + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - MMAP_FOOT_PAD; mchunkptr p = (mchunkptr)(mm + offset); p->prev_foot = offset; p->head = psize; mark_inuse_foot(m, p, psize); chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; + chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0; - if (m->least_addr == 0 || mm < m->least_addr) - m->least_addr = mm; + if (m->least_addr == 0 || mm < m->least_addr) m->least_addr = mm; if ((m->footprint += mmsize) > m->max_footprint) m->max_footprint = m->footprint; assert(is_aligned(chunk2mem(p))); check_mmapped_chunk(m, p); return chunk2mem(p); + } + } + return 0; + } /* Realloc using mmap */ static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) { + size_t oldsize = chunksize(oldp); - (void)flags; /* placate people compiling -Wunused */ - if (is_small(nb)) /* Can't shrink mmap regions below small size */ + (void)flags; /* placate people compiling -Wunused */ + if (is_small(nb)) /* Can't shrink mmap regions below small size */ return 0; /* Keep old chunk if big enough but not too big */ if (oldsize >= nb + SIZE_T_SIZE && (oldsize - nb) <= (mparams.granularity << 1)) return oldp; else { + size_t offset = oldp->prev_foot; size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - char* cp = (char*)CALL_MREMAP((char*)oldp - offset, - oldmmsize, newmmsize, flags); + char * 
cp = + (char *)CALL_MREMAP((char *)oldp - offset, oldmmsize, newmmsize, flags); if (cp != CMFAIL) { + mchunkptr newp = (mchunkptr)(cp + offset); - size_t psize = newmmsize - offset - MMAP_FOOT_PAD; + size_t psize = newmmsize - offset - MMAP_FOOT_PAD; newp->head = psize; mark_inuse_foot(m, newp, psize); chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; + chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0; - if (cp < m->least_addr) - m->least_addr = cp; + if (cp < m->least_addr) m->least_addr = cp; if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) m->max_footprint = m->footprint; check_mmapped_chunk(m, newp); return newp; + } + } + return 0; -} +} /* -------------------------- mspace management -------------------------- */ /* Initialize top chunk and its size */ static void init_top(mstate m, mchunkptr p, size_t psize) { + /* Ensure alignment */ size_t offset = align_offset(chunk2mem(p)); - p = (mchunkptr)((char*)p + offset); + p = (mchunkptr)((char *)p + offset); psize -= offset; m->top = p; @@ -3955,23 +4313,29 @@ static void init_top(mstate m, mchunkptr p, size_t psize) { p->head = psize | PINUSE_BIT; /* set size of fake trailing chunk holding overhead space only once */ chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; - m->trim_check = mparams.trim_threshold; /* reset on each update */ + m->trim_check = mparams.trim_threshold; /* reset on each update */ + } /* Initialize bins for a new mstate that is otherwise zeroed out */ static void init_bins(mstate m) { + /* Establish circular links for smallbins */ bindex_t i; for (i = 0; i < NSMALLBINS; ++i) { - sbinptr bin = smallbin_at(m,i); + + sbinptr bin = smallbin_at(m, i); bin->fd = bin->bk = bin; + } + } -#if PROCEED_ON_ERROR + #if PROCEED_ON_ERROR /* default corruption action */ static void reset_on_error(mstate m) { + int i; ++malloc_corruption_error_count; /* Reinitialize fields to forget about all memory */ @@ -3984,67 +4348,78 @@ static void reset_on_error(mstate m) { for (i = 0; i < NTREEBINS; ++i) *treebin_at(m, i) = 0; init_bins(m); + } -#endif /* PROCEED_ON_ERROR */ + + #endif /* PROCEED_ON_ERROR */ /* Allocate chunk and prepend remainder with chunk in successor base. 
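/*
  Illustrative sketch, not dlmalloc code: mmap_align, page_align and
  granularity_align, as used by mmap_alloc/mmap_resize above, all come down
  to rounding a size up to a power-of-two boundary with (n + mask) & ~mask.
  round_up_pow2() and the 4096-byte page size below are assumptions for the
  example; dlmalloc reads the real values from mparams.
*/
#include <stddef.h>
#include <stdio.h>

static size_t round_up_pow2(size_t n, size_t align) {

  size_t mask = align - 1;       /* align must be a power of two; on       */
  return (n + mask) & ~mask;     /* overflow this wraps to a small value,  */
                                 /* hence wrap checks such as mmsize > nb  */

}

int main(void) {

  printf("%zu\n", round_up_pow2(5000, 4096));      /* prints 8192 */
  printf("%zu\n", round_up_pow2(4096, 4096));      /* prints 4096 */
  return 0;

}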
*/ -static void* prepend_alloc(mstate m, char* newbase, char* oldbase, - size_t nb) { +static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb) { + mchunkptr p = align_as_chunk(newbase); mchunkptr oldfirst = align_as_chunk(oldbase); - size_t psize = (char*)oldfirst - (char*)p; + size_t psize = (char *)oldfirst - (char *)p; mchunkptr q = chunk_plus_offset(p, nb); - size_t qsize = psize - nb; + size_t qsize = psize - nb; set_size_and_pinuse_of_inuse_chunk(m, p, nb); - assert((char*)oldfirst > (char*)q); + assert((char *)oldfirst > (char *)q); assert(pinuse(oldfirst)); assert(qsize >= MIN_CHUNK_SIZE); /* consolidate remainder with first chunk of old base */ if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; m->top = q; q->head = tsize | PINUSE_BIT; check_top_chunk(m, q); - } - else if (oldfirst == m->dv) { + + } else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; m->dv = q; set_size_and_pinuse_of_free_chunk(q, dsize); - } - else { + + } else { + if (!is_inuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); unlink_chunk(m, oldfirst, nsize); oldfirst = chunk_plus_offset(oldfirst, nsize); qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); insert_chunk(m, q, qsize); check_free_chunk(m, q); + } check_malloced_chunk(m, chunk2mem(p), nb); return chunk2mem(p); + } /* Add a segment to hold a new noncontiguous region */ -static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { +static void add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped) { + /* Determine locations and sizes of segment, fenceposts, old top */ - char* old_top = (char*)m->top; + char * old_top = (char *)m->top; msegmentptr oldsp = segment_holding(m, old_top); - char* old_end = oldsp->base + oldsp->size; - size_t ssize = pad_request(sizeof(struct malloc_segment)); - char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - size_t offset = align_offset(chunk2mem(rawsp)); - char* asp = rawsp + offset; - char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; - mchunkptr sp = (mchunkptr)csp; + char * old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char * rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char * asp = rawsp + offset; + char * csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? 
old_top : asp; + mchunkptr sp = (mchunkptr)csp; msegmentptr ss = (msegmentptr)(chunk2mem(sp)); - mchunkptr tnext = chunk_plus_offset(sp, ssize); - mchunkptr p = tnext; - int nfences = 0; + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + int nfences = 0; /* reset top to new space */ init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); @@ -4052,7 +4427,7 @@ static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { /* Set up segment record */ assert(is_aligned(ss)); set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); - *ss = m->seg; /* Push current record */ + *ss = m->seg; /* Push current record */ m->seg.base = tbase; m->seg.size = tsize; m->seg.sflags = mmapped; @@ -4060,53 +4435,61 @@ static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { /* Insert trailing fenceposts */ for (;;) { + mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); p->head = FENCEPOST_HEAD; ++nfences; - if ((char*)(&(nextp->head)) < old_end) + if ((char *)(&(nextp->head)) < old_end) p = nextp; else break; + } + assert(nfences >= 2); /* Insert the rest of old top into a bin as an ordinary free chunk */ if (csp != old_top) { + mchunkptr q = (mchunkptr)old_top; - size_t psize = csp - old_top; + size_t psize = csp - old_top; mchunkptr tn = chunk_plus_offset(q, psize); set_free_with_pinuse(q, psize, tn); insert_chunk(m, q, psize); + } check_top_chunk(m, m->top); + } /* -------------------------- System allocation -------------------------- */ /* Get memory from system using MORECORE or MMAP */ -static void* sys_alloc(mstate m, size_t nb) { - char* tbase = CMFAIL; +static void *sys_alloc(mstate m, size_t nb) { + + char * tbase = CMFAIL; size_t tsize = 0; flag_t mmap_flag = 0; - size_t asize; /* allocation size */ + size_t asize; /* allocation size */ ensure_initialization(); /* Directly map large chunks, but only if already initialized */ if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) { - void* mem = mmap_alloc(m, nb); - if (mem != 0) - return mem; + + void *mem = mmap_alloc(m, nb); + if (mem != 0) return mem; + } asize = granularity_align(nb + SYS_ALLOC_PADDING); - if (asize <= nb) - return 0; /* wraparound */ + if (asize <= nb) return 0; /* wraparound */ if (m->footprint_limit != 0) { + size_t fp = m->footprint + asize; - if (fp <= m->footprint || fp > m->footprint_limit) - return 0; + if (fp <= m->footprint || fp > m->footprint_limit) return 0; + } /* @@ -4132,91 +4515,119 @@ static void* sys_alloc(mstate m, size_t nb) { */ if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { - char* br = CMFAIL; - size_t ssize = asize; /* sbrk call size */ - msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); + + char * br = CMFAIL; + size_t ssize = asize; /* sbrk call size */ + msegmentptr ss = (m->top == 0) ? 
0 : segment_holding(m, (char *)m->top); ACQUIRE_MALLOC_GLOBAL_LOCK(); - if (ss == 0) { /* First time through or recovery */ - char* base = (char*)CALL_MORECORE(0); + if (ss == 0) { /* First time through or recovery */ + char *base = (char *)CALL_MORECORE(0); if (base != CMFAIL) { + size_t fp; /* Adjust to end on a page boundary */ if (!is_page_aligned(base)) ssize += (page_align((size_t)base) - (size_t)base); - fp = m->footprint + ssize; /* recheck limits */ + fp = m->footprint + ssize; /* recheck limits */ if (ssize > nb && ssize < HALF_MAX_SIZE_T && (m->footprint_limit == 0 || (fp > m->footprint && fp <= m->footprint_limit)) && - (br = (char*)(CALL_MORECORE(ssize))) == base) { + (br = (char *)(CALL_MORECORE(ssize))) == base) { + tbase = base; tsize = ssize; + } + } - } - else { + + } else { + /* Subtract out existing available top space from MORECORE request. */ ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING); /* Use mem here only if it did continuously extend old space */ if (ssize < HALF_MAX_SIZE_T && - (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) { + (br = (char *)(CALL_MORECORE(ssize))) == ss->base + ss->size) { + tbase = br; tsize = ssize; + } + } - if (tbase == CMFAIL) { /* Cope with partial failure */ - if (br != CMFAIL) { /* Try to use/extend the space we did get */ - if (ssize < HALF_MAX_SIZE_T && - ssize < nb + SYS_ALLOC_PADDING) { + if (tbase == CMFAIL) { /* Cope with partial failure */ + if (br != CMFAIL) { /* Try to use/extend the space we did get */ + if (ssize < HALF_MAX_SIZE_T && ssize < nb + SYS_ALLOC_PADDING) { + size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize); if (esize < HALF_MAX_SIZE_T) { - char* end = (char*)CALL_MORECORE(esize); + + char *end = (char *)CALL_MORECORE(esize); if (end != CMFAIL) ssize += esize; - else { /* Can't use; try to release */ - (void) CALL_MORECORE(-ssize); + else { /* Can't use; try to release */ + (void)CALL_MORECORE(-ssize); br = CMFAIL; + } + } + } + } - if (br != CMFAIL) { /* Use the space we did get */ + + if (br != CMFAIL) { /* Use the space we did get */ tbase = br; tsize = ssize; - } - else - disable_contiguous(m); /* Don't try contiguous path in the future */ + + } else + + disable_contiguous(m); /* Don't try contiguous path in the future */ + } RELEASE_MALLOC_GLOBAL_LOCK(); + } - if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ - char* mp = (char*)(CALL_MMAP(asize)); + if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ + char *mp = (char *)(CALL_MMAP(asize)); if (mp != CMFAIL) { + tbase = mp; tsize = asize; mmap_flag = USE_MMAP_BIT; + } + } - if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ + if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ if (asize < HALF_MAX_SIZE_T) { - char* br = CMFAIL; - char* end = CMFAIL; + + char *br = CMFAIL; + char *end = CMFAIL; ACQUIRE_MALLOC_GLOBAL_LOCK(); - br = (char*)(CALL_MORECORE(asize)); - end = (char*)(CALL_MORECORE(0)); + br = (char *)(CALL_MORECORE(asize)); + end = (char *)(CALL_MORECORE(0)); RELEASE_MALLOC_GLOBAL_LOCK(); if (br != CMFAIL && end != CMFAIL && br < end) { + size_t ssize = end - br; if (ssize > nb + TOP_FOOT_SIZE) { + tbase = br; tsize = ssize; + } + } + } + } if (tbase != CMFAIL) { @@ -4224,61 +4635,66 @@ static void* sys_alloc(mstate m, size_t nb) { if ((m->footprint += tsize) > m->max_footprint) m->max_footprint = m->footprint; - if (!is_initialized(m)) { /* first-time initialization */ - if (m->least_addr == 0 || tbase < m->least_addr) - m->least_addr = tbase; + if 
(!is_initialized(m)) { /* first-time initialization */ + if (m->least_addr == 0 || tbase < m->least_addr) m->least_addr = tbase; m->seg.base = tbase; m->seg.size = tsize; m->seg.sflags = mmap_flag; m->magic = mparams.magic; m->release_checks = MAX_RELEASE_CHECK_RATE; init_bins(m); -#if !ONLY_MSPACES + #if !ONLY_MSPACES if (is_global(m)) init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); else -#endif + #endif { + /* Offset top by embedded malloc_state */ mchunkptr mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); + init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE); + } + } else { + /* Try to merge with an existing segment */ msegmentptr sp = &m->seg; /* Only consider most recent segment if traversal suppressed */ while (sp != 0 && tbase != sp->base + sp->size) sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; - if (sp != 0 && - !is_extern_segment(sp) && + if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & USE_MMAP_BIT) == mmap_flag && - segment_holds(sp, m->top)) { /* append */ + segment_holds(sp, m->top)) { /* append */ sp->size += tsize; init_top(m, m->top, m->topsize + tsize); - } - else { - if (tbase < m->least_addr) - m->least_addr = tbase; + + } else { + + if (tbase < m->least_addr) m->least_addr = tbase; sp = &m->seg; while (sp != 0 && sp->base != tbase + tsize) sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; - if (sp != 0 && - !is_extern_segment(sp) && + if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & USE_MMAP_BIT) == mmap_flag) { - char* oldbase = sp->base; + + char *oldbase = sp->base; sp->base = tbase; sp->size += tsize; return prepend_alloc(m, tbase, oldbase, nb); - } - else + + } else + add_segment(m, tbase, tsize, mmap_flag); + } + } - if (nb < m->topsize) { /* Allocate from new or extended top space */ - size_t rsize = m->topsize -= nb; + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; mchunkptr p = m->top; mchunkptr r = m->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -4286,353 +4702,463 @@ static void* sys_alloc(mstate m, size_t nb) { check_top_chunk(m, m->top); check_malloced_chunk(m, chunk2mem(p), nb); return chunk2mem(p); + } + } MALLOC_FAILURE_ACTION; return 0; + } /* ----------------------- system deallocation -------------------------- */ /* Unmap and unlink any mmapped segments that don't contain used chunks */ static size_t release_unused_segments(mstate m) { - size_t released = 0; - int nsegs = 0; + + size_t released = 0; + int nsegs = 0; msegmentptr pred = &m->seg; msegmentptr sp = pred->next; while (sp != 0) { - char* base = sp->base; - size_t size = sp->size; + + char * base = sp->base; + size_t size = sp->size; msegmentptr next = sp->next; ++nsegs; if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { + mchunkptr p = align_as_chunk(base); - size_t psize = chunksize(p); + size_t psize = chunksize(p); /* Can unmap if first chunk holds entire segment and not pinned */ - if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { + if (!is_inuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) { + tchunkptr tp = (tchunkptr)p; - assert(segment_holds(sp, (char*)sp)); + assert(segment_holds(sp, (char *)sp)); if (p == m->dv) { + m->dv = 0; m->dvsize = 0; - } - else { + + } else { + unlink_large_chunk(m, tp); + } + if (CALL_MUNMAP(base, size) == 0) { + released += size; m->footprint -= size; /* unlink obsoleted record */ sp = pred; sp->next = next; - } - else { /* back out if cannot unmap */ + + } 
else { /* back out if cannot unmap */ + insert_large_chunk(m, tp, psize); + } + } + } - if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ + + if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ break; pred = sp; sp = next; + } + /* Reset check counter */ - m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)? - (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE); + m->release_checks = (((size_t)nsegs > (size_t)MAX_RELEASE_CHECK_RATE) + ? (size_t)nsegs + : (size_t)MAX_RELEASE_CHECK_RATE); return released; + } static int sys_trim(mstate m, size_t pad) { + size_t released = 0; ensure_initialization(); if (pad < MAX_REQUEST && is_initialized(m)) { - pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ if (m->topsize > pad) { + /* Shrink top space in granularity-size units, keeping at least one */ size_t unit = mparams.granularity; - size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - - SIZE_T_ONE) * unit; - msegmentptr sp = segment_holding(m, (char*)m->top); + size_t extra = + ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char *)m->top); if (!is_extern_segment(sp)) { + if (is_mmapped_segment(sp)) { - if (HAVE_MMAP && - sp->size >= extra && - !has_segment_link(m, sp)) { /* can't shrink if pinned */ + + if (HAVE_MMAP && sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ size_t newsize = sp->size - extra; - (void)newsize; /* placate people compiling -Wunused-variable */ + (void)newsize; /* placate people compiling -Wunused-variable */ /* Prefer mremap, fall back to munmap */ if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { + released = extra; + } + } - } - else if (HAVE_MORECORE) { - if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ + + } else if (HAVE_MORECORE) { + + if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; ACQUIRE_MALLOC_GLOBAL_LOCK(); { + /* Make sure end of memory is where we last set it. */ - char* old_br = (char*)(CALL_MORECORE(0)); + char *old_br = (char *)(CALL_MORECORE(0)); if (old_br == sp->base + sp->size) { - char* rel_br = (char*)(CALL_MORECORE(-extra)); - char* new_br = (char*)(CALL_MORECORE(0)); + + char *rel_br = (char *)(CALL_MORECORE(-extra)); + char *new_br = (char *)(CALL_MORECORE(0)); if (rel_br != CMFAIL && new_br < old_br) released = old_br - new_br; + } + } + RELEASE_MALLOC_GLOBAL_LOCK(); + } + } if (released != 0) { + sp->size -= released; m->footprint -= released; init_top(m, m->top, m->topsize - released); check_top_chunk(m, m->top); + } + } /* Unmap any unused mmapped segments */ - if (HAVE_MMAP) - released += release_unused_segments(m); + if (HAVE_MMAP) released += release_unused_segments(m); /* On failure, disable autotrim to avoid repeated failed future calls */ - if (released == 0 && m->topsize > m->trim_check) - m->trim_check = MAX_SIZE_T; + if (released == 0 && m->topsize > m->trim_check) m->trim_check = MAX_SIZE_T; + } - return (released != 0)? 1 : 0; + return (released != 0) ? 1 : 0; + } /* Consolidate and bin a chunk. Differs from exported versions of free mainly in that the chunk need not be marked as inuse. 
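/*
  Illustrative sketch, not dlmalloc code: sys_trim above gives memory back
  only in whole granularity-sized units and always leaves headroom in the
  top chunk.  trimmable() repeats just that arithmetic; the 64 KB unit and
  the sizes in main() are made-up example values.
*/
#include <stddef.h>
#include <stdio.h>

/* Bytes sys_trim would consider releasable from a top chunk of 'topsize'
   bytes when 'pad' bytes must stay usable and 'unit' is the granularity. */
static size_t trimmable(size_t topsize, size_t pad, size_t unit) {

  if (topsize <= pad) return 0;
  return ((topsize - pad + (unit - 1)) / unit - 1) * unit;

}

int main(void) {

  size_t unit = 64 * 1024;
  printf("%zu\n", trimmable(300000, 4096, unit));  /* prints 262144 */
  printf("%zu\n", trimmable(50000, 4096, unit));   /* prints 0      */
  return 0;

}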
*/ static void dispose_chunk(mstate m, mchunkptr p, size_t psize) { + mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { + mchunkptr prev; - size_t prevsize = p->prev_foot; + size_t prevsize = p->prev_foot; if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) - m->footprint -= psize; + if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) m->footprint -= psize; return; + } + prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ if (p != m->dv) { + unlink_chunk(m, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { + + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + m->dvsize = psize; set_free_with_pinuse(p, psize, next); return; + } - } - else { + + } else { + CORRUPTION_ERROR_ACTION(m); return; + } + } + if (RTCHECK(ok_address(m, next))) { - if (!cinuse(next)) { /* consolidate forward */ + + if (!cinuse(next)) { /* consolidate forward */ if (next == m->top) { + size_t tsize = m->topsize += psize; m->top = p; p->head = tsize | PINUSE_BIT; if (p == m->dv) { + m->dv = 0; m->dvsize = 0; + } + return; - } - else if (next == m->dv) { + + } else if (next == m->dv) { + size_t dsize = m->dvsize += psize; m->dv = p; set_size_and_pinuse_of_free_chunk(p, dsize); return; - } - else { + + } else { + size_t nsize = chunksize(next); psize += nsize; unlink_chunk(m, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == m->dv) { + m->dvsize = psize; return; + } + } - } - else { + + } else { + set_free_with_pinuse(p, psize, next); + } + insert_chunk(m, p, psize); - } - else { + + } else { + CORRUPTION_ERROR_ACTION(m); + } + } /* ---------------------------- malloc --------------------------- */ /* allocate a large request from the best fitting chunk in a treebin */ -static void* tmalloc_large(mstate m, size_t nb) { +static void *tmalloc_large(mstate m, size_t nb) { + tchunkptr v = 0; - size_t rsize = -nb; /* Unsigned negation */ + size_t rsize = -nb; /* Unsigned negation */ tchunkptr t; - bindex_t idx; + bindex_t idx; compute_tree_index(nb, idx); if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ - size_t sizebits = nb << leftshift_for_tree_index(idx); - tchunkptr rst = 0; /* The deepest untaken right subtree */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ for (;;) { + tchunkptr rt; - size_t trem = chunksize(t) - nb; + size_t trem = chunksize(t) - nb; if (trem < rsize) { + v = t; - if ((rsize = trem) == 0) - break; + if ((rsize = trem) == 0) break; + } + rt = t->child[1]; - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; - if (rt != 0 && rt != t) - rst = rt; + t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) rst = rt; if (t == 0) { - t = rst; /* set t to least subtree holding sizes > nb */ + + t = rst; /* set t to least subtree holding sizes > nb */ break; + } + sizebits <<= 1; + } + } - if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; if (leftbits != 0) { + bindex_t i; binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); t = *treebin_at(m, i); + } + } - while (t != 0) { /* find smallest of tree or subtree */ + 
while (t != 0) { /* find smallest of tree or subtree */ size_t trem = chunksize(t) - nb; if (trem < rsize) { + rsize = trem; v = t; + } + t = leftmost_child(t); + } /* If dv is a better fit, return 0 so malloc will use it */ if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { - if (RTCHECK(ok_address(m, v))) { /* split */ + + if (RTCHECK(ok_address(m, v))) { /* split */ mchunkptr r = chunk_plus_offset(v, nb); assert(chunksize(v) == rsize + nb); if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); if (rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(m, v, (rsize + nb)); else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); set_size_and_pinuse_of_free_chunk(r, rsize); insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + } + CORRUPTION_ERROR_ACTION(m); + } + return 0; + } /* allocate a small request from the best fitting chunk in a treebin */ -static void* tmalloc_small(mstate m, size_t nb) { +static void *tmalloc_small(mstate m, size_t nb) { + tchunkptr t, v; - size_t rsize; - bindex_t i; - binmap_t leastbit = least_bit(m->treemap); + size_t rsize; + bindex_t i; + binmap_t leastbit = least_bit(m->treemap); compute_bit2idx(leastbit, i); v = t = *treebin_at(m, i); rsize = chunksize(t) - nb; while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; if (trem < rsize) { + rsize = trem; v = t; + } + } if (RTCHECK(ok_address(m, v))) { + mchunkptr r = chunk_plus_offset(v, nb); assert(chunksize(v) == rsize + nb); if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); if (rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(m, v, (rsize + nb)); else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(m, r, rsize); + } + return chunk2mem(v); + } + } CORRUPTION_ERROR_ACTION(m); return 0; -} -#if !ONLY_MSPACES +} -void* dlmalloc(size_t bytes) { - /* - Basic algorithm: - If a small request (< 256 bytes minus per-chunk overhead): - 1. If one exists, use a remainderless chunk in associated smallbin. - (Remainderless means that there are too few excess bytes to - represent as a chunk.) - 2. If it is big enough, use the dv chunk, which is normally the - chunk adjacent to the one used for the most recent small request. - 3. If one exists, split the smallest available chunk in a bin, - saving remainder in dv. - 4. If it is big enough, use the top chunk. - 5. If available, get memory from system and use it - Otherwise, for a large request: - 1. Find the smallest available binned chunk that fits, and use it - if it is better fitting than dv chunk, splitting if necessary. - 2. If better fitting than any binned chunk, use the dv chunk. - 3. If it is big enough, use the top chunk. - 4. If request size >= mmap threshold, try to directly mmap this chunk. - 5. If available, get memory from system and use it - - The ugly goto's here ensure that postaction occurs along all paths. - */ + #if !ONLY_MSPACES + +void *dlmalloc(size_t bytes) { + + /* + Basic algorithm: + If a small request (< 256 bytes minus per-chunk overhead): + 1. If one exists, use a remainderless chunk in associated smallbin. + (Remainderless means that there are too few excess bytes to + represent as a chunk.) + 2. If it is big enough, use the dv chunk, which is normally the + chunk adjacent to the one used for the most recent small request. + 3. If one exists, split the smallest available chunk in a bin, + saving remainder in dv. + 4. If it is big enough, use the top chunk. + 5. If available, get memory from system and use it + Otherwise, for a large request: + 1. 
Find the smallest available binned chunk that fits, and use it + if it is better fitting than dv chunk, splitting if necessary. + 2. If better fitting than any binned chunk, use the dv chunk. + 3. If it is big enough, use the top chunk. + 4. If request size >= mmap threshold, try to directly mmap this chunk. + 5. If available, get memory from system and use it + + The ugly goto's here ensure that postaction occurs along all paths. + */ -#if USE_LOCKS - ensure_initialization(); /* initialize in sys_alloc if not using locks */ -#endif + #if USE_LOCKS + ensure_initialization(); /* initialize in sys_alloc if not using locks */ + #endif if (!PREACTION(gm)) { - void* mem; + + void * mem; size_t nb; if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; binmap_t smallbits; - nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes); idx = small_index(nb); smallbits = gm->smallmap >> idx; - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ + idx += ~smallbits & 1; /* Uses next bin if idx empty */ b = smallbin_at(gm, idx); p = b->fd; assert(chunksize(p) == small_index2size(idx)); @@ -4641,15 +5167,17 @@ void* dlmalloc(size_t bytes) { mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; + } else if (nb > gm->dvsize) { - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ mchunkptr b, p, r; - size_t rsize; - bindex_t i; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - binmap_t leastbit = least_bit(leftbits); + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); b = smallbin_at(gm, i); p = b->fd; @@ -4660,54 +5188,71 @@ void* dlmalloc(size_t bytes) { if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(gm, p, small_index2size(i)); else { + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); r = chunk_plus_offset(p, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(gm, r, rsize); + } + mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; + } else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); goto postaction; + } + } - } - else if (bytes >= MAX_REQUEST) + + } else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. 
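/*
  Illustrative sketch, not dlmalloc code: the small-request path above reads
  gm->smallmap as a bitmap of non-empty smallbins.  find_bin() below is an
  invented helper showing the same bit tricks: try the exact bin and its
  neighbour ((smallbits & 0x3U), idx += ~smallbits & 1), otherwise jump to
  the next populated bin via the lowest set bit (least_bit/compute_bit2idx,
  done here with the GCC/Clang __builtin_ctz).
*/
#include <stdint.h>

/* Return the index of a non-empty bin >= idx (idx < 32), or -1 if none. */
static int find_bin(uint32_t binmap, unsigned idx) {

  uint32_t bits = binmap >> idx;
  if (bits & 0x3u)                        /* this bin or the next will do  */
    return (int)(idx + (~bits & 1u));
  if (bits != 0)                          /* lowest set bit = smallest bin */
    return (int)(idx + (unsigned)__builtin_ctz(bits));
  return -1;

}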
Force failure (in sys alloc) */ else { + nb = pad_request(bytes); if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); goto postaction; + } + } if (nb <= gm->dvsize) { - size_t rsize = gm->dvsize - nb; + + size_t rsize = gm->dvsize - nb; mchunkptr p = gm->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ mchunkptr r = gm->dv = chunk_plus_offset(p, nb); gm->dvsize = rsize; set_size_and_pinuse_of_free_chunk(r, rsize); set_size_and_pinuse_of_inuse_chunk(gm, p, nb); - } - else { /* exhaust dv */ + + } else { /* exhaust dv */ + size_t dvs = gm->dvsize; gm->dvsize = 0; gm->dv = 0; set_inuse_and_pinuse(gm, p, dvs); + } + mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; + } - else if (nb < gm->topsize) { /* Split top */ - size_t rsize = gm->topsize -= nb; + else if (nb < gm->topsize) { /* Split top */ + size_t rsize = gm->topsize -= nb; mchunkptr p = gm->top; mchunkptr r = gm->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -4716,6 +5261,7 @@ void* dlmalloc(size_t bytes) { check_top_chunk(gm, gm->top); check_malloced_chunk(gm, mem, nb); goto postaction; + } mem = sys_alloc(gm, nb); @@ -4723,14 +5269,17 @@ void* dlmalloc(size_t bytes) { postaction: POSTACTION(gm); return mem; + } return 0; + } /* ---------------------------- free --------------------------- */ -void dlfree(void* mem) { +void dlfree(void *mem) { + /* Consolidate freed chunks with preceeding or succeeding bordering free chunks, if they exist, and then place in a bin. Intermixed @@ -4738,164 +5287,216 @@ void dlfree(void* mem) { */ if (mem != 0) { - mchunkptr p = mem2chunk(mem); -#if FOOTERS + + mchunkptr p = mem2chunk(mem); + #if FOOTERS mstate fm = get_mstate_for(p); if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); return; + } -#else /* FOOTERS */ -#define fm gm -#endif /* FOOTERS */ + + #else /* FOOTERS */ + #define fm gm + #endif /* FOOTERS */ if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { - size_t psize = chunksize(p); + + size_t psize = chunksize(p); mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { + size_t prevsize = p->prev_foot; if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) fm->footprint -= psize; goto postaction; - } - else { + + } else { + mchunkptr prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { + + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; set_free_with_pinuse(p, psize, next); goto postaction; + } - } - else + + } else + goto erroraction; + } + } if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { - if (!cinuse(next)) { /* consolidate forward */ + + if (!cinuse(next)) { /* consolidate forward */ if (next == fm->top) { + size_t tsize = fm->topsize += psize; fm->top = p; p->head = tsize | PINUSE_BIT; if (p == fm->dv) { + fm->dv = 0; fm->dvsize = 0; + } - if (should_trim(fm, tsize)) - sys_trim(fm, 0); + + if (should_trim(fm, tsize)) sys_trim(fm, 0); goto postaction; - } - else if (next == fm->dv) { + + } else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; fm->dv = p; set_size_and_pinuse_of_free_chunk(p, 
dsize); goto postaction; - } - else { + + } else { + size_t nsize = chunksize(next); psize += nsize; unlink_chunk(fm, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == fm->dv) { + fm->dvsize = psize; goto postaction; + } + } - } - else + + } else + set_free_with_pinuse(p, psize, next); if (is_small(psize)) { + insert_small_chunk(fm, p, psize); check_free_chunk(fm, p); - } - else { + + } else { + tchunkptr tp = (tchunkptr)p; insert_large_chunk(fm, tp, psize); check_free_chunk(fm, p); - if (--fm->release_checks == 0) - release_unused_segments(fm); + if (--fm->release_checks == 0) release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: USAGE_ERROR_ACTION(fm, p); postaction: POSTACTION(fm); + } + } -#if !FOOTERS -#undef fm -#endif /* FOOTERS */ + + #if !FOOTERS + #undef fm + #endif /* FOOTERS */ + } -void* dlcalloc(size_t n_elements, size_t elem_size) { - void* mem; +void *dlcalloc(size_t n_elements, size_t elem_size) { + + void * mem; size_t req = 0; if (n_elements != 0) { + req = n_elements * elem_size; if (((n_elements | elem_size) & ~(size_t)0xffff) && (req / n_elements != elem_size)) - req = MAX_SIZE_T; /* force downstream failure on overflow */ + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = dlmalloc(req); if (mem != 0 && calloc_must_clear(mem2chunk(mem))) __builtin_memset(mem, 0, req); return mem; + } -#endif /* !ONLY_MSPACES */ + #endif /* !ONLY_MSPACES */ /* ------------ Internal support for realloc, memalign, etc -------------- */ /* Try to realloc; only in-place unless can_move true */ static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, int can_move) { + mchunkptr newp = 0; - size_t oldsize = chunksize(p); + size_t oldsize = chunksize(p); mchunkptr next = chunk_plus_offset(p, oldsize); - if (RTCHECK(ok_address(m, p) && ok_inuse(p) && - ok_next(p, next) && ok_pinuse(next))) { + if (RTCHECK(ok_address(m, p) && ok_inuse(p) && ok_next(p, next) && + ok_pinuse(next))) { + if (is_mmapped(p)) { + newp = mmap_resize(m, p, nb, can_move); - } - else if (oldsize >= nb) { /* already big enough */ + + } else if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; - if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ + if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ mchunkptr r = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, r, rsize); dispose_chunk(m, r, rsize); + } + newp = p; - } - else if (next == m->top) { /* extend into top */ + + } else if (next == m->top) { /* extend into top */ + if (oldsize + m->topsize > nb) { - size_t newsize = oldsize + m->topsize; - size_t newtopsize = newsize - nb; + + size_t newsize = oldsize + m->topsize; + size_t newtopsize = newsize - nb; mchunkptr newtop = chunk_plus_offset(p, nb); set_inuse(m, p, nb); - newtop->head = newtopsize |PINUSE_BIT; + newtop->head = newtopsize | PINUSE_BIT; m->top = newtop; m->topsize = newtopsize; newp = p; + } - } - else if (next == m->dv) { /* extend into dv */ + + } else if (next == m->dv) { /* extend into dv */ + size_t dvs = m->dvsize; if (oldsize + dvs >= nb) { + size_t dsize = oldsize + dvs - nb; if (dsize >= MIN_CHUNK_SIZE) { + mchunkptr r = chunk_plus_offset(p, nb); mchunkptr n = chunk_plus_offset(r, dsize); set_inuse(m, p, nb); @@ -4903,64 +5504,87 @@ static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, clear_pinuse(n); m->dvsize = dsize; m->dv = r; - } - else { /* exhaust dv */ + + } else { /* exhaust dv */ + size_t newsize = oldsize + dvs; set_inuse(m, p, newsize); m->dvsize = 
0; m->dv = 0; + } + newp = p; + } - } - else if (!cinuse(next)) { /* extend into next free chunk */ + + } else if (!cinuse(next)) { /* extend into next free chunk */ + size_t nextsize = chunksize(next); if (oldsize + nextsize >= nb) { + size_t rsize = oldsize + nextsize - nb; unlink_chunk(m, next, nextsize); if (rsize < MIN_CHUNK_SIZE) { + size_t newsize = oldsize + nextsize; set_inuse(m, p, newsize); - } - else { + + } else { + mchunkptr r = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, r, rsize); dispose_chunk(m, r, rsize); + } + newp = p; + } + } - } - else { + + } else { + USAGE_ERROR_ACTION(m, chunk2mem(p)); + } + return newp; + } -static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { - void* mem = 0; - if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ +static void *internal_memalign(mstate m, size_t alignment, size_t bytes) { + + void *mem = 0; + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ alignment = MIN_CHUNK_SIZE; - if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ + if ((alignment & (alignment - SIZE_T_ONE)) != 0) { /* Ensure a power of 2 */ size_t a = MALLOC_ALIGNMENT << 1; - while (a < alignment) a <<= 1; + while (a < alignment) + a <<= 1; alignment = a; + } + if (bytes >= MAX_REQUEST - alignment) { - if (m != 0) { /* Test isn't needed but avoids compiler warning */ + + if (m != 0) { /* Test isn't needed but avoids compiler warning */ MALLOC_FAILURE_ACTION; + } - } - else { + + } else { + size_t nb = request2size(bytes); size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; mem = internal_malloc(m, req); if (mem != 0) { + mchunkptr p = mem2chunk(mem); - if (PREACTION(m)) - return 0; - if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ + if (PREACTION(m)) return 0; + if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ /* Find an aligned spot inside chunk. Since we need to give back leading space in a chunk of at least MIN_CHUNK_SIZE, if @@ -4969,47 +5593,59 @@ static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { We've allocated enough total room so that this is always possible. */ - char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment - - SIZE_T_ONE)) & - -alignment)); - char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? - br : br+alignment; + char * br = (char *)mem2chunk((size_t)( + ((size_t)((char *)mem + alignment - SIZE_T_ONE)) & -alignment)); + char * pos = ((size_t)(br - (char *)(p)) >= MIN_CHUNK_SIZE) + ? 
br + : br + alignment; mchunkptr newp = (mchunkptr)pos; - size_t leadsize = pos - (char*)(p); - size_t newsize = chunksize(p) - leadsize; + size_t leadsize = pos - (char *)(p); + size_t newsize = chunksize(p) - leadsize; - if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ + if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ newp->prev_foot = p->prev_foot + leadsize; newp->head = newsize; - } - else { /* Otherwise, give back leader, use the rest */ + + } else { /* Otherwise, give back leader, use the rest */ + set_inuse(m, newp, newsize); set_inuse(m, p, leadsize); dispose_chunk(m, p, leadsize); + } + p = newp; + } /* Give back spare room at the end */ if (!is_mmapped(p)) { + size_t size = chunksize(p); if (size > nb + MIN_CHUNK_SIZE) { - size_t remainder_size = size - nb; + + size_t remainder_size = size - nb; mchunkptr remainder = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, remainder, remainder_size); dispose_chunk(m, remainder, remainder_size); + } + } mem = chunk2mem(p); - assert (chunksize(p) >= nb); + assert(chunksize(p) >= nb); assert(((size_t)mem & (alignment - 1)) == 0); check_inuse_chunk(m, p); POSTACTION(m); + } + } + return mem; + } /* @@ -5019,50 +5655,50 @@ static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { bit 0 set if all elements are same size (using sizes[0]) bit 1 set if elements should be zeroed */ -static void** ialloc(mstate m, - size_t n_elements, - size_t* sizes, - int opts, - void* chunks[]) { - - size_t element_size; /* chunksize of each element, if all same */ - size_t contents_size; /* total size of elements */ - size_t array_size; /* request size of pointer array */ - void* mem; /* malloced aggregate space */ - mchunkptr p; /* corresponding chunk */ - size_t remainder_size; /* remaining bytes while splitting */ - void** marray; /* either "chunks" or malloced ptr array */ - mchunkptr array_chunk; /* chunk for malloced ptr array */ - flag_t was_enabled; /* to disable mmap */ +static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts, + void *chunks[]) { + + size_t element_size; /* chunksize of each element, if all same */ + size_t contents_size; /* total size of elements */ + size_t array_size; /* request size of pointer array */ + void * mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + size_t remainder_size; /* remaining bytes while splitting */ + void ** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + flag_t was_enabled; /* to disable mmap */ size_t size; size_t i; ensure_initialization(); /* compute array length, if needed */ if (chunks != 0) { - if (n_elements == 0) - return chunks; /* nothing to do */ + + if (n_elements == 0) return chunks; /* nothing to do */ marray = chunks; array_size = 0; - } - else { + + } else { + /* if empty req, must still return chunk representing empty array */ - if (n_elements == 0) - return (void**)internal_malloc(m, 0); + if (n_elements == 0) return (void **)internal_malloc(m, 0); marray = 0; - array_size = request2size(n_elements * (sizeof(void*))); + array_size = request2size(n_elements * (sizeof(void *))); + } /* compute total element size */ - if (opts & 0x1) { /* all-same-size */ + if (opts & 0x1) { /* all-same-size */ element_size = request2size(*sizes); contents_size = n_elements * element_size; - } - else { /* add up all the sizes */ + + } else { /* add up all the sizes */ + element_size = 0; contents_size = 0; for (i = 0; i != n_elements; 
++i) contents_size += request2size(sizes[i]); + } size = contents_size + array_size; @@ -5075,10 +5711,8 @@ static void** ialloc(mstate m, was_enabled = use_mmap(m); disable_mmap(m); mem = internal_malloc(m, size - CHUNK_OVERHEAD); - if (was_enabled) - enable_mmap(m); - if (mem == 0) - return 0; + if (was_enabled) enable_mmap(m); + if (mem == 0) return 0; if (PREACTION(m)) return 0; p = mem2chunk(mem); @@ -5086,24 +5720,30 @@ static void** ialloc(mstate m, assert(!is_mmapped(p)); - if (opts & 0x2) { /* optionally clear the elements */ - __builtin_memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); + if (opts & 0x2) { /* optionally clear the elements */ + __builtin_memset((size_t *)mem, 0, + remainder_size - SIZE_T_SIZE - array_size); + } /* If not provided, allocate the pointer array as final part of chunk */ if (marray == 0) { - size_t array_chunk_size; + + size_t array_chunk_size; array_chunk = chunk_plus_offset(p, contents_size); array_chunk_size = remainder_size - contents_size; - marray = (void**) (chunk2mem(array_chunk)); + marray = (void **)(chunk2mem(array_chunk)); set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); remainder_size = contents_size; + } /* split out elements */ - for (i = 0; ; ++i) { + for (i = 0;; ++i) { + marray[i] = chunk2mem(p); - if (i != n_elements-1) { + if (i != n_elements - 1) { + if (element_size != 0) size = element_size; else @@ -5111,31 +5751,42 @@ static void** ialloc(mstate m, remainder_size -= size; set_size_and_pinuse_of_inuse_chunk(m, p, size); p = chunk_plus_offset(p, size); - } - else { /* the final element absorbs any overallocation slop */ + + } else { /* the final element absorbs any overallocation slop */ + set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); break; + } + } -#if DEBUG + #if DEBUG if (marray != chunks) { + /* final element must have exactly exhausted chunk */ if (element_size != 0) { + assert(remainder_size == element_size); - } - else { + + } else { + assert(remainder_size == request2size(sizes[i])); + } + check_inuse_chunk(m, mem2chunk(marray)); + } + for (i = 0; i != n_elements; ++i) check_inuse_chunk(m, mem2chunk(marray[i])); -#endif /* DEBUG */ + #endif /* DEBUG */ POSTACTION(m); return marray; + } /* Try to free all pointers in the given array. @@ -5145,316 +5796,431 @@ static void** ialloc(mstate m, chunks before freeing, which will occur often if allocated with ialloc or the array is sorted. 
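/*
  Illustrative sketch, not dlmalloc code: ialloc above carves one big chunk
  into n consecutive elements (optionally zeroing them, opts bit 1).
  carve() is an invented stand-in that shows only the carving on a plain
  malloc'd block; it skips the chunk-header bookkeeping, per-element
  padding and the overflow checks the real code performs.
*/
#include <stdlib.h>
#include <string.h>

/* Return n pointers into one allocation; *block receives the block that the
   caller later frees (along with the returned array itself). */
static void **carve(size_t n, const size_t sizes[], void **block) {

  size_t total = 0, i;
  char * base;
  void **ptrs = malloc(n * sizeof(void *));
  if (ptrs == NULL) return NULL;
  for (i = 0; i < n; i++)
    total += sizes[i];
  base = malloc(total ? total : 1);
  if (base == NULL) {

    free(ptrs);
    return NULL;

  }

  memset(base, 0, total);                 /* opts-bit-1 style zeroing    */
  *block = base;
  for (i = 0; i < n; i++) {               /* hand out consecutive slices */

    ptrs[i] = base;
    base += sizes[i];

  }

  return ptrs;

}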
*/ -static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) { +static size_t internal_bulk_free(mstate m, void *array[], size_t nelem) { + size_t unfreed = 0; if (!PREACTION(m)) { - void** a; - void** fence = &(array[nelem]); + + void **a; + void **fence = &(array[nelem]); for (a = array; a != fence; ++a) { - void* mem = *a; + + void *mem = *a; if (mem != 0) { + mchunkptr p = mem2chunk(mem); - size_t psize = chunksize(p); -#if FOOTERS + size_t psize = chunksize(p); + #if FOOTERS if (get_mstate_for(p) != m) { + ++unfreed; continue; + } -#endif + + #endif check_inuse_chunk(m, p); *a = 0; if (RTCHECK(ok_address(m, p) && ok_inuse(p))) { - void ** b = a + 1; /* try to merge with next chunk */ + + void ** b = a + 1; /* try to merge with next chunk */ mchunkptr next = next_chunk(p); if (b != fence && *b == chunk2mem(next)) { + size_t newsize = chunksize(next) + psize; set_inuse(m, p, newsize); *b = chunk2mem(p); - } - else + + } else + dispose_chunk(m, p, psize); - } - else { + + } else { + CORRUPTION_ERROR_ACTION(m); break; + } + } + } - if (should_trim(m, m->topsize)) - sys_trim(m, 0); + + if (should_trim(m, m->topsize)) sys_trim(m, 0); POSTACTION(m); + } + return unfreed; + } -/* Traversal */ -#if MALLOC_INSPECT_ALL + /* Traversal */ + #if MALLOC_INSPECT_ALL static void internal_inspect_all(mstate m, - void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { + void (*handler)(void *start, void *end, + size_t used_bytes, + void * callback_arg), + void *arg) { + if (is_initialized(m)) { - mchunkptr top = m->top; + + mchunkptr top = m->top; msegmentptr s; for (s = &m->seg; s != 0; s = s->next) { + mchunkptr q = align_as_chunk(s->base); while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) { + mchunkptr next = next_chunk(q); - size_t sz = chunksize(q); - size_t used; - void* start; + size_t sz = chunksize(q); + size_t used; + void * start; if (is_inuse(q)) { - used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ + + used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ start = chunk2mem(q); - } - else { + + } else { + used = 0; - if (is_small(sz)) { /* offset by possible bookkeeping */ - start = (void*)((char*)q + sizeof(struct malloc_chunk)); - } - else { - start = (void*)((char*)q + sizeof(struct malloc_tree_chunk)); + if (is_small(sz)) { /* offset by possible bookkeeping */ + start = (void *)((char *)q + sizeof(struct malloc_chunk)); + + } else { + + start = (void *)((char *)q + sizeof(struct malloc_tree_chunk)); + } + } - if (start < (void*)next) /* skip if all space is bookkeeping */ + + if (start < (void *)next) /* skip if all space is bookkeeping */ handler(start, next, used, arg); - if (q == top) - break; + if (q == top) break; q = next; + } + } + } + } -#endif /* MALLOC_INSPECT_ALL */ + + #endif /* MALLOC_INSPECT_ALL */ /* ------------------ Exported realloc, memalign, etc -------------------- */ -#if !ONLY_MSPACES + #if !ONLY_MSPACES -void* dlrealloc(void* oldmem, size_t bytes) { - void* mem = 0; +void *dlrealloc(void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem == 0) { + mem = dlmalloc(bytes); - } - else if (bytes >= MAX_REQUEST) { + + } else if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } -#ifdef REALLOC_ZERO_BYTES_FREES + + #ifdef REALLOC_ZERO_BYTES_FREES else if (bytes == 0) { + dlfree(oldmem); + } -#endif /* REALLOC_ZERO_BYTES_FREES */ + + #endif /* REALLOC_ZERO_BYTES_FREES */ else { - size_t nb = request2size(bytes); + + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); -#if ! 
FOOTERS + #if !FOOTERS mstate m = gm; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); POSTACTION(m); if (newp != 0) { + check_inuse_chunk(m, newp); mem = chunk2mem(newp); - } - else { + + } else { + mem = internal_malloc(m, bytes); if (mem != 0) { + size_t oc = chunksize(oldp) - overhead_for(oldp); - __builtin_memcpy(mem, oldmem, (oc < bytes)? oc : bytes); + __builtin_memcpy(mem, oldmem, (oc < bytes) ? oc : bytes); internal_free(m, oldmem); + } + } + } + } + return mem; + } -void* dlrealloc_in_place(void* oldmem, size_t bytes) { - void* mem = 0; +void *dlrealloc_in_place(void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem != 0) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; - } - else { - size_t nb = request2size(bytes); + + } else { + + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); -#if ! FOOTERS + #if !FOOTERS mstate m = gm; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); POSTACTION(m); if (newp == oldp) { + check_inuse_chunk(m, newp); mem = oldmem; + } + } + } + } + return mem; + } -void* dlmemalign(size_t alignment, size_t bytes) { - if (alignment <= MALLOC_ALIGNMENT) { - return dlmalloc(bytes); - } +void *dlmemalign(size_t alignment, size_t bytes) { + + if (alignment <= MALLOC_ALIGNMENT) { return dlmalloc(bytes); } return internal_memalign(gm, alignment, bytes); + } -int dlposix_memalign(void** pp, size_t alignment, size_t bytes) { - void* mem = 0; +int dlposix_memalign(void **pp, size_t alignment, size_t bytes) { + + void *mem = 0; if (alignment == MALLOC_ALIGNMENT) mem = dlmalloc(bytes); else { - size_t d = alignment / sizeof(void*); - size_t r = alignment % sizeof(void*); - if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0) + + size_t d = alignment / sizeof(void *); + size_t r = alignment % sizeof(void *); + if (r != 0 || d == 0 || (d & (d - SIZE_T_ONE)) != 0) return EINVAL; else if (bytes <= MAX_REQUEST - alignment) { - if (alignment < MIN_CHUNK_SIZE) - alignment = MIN_CHUNK_SIZE; + + if (alignment < MIN_CHUNK_SIZE) alignment = MIN_CHUNK_SIZE; mem = internal_memalign(gm, alignment, bytes); + } + } + if (mem == 0) return ENOMEM; else { + *pp = mem; return 0; + } + } -void* dlvalloc(size_t bytes) { +void *dlvalloc(size_t bytes) { + size_t pagesz; ensure_initialization(); pagesz = mparams.page_size; return dlmemalign(pagesz, bytes); + } -void* dlpvalloc(size_t bytes) { +void *dlpvalloc(size_t bytes) { + size_t pagesz; ensure_initialization(); pagesz = mparams.page_size; - return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); + return dlmemalign(pagesz, + (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); + } -void** dlindependent_calloc(size_t n_elements, size_t elem_size, - void* chunks[]) { - size_t sz = elem_size; /* serves as 1-element array */ +void **dlindependent_calloc(size_t n_elements, size_t elem_size, + void *chunks[]) { + + size_t sz = elem_size; /* serves as 1-element array */ return ialloc(gm, n_elements, &sz, 3, chunks); + } -void** dlindependent_comalloc(size_t n_elements, size_t sizes[], - void* chunks[]) { +void **dlindependent_comalloc(size_t n_elements, size_t 
sizes[], + void *chunks[]) { + return ialloc(gm, n_elements, sizes, 0, chunks); + } -size_t dlbulk_free(void* array[], size_t nelem) { +size_t dlbulk_free(void *array[], size_t nelem) { + return internal_bulk_free(gm, array, nelem); + } -#if MALLOC_INSPECT_ALL -void dlmalloc_inspect_all(void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { + #if MALLOC_INSPECT_ALL +void dlmalloc_inspect_all(void (*handler)(void *start, void *end, + size_t used_bytes, + void * callback_arg), + void *arg) { + ensure_initialization(); if (!PREACTION(gm)) { + internal_inspect_all(gm, handler, arg); POSTACTION(gm); + } + } -#endif /* MALLOC_INSPECT_ALL */ + + #endif /* MALLOC_INSPECT_ALL */ int dlmalloc_trim(size_t pad) { + int result = 0; ensure_initialization(); if (!PREACTION(gm)) { + result = sys_trim(gm, pad); POSTACTION(gm); + } + return result; + } size_t dlmalloc_footprint(void) { + return gm->footprint; + } size_t dlmalloc_max_footprint(void) { + return gm->max_footprint; + } size_t dlmalloc_footprint_limit(void) { + size_t maf = gm->footprint_limit; return maf == 0 ? MAX_SIZE_T : maf; + } size_t dlmalloc_set_footprint_limit(size_t bytes) { - size_t result; /* invert sense of 0 */ - if (bytes == 0) - result = granularity_align(1); /* Use minimal size */ + + size_t result; /* invert sense of 0 */ + if (bytes == 0) result = granularity_align(1); /* Use minimal size */ if (bytes == MAX_SIZE_T) - result = 0; /* disable */ + result = 0; /* disable */ else result = granularity_align(bytes); return gm->footprint_limit = result; + } -#if !NO_MALLINFO + #if !NO_MALLINFO struct mallinfo dlmallinfo(void) { + return internal_mallinfo(gm); + } -#endif /* NO_MALLINFO */ -#if !NO_MALLOC_STATS + #endif /* NO_MALLINFO */ + + #if !NO_MALLOC_STATS void dlmalloc_stats() { + internal_malloc_stats(gm); + } -#endif /* NO_MALLOC_STATS */ + + #endif /* NO_MALLOC_STATS */ int dlmallopt(int param_number, int value) { + return change_mparam(param_number, value); + } -size_t dlmalloc_usable_size(void* mem) { +size_t dlmalloc_usable_size(void *mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); - if (is_inuse(p)) - return chunksize(p) - overhead_for(p); + if (is_inuse(p)) return chunksize(p) - overhead_for(p); + } + return 0; + } -#endif /* !ONLY_MSPACES */ + #endif /* !ONLY_MSPACES */ /* ----------------------------- user mspaces ---------------------------- */ -#if MSPACES + #if MSPACES + +static mstate init_user_mstate(char *tbase, size_t tsize) { -static mstate init_user_mstate(char* tbase, size_t tsize) { - size_t msize = pad_request(sizeof(struct malloc_state)); + size_t msize = pad_request(sizeof(struct malloc_state)); mchunkptr mn; mchunkptr msp = align_as_chunk(tbase); - mstate m = (mstate)(chunk2mem(msp)); + mstate m = (mstate)(chunk2mem(msp)); __builtin_memset(m, 0, msize); (void)INITIAL_LOCK(&m->mutex); - msp->head = (msize|INUSE_BITS); + msp->head = (msize | INUSE_BITS); m->seg.base = m->least_addr = tbase; m->seg.size = m->footprint = m->max_footprint = tsize; m->magic = mparams.magic; @@ -5465,82 +6231,111 @@ static mstate init_user_mstate(char* tbase, size_t tsize) { disable_contiguous(m); init_bins(m); mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); + init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE); check_top_chunk(m, m->top); return m; + } mspace create_mspace(size_t capacity, int locked) { + mstate m = 0; size_t msize; ensure_initialization(); msize = pad_request(sizeof(struct 
malloc_state)); - if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { - size_t rs = ((capacity == 0)? mparams.granularity : - (capacity + TOP_FOOT_SIZE + msize)); + if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) { + + size_t rs = ((capacity == 0) ? mparams.granularity + : (capacity + TOP_FOOT_SIZE + msize)); size_t tsize = granularity_align(rs); - char* tbase = (char*)(CALL_MMAP(tsize)); + char * tbase = (char *)(CALL_MMAP(tsize)); if (tbase != CMFAIL) { + m = init_user_mstate(tbase, tsize); m->seg.sflags = USE_MMAP_BIT; set_lock(m, locked); + } + } + return (mspace)m; + } -mspace create_mspace_with_base(void* base, size_t capacity, int locked) { +mspace create_mspace_with_base(void *base, size_t capacity, int locked) { + mstate m = 0; size_t msize; ensure_initialization(); msize = pad_request(sizeof(struct malloc_state)); if (capacity > msize + TOP_FOOT_SIZE && - capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { - m = init_user_mstate((char*)base, capacity); + capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) { + + m = init_user_mstate((char *)base, capacity); m->seg.sflags = EXTERN_BIT; set_lock(m, locked); + } + return (mspace)m; + } int mspace_track_large_chunks(mspace msp, int enable) { - int ret = 0; + + int ret = 0; mstate ms = (mstate)msp; if (!PREACTION(ms)) { - if (!use_mmap(ms)) { - ret = 1; - } + + if (!use_mmap(ms)) { ret = 1; } if (!enable) { + enable_mmap(ms); + } else { + disable_mmap(ms); + } + POSTACTION(ms); + } + return ret; + } size_t destroy_mspace(mspace msp) { + size_t freed = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + msegmentptr sp = &ms->seg; - (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ + (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ while (sp != 0) { - char* base = sp->base; + + char * base = sp->base; size_t size = sp->size; flag_t flag = sp->sflags; - (void)base; /* placate people compiling -Wunused-variable */ + (void)base; /* placate people compiling -Wunused-variable */ sp = sp->next; if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) && CALL_MUNMAP(base, size) == 0) freed += size; + } + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return freed; + } /* @@ -5548,25 +6343,31 @@ size_t destroy_mspace(mspace msp) { versions. This is not so nice but better than the alternatives. */ -void* mspace_malloc(mspace msp, size_t bytes) { +void *mspace_malloc(mspace msp, size_t bytes) { + mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + if (!PREACTION(ms)) { - void* mem; + + void * mem; size_t nb; if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; binmap_t smallbits; - nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes); idx = small_index(nb); smallbits = ms->smallmap >> idx; - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ + idx += ~smallbits & 1; /* Uses next bin if idx empty */ b = smallbin_at(ms, idx); p = b->fd; assert(chunksize(p) == small_index2size(idx)); @@ -5575,15 +6376,17 @@ void* mspace_malloc(mspace msp, size_t bytes) { mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; + } else if (nb > ms->dvsize) { - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ mchunkptr b, p, r; - size_t rsize; - bindex_t i; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - binmap_t leastbit = least_bit(leftbits); + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); b = smallbin_at(ms, i); p = b->fd; @@ -5594,54 +6397,71 @@ void* mspace_malloc(mspace msp, size_t bytes) { if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(ms, p, small_index2size(i)); else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); r = chunk_plus_offset(p, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; + } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); goto postaction; + } + } - } - else if (bytes >= MAX_REQUEST) + + } else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ else { + nb = pad_request(bytes); if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); goto postaction; + } + } if (nb <= ms->dvsize) { - size_t rsize = ms->dvsize - nb; + + size_t rsize = ms->dvsize - nb; mchunkptr p = ms->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ mchunkptr r = ms->dv = chunk_plus_offset(p, nb); ms->dvsize = rsize; set_size_and_pinuse_of_free_chunk(r, rsize); set_size_and_pinuse_of_inuse_chunk(ms, p, nb); - } - else { /* exhaust dv */ + + } else { /* exhaust dv */ + size_t dvs = ms->dvsize; ms->dvsize = 0; ms->dv = 0; set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; + } - else if (nb < ms->topsize) { /* Split top */ - size_t rsize = ms->topsize -= nb; + else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; mchunkptr p = ms->top; mchunkptr r = ms->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -5650,6 +6470,7 @@ void* mspace_malloc(mspace msp, size_t bytes) { check_top_chunk(ms, ms->top); check_malloced_chunk(ms, mem, nb); goto postaction; + } mem = sys_alloc(ms, nb); @@ -5657,372 +6478,519 @@ void* mspace_malloc(mspace msp, size_t bytes) { postaction: POSTACTION(ms); return mem; + } return 0; + } -void mspace_free(mspace msp, void* mem) { +void mspace_free(mspace msp, void *mem) { + if (mem != 0) { - mchunkptr p = mem2chunk(mem); -#if FOOTERS + + mchunkptr p = mem2chunk(mem); + #if FOOTERS mstate fm = get_mstate_for(p); - (void)msp; /* placate people compiling -Wunused */ -#else /* FOOTERS */ + (void)msp; /* placate people compiling -Wunused */ + #else /* FOOTERS */ mstate fm = (mstate)msp; -#endif /* FOOTERS */ + #endif /* FOOTERS */ if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); return; + } + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { - size_t psize = 
chunksize(p); + + size_t psize = chunksize(p); mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { + size_t prevsize = p->prev_foot; if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) fm->footprint -= psize; goto postaction; - } - else { + + } else { + mchunkptr prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { + + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; set_free_with_pinuse(p, psize, next); goto postaction; + } - } - else + + } else + goto erroraction; + } + } if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { - if (!cinuse(next)) { /* consolidate forward */ + + if (!cinuse(next)) { /* consolidate forward */ if (next == fm->top) { + size_t tsize = fm->topsize += psize; fm->top = p; p->head = tsize | PINUSE_BIT; if (p == fm->dv) { + fm->dv = 0; fm->dvsize = 0; + } - if (should_trim(fm, tsize)) - sys_trim(fm, 0); + + if (should_trim(fm, tsize)) sys_trim(fm, 0); goto postaction; - } - else if (next == fm->dv) { + + } else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; fm->dv = p; set_size_and_pinuse_of_free_chunk(p, dsize); goto postaction; - } - else { + + } else { + size_t nsize = chunksize(next); psize += nsize; unlink_chunk(fm, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == fm->dv) { + fm->dvsize = psize; goto postaction; + } + } - } - else + + } else + set_free_with_pinuse(p, psize, next); if (is_small(psize)) { + insert_small_chunk(fm, p, psize); check_free_chunk(fm, p); - } - else { + + } else { + tchunkptr tp = (tchunkptr)p; insert_large_chunk(fm, tp, psize); check_free_chunk(fm, p); - if (--fm->release_checks == 0) - release_unused_segments(fm); + if (--fm->release_checks == 0) release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: USAGE_ERROR_ACTION(fm, p); postaction: POSTACTION(fm); + } + } + } -void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { - void* mem; +void *mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { + + void * mem; size_t req = 0; mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + if (n_elements != 0) { + req = n_elements * elem_size; if (((n_elements | elem_size) & ~(size_t)0xffff) && (req / n_elements != elem_size)) - req = MAX_SIZE_T; /* force downstream failure on overflow */ + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = internal_malloc(ms, req); if (mem != 0 && calloc_must_clear(mem2chunk(mem))) __builtin_memset(mem, 0, req); return mem; + } -void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { - void* mem = 0; +void *mspace_realloc(mspace msp, void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem == 0) { + mem = mspace_malloc(msp, bytes); - } - else if (bytes >= MAX_REQUEST) { + + } else if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } -#ifdef REALLOC_ZERO_BYTES_FREES + + #ifdef REALLOC_ZERO_BYTES_FREES else if (bytes == 0) { + mspace_free(msp, oldmem); + } -#endif /* REALLOC_ZERO_BYTES_FREES */ + + #endif /* REALLOC_ZERO_BYTES_FREES */ else { - size_t nb = request2size(bytes); + + size_t nb = request2size(bytes); mchunkptr oldp = 
mem2chunk(oldmem); -#if ! FOOTERS + #if !FOOTERS mstate m = (mstate)msp; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); POSTACTION(m); if (newp != 0) { + check_inuse_chunk(m, newp); mem = chunk2mem(newp); - } - else { + + } else { + mem = mspace_malloc(m, bytes); if (mem != 0) { + size_t oc = chunksize(oldp) - overhead_for(oldp); - __builtin_memcpy(mem, oldmem, (oc < bytes)? oc : bytes); + __builtin_memcpy(mem, oldmem, (oc < bytes) ? oc : bytes); mspace_free(m, oldmem); + } + } + } + } + return mem; + } -void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) { - void* mem = 0; +void *mspace_realloc_in_place(mspace msp, void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem != 0) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; - } - else { - size_t nb = request2size(bytes); + + } else { + + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); -#if ! FOOTERS + #if !FOOTERS mstate m = (mstate)msp; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); - (void)msp; /* placate people compiling -Wunused */ + (void)msp; /* placate people compiling -Wunused */ if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); POSTACTION(m); if (newp == oldp) { + check_inuse_chunk(m, newp); mem = oldmem; + } + } + } + } + return mem; + } -void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { +void *mspace_memalign(mspace msp, size_t alignment, size_t bytes) { + mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } - if (alignment <= MALLOC_ALIGNMENT) - return mspace_malloc(msp, bytes); + + if (alignment <= MALLOC_ALIGNMENT) return mspace_malloc(msp, bytes); return internal_memalign(ms, alignment, bytes); + } -void** mspace_independent_calloc(mspace msp, size_t n_elements, - size_t elem_size, void* chunks[]) { - size_t sz = elem_size; /* serves as 1-element array */ +void **mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void *chunks[]) { + + size_t sz = elem_size; /* serves as 1-element array */ mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + return ialloc(ms, n_elements, &sz, 3, chunks); + } -void** mspace_independent_comalloc(mspace msp, size_t n_elements, - size_t sizes[], void* chunks[]) { +void **mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void *chunks[]) { + mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + return ialloc(ms, n_elements, sizes, 0, chunks); + } -size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) { +size_t mspace_bulk_free(mspace msp, void *array[], size_t nelem) { + return internal_bulk_free((mstate)msp, array, nelem); + } -#if MALLOC_INSPECT_ALL + #if MALLOC_INSPECT_ALL void mspace_inspect_all(mspace msp, - void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { + void (*handler)(void *start, void *end, + size_t used_bytes, void *callback_arg), + void *arg) { + mstate ms = (mstate)msp; if (ok_magic(ms)) { + if (!PREACTION(ms)) { + 
internal_inspect_all(ms, handler, arg); POSTACTION(ms); + } + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + } -#endif /* MALLOC_INSPECT_ALL */ + + #endif /* MALLOC_INSPECT_ALL */ int mspace_trim(mspace msp, size_t pad) { - int result = 0; + + int result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + if (!PREACTION(ms)) { + result = sys_trim(ms, pad); POSTACTION(ms); + } + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } -#if !NO_MALLOC_STATS + #if !NO_MALLOC_STATS void mspace_malloc_stats(mspace msp) { + mstate ms = (mstate)msp; if (ok_magic(ms)) { + internal_malloc_stats(ms); + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + } -#endif /* NO_MALLOC_STATS */ + + #endif /* NO_MALLOC_STATS */ size_t mspace_footprint(mspace msp) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + result = ms->footprint; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } size_t mspace_max_footprint(mspace msp) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + result = ms->max_footprint; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } size_t mspace_footprint_limit(mspace msp) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + size_t maf = ms->footprint_limit; result = (maf == 0) ? MAX_SIZE_T : maf; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } size_t mspace_set_footprint_limit(mspace msp, size_t bytes) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { - if (bytes == 0) - result = granularity_align(1); /* Use minimal size */ + + if (bytes == 0) result = granularity_align(1); /* Use minimal size */ if (bytes == MAX_SIZE_T) - result = 0; /* disable */ + result = 0; /* disable */ else result = granularity_align(bytes); ms->footprint_limit = result; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } -#if !NO_MALLINFO + #if !NO_MALLINFO struct mallinfo mspace_mallinfo(mspace msp) { + mstate ms = (mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - } + if (!ok_magic(ms)) { USAGE_ERROR_ACTION(ms, ms); } return internal_mallinfo(ms); + } -#endif /* NO_MALLINFO */ -size_t mspace_usable_size(const void* mem) { + #endif /* NO_MALLINFO */ + +size_t mspace_usable_size(const void *mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); - if (is_inuse(p)) - return chunksize(p) - overhead_for(p); + if (is_inuse(p)) return chunksize(p) - overhead_for(p); + } + return 0; + } int mspace_mallopt(int param_number, int value) { + return change_mparam(param_number, value); -} -#endif /* MSPACES */ +} + #endif /* MSPACES */ /* -------------------- Alternative MORECORE functions ------------------- */ @@ -6067,35 +7035,48 @@ int mspace_mallopt(int param_number, int value) { void *osMoreCore(int size) { + void *ptr = 0; static void *sbrk_top = 0; if (size > 0) { + if (size < MINIMUM_MORECORE_SIZE) size = MINIMUM_MORECORE_SIZE; if (CurrentExecutionLevel() == kTaskLevel) ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); if (ptr == 0) { + return (void *) MFAIL; + } + // save ptrs so they can be freed during cleanup our_os_pools[next_os_pool] = ptr; next_os_pool++; ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); sbrk_top = (char *) ptr 
+ size; return ptr; + } + else if (size < 0) { + // we don't currently support shrink behavior return (void *) MFAIL; + } + else { + return sbrk_top; + } + } // cleanup any allocated memory pools @@ -6103,19 +7084,22 @@ int mspace_mallopt(int param_number, int value) { void osCleanupMem(void) { + void **ptr; for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) if (*ptr) { + PoolDeallocate(*ptr); *ptr = 0; + } + } */ - /* ----------------------------------------------------------------------- History: v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea @@ -6335,4 +7319,5 @@ History: */ -#endif // __GLIBC__ +#endif // __GLIBC__ + diff --git a/qemu_mode/libqasan/hooks.c b/qemu_mode/libqasan/hooks.c index 3bb4cc42..405dddae 100644 --- a/qemu_mode/libqasan/hooks.c +++ b/qemu_mode/libqasan/hooks.c @@ -174,7 +174,9 @@ char *fgets(char *s, int size, FILE *stream) { QASAN_DEBUG("%14p: fgets(%p, %d, %p)\n", rtv, s, size, stream); QASAN_STORE(s, size); +#ifndef __ANDROID__ QASAN_LOAD(stream, sizeof(FILE)); +#endif char *r = __lq_libc_fgets(s, size, stream); QASAN_DEBUG("\t\t = %p\n", r); diff --git a/qemu_mode/libqasan/libqasan.c b/qemu_mode/libqasan/libqasan.c index 11b50270..9fc4ef7a 100644 --- a/qemu_mode/libqasan/libqasan.c +++ b/qemu_mode/libqasan/libqasan.c @@ -72,7 +72,7 @@ void __libqasan_print_maps(void) { QASAN_LOG("QEMU-AddressSanitizer (v%s)\n", QASAN_VERSTR); QASAN_LOG( - "Copyright (C) 2019-2020 Andrea Fioraldi \n"); + "Copyright (C) 2019-2021 Andrea Fioraldi \n"); QASAN_LOG("\n"); if (__qasan_log) __libqasan_print_maps(); diff --git a/qemu_mode/libqasan/malloc.c b/qemu_mode/libqasan/malloc.c index 54c1096a..5a2d2a0c 100644 --- a/qemu_mode/libqasan/malloc.c +++ b/qemu_mode/libqasan/malloc.c @@ -51,9 +51,9 @@ typedef struct { struct chunk_begin { size_t requested_size; - void* aligned_orig; // NULL if not aligned - struct chunk_begin* next; - struct chunk_begin* prev; + void * aligned_orig; // NULL if not aligned + struct chunk_begin *next; + struct chunk_begin *prev; char redzone[REDZONE_SIZE]; }; @@ -68,45 +68,45 @@ struct chunk_struct { #ifdef __GLIBC__ -void* (*__lq_libc_malloc)(size_t); -void (*__lq_libc_free)(void*); -#define backend_malloc __lq_libc_malloc -#define backend_free __lq_libc_free +void *(*__lq_libc_malloc)(size_t); +void (*__lq_libc_free)(void *); + #define backend_malloc __lq_libc_malloc + #define backend_free __lq_libc_free -#define TMP_ZONE_SIZE 4096 + #define TMP_ZONE_SIZE 4096 static int __tmp_alloc_zone_idx; static unsigned char __tmp_alloc_zone[TMP_ZONE_SIZE]; #else // From dlmalloc.c -void* dlmalloc(size_t); -void dlfree(void*); -#define backend_malloc dlmalloc -#define backend_free dlfree +void * dlmalloc(size_t); +void dlfree(void *); + #define backend_malloc dlmalloc + #define backend_free dlfree #endif int __libqasan_malloc_initialized; -static struct chunk_begin* quarantine_top; -static struct chunk_begin* quarantine_end; +static struct chunk_begin *quarantine_top; +static struct chunk_begin *quarantine_end; static size_t quarantine_bytes; #ifdef __BIONIC__ -static pthread_mutex_t quarantine_lock; -#define LOCK_TRY pthread_mutex_trylock -#define LOCK_INIT pthread_mutex_init -#define LOCK_UNLOCK pthread_mutex_unlock +static pthread_mutex_t quarantine_lock; + #define LOCK_TRY pthread_mutex_trylock + #define LOCK_INIT pthread_mutex_init + #define LOCK_UNLOCK pthread_mutex_unlock #else -static pthread_spinlock_t quarantine_lock; -#define LOCK_TRY pthread_spin_trylock -#define LOCK_INIT pthread_spin_init -#define LOCK_UNLOCK pthread_spin_unlock +static 
pthread_spinlock_t quarantine_lock; + #define LOCK_TRY pthread_spin_trylock + #define LOCK_INIT pthread_spin_init + #define LOCK_UNLOCK pthread_spin_unlock #endif // need qasan disabled -static int quanratine_push(struct chunk_begin* ck) { +static int quanratine_push(struct chunk_begin *ck) { if (ck->requested_size >= QUARANTINE_MAX_BYTES) return 0; @@ -114,7 +114,7 @@ static int quanratine_push(struct chunk_begin* ck) { while (ck->requested_size + quarantine_bytes >= QUARANTINE_MAX_BYTES) { - struct chunk_begin* tmp = quarantine_end; + struct chunk_begin *tmp = quarantine_end; quarantine_end = tmp->prev; quarantine_bytes -= tmp->requested_size; @@ -154,23 +154,23 @@ void __libqasan_init_malloc(void) { } -size_t __libqasan_malloc_usable_size(void* ptr) { +size_t __libqasan_malloc_usable_size(void *ptr) { - char* p = ptr; + char *p = ptr; p -= sizeof(struct chunk_begin); - return ((struct chunk_begin*)p)->requested_size; + return ((struct chunk_begin *)p)->requested_size; } -void* __libqasan_malloc(size_t size) { +void *__libqasan_malloc(size_t size) { if (!__libqasan_malloc_initialized) { - + __libqasan_init_malloc(); #ifdef __GLIBC__ - void* r = &__tmp_alloc_zone[__tmp_alloc_zone_idx]; + void *r = &__tmp_alloc_zone[__tmp_alloc_zone_idx]; if (size & (ALLOC_ALIGN_SIZE - 1)) __tmp_alloc_zone_idx += @@ -185,7 +185,7 @@ void* __libqasan_malloc(size_t size) { int state = QASAN_SWAP(QASAN_DISABLED); // disable qasan for this thread - struct chunk_begin* p = backend_malloc(sizeof(struct chunk_struct) + size); + struct chunk_begin *p = backend_malloc(sizeof(struct chunk_struct) + size); QASAN_SWAP(state); @@ -197,14 +197,14 @@ void* __libqasan_malloc(size_t size) { p->aligned_orig = NULL; p->next = p->prev = NULL; - QASAN_ALLOC(&p[1], (char*)&p[1] + size); + QASAN_ALLOC(&p[1], (char *)&p[1] + size); QASAN_POISON(p->redzone, REDZONE_SIZE, ASAN_HEAP_LEFT_RZ); if (size & (ALLOC_ALIGN_SIZE - 1)) - QASAN_POISON((char*)&p[1] + size, + QASAN_POISON((char *)&p[1] + size, (size & ~(ALLOC_ALIGN_SIZE - 1)) + 8 - size + REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ); else - QASAN_POISON((char*)&p[1] + size, REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ); + QASAN_POISON((char *)&p[1] + size, REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ); __builtin_memset(&p[1], 0xff, size); @@ -212,17 +212,17 @@ void* __libqasan_malloc(size_t size) { } -void __libqasan_free(void* ptr) { +void __libqasan_free(void *ptr) { if (!ptr) return; - + #ifdef __GLIBC__ - if (ptr >= (void*)__tmp_alloc_zone && - ptr < ((void*)__tmp_alloc_zone + TMP_ZONE_SIZE)) + if (ptr >= (void *)__tmp_alloc_zone && + ptr < ((void *)__tmp_alloc_zone + TMP_ZONE_SIZE)) return; #endif - struct chunk_begin* p = ptr; + struct chunk_begin *p = ptr; p -= 1; size_t n = p->requested_size; @@ -249,21 +249,22 @@ void __libqasan_free(void* ptr) { } -void* __libqasan_calloc(size_t nmemb, size_t size) { +void *__libqasan_calloc(size_t nmemb, size_t size) { size *= nmemb; #ifdef __GLIBC__ if (!__libqasan_malloc_initialized) { - void* r = &__tmp_alloc_zone[__tmp_alloc_zone_idx]; + void *r = &__tmp_alloc_zone[__tmp_alloc_zone_idx]; __tmp_alloc_zone_idx += size; return r; } + #endif - char* p = __libqasan_malloc(size); + char *p = __libqasan_malloc(size); if (!p) return NULL; __builtin_memset(p, 0, size); @@ -272,14 +273,14 @@ void* __libqasan_calloc(size_t nmemb, size_t size) { } -void* __libqasan_realloc(void* ptr, size_t size) { +void *__libqasan_realloc(void *ptr, size_t size) { - char* p = __libqasan_malloc(size); + char *p = __libqasan_malloc(size); if (!p) return NULL; if (!ptr) return p; - size_t n = 
((struct chunk_begin*)ptr)[-1].requested_size; + size_t n = ((struct chunk_begin *)ptr)[-1].requested_size; if (size < n) n = size; __builtin_memcpy(p, ptr, n); @@ -289,9 +290,9 @@ void* __libqasan_realloc(void* ptr, size_t size) { } -int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) { +int __libqasan_posix_memalign(void **ptr, size_t align, size_t len) { - if ((align % 2) || (align % sizeof(void*))) return EINVAL; + if ((align % 2) || (align % sizeof(void *))) return EINVAL; if (len == 0) { *ptr = NULL; @@ -305,7 +306,7 @@ int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) { int state = QASAN_SWAP(QASAN_DISABLED); // disable qasan for this thread - char* orig = backend_malloc(sizeof(struct chunk_struct) + size); + char *orig = backend_malloc(sizeof(struct chunk_struct) + size); QASAN_SWAP(state); @@ -313,10 +314,10 @@ int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) { QASAN_UNPOISON(orig, sizeof(struct chunk_struct) + size); - char* data = orig + sizeof(struct chunk_begin); + char *data = orig + sizeof(struct chunk_begin); data += align - ((uintptr_t)data % align); - struct chunk_begin* p = (struct chunk_begin*)data - 1; + struct chunk_begin *p = (struct chunk_begin *)data - 1; p->requested_size = len; p->aligned_orig = orig; @@ -339,9 +340,9 @@ int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) { } -void* __libqasan_memalign(size_t align, size_t len) { +void *__libqasan_memalign(size_t align, size_t len) { - void* ret = NULL; + void *ret = NULL; __libqasan_posix_memalign(&ret, align, len); @@ -349,9 +350,9 @@ void* __libqasan_memalign(size_t align, size_t len) { } -void* __libqasan_aligned_alloc(size_t align, size_t len) { +void *__libqasan_aligned_alloc(size_t align, size_t len) { - void* ret = NULL; + void *ret = NULL; if ((len % align)) return NULL; diff --git a/qemu_mode/libqasan/string.c b/qemu_mode/libqasan/string.c index 4be01279..c850463b 100644 --- a/qemu_mode/libqasan/string.c +++ b/qemu_mode/libqasan/string.c @@ -271,7 +271,7 @@ void *__libqasan_memmem(const void *haystack, size_t haystack_len, } - } while (++h <= end); + } while (h++ <= end); return 0; diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl index 47722f64..9a258d5b 160000 --- a/qemu_mode/qemuafl +++ b/qemu_mode/qemuafl @@ -1 +1 @@ -Subproject commit 47722f64e4c1662bad97dc25f3e4cc63959ff5f3 +Subproject commit 9a258d5b7a38c045a6e385fcfcf80a746a60e557 diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index 7844eedf..3ce4148d 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -1303,7 +1303,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, } -#endif /* CMPLOG_SOLVE_ARITHMETIC */ +#endif /* CMPLOG_SOLVE_ARITHMETIC */ return 0; @@ -2670,3 +2670,4 @@ exit_its: return r; } + -- cgit 1.4.1 From e3a5c31307f323452dc4b5288e0d19a02b596a33 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Mon, 15 Feb 2021 13:25:15 +0100 Subject: llvm bug workaround for lto extint --- docs/Changelog.md | 1 + include/envs.h | 1 + instrumentation/cmplog-instructions-pass.cc | 39 ++++++++++++++++++++++++++--- qemu_mode/libqasan/dlmalloc.c | 5 ++++ src/afl-cc.c | 2 ++ src/afl-fuzz-redqueen.c | 1 + 6 files changed, 45 insertions(+), 4 deletions(-) (limited to 'qemu_mode/libqasan/dlmalloc.c') diff --git a/docs/Changelog.md b/docs/Changelog.md index 71ef4c2c..e2482f8f 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -49,6 +49,7 @@ sending a mail to . 
CLANG for old afl-clang - fixed a potential crash in the LAF feature - workaround for llvm 13 + - workaround for llvm internal lto bug that lets not bitcast from _ExtInt() - qemuafl - QASan (address sanitizer for Qemu) ported to qemuafl! See qemu_mode/libqasan/README.md diff --git a/include/envs.h b/include/envs.h index 4313e053..36667ebc 100644 --- a/include/envs.h +++ b/include/envs.h @@ -16,6 +16,7 @@ static char *afl_environment_deprecated[] = { static char *afl_environment_variables[] = { + "_AFL_LTO_COMPILE", "AFL_ALIGNED_ALLOC", "AFL_ALLOW_TMP", "AFL_ANALYZE_HEX", diff --git a/instrumentation/cmplog-instructions-pass.cc b/instrumentation/cmplog-instructions-pass.cc index b5cc1882..6b071b48 100644 --- a/instrumentation/cmplog-instructions-pass.cc +++ b/instrumentation/cmplog-instructions-pass.cc @@ -113,6 +113,8 @@ bool CmpLogInstructions::hookInstrs(Module &M) { IntegerType *Int64Ty = IntegerType::getInt64Ty(C); IntegerType *Int128Ty = IntegerType::getInt128Ty(C); + char *is_lto = getenv("_AFL_LTO_COMPILE"); + #if LLVM_VERSION_MAJOR < 9 Constant * #else @@ -265,10 +267,20 @@ bool CmpLogInstructions::hookInstrs(Module &M) { unsigned int max_size = Val->getType()->getIntegerBitWidth(), cast_size; unsigned char do_cast = 0; - if (!SI->getNumCases() || max_size < 16 || max_size % 8) { + if (!SI->getNumCases() || max_size < 16) { continue; } + + if (max_size % 8) { + + if (is_lto) { + + continue; // LTO cannot bitcast from _ExtInt() :( + + } else { - // if (!be_quiet) errs() << "skip trivial switch..\n"; - continue; + max_size = (((max_size / 8) + 1) * 8); + do_cast = 1; + + } } @@ -285,6 +297,7 @@ bool CmpLogInstructions::hookInstrs(Module &M) { } + if (is_lto) { continue; } // LTO cannot bitcast _ExtInt() :( max_size = 128; do_cast = 1; @@ -301,6 +314,7 @@ bool CmpLogInstructions::hookInstrs(Module &M) { cast_size = max_size; break; default: + if (is_lto) { continue; } // LTO cannot bitcast _ExtInt() :( cast_size = 128; do_cast = 1; @@ -540,7 +554,22 @@ bool CmpLogInstructions::hookInstrs(Module &M) { } - if (!max_size || max_size % 8 || max_size < 16) { continue; } + if (!max_size || max_size < 16) { continue; } + + if (max_size % 8) { + + if (is_lto) { + + continue; // LTO cannot bitcast from _ExtInt() :( + + } else { + + max_size = (((max_size / 8) + 1) * 8); + do_cast = 1; + + } + + } if (max_size > 128) { @@ -552,6 +581,7 @@ bool CmpLogInstructions::hookInstrs(Module &M) { } + if (is_lto) { continue; } // LTO cannot bitcast from _ExtInt() :( max_size = 128; do_cast = 1; @@ -568,6 +598,7 @@ bool CmpLogInstructions::hookInstrs(Module &M) { cast_size = max_size; break; default: + if (is_lto) { continue; } // LTO cannot bitcast from _ExtInt() :( cast_size = 128; do_cast = 1; diff --git a/qemu_mode/libqasan/dlmalloc.c b/qemu_mode/libqasan/dlmalloc.c index bace0ff6..aff58ad5 100644 --- a/qemu_mode/libqasan/dlmalloc.c +++ b/qemu_mode/libqasan/dlmalloc.c @@ -3917,6 +3917,7 @@ static void internal_malloc_stats(mstate m) { \ } else if (RTCHECK(B == smallbin_at(M, I) || \ \ + \ (ok_address(M, B) && B->fd == P))) { \ \ F->bk = B; \ @@ -4128,6 +4129,7 @@ static void internal_malloc_stats(mstate m) { \ } else \ \ + \ CORRUPTION_ERROR_ACTION(M); \ if (R != 0) { \ \ @@ -4144,6 +4146,7 @@ static void internal_malloc_stats(mstate m) { \ } else \ \ + \ CORRUPTION_ERROR_ACTION(M); \ \ } \ @@ -4156,12 +4159,14 @@ static void internal_malloc_stats(mstate m) { \ } else \ \ + \ CORRUPTION_ERROR_ACTION(M); \ \ } \ \ } else \ \ + \ CORRUPTION_ERROR_ACTION(M); \ \ } \ diff --git a/src/afl-cc.c 
b/src/afl-cc.c index d41f79a2..959c9a6f 100644 --- a/src/afl-cc.c +++ b/src/afl-cc.c @@ -1875,6 +1875,8 @@ int main(int argc, char **argv, char **envp) { edit_params(argc, argv, envp); + if (lto_mode) { setenv("_AFL_LTO_COMPILE", "1", 1); } + if (debug) { DEBUGF("cd '%s';", getthecwd()); diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index 527feef5..2b01ecad 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -1533,6 +1533,7 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf, is_n = 1; } + #endif for (i = 0; i < loggeds; ++i) { -- cgit 1.4.1 From 0298ae82b06c9776294c1da15ec278ef35dfa770 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Mon, 15 Feb 2021 13:52:03 +0100 Subject: code-format --- qemu_mode/libqasan/dlmalloc.c | 5548 ++++++++++++++++++++++++----------------- qemu_mode/libqasan/malloc.c | 109 +- 2 files changed, 3319 insertions(+), 2338 deletions(-) (limited to 'qemu_mode/libqasan/dlmalloc.c') diff --git a/qemu_mode/libqasan/dlmalloc.c b/qemu_mode/libqasan/dlmalloc.c index 7e3cb159..71cafd9d 100644 --- a/qemu_mode/libqasan/dlmalloc.c +++ b/qemu_mode/libqasan/dlmalloc.c @@ -207,9 +207,12 @@ mspaces as thread-locals. For example: static __thread mspace tlms = 0; void* tlmalloc(size_t bytes) { + if (tlms == 0) tlms = create_mspace(0, 0); return mspace_malloc(tlms, bytes); + } + void tlfree(void* mem) { mspace_free(tlms, mem); } Unless FOOTERS is defined, each mspace is completely independent. @@ -525,201 +528,203 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP improvement at the expense of carrying around more memory. */ -#define USE_DL_PREFIX - -/* Version identifier to allow people to support multiple versions */ -#ifndef DLMALLOC_VERSION -#define DLMALLOC_VERSION 20806 -#endif /* DLMALLOC_VERSION */ - -#ifndef DLMALLOC_EXPORT -#define DLMALLOC_EXPORT extern -#endif - -#ifndef WIN32 -#ifdef _WIN32 -#define WIN32 1 -#endif /* _WIN32 */ -#ifdef _WIN32_WCE -#define LACKS_FCNTL_H -#define WIN32 1 -#endif /* _WIN32_WCE */ -#endif /* WIN32 */ -#ifdef WIN32 -#define WIN32_LEAN_AND_MEAN -#include -#include -#define HAVE_MMAP 1 -#define HAVE_MORECORE 0 -#define LACKS_UNISTD_H -#define LACKS_SYS_PARAM_H -#define LACKS_SYS_MMAN_H -#define LACKS_STRING_H -#define LACKS_STRINGS_H -#define LACKS_SYS_TYPES_H -#define LACKS_ERRNO_H -#define LACKS_SCHED_H -#ifndef MALLOC_FAILURE_ACTION -#define MALLOC_FAILURE_ACTION -#endif /* MALLOC_FAILURE_ACTION */ -#ifndef MMAP_CLEARS -#ifdef _WIN32_WCE /* WINCE reportedly does not clear */ -#define MMAP_CLEARS 0 -#else -#define MMAP_CLEARS 1 -#endif /* _WIN32_WCE */ -#endif /*MMAP_CLEARS */ -#endif /* WIN32 */ - -#if defined(DARWIN) || defined(_DARWIN) -/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ -#ifndef HAVE_MORECORE -#define HAVE_MORECORE 0 -#define HAVE_MMAP 1 -/* OSX allocators provide 16 byte alignment */ -#ifndef MALLOC_ALIGNMENT -#define MALLOC_ALIGNMENT ((size_t)16U) -#endif -#endif /* HAVE_MORECORE */ -#endif /* DARWIN */ - -#ifndef LACKS_SYS_TYPES_H -#include /* For size_t */ -#endif /* LACKS_SYS_TYPES_H */ - -/* The maximum possible size_t value has all bits set */ -#define MAX_SIZE_T (~(size_t)0) - -#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */ -#define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \ - (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0)) -#endif /* USE_LOCKS */ - -#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */ -#if ((defined(__GNUC__) && \ - ((__GNUC__ 
> 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \ - defined(__i386__) || defined(__x86_64__))) || \ - (defined(_MSC_VER) && _MSC_VER>=1310)) -#ifndef USE_SPIN_LOCKS -#define USE_SPIN_LOCKS 1 -#endif /* USE_SPIN_LOCKS */ -#elif USE_SPIN_LOCKS -#error "USE_SPIN_LOCKS defined without implementation" -#endif /* ... locks available... */ -#elif !defined(USE_SPIN_LOCKS) -#define USE_SPIN_LOCKS 0 -#endif /* USE_LOCKS */ - -#ifndef ONLY_MSPACES -#define ONLY_MSPACES 0 -#endif /* ONLY_MSPACES */ -#ifndef MSPACES -#if ONLY_MSPACES -#define MSPACES 1 -#else /* ONLY_MSPACES */ -#define MSPACES 0 -#endif /* ONLY_MSPACES */ -#endif /* MSPACES */ -#ifndef MALLOC_ALIGNMENT -#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) -#endif /* MALLOC_ALIGNMENT */ -#ifndef FOOTERS -#define FOOTERS 0 -#endif /* FOOTERS */ -#ifndef ABORT -#define ABORT abort() -#endif /* ABORT */ -#ifndef ABORT_ON_ASSERT_FAILURE -#define ABORT_ON_ASSERT_FAILURE 1 -#endif /* ABORT_ON_ASSERT_FAILURE */ -#ifndef PROCEED_ON_ERROR -#define PROCEED_ON_ERROR 0 -#endif /* PROCEED_ON_ERROR */ - -#ifndef INSECURE -#define INSECURE 0 -#endif /* INSECURE */ -#ifndef MALLOC_INSPECT_ALL -#define MALLOC_INSPECT_ALL 0 -#endif /* MALLOC_INSPECT_ALL */ -#ifndef HAVE_MMAP -#define HAVE_MMAP 1 -#endif /* HAVE_MMAP */ -#ifndef MMAP_CLEARS -#define MMAP_CLEARS 1 -#endif /* MMAP_CLEARS */ -#ifndef HAVE_MREMAP -#ifdef linux -#define HAVE_MREMAP 1 -#define _GNU_SOURCE /* Turns on mremap() definition */ -#else /* linux */ -#define HAVE_MREMAP 0 -#endif /* linux */ -#endif /* HAVE_MREMAP */ -#ifndef MALLOC_FAILURE_ACTION -#define MALLOC_FAILURE_ACTION errno = ENOMEM; -#endif /* MALLOC_FAILURE_ACTION */ -#ifndef HAVE_MORECORE -#if ONLY_MSPACES -#define HAVE_MORECORE 0 -#else /* ONLY_MSPACES */ -#define HAVE_MORECORE 1 -#endif /* ONLY_MSPACES */ -#endif /* HAVE_MORECORE */ -#if !HAVE_MORECORE -#define MORECORE_CONTIGUOUS 0 -#else /* !HAVE_MORECORE */ -#define MORECORE_DEFAULT sbrk -#ifndef MORECORE_CONTIGUOUS -#define MORECORE_CONTIGUOUS 1 -#endif /* MORECORE_CONTIGUOUS */ -#endif /* HAVE_MORECORE */ -#ifndef DEFAULT_GRANULARITY -#if (MORECORE_CONTIGUOUS || defined(WIN32)) -#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ -#else /* MORECORE_CONTIGUOUS */ -#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) -#endif /* MORECORE_CONTIGUOUS */ -#endif /* DEFAULT_GRANULARITY */ -#ifndef DEFAULT_TRIM_THRESHOLD -#ifndef MORECORE_CANNOT_TRIM -#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) -#else /* MORECORE_CANNOT_TRIM */ -#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T -#endif /* MORECORE_CANNOT_TRIM */ -#endif /* DEFAULT_TRIM_THRESHOLD */ -#ifndef DEFAULT_MMAP_THRESHOLD -#if HAVE_MMAP -#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) -#else /* HAVE_MMAP */ -#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T -#endif /* HAVE_MMAP */ -#endif /* DEFAULT_MMAP_THRESHOLD */ -#ifndef MAX_RELEASE_CHECK_RATE -#if HAVE_MMAP -#define MAX_RELEASE_CHECK_RATE 4095 -#else -#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T -#endif /* HAVE_MMAP */ -#endif /* MAX_RELEASE_CHECK_RATE */ -#ifndef USE_BUILTIN_FFS -#define USE_BUILTIN_FFS 0 -#endif /* USE_BUILTIN_FFS */ -#ifndef USE_DEV_RANDOM -#define USE_DEV_RANDOM 0 -#endif /* USE_DEV_RANDOM */ -#ifndef NO_MALLINFO -#define NO_MALLINFO 0 -#endif /* NO_MALLINFO */ -#ifndef MALLINFO_FIELD_TYPE -#define MALLINFO_FIELD_TYPE size_t -#endif /* MALLINFO_FIELD_TYPE */ -#ifndef NO_MALLOC_STATS -#define NO_MALLOC_STATS 0 -#endif /* NO_MALLOC_STATS */ -#ifndef NO_SEGMENT_TRAVERSAL -#define 
NO_SEGMENT_TRAVERSAL 0 -#endif /* NO_SEGMENT_TRAVERSAL */ + #define USE_DL_PREFIX + + /* Version identifier to allow people to support multiple versions */ + #ifndef DLMALLOC_VERSION + #define DLMALLOC_VERSION 20806 + #endif /* DLMALLOC_VERSION */ + + #ifndef DLMALLOC_EXPORT + #define DLMALLOC_EXPORT extern + #endif + + #ifndef WIN32 + #ifdef _WIN32 + #define WIN32 1 + #endif /* _WIN32 */ + #ifdef _WIN32_WCE + #define LACKS_FCNTL_H + #define WIN32 1 + #endif /* _WIN32_WCE */ + #endif /* WIN32 */ + #ifdef WIN32 + #define WIN32_LEAN_AND_MEAN + #include + #include + #define HAVE_MMAP 1 + #define HAVE_MORECORE 0 + #define LACKS_UNISTD_H + #define LACKS_SYS_PARAM_H + #define LACKS_SYS_MMAN_H + #define LACKS_STRING_H + #define LACKS_STRINGS_H + #define LACKS_SYS_TYPES_H + #define LACKS_ERRNO_H + #define LACKS_SCHED_H + #ifndef MALLOC_FAILURE_ACTION + #define MALLOC_FAILURE_ACTION + #endif /* MALLOC_FAILURE_ACTION */ + #ifndef MMAP_CLEARS + #ifdef _WIN32_WCE /* WINCE reportedly does not clear */ + #define MMAP_CLEARS 0 + #else + #define MMAP_CLEARS 1 + #endif /* _WIN32_WCE */ + #endif /*MMAP_CLEARS */ + #endif /* WIN32 */ + + #if defined(DARWIN) || defined(_DARWIN) + /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ + #ifndef HAVE_MORECORE + #define HAVE_MORECORE 0 + #define HAVE_MMAP 1 + /* OSX allocators provide 16 byte alignment */ + #ifndef MALLOC_ALIGNMENT + #define MALLOC_ALIGNMENT ((size_t)16U) + #endif + #endif /* HAVE_MORECORE */ + #endif /* DARWIN */ + + #ifndef LACKS_SYS_TYPES_H + #include /* For size_t */ + #endif /* LACKS_SYS_TYPES_H */ + + /* The maximum possible size_t value has all bits set */ + #define MAX_SIZE_T (~(size_t)0) + + #ifndef USE_LOCKS /* ensure true if spin or recursive locks set */ + #define USE_LOCKS \ + ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \ + (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0)) + #endif /* USE_LOCKS */ + + #if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */ + #if ((defined(__GNUC__) && \ + ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \ + defined(__i386__) || defined(__x86_64__))) || \ + (defined(_MSC_VER) && _MSC_VER >= 1310)) + #ifndef USE_SPIN_LOCKS + #define USE_SPIN_LOCKS 1 + #endif /* USE_SPIN_LOCKS */ + #elif USE_SPIN_LOCKS + #error "USE_SPIN_LOCKS defined without implementation" + #endif /* ... locks available... 
*/ + #elif !defined(USE_SPIN_LOCKS) + #define USE_SPIN_LOCKS 0 + #endif /* USE_LOCKS */ + + #ifndef ONLY_MSPACES + #define ONLY_MSPACES 0 + #endif /* ONLY_MSPACES */ + #ifndef MSPACES + #if ONLY_MSPACES + #define MSPACES 1 + #else /* ONLY_MSPACES */ + #define MSPACES 0 + #endif /* ONLY_MSPACES */ + #endif /* MSPACES */ + #ifndef MALLOC_ALIGNMENT + #define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) + #endif /* MALLOC_ALIGNMENT */ + #ifndef FOOTERS + #define FOOTERS 0 + #endif /* FOOTERS */ + #ifndef ABORT + #define ABORT abort() + #endif /* ABORT */ + #ifndef ABORT_ON_ASSERT_FAILURE + #define ABORT_ON_ASSERT_FAILURE 1 + #endif /* ABORT_ON_ASSERT_FAILURE */ + #ifndef PROCEED_ON_ERROR + #define PROCEED_ON_ERROR 0 + #endif /* PROCEED_ON_ERROR */ + + #ifndef INSECURE + #define INSECURE 0 + #endif /* INSECURE */ + #ifndef MALLOC_INSPECT_ALL + #define MALLOC_INSPECT_ALL 0 + #endif /* MALLOC_INSPECT_ALL */ + #ifndef HAVE_MMAP + #define HAVE_MMAP 1 + #endif /* HAVE_MMAP */ + #ifndef MMAP_CLEARS + #define MMAP_CLEARS 1 + #endif /* MMAP_CLEARS */ + #ifndef HAVE_MREMAP + #ifdef linux + #define HAVE_MREMAP 1 + #define _GNU_SOURCE /* Turns on mremap() definition */ + #else /* linux */ + #define HAVE_MREMAP 0 + #endif /* linux */ + #endif /* HAVE_MREMAP */ + #ifndef MALLOC_FAILURE_ACTION + #define MALLOC_FAILURE_ACTION errno = ENOMEM; + #endif /* MALLOC_FAILURE_ACTION */ + #ifndef HAVE_MORECORE + #if ONLY_MSPACES + #define HAVE_MORECORE 0 + #else /* ONLY_MSPACES */ + #define HAVE_MORECORE 1 + #endif /* ONLY_MSPACES */ + #endif /* HAVE_MORECORE */ + #if !HAVE_MORECORE + #define MORECORE_CONTIGUOUS 0 + #else /* !HAVE_MORECORE */ + #define MORECORE_DEFAULT sbrk + #ifndef MORECORE_CONTIGUOUS + #define MORECORE_CONTIGUOUS 1 + #endif /* MORECORE_CONTIGUOUS */ + #endif /* HAVE_MORECORE */ + #ifndef DEFAULT_GRANULARITY + #if (MORECORE_CONTIGUOUS || defined(WIN32)) + #define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ + #else /* MORECORE_CONTIGUOUS */ + #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) + #endif /* MORECORE_CONTIGUOUS */ + #endif /* DEFAULT_GRANULARITY */ + #ifndef DEFAULT_TRIM_THRESHOLD + #ifndef MORECORE_CANNOT_TRIM + #define DEFAULT_TRIM_THRESHOLD \ + ((size_t)2U * (size_t)1024U * (size_t)1024U) + #else /* MORECORE_CANNOT_TRIM */ + #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T + #endif /* MORECORE_CANNOT_TRIM */ + #endif /* DEFAULT_TRIM_THRESHOLD */ + #ifndef DEFAULT_MMAP_THRESHOLD + #if HAVE_MMAP + #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) + #else /* HAVE_MMAP */ + #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T + #endif /* HAVE_MMAP */ + #endif /* DEFAULT_MMAP_THRESHOLD */ + #ifndef MAX_RELEASE_CHECK_RATE + #if HAVE_MMAP + #define MAX_RELEASE_CHECK_RATE 4095 + #else + #define MAX_RELEASE_CHECK_RATE MAX_SIZE_T + #endif /* HAVE_MMAP */ + #endif /* MAX_RELEASE_CHECK_RATE */ + #ifndef USE_BUILTIN_FFS + #define USE_BUILTIN_FFS 0 + #endif /* USE_BUILTIN_FFS */ + #ifndef USE_DEV_RANDOM + #define USE_DEV_RANDOM 0 + #endif /* USE_DEV_RANDOM */ + #ifndef NO_MALLINFO + #define NO_MALLINFO 0 + #endif /* NO_MALLINFO */ + #ifndef MALLINFO_FIELD_TYPE + #define MALLINFO_FIELD_TYPE size_t + #endif /* MALLINFO_FIELD_TYPE */ + #ifndef NO_MALLOC_STATS + #define NO_MALLOC_STATS 0 + #endif /* NO_MALLOC_STATS */ + #ifndef NO_SEGMENT_TRAVERSAL + #define NO_SEGMENT_TRAVERSAL 0 + #endif /* NO_SEGMENT_TRAVERSAL */ /* mallopt tuning options. 
SVID/XPG defines four standard parameter @@ -728,123 +733,128 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP malloc does support the following options. */ -#undef M_TRIM_THRESHOLD -#undef M_GRANULARITY -#undef M_MMAP_THRESHOLD -#define M_TRIM_THRESHOLD (-1) -#define M_GRANULARITY (-2) -#define M_MMAP_THRESHOLD (-3) + #undef M_TRIM_THRESHOLD + #undef M_GRANULARITY + #undef M_MMAP_THRESHOLD + #define M_TRIM_THRESHOLD (-1) + #define M_GRANULARITY (-2) + #define M_MMAP_THRESHOLD (-3) /* ------------------------ Mallinfo declarations ------------------------ */ -#if !NO_MALLINFO -/* - This version of malloc supports the standard SVID/XPG mallinfo - routine that returns a struct containing usage properties and - statistics. It should work on any system that has a - /usr/include/malloc.h defining struct mallinfo. The main - declaration needed is the mallinfo struct that is returned (by-copy) - by mallinfo(). The malloinfo struct contains a bunch of fields that - are not even meaningful in this version of malloc. These fields are - are instead filled by mallinfo() with other numbers that might be of - interest. - - HAVE_USR_INCLUDE_MALLOC_H should be set if you have a - /usr/include/malloc.h file that includes a declaration of struct - mallinfo. If so, it is included; else a compliant version is - declared below. These must be precisely the same for mallinfo() to - work. The original SVID version of this struct, defined on most - systems with mallinfo, declares all fields as ints. But some others - define as unsigned long. If your system defines the fields using a - type of different width than listed here, you MUST #include your - system version and #define HAVE_USR_INCLUDE_MALLOC_H. -*/ + #if !NO_MALLINFO + /* + This version of malloc supports the standard SVID/XPG mallinfo + routine that returns a struct containing usage properties and + statistics. It should work on any system that has a + /usr/include/malloc.h defining struct mallinfo. The main + declaration needed is the mallinfo struct that is returned (by-copy) + by mallinfo(). The malloinfo struct contains a bunch of fields that + are not even meaningful in this version of malloc. These fields are + are instead filled by mallinfo() with other numbers that might be of + interest. + + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a + /usr/include/malloc.h file that includes a declaration of struct + mallinfo. If so, it is included; else a compliant version is + declared below. These must be precisely the same for mallinfo() to + work. The original SVID version of this struct, defined on most + systems with mallinfo, declares all fields as ints. But some others + define as unsigned long. If your system defines the fields using a + type of different width than listed here, you MUST #include your + system version and #define HAVE_USR_INCLUDE_MALLOC_H. + */ -/* #define HAVE_USR_INCLUDE_MALLOC_H */ + /* #define HAVE_USR_INCLUDE_MALLOC_H */ -#ifdef HAVE_USR_INCLUDE_MALLOC_H -#include "/usr/include/malloc.h" -#else /* HAVE_USR_INCLUDE_MALLOC_H */ -#ifndef STRUCT_MALLINFO_DECLARED -/* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */ -#define _STRUCT_MALLINFO -#define STRUCT_MALLINFO_DECLARED 1 + #ifdef HAVE_USR_INCLUDE_MALLOC_H + #include "/usr/include/malloc.h" + #else /* HAVE_USR_INCLUDE_MALLOC_H */ + #ifndef STRUCT_MALLINFO_DECLARED + /* HP-UX (and others?) 
redefines mallinfo unless _STRUCT_MALLINFO is + * defined */ + #define _STRUCT_MALLINFO + #define STRUCT_MALLINFO_DECLARED 1 struct mallinfo { - MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ - MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ - MALLINFO_FIELD_TYPE smblks; /* always 0 */ - MALLINFO_FIELD_TYPE hblks; /* always 0 */ - MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ - MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ - MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ - MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ - MALLINFO_FIELD_TYPE fordblks; /* total free space */ - MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ + + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ + MALLINFO_FIELD_TYPE smblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ + MALLINFO_FIELD_TYPE fordblks; /* total free space */ + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ + }; -#endif /* STRUCT_MALLINFO_DECLARED */ -#endif /* HAVE_USR_INCLUDE_MALLOC_H */ -#endif /* NO_MALLINFO */ + + #endif /* STRUCT_MALLINFO_DECLARED */ + #endif /* HAVE_USR_INCLUDE_MALLOC_H */ + #endif /* NO_MALLINFO */ /* Try to persuade compilers to inline. The most critical functions for inlining are defined as macros, so these aren't used for them. */ -#ifndef FORCEINLINE - #if defined(__GNUC__) -#define FORCEINLINE __inline __attribute__ ((always_inline)) - #elif defined(_MSC_VER) - #define FORCEINLINE __forceinline + #ifndef FORCEINLINE + #if defined(__GNUC__) + #define FORCEINLINE __inline __attribute__((always_inline)) + #elif defined(_MSC_VER) + #define FORCEINLINE __forceinline + #endif #endif -#endif -#ifndef NOINLINE - #if defined(__GNUC__) - #define NOINLINE __attribute__ ((noinline)) - #elif defined(_MSC_VER) - #define NOINLINE __declspec(noinline) - #else - #define NOINLINE + #ifndef NOINLINE + #if defined(__GNUC__) + #define NOINLINE __attribute__((noinline)) + #elif defined(_MSC_VER) + #define NOINLINE __declspec(noinline) + #else + #define NOINLINE + #endif #endif -#endif -#ifdef __cplusplus + #ifdef __cplusplus extern "C" { -#ifndef FORCEINLINE - #define FORCEINLINE inline -#endif -#endif /* __cplusplus */ -#ifndef FORCEINLINE - #define FORCEINLINE -#endif - -#if !ONLY_MSPACES - -/* ------------------- Declarations of public routines ------------------- */ - -#ifndef USE_DL_PREFIX -#define dlcalloc calloc -#define dlfree free -#define dlmalloc malloc -#define dlmemalign memalign -#define dlposix_memalign posix_memalign -#define dlrealloc realloc -#define dlrealloc_in_place realloc_in_place -#define dlvalloc valloc -#define dlpvalloc pvalloc -#define dlmallinfo mallinfo -#define dlmallopt mallopt -#define dlmalloc_trim malloc_trim -#define dlmalloc_stats malloc_stats -#define dlmalloc_usable_size malloc_usable_size -#define dlmalloc_footprint malloc_footprint -#define dlmalloc_max_footprint malloc_max_footprint -#define dlmalloc_footprint_limit malloc_footprint_limit -#define dlmalloc_set_footprint_limit malloc_set_footprint_limit -#define dlmalloc_inspect_all malloc_inspect_all -#define dlindependent_calloc independent_calloc -#define dlindependent_comalloc independent_comalloc 
-#define dlbulk_free bulk_free -#endif /* USE_DL_PREFIX */ + + #ifndef FORCEINLINE + #define FORCEINLINE inline + #endif + #endif /* __cplusplus */ + #ifndef FORCEINLINE + #define FORCEINLINE + #endif + + #if !ONLY_MSPACES + + /* ------------------- Declarations of public routines ------------------- */ + + #ifndef USE_DL_PREFIX + #define dlcalloc calloc + #define dlfree free + #define dlmalloc malloc + #define dlmemalign memalign + #define dlposix_memalign posix_memalign + #define dlrealloc realloc + #define dlrealloc_in_place realloc_in_place + #define dlvalloc valloc + #define dlpvalloc pvalloc + #define dlmallinfo mallinfo + #define dlmallopt mallopt + #define dlmalloc_trim malloc_trim + #define dlmalloc_stats malloc_stats + #define dlmalloc_usable_size malloc_usable_size + #define dlmalloc_footprint malloc_footprint + #define dlmalloc_max_footprint malloc_max_footprint + #define dlmalloc_footprint_limit malloc_footprint_limit + #define dlmalloc_set_footprint_limit malloc_set_footprint_limit + #define dlmalloc_inspect_all malloc_inspect_all + #define dlindependent_calloc independent_calloc + #define dlindependent_comalloc independent_comalloc + #define dlbulk_free bulk_free + #endif /* USE_DL_PREFIX */ /* malloc(size_t n) @@ -860,7 +870,7 @@ extern "C" { maximum supported value of n differs across systems, but is in all cases less than the maximum representable value of a size_t. */ -DLMALLOC_EXPORT void* dlmalloc(size_t); +DLMALLOC_EXPORT void *dlmalloc(size_t); /* free(void* p) @@ -869,14 +879,14 @@ DLMALLOC_EXPORT void* dlmalloc(size_t); It has no effect if p is null. If p was not malloced or already freed, free(p) will by default cause the current program to abort. */ -DLMALLOC_EXPORT void dlfree(void*); +DLMALLOC_EXPORT void dlfree(void *); /* calloc(size_t n_elements, size_t element_size); Returns a pointer to n_elements * element_size bytes, with all locations set to zero. */ -DLMALLOC_EXPORT void* dlcalloc(size_t, size_t); +DLMALLOC_EXPORT void *dlcalloc(size_t, size_t); /* realloc(void* p, size_t n) @@ -900,7 +910,7 @@ DLMALLOC_EXPORT void* dlcalloc(size_t, size_t); The old unix realloc convention of allowing the last-free'd chunk to be used as an argument to realloc is not supported. */ -DLMALLOC_EXPORT void* dlrealloc(void*, size_t); +DLMALLOC_EXPORT void *dlrealloc(void *, size_t); /* realloc_in_place(void* p, size_t n) @@ -915,7 +925,7 @@ DLMALLOC_EXPORT void* dlrealloc(void*, size_t); Returns p if successful; otherwise null. */ -DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t); +DLMALLOC_EXPORT void *dlrealloc_in_place(void *, size_t); /* memalign(size_t alignment, size_t n); @@ -929,7 +939,7 @@ DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t); Overreliance on memalign is a sure way to fragment space. */ -DLMALLOC_EXPORT void* dlmemalign(size_t, size_t); +DLMALLOC_EXPORT void *dlmemalign(size_t, size_t); /* int posix_memalign(void** pp, size_t alignment, size_t n); @@ -939,14 +949,14 @@ DLMALLOC_EXPORT void* dlmemalign(size_t, size_t); returns EINVAL if the alignment is not a power of two (3) fails and returns ENOMEM if memory cannot be allocated. */ -DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t); +DLMALLOC_EXPORT int dlposix_memalign(void **, size_t, size_t); /* valloc(size_t n); Equivalent to memalign(pagesize, n), where pagesize is the page size of the system. If the pagesize is unknown, 4096 is used. 
*/ -DLMALLOC_EXPORT void* dlvalloc(size_t); +DLMALLOC_EXPORT void *dlvalloc(size_t); /* mallopt(int parameter_number, int parameter_value) @@ -1021,7 +1031,7 @@ DLMALLOC_EXPORT size_t dlmalloc_footprint_limit(); */ DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes); -#if MALLOC_INSPECT_ALL + #if MALLOC_INSPECT_ALL /* malloc_inspect_all(void(*handler)(void *start, void *end, @@ -1043,19 +1053,23 @@ DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes); than 1000, you could write: static int count = 0; void count_chunks(void* start, void* end, size_t used, void* arg) { + if (used >= 1000) ++count; + } + then: malloc_inspect_all(count_chunks, NULL); malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined. */ -DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), - void* arg); +DLMALLOC_EXPORT void dlmalloc_inspect_all(void (*handler)(void *, void *, + size_t, void *), + void *arg); -#endif /* MALLOC_INSPECT_ALL */ + #endif /* MALLOC_INSPECT_ALL */ -#if !NO_MALLINFO + #if !NO_MALLINFO /* mallinfo() Returns (by copy) a struct containing various summary statistics: @@ -1079,7 +1093,7 @@ DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, thus be inaccurate. */ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); -#endif /* NO_MALLINFO */ + #endif /* NO_MALLINFO */ /* independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); @@ -1117,6 +1131,7 @@ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); struct Node { int item; struct Node* next; }; struct Node* build_list() { + struct Node** pool; int n = read_number_of_nodes_needed(); if (n <= 0) return 0; @@ -1128,9 +1143,11 @@ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); pool[i]->next = pool[i+1]; free(pool); // Can now free the array (or not, if it is needed later) return first; + } + */ -DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); +DLMALLOC_EXPORT void **dlindependent_calloc(size_t, size_t, void **); /* independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); @@ -1169,6 +1186,7 @@ DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); struct Foot { ... } void send_message(char* msg) { + int msglen = strlen(msg); size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; void* chunks[3]; @@ -1178,6 +1196,7 @@ DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); char* body = (char*)(chunks[1]); struct Foot* foot = (struct Foot*)(chunks[2]); // ... + } In general though, independent_comalloc is worth using only for @@ -1188,7 +1207,7 @@ DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); since it cannot reuse existing noncontiguous small chunks that might be available for some of the elements. */ -DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**); +DLMALLOC_EXPORT void **dlindependent_comalloc(size_t, size_t *, void **); /* bulk_free(void* array[], size_t n_elements) @@ -1199,14 +1218,14 @@ DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**); is returned. For large arrays of pointers with poor locality, it may be worthwhile to sort this array before calling bulk_free. */ -DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements); +DLMALLOC_EXPORT size_t dlbulk_free(void **, size_t n_elements); /* pvalloc(size_t n); Equivalent to valloc(minimum-page-that-holds(n)), that is, round up n to nearest pagesize. 
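Because independent_calloc and bulk_free are designed to be used together, a short sketch may help. It reuses the struct Node type from the example above; the helper name make_and_drop_nodes is illustrative only:

  static void make_and_drop_nodes(void) {

    void * pool[32];
    size_t left;

    if (dlindependent_calloc(32, sizeof(struct Node), pool) == NULL) return;
    /* ... link or fill the 32 zeroed nodes ... */
    left = dlbulk_free(pool, 32);      /* 0 means every chunk was freed */
    (void)left;

  }

Passing the caller-owned pool array avoids the extra bookkeeping allocation, and the single bulk_free call replaces 32 separate frees.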
*/ -DLMALLOC_EXPORT void* dlpvalloc(size_t); +DLMALLOC_EXPORT void *dlpvalloc(size_t); /* malloc_trim(size_t pad); @@ -1229,7 +1248,7 @@ DLMALLOC_EXPORT void* dlpvalloc(size_t); Malloc_trim returns 1 if it actually released any memory, else 0. */ -DLMALLOC_EXPORT int dlmalloc_trim(size_t); +DLMALLOC_EXPORT int dlmalloc_trim(size_t); /* malloc_stats(); @@ -1250,7 +1269,7 @@ DLMALLOC_EXPORT int dlmalloc_trim(size_t); malloc_stats prints only the most commonly interesting statistics. More information can be obtained by calling mallinfo. */ -DLMALLOC_EXPORT void dlmalloc_stats(void); +DLMALLOC_EXPORT void dlmalloc_stats(void); /* malloc_usable_size(void* p); @@ -1266,17 +1285,17 @@ DLMALLOC_EXPORT void dlmalloc_stats(void); p = malloc(n); assert(malloc_usable_size(p) >= 256); */ -size_t dlmalloc_usable_size(void*); +size_t dlmalloc_usable_size(void *); -#endif /* ONLY_MSPACES */ + #endif /* ONLY_MSPACES */ -#if MSPACES + #if MSPACES /* mspace is an opaque type representing an independent region of space that supports mspace_malloc, etc. */ -typedef void* mspace; +typedef void *mspace; /* create_mspace creates and returns a new independent space with the @@ -1308,7 +1327,8 @@ DLMALLOC_EXPORT size_t destroy_mspace(mspace msp); Destroying this space will deallocate all additionally allocated space (if possible) but not the initial base. */ -DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked); +DLMALLOC_EXPORT mspace create_mspace_with_base(void *base, size_t capacity, + int locked); /* mspace_track_large_chunks controls whether requests for large chunks @@ -1323,12 +1343,11 @@ DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int */ DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable); - /* mspace_malloc behaves as malloc, but operates within the given space. */ -DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes); +DLMALLOC_EXPORT void *mspace_malloc(mspace msp, size_t bytes); /* mspace_free behaves as free, but operates within @@ -1338,7 +1357,7 @@ DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes); free may be called instead of mspace_free because freed chunks from any space are handled by their originating spaces. */ -DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem); +DLMALLOC_EXPORT void mspace_free(mspace msp, void *mem); /* mspace_realloc behaves as realloc, but operates within @@ -1349,33 +1368,38 @@ DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem); realloced chunks from any space are handled by their originating spaces. */ -DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize); +DLMALLOC_EXPORT void *mspace_realloc(mspace msp, void *mem, size_t newsize); /* mspace_calloc behaves as calloc, but operates within the given space. */ -DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); +DLMALLOC_EXPORT void *mspace_calloc(mspace msp, size_t n_elements, + size_t elem_size); /* mspace_memalign behaves as memalign, but operates within the given space. */ -DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); +DLMALLOC_EXPORT void *mspace_memalign(mspace msp, size_t alignment, + size_t bytes); /* mspace_independent_calloc behaves as independent_calloc, but operates within the given space. 
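A minimal mspace round trip may make the calls above concrete. It assumes MSPACES is enabled; the helper name scratch_space_demo is illustrative, not part of this patch:

  static void scratch_space_demo(void) {

    mspace ms = create_mspace(0, 0);   /* default capacity, no internal lock */
    void * p;

    if (ms == 0) return;
    p = mspace_malloc(ms, 128);
    if (p != 0) mspace_free(ms, p);
    destroy_mspace(ms);                /* releases the whole space at once */

  }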
*/ -DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements, - size_t elem_size, void* chunks[]); +DLMALLOC_EXPORT void **mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, + void * chunks[]); /* mspace_independent_comalloc behaves as independent_comalloc, but operates within the given space. */ -DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements, - size_t sizes[], void* chunks[]); +DLMALLOC_EXPORT void **mspace_independent_comalloc(mspace msp, + size_t n_elements, + size_t sizes[], + void * chunks[]); /* mspace_footprint() returns the number of bytes obtained from the @@ -1389,19 +1413,18 @@ DLMALLOC_EXPORT size_t mspace_footprint(mspace msp); */ DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp); - -#if !NO_MALLINFO + #if !NO_MALLINFO /* mspace_mallinfo behaves as mallinfo, but reports properties of the given space. */ DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp); -#endif /* NO_MALLINFO */ + #endif /* NO_MALLINFO */ /* malloc_usable_size(void* p) behaves the same as malloc_usable_size; */ -DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem); +DLMALLOC_EXPORT size_t mspace_usable_size(const void *mem); /* mspace_malloc_stats behaves as malloc_stats, but reports @@ -1420,11 +1443,13 @@ DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad); */ DLMALLOC_EXPORT int mspace_mallopt(int, int); -#endif /* MSPACES */ + #endif /* MSPACES */ -#ifdef __cplusplus -} /* end of extern "C" */ -#endif /* __cplusplus */ + #ifdef __cplusplus + +} /* end of extern "C" */ + + #endif /* __cplusplus */ /* ======================================================================== @@ -1438,392 +1463,413 @@ DLMALLOC_EXPORT int mspace_mallopt(int, int); /*------------------------------ internal #includes ---------------------- */ -#ifdef _MSC_VER -#pragma warning( disable : 4146 ) /* no "unsigned" warnings */ -#endif /* _MSC_VER */ -#if !NO_MALLOC_STATS -#include /* for printing in malloc_stats */ -#endif /* NO_MALLOC_STATS */ -#ifndef LACKS_ERRNO_H -#include /* for MALLOC_FAILURE_ACTION */ -#endif /* LACKS_ERRNO_H */ -#ifdef DEBUG -#if ABORT_ON_ASSERT_FAILURE -#undef assert -#define assert(x) if(!(x)) ABORT -#else /* ABORT_ON_ASSERT_FAILURE */ -#include -#endif /* ABORT_ON_ASSERT_FAILURE */ -#else /* DEBUG */ -#ifndef assert -#define assert(x) -#endif -#define DEBUG 0 -#endif /* DEBUG */ -#if !defined(WIN32) && !defined(LACKS_TIME_H) -#include /* for magic initialization */ -#endif /* WIN32 */ -#ifndef LACKS_STDLIB_H -#include /* for abort() */ -#endif /* LACKS_STDLIB_H */ -#ifndef LACKS_STRING_H -#include /* for memset etc */ -#endif /* LACKS_STRING_H */ -#if USE_BUILTIN_FFS -#ifndef LACKS_STRINGS_H -#include /* for ffs */ -#endif /* LACKS_STRINGS_H */ -#endif /* USE_BUILTIN_FFS */ -#if HAVE_MMAP -#ifndef LACKS_SYS_MMAN_H -/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */ -#if (defined(linux) && !defined(__USE_GNU)) -#define __USE_GNU 1 -#include /* for mmap */ -#undef __USE_GNU -#else -#include /* for mmap */ -#endif /* linux */ -#endif /* LACKS_SYS_MMAN_H */ -#ifndef LACKS_FCNTL_H -#include -#endif /* LACKS_FCNTL_H */ -#endif /* HAVE_MMAP */ -#ifndef LACKS_UNISTD_H -#include /* for sbrk, sysconf */ -#else /* LACKS_UNISTD_H */ -#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) -extern void* sbrk(ptrdiff_t); -#endif /* FreeBSD etc */ -#endif /* LACKS_UNISTD_H */ - -/* Declarations for locking */ -#if USE_LOCKS -#ifndef WIN32 -#if defined (__SVR4) && defined 
(__sun) /* solaris */ -#include -#elif !defined(LACKS_SCHED_H) -#include -#endif /* solaris or LACKS_SCHED_H */ -#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS -#include -#endif /* USE_RECURSIVE_LOCKS ... */ -#elif defined(_MSC_VER) -#ifndef _M_AMD64 -/* These are already defined on AMD64 builds */ -#ifdef __cplusplus + #ifdef _MSC_VER + #pragma warning(disable : 4146) /* no "unsigned" warnings */ + #endif /* _MSC_VER */ + #if !NO_MALLOC_STATS + #include /* for printing in malloc_stats */ + #endif /* NO_MALLOC_STATS */ + #ifndef LACKS_ERRNO_H + #include /* for MALLOC_FAILURE_ACTION */ + #endif /* LACKS_ERRNO_H */ + #ifdef DEBUG + #if ABORT_ON_ASSERT_FAILURE + #undef assert + #define assert(x) \ + if (!(x)) ABORT + #else /* ABORT_ON_ASSERT_FAILURE */ + #include + #endif /* ABORT_ON_ASSERT_FAILURE */ + #else /* DEBUG */ + #ifndef assert + #define assert(x) + #endif + #define DEBUG 0 + #endif /* DEBUG */ + #if !defined(WIN32) && !defined(LACKS_TIME_H) + #include /* for magic initialization */ + #endif /* WIN32 */ + #ifndef LACKS_STDLIB_H + #include /* for abort() */ + #endif /* LACKS_STDLIB_H */ + #ifndef LACKS_STRING_H + #include /* for memset etc */ + #endif /* LACKS_STRING_H */ + #if USE_BUILTIN_FFS + #ifndef LACKS_STRINGS_H + #include /* for ffs */ + #endif /* LACKS_STRINGS_H */ + #endif /* USE_BUILTIN_FFS */ + #if HAVE_MMAP + #ifndef LACKS_SYS_MMAN_H + /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */ + #if (defined(linux) && !defined(__USE_GNU)) + #define __USE_GNU 1 + #include /* for mmap */ + #undef __USE_GNU + #else + #include /* for mmap */ + #endif /* linux */ + #endif /* LACKS_SYS_MMAN_H */ + #ifndef LACKS_FCNTL_H + #include + #endif /* LACKS_FCNTL_H */ + #endif /* HAVE_MMAP */ + #ifndef LACKS_UNISTD_H + #include /* for sbrk, sysconf */ + #else /* LACKS_UNISTD_H */ + #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) +extern void *sbrk(ptrdiff_t); + #endif /* FreeBSD etc */ + #endif /* LACKS_UNISTD_H */ + + /* Declarations for locking */ + #if USE_LOCKS + #ifndef WIN32 + #if defined(__SVR4) && defined(__sun) /* solaris */ + #include + #elif !defined(LACKS_SCHED_H) + #include + #endif /* solaris or LACKS_SCHED_H */ + #if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || \ + !USE_SPIN_LOCKS + #include + #endif /* USE_RECURSIVE_LOCKS ... 
*/ + #elif defined(_MSC_VER) + #ifndef _M_AMD64 + /* These are already defined on AMD64 builds */ + #ifdef __cplusplus extern "C" { -#endif /* __cplusplus */ -LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp); + + #endif /* __cplusplus */ +LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, + LONG Comp); LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value); -#ifdef __cplusplus + #ifdef __cplusplus + } -#endif /* __cplusplus */ -#endif /* _M_AMD64 */ -#pragma intrinsic (_InterlockedCompareExchange) -#pragma intrinsic (_InterlockedExchange) -#define interlockedcompareexchange _InterlockedCompareExchange -#define interlockedexchange _InterlockedExchange -#elif defined(WIN32) && defined(__GNUC__) -#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b) -#define interlockedexchange __sync_lock_test_and_set -#endif /* Win32 */ -#else /* USE_LOCKS */ -#endif /* USE_LOCKS */ - -#ifndef LOCK_AT_FORK -#define LOCK_AT_FORK 0 -#endif - -/* Declarations for bit scanning on win32 */ -#if defined(_MSC_VER) && _MSC_VER>=1300 -#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ -#ifdef __cplusplus + + #endif /* __cplusplus */ + #endif /* _M_AMD64 */ + #pragma intrinsic(_InterlockedCompareExchange) + #pragma intrinsic(_InterlockedExchange) + #define interlockedcompareexchange _InterlockedCompareExchange + #define interlockedexchange _InterlockedExchange + #elif defined(WIN32) && defined(__GNUC__) + #define interlockedcompareexchange(a, b, c) \ + __sync_val_compare_and_swap(a, c, b) + #define interlockedexchange __sync_lock_test_and_set + #endif /* Win32 */ + #else /* USE_LOCKS */ + #endif /* USE_LOCKS */ + + #ifndef LOCK_AT_FORK + #define LOCK_AT_FORK 0 + #endif + + /* Declarations for bit scanning on win32 */ + #if defined(_MSC_VER) && _MSC_VER >= 1300 + #ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ + #ifdef __cplusplus extern "C" { -#endif /* __cplusplus */ + + #endif /* __cplusplus */ unsigned char _BitScanForward(unsigned long *index, unsigned long mask); unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); -#ifdef __cplusplus + #ifdef __cplusplus + } -#endif /* __cplusplus */ - -#define BitScanForward _BitScanForward -#define BitScanReverse _BitScanReverse -#pragma intrinsic(_BitScanForward) -#pragma intrinsic(_BitScanReverse) -#endif /* BitScanForward */ -#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ - -#ifndef WIN32 -#ifndef malloc_getpagesize -# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ -# ifndef _SC_PAGE_SIZE -# define _SC_PAGE_SIZE _SC_PAGESIZE -# endif -# endif -# ifdef _SC_PAGE_SIZE -# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) -# else -# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) - extern size_t getpagesize(); -# define malloc_getpagesize getpagesize() -# else -# ifdef WIN32 /* use supplied emulation of getpagesize */ -# define malloc_getpagesize getpagesize() -# else -# ifndef LACKS_SYS_PARAM_H -# include -# endif -# ifdef EXEC_PAGESIZE -# define malloc_getpagesize EXEC_PAGESIZE -# else -# ifdef NBPG -# ifndef CLSIZE -# define malloc_getpagesize NBPG -# else -# define malloc_getpagesize (NBPG * CLSIZE) -# endif -# else -# ifdef NBPC -# define malloc_getpagesize NBPC -# else -# ifdef PAGESIZE -# define malloc_getpagesize PAGESIZE -# else /* just guess */ -# define malloc_getpagesize ((size_t)4096U) -# endif -# endif -# endif -# endif -# endif -# endif -# endif -#endif -#endif - -/* ------------------- 
size_t and alignment properties -------------------- */ - -/* The byte and bit size of a size_t */ -#define SIZE_T_SIZE (sizeof(size_t)) -#define SIZE_T_BITSIZE (sizeof(size_t) << 3) - -/* Some constants coerced to size_t */ -/* Annoying but necessary to avoid errors on some platforms */ -#define SIZE_T_ZERO ((size_t)0) -#define SIZE_T_ONE ((size_t)1) -#define SIZE_T_TWO ((size_t)2) -#define SIZE_T_FOUR ((size_t)4) -#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) -#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) -#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) -#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) - -/* The bit mask value corresponding to MALLOC_ALIGNMENT */ -#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) - -/* True if address a has acceptable alignment */ -#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) - -/* the number of bytes to offset an address to align it */ -#define align_offset(A)\ - ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ - ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) - -/* -------------------------- MMAP preliminaries ------------------------- */ -/* - If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and - checks to fail so compiler optimizer can delete code rather than - using so many "#if"s. -*/ + #endif /* __cplusplus */ + + #define BitScanForward _BitScanForward + #define BitScanReverse _BitScanReverse + #pragma intrinsic(_BitScanForward) + #pragma intrinsic(_BitScanReverse) + #endif /* BitScanForward */ + #endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ + + #ifndef WIN32 + #ifndef malloc_getpagesize + #ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ + #ifndef _SC_PAGE_SIZE + #define _SC_PAGE_SIZE _SC_PAGESIZE + #endif + #endif + #ifdef _SC_PAGE_SIZE + #define malloc_getpagesize sysconf(_SC_PAGE_SIZE) + #else + #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) +extern size_t getpagesize(); + #define malloc_getpagesize getpagesize() + #else + #ifdef WIN32 /* use supplied emulation of getpagesize */ + #define malloc_getpagesize getpagesize() + #else + #ifndef LACKS_SYS_PARAM_H + #include + #endif + #ifdef EXEC_PAGESIZE + #define malloc_getpagesize EXEC_PAGESIZE + #else + #ifdef NBPG + #ifndef CLSIZE + #define malloc_getpagesize NBPG + #else + #define malloc_getpagesize (NBPG * CLSIZE) + #endif + #else + #ifdef NBPC + #define malloc_getpagesize NBPC + #else + #ifdef PAGESIZE + #define malloc_getpagesize PAGESIZE + #else /* just guess */ + #define malloc_getpagesize ((size_t)4096U) + #endif + #endif + #endif + #endif + #endif + #endif + #endif + #endif + #endif + + /* ------------------- size_t and alignment properties -------------------- */ + + /* The byte and bit size of a size_t */ + #define SIZE_T_SIZE (sizeof(size_t)) + #define SIZE_T_BITSIZE (sizeof(size_t) << 3) + + /* Some constants coerced to size_t */ + /* Annoying but necessary to avoid errors on some platforms */ + #define SIZE_T_ZERO ((size_t)0) + #define SIZE_T_ONE ((size_t)1) + #define SIZE_T_TWO ((size_t)2) + #define SIZE_T_FOUR ((size_t)4) + #define TWO_SIZE_T_SIZES (SIZE_T_SIZE << 1) + #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE << 2) + #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES + TWO_SIZE_T_SIZES) + #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) + + /* The bit mask value corresponding to MALLOC_ALIGNMENT */ + #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) + + /* True if address a has acceptable alignment */ + #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) + + /* the number of 
bytes to offset an address to align it */ + #define align_offset(A) \ + ((((size_t)(A)&CHUNK_ALIGN_MASK) == 0) \ + ? 0 \ + : ((MALLOC_ALIGNMENT - ((size_t)(A)&CHUNK_ALIGN_MASK)) & \ + CHUNK_ALIGN_MASK)) + + /* -------------------------- MMAP preliminaries ------------------------- */ + + /* + If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and + checks to fail so compiler optimizer can delete code rather than + using so many "#if"s. + */ + /* MORECORE and MMAP must return MFAIL on failure */ + #define MFAIL ((void *)(MAX_SIZE_T)) + #define CMFAIL ((char *)(MFAIL)) /* defined for convenience */ -/* MORECORE and MMAP must return MFAIL on failure */ -#define MFAIL ((void*)(MAX_SIZE_T)) -#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ + #if HAVE_MMAP -#if HAVE_MMAP + #ifndef WIN32 + #define MMAP_PROT (PROT_READ | PROT_WRITE) + #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) + #define MAP_ANONYMOUS MAP_ANON + #endif /* MAP_ANON */ + #ifdef MAP_ANONYMOUS -#ifndef WIN32 -#define MMAP_PROT (PROT_READ|PROT_WRITE) -#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) -#define MAP_ANONYMOUS MAP_ANON -#endif /* MAP_ANON */ -#ifdef MAP_ANONYMOUS + #define MMAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS) -#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) +static FORCEINLINE void *unixmmap(size_t size) { -static FORCEINLINE void* unixmmap(size_t size) { - void* result; + void *result; result = mmap(0, size, MMAP_PROT, MMAP_FLAGS, -1, 0); - if (result == MFAIL) - return MFAIL; + if (result == MFAIL) return MFAIL; return result; + } -static FORCEINLINE int unixmunmap(void* ptr, size_t size) { +static FORCEINLINE int unixmunmap(void *ptr, size_t size) { + int result; result = munmap(ptr, size); - if (result != 0) - return result; + if (result != 0) return result; return result; + } -#define MMAP_DEFAULT(s) unixmmap(s) -#define MUNMAP_DEFAULT(a, s) unixmunmap((a), (s)) + #define MMAP_DEFAULT(s) unixmmap(s) + #define MUNMAP_DEFAULT(a, s) unixmunmap((a), (s)) -#else /* MAP_ANONYMOUS */ -/* - Nearly all versions of mmap support MAP_ANONYMOUS, so the following - is unlikely to be needed, but is supplied just in case. -*/ -#define MMAP_FLAGS (MAP_PRIVATE) -static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ -#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \ - (dev_zero_fd = open("/dev/zero", O_RDWR), \ - mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ - mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) -#define MUNMAP_DEFAULT(a, s) munmap((a), (s)) -#endif /* MAP_ANONYMOUS */ + #else /* MAP_ANONYMOUS */ + /* + Nearly all versions of mmap support MAP_ANONYMOUS, so the following + is unlikely to be needed, but is supplied just in case. + */ + #define MMAP_FLAGS (MAP_PRIVATE) +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ + #define MMAP_DEFAULT(s) \ + ((dev_zero_fd < 0) \ + ? 
(dev_zero_fd = open("/dev/zero", O_RDWR), \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) \ + : mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) + #define MUNMAP_DEFAULT(a, s) munmap((a), (s)) + #endif /* MAP_ANONYMOUS */ -#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) + #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) -#else /* WIN32 */ + #else /* WIN32 */ /* Win32 MMAP via VirtualAlloc */ -static FORCEINLINE void* win32mmap(size_t size) { - void* ptr; +static FORCEINLINE void *win32mmap(size_t size) { + + void *ptr; - ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); - if (ptr == 0) - return MFAIL; + ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); + if (ptr == 0) return MFAIL; return ptr; + } /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ -static FORCEINLINE void* win32direct_mmap(size_t size) { - void* ptr; +static FORCEINLINE void *win32direct_mmap(size_t size) { + + void *ptr; - ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, - PAGE_READWRITE); - if (ptr == 0) - return MFAIL; + ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, + PAGE_READWRITE); + if (ptr == 0) return MFAIL; return ptr; + } /* This function supports releasing coalesed segments */ -static FORCEINLINE int win32munmap(void* ptr, size_t size) { +static FORCEINLINE int win32munmap(void *ptr, size_t size) { + MEMORY_BASIC_INFORMATION minfo; - char* cptr = (char*)ptr; + char *cptr = (char *)ptr; while (size) { - if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) - return -1; + + if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) return -1; if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || minfo.State != MEM_COMMIT || minfo.RegionSize > size) return -1; - if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) - return -1; + if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) return -1; cptr += minfo.RegionSize; size -= minfo.RegionSize; + } return 0; + } -#define MMAP_DEFAULT(s) win32mmap(s) -#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) -#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) -#endif /* WIN32 */ -#endif /* HAVE_MMAP */ + #define MMAP_DEFAULT(s) win32mmap(s) + #define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) + #define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) + #endif /* WIN32 */ + #endif /* HAVE_MMAP */ + + #if HAVE_MREMAP + #ifndef WIN32 -#if HAVE_MREMAP -#ifndef WIN32 +static FORCEINLINE void *dlmremap(void *old_address, size_t old_size, + size_t new_size, int flags) { -static FORCEINLINE void* dlmremap(void* old_address, size_t old_size, size_t new_size, int flags) { - void* result; + void *result; result = mremap(old_address, old_size, new_size, flags); - if (result == MFAIL) - return MFAIL; + if (result == MFAIL) return MFAIL; return result; + } -#define MREMAP_DEFAULT(addr, osz, nsz, mv) dlmremap((addr), (osz), (nsz), (mv)) -#endif /* WIN32 */ -#endif /* HAVE_MREMAP */ + #define MREMAP_DEFAULT(addr, osz, nsz, mv) \ + dlmremap((addr), (osz), (nsz), (mv)) + #endif /* WIN32 */ + #endif /* HAVE_MREMAP */ -/** - * Define CALL_MORECORE - */ -#if HAVE_MORECORE + /** + * Define CALL_MORECORE + */ + #if HAVE_MORECORE #ifdef MORECORE - #define CALL_MORECORE(S) MORECORE(S) - #else /* MORECORE */ - #define CALL_MORECORE(S) MORECORE_DEFAULT(S) - #endif /* MORECORE */ -#else /* HAVE_MORECORE */ - #define CALL_MORECORE(S) MFAIL -#endif /* HAVE_MORECORE */ - -/** - * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP - */ -#if HAVE_MMAP - #define USE_MMAP_BIT (SIZE_T_ONE) + #define CALL_MORECORE(S) MORECORE(S) + 
#else /* MORECORE */ + #define CALL_MORECORE(S) MORECORE_DEFAULT(S) + #endif /* MORECORE */ + #else /* HAVE_MORECORE */ + #define CALL_MORECORE(S) MFAIL + #endif /* HAVE_MORECORE */ + + /** + * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP + */ + #if HAVE_MMAP + #define USE_MMAP_BIT (SIZE_T_ONE) #ifdef MMAP - #define CALL_MMAP(s) MMAP(s) - #else /* MMAP */ - #define CALL_MMAP(s) MMAP_DEFAULT(s) - #endif /* MMAP */ + #define CALL_MMAP(s) MMAP(s) + #else /* MMAP */ + #define CALL_MMAP(s) MMAP_DEFAULT(s) + #endif /* MMAP */ #ifdef MUNMAP - #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) - #else /* MUNMAP */ - #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) - #endif /* MUNMAP */ + #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) + #else /* MUNMAP */ + #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) + #endif /* MUNMAP */ #ifdef DIRECT_MMAP - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) - #else /* DIRECT_MMAP */ - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) - #endif /* DIRECT_MMAP */ -#else /* HAVE_MMAP */ - #define USE_MMAP_BIT (SIZE_T_ZERO) - - #define MMAP(s) MFAIL - #define MUNMAP(a, s) (-1) - #define DIRECT_MMAP(s) MFAIL - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) - #define CALL_MMAP(s) MMAP(s) - #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) -#endif /* HAVE_MMAP */ - -/** - * Define CALL_MREMAP - */ -#if HAVE_MMAP && HAVE_MREMAP + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) + #else /* DIRECT_MMAP */ + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) + #endif /* DIRECT_MMAP */ + #else /* HAVE_MMAP */ + #define USE_MMAP_BIT (SIZE_T_ZERO) + + #define MMAP(s) MFAIL + #define MUNMAP(a, s) (-1) + #define DIRECT_MMAP(s) MFAIL + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) + #define CALL_MMAP(s) MMAP(s) + #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) + #endif /* HAVE_MMAP */ + + /** + * Define CALL_MREMAP + */ + #if HAVE_MMAP && HAVE_MREMAP #ifdef MREMAP - #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) - #else /* MREMAP */ - #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) - #endif /* MREMAP */ -#else /* HAVE_MMAP && HAVE_MREMAP */ - #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL -#endif /* HAVE_MMAP && HAVE_MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) + #else /* MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) \ + MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) + #endif /* MREMAP */ + #else /* HAVE_MMAP && HAVE_MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL + #endif /* HAVE_MMAP && HAVE_MREMAP */ -/* mstate bit set if continguous morecore disabled or failed */ -#define USE_NONCONTIGUOUS_BIT (4U) - -/* segment bit set in create_mspace_with_base */ -#define EXTERN_BIT (8U) + /* mstate bit set if continguous morecore disabled or failed */ + #define USE_NONCONTIGUOUS_BIT (4U) + /* segment bit set in create_mspace_with_base */ + #define EXTERN_BIT (8U) /* --------------------------- Lock preliminaries ------------------------ */ @@ -1855,248 +1901,286 @@ static FORCEINLINE void* dlmremap(void* old_address, size_t old_size, size_t new */ -#if !USE_LOCKS -#define USE_LOCK_BIT (0U) -#define INITIAL_LOCK(l) (0) -#define DESTROY_LOCK(l) (0) -#define ACQUIRE_MALLOC_GLOBAL_LOCK() -#define RELEASE_MALLOC_GLOBAL_LOCK() - -#else -#if USE_LOCKS > 1 -/* ----------------------- User-defined locks ------------------------ */ -/* Define your own lock implementation here */ -/* #define INITIAL_LOCK(lk) ... */ -/* #define DESTROY_LOCK(lk) ... */ -/* #define ACQUIRE_LOCK(lk) ... */ -/* #define RELEASE_LOCK(lk) ... 
*/ -/* #define TRY_LOCK(lk) ... */ -/* static MLOCK_T malloc_global_mutex = ... */ - -#elif USE_SPIN_LOCKS - -/* First, define CAS_LOCK and CLEAR_LOCK on ints */ -/* Note CAS_LOCK defined to return 0 on success */ - -#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) -#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) -#define CLEAR_LOCK(sl) __sync_lock_release(sl) - -#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) + #if !USE_LOCKS + #define USE_LOCK_BIT (0U) + #define INITIAL_LOCK(l) (0) + #define DESTROY_LOCK(l) (0) + #define ACQUIRE_MALLOC_GLOBAL_LOCK() + #define RELEASE_MALLOC_GLOBAL_LOCK() + + #else + #if USE_LOCKS > 1 + /* ----------------------- User-defined locks ------------------------ */ + /* Define your own lock implementation here */ + /* #define INITIAL_LOCK(lk) ... */ + /* #define DESTROY_LOCK(lk) ... */ + /* #define ACQUIRE_LOCK(lk) ... */ + /* #define RELEASE_LOCK(lk) ... */ + /* #define TRY_LOCK(lk) ... */ + /* static MLOCK_T malloc_global_mutex = ... */ + + #elif USE_SPIN_LOCKS + + /* First, define CAS_LOCK and CLEAR_LOCK on ints */ + /* Note CAS_LOCK defined to return 0 on success */ + + #if defined(__GNUC__) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) + #define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) + #define CLEAR_LOCK(sl) __sync_lock_release(sl) + + #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) /* Custom spin locks for older gcc on x86 */ static FORCEINLINE int x86_cas_lock(int *sl) { + int ret; int val = 1; int cmp = 0; - __asm__ __volatile__ ("lock; cmpxchgl %1, %2" - : "=a" (ret) - : "r" (val), "m" (*(sl)), "0"(cmp) - : "memory", "cc"); + __asm__ __volatile__("lock; cmpxchgl %1, %2" + : "=a"(ret) + : "r"(val), "m"(*(sl)), "0"(cmp) + : "memory", "cc"); return ret; + } -static FORCEINLINE void x86_clear_lock(int* sl) { +static FORCEINLINE void x86_clear_lock(int *sl) { + assert(*sl != 0); int prev = 0; int ret; - __asm__ __volatile__ ("lock; xchgl %0, %1" - : "=r" (ret) - : "m" (*(sl)), "0"(prev) - : "memory"); + __asm__ __volatile__("lock; xchgl %0, %1" + : "=r"(ret) + : "m"(*(sl)), "0"(prev) + : "memory"); + } -#define CAS_LOCK(sl) x86_cas_lock(sl) -#define CLEAR_LOCK(sl) x86_clear_lock(sl) - -#else /* Win32 MSC */ -#define CAS_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)1) -#define CLEAR_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)0) - -#endif /* ... gcc spins locks ... */ - -/* How to yield for a spin lock */ -#define SPINS_PER_YIELD 63 -#if defined(_MSC_VER) -#define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ -#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) -#elif defined (__SVR4) && defined (__sun) /* solaris */ -#define SPIN_LOCK_YIELD thr_yield(); -#elif !defined(LACKS_SCHED_H) -#define SPIN_LOCK_YIELD sched_yield(); -#else -#define SPIN_LOCK_YIELD -#endif /* ... yield ... */ - -#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 + #define CAS_LOCK(sl) x86_cas_lock(sl) + #define CLEAR_LOCK(sl) x86_clear_lock(sl) + + #else /* Win32 MSC */ + #define CAS_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)1) + #define CLEAR_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)0) + + #endif /* ... gcc spins locks ... 
*/ + + /* How to yield for a spin lock */ + #define SPINS_PER_YIELD 63 + #if defined(_MSC_VER) + #define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ + #define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) + #elif defined(__SVR4) && defined(__sun) /* solaris */ + #define SPIN_LOCK_YIELD thr_yield(); + #elif !defined(LACKS_SCHED_H) + #define SPIN_LOCK_YIELD sched_yield(); + #else + #define SPIN_LOCK_YIELD + #endif /* ... yield ... */ + + #if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 /* Plain spin locks use single word (embedded in malloc_states) */ static int spin_acquire_lock(int *sl) { + int spins = 0; while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) { - if ((++spins & SPINS_PER_YIELD) == 0) { - SPIN_LOCK_YIELD; - } + + if ((++spins & SPINS_PER_YIELD) == 0) { SPIN_LOCK_YIELD; } + } + return 0; + } -#define MLOCK_T int -#define TRY_LOCK(sl) !CAS_LOCK(sl) -#define RELEASE_LOCK(sl) CLEAR_LOCK(sl) -#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0) -#define INITIAL_LOCK(sl) (*sl = 0) -#define DESTROY_LOCK(sl) (0) + #define MLOCK_T int + #define TRY_LOCK(sl) !CAS_LOCK(sl) + #define RELEASE_LOCK(sl) CLEAR_LOCK(sl) + #define ACQUIRE_LOCK(sl) (CAS_LOCK(sl) ? spin_acquire_lock(sl) : 0) + #define INITIAL_LOCK(sl) (*sl = 0) + #define DESTROY_LOCK(sl) (0) static MLOCK_T malloc_global_mutex = 0; -#else /* USE_RECURSIVE_LOCKS */ -/* types for lock owners */ -#ifdef WIN32 -#define THREAD_ID_T DWORD -#define CURRENT_THREAD GetCurrentThreadId() -#define EQ_OWNER(X,Y) ((X) == (Y)) -#else -/* - Note: the following assume that pthread_t is a type that can be - initialized to (casted) zero. If this is not the case, you will need to - somehow redefine these or not use spin locks. -*/ -#define THREAD_ID_T pthread_t -#define CURRENT_THREAD pthread_self() -#define EQ_OWNER(X,Y) pthread_equal(X, Y) -#endif + #else /* USE_RECURSIVE_LOCKS */ + /* types for lock owners */ + #ifdef WIN32 + #define THREAD_ID_T DWORD + #define CURRENT_THREAD GetCurrentThreadId() + #define EQ_OWNER(X, Y) ((X) == (Y)) + #else + /* + Note: the following assume that pthread_t is a type that can be + initialized to (casted) zero. If this is not the case, you will need + to somehow redefine these or not use spin locks. 
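A small sketch of the plain (non-recursive) spin-lock protocol defined earlier in this hunk, assuming USE_LOCKS with USE_SPIN_LOCKS; demo_lock and with_demo_lock are hypothetical names, not part of this patch:

  static MLOCK_T demo_lock = 0;        /* a plain spin lock is just an int */

  static void with_demo_lock(void) {

    if (!ACQUIRE_LOCK(&demo_lock)) {   /* 0 means the lock is now held */

      /* ... touch shared allocator state ... */
      RELEASE_LOCK(&demo_lock);

    }

  }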
+ */ + #define THREAD_ID_T pthread_t + #define CURRENT_THREAD pthread_self() + #define EQ_OWNER(X, Y) pthread_equal(X, Y) + #endif struct malloc_recursive_lock { - int sl; + + int sl; unsigned int c; - THREAD_ID_T threadid; + THREAD_ID_T threadid; + }; -#define MLOCK_T struct malloc_recursive_lock -static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0}; + #define MLOCK_T struct malloc_recursive_lock +static MLOCK_T malloc_global_mutex = {0, 0, (THREAD_ID_T)0}; static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) { + assert(lk->sl != 0); - if (--lk->c == 0) { - CLEAR_LOCK(&lk->sl); - } + if (--lk->c == 0) { CLEAR_LOCK(&lk->sl); } + } static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) { + THREAD_ID_T mythreadid = CURRENT_THREAD; - int spins = 0; + int spins = 0; for (;;) { + if (*((volatile int *)(&lk->sl)) == 0) { + if (!CAS_LOCK(&lk->sl)) { + lk->threadid = mythreadid; lk->c = 1; return 0; + } - } - else if (EQ_OWNER(lk->threadid, mythreadid)) { + + } else if (EQ_OWNER(lk->threadid, mythreadid)) { + ++lk->c; return 0; + } - if ((++spins & SPINS_PER_YIELD) == 0) { - SPIN_LOCK_YIELD; - } + + if ((++spins & SPINS_PER_YIELD) == 0) { SPIN_LOCK_YIELD; } + } + } static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) { + THREAD_ID_T mythreadid = CURRENT_THREAD; if (*((volatile int *)(&lk->sl)) == 0) { + if (!CAS_LOCK(&lk->sl)) { + lk->threadid = mythreadid; lk->c = 1; return 1; + } - } - else if (EQ_OWNER(lk->threadid, mythreadid)) { + + } else if (EQ_OWNER(lk->threadid, mythreadid)) { + ++lk->c; return 1; + } + return 0; + } -#define RELEASE_LOCK(lk) recursive_release_lock(lk) -#define TRY_LOCK(lk) recursive_try_lock(lk) -#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) -#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) -#define DESTROY_LOCK(lk) (0) -#endif /* USE_RECURSIVE_LOCKS */ - -#elif defined(WIN32) /* Win32 critical sections */ -#define MLOCK_T CRITICAL_SECTION -#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) -#define RELEASE_LOCK(lk) LeaveCriticalSection(lk) -#define TRY_LOCK(lk) TryEnterCriticalSection(lk) -#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000)) -#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) -#define NEED_GLOBAL_LOCK_INIT - -static MLOCK_T malloc_global_mutex; + #define RELEASE_LOCK(lk) recursive_release_lock(lk) + #define TRY_LOCK(lk) recursive_try_lock(lk) + #define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) + #define INITIAL_LOCK(lk) \ + ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) + #define DESTROY_LOCK(lk) (0) + #endif /* USE_RECURSIVE_LOCKS */ + + #elif defined(WIN32) /* Win32 critical sections */ + #define MLOCK_T CRITICAL_SECTION + #define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) + #define RELEASE_LOCK(lk) LeaveCriticalSection(lk) + #define TRY_LOCK(lk) TryEnterCriticalSection(lk) + #define INITIAL_LOCK(lk) \ + (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000 | 4000)) + #define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) + #define NEED_GLOBAL_LOCK_INIT + +static MLOCK_T malloc_global_mutex; static volatile LONG malloc_global_mutex_status; /* Use spin loop to initialize global lock */ static void init_malloc_global_mutex() { + for (;;) { + long stat = malloc_global_mutex_status; - if (stat > 0) - return; + if (stat > 0) return; /* transition to < 0 while initializing, then to > 0) */ - if (stat == 0 && - interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) { + if (stat == 0 && 
interlockedcompareexchange(&malloc_global_mutex_status, + (LONG)-1, (LONG)0) == 0) { + InitializeCriticalSection(&malloc_global_mutex); interlockedexchange(&malloc_global_mutex_status, (LONG)1); return; + } + SleepEx(0, FALSE); + } + } -#else /* pthreads-based locks */ -#define MLOCK_T pthread_mutex_t -#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) -#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) -#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) -#define INITIAL_LOCK(lk) pthread_init_lock(lk) -#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) + #else /* pthreads-based locks */ + #define MLOCK_T pthread_mutex_t + #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) + #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) + #define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) + #define INITIAL_LOCK(lk) pthread_init_lock(lk) + #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) -#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) + #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && \ + defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) /* Cope with old-style linux recursive lock initialization by adding */ /* skipped internal declaration from pthread.h */ -extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr, - int __kind)); -#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP -#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y) -#endif /* USE_RECURSIVE_LOCKS ... */ +extern int pthread_mutexattr_setkind_np __P((pthread_mutexattr_t * __attr, + int __kind)); + #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP + #define pthread_mutexattr_settype(x, y) \ + pthread_mutexattr_setkind_np(x, y) + #endif /* USE_RECURSIVE_LOCKS ... */ static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER; -static int pthread_init_lock (MLOCK_T *lk) { +static int pthread_init_lock(MLOCK_T *lk) { + pthread_mutexattr_t attr; if (pthread_mutexattr_init(&attr)) return 1; -#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 + #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1; -#endif + #endif if (pthread_mutex_init(lk, &attr)) return 1; if (pthread_mutexattr_destroy(&attr)) return 1; return 0; + } -#endif /* ... lock types ... */ + #endif /* ... lock types ... */ -/* Common code for all lock types */ -#define USE_LOCK_BIT (2U) + /* Common code for all lock types */ + #define USE_LOCK_BIT (2U) -#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK -#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); -#endif + #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK + #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); + #endif -#ifndef RELEASE_MALLOC_GLOBAL_LOCK -#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); -#endif + #ifndef RELEASE_MALLOC_GLOBAL_LOCK + #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); + #endif -#endif /* USE_LOCKS */ + #endif /* USE_LOCKS */ /* ----------------------- Chunk representations ------------------------ */ @@ -2236,56 +2320,56 @@ static int pthread_init_lock (MLOCK_T *lk) { */ struct malloc_chunk { - size_t prev_foot; /* Size of previous chunk (if free). */ - size_t head; /* Size and inuse bits. */ - struct malloc_chunk* fd; /* double links -- used only if free. */ - struct malloc_chunk* bk; + + size_t prev_foot; /* Size of previous chunk (if free). */ + size_t head; /* Size and inuse bits. 
*/ + struct malloc_chunk *fd; /* double links -- used only if free. */ + struct malloc_chunk *bk; + }; typedef struct malloc_chunk mchunk; -typedef struct malloc_chunk* mchunkptr; -typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ -typedef unsigned int bindex_t; /* Described below */ -typedef unsigned int binmap_t; /* Described below */ -typedef unsigned int flag_t; /* The type of various bit flag sets */ +typedef struct malloc_chunk *mchunkptr; +typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */ +typedef unsigned int bindex_t; /* Described below */ +typedef unsigned int binmap_t; /* Described below */ +typedef unsigned int flag_t; /* The type of various bit flag sets */ /* ------------------- Chunks sizes and alignments ----------------------- */ -#define MCHUNK_SIZE (sizeof(mchunk)) + #define MCHUNK_SIZE (sizeof(mchunk)) -#if FOOTERS -#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) -#else /* FOOTERS */ -#define CHUNK_OVERHEAD (SIZE_T_SIZE) -#endif /* FOOTERS */ + #if FOOTERS + #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) + #else /* FOOTERS */ + #define CHUNK_OVERHEAD (SIZE_T_SIZE) + #endif /* FOOTERS */ -/* MMapped chunks need a second word of overhead ... */ -#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) -/* ... and additional padding for fake next-chunk at foot */ -#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) + /* MMapped chunks need a second word of overhead ... */ + #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) + /* ... and additional padding for fake next-chunk at foot */ + #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) -/* The smallest size we can malloc is an aligned minimal chunk */ -#define MIN_CHUNK_SIZE\ - ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + /* The smallest size we can malloc is an aligned minimal chunk */ + #define MIN_CHUNK_SIZE ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) -/* conversion from malloc headers to user pointers, and back */ -#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) -#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) -/* chunk associated with aligned address A */ -#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) + /* conversion from malloc headers to user pointers, and back */ + #define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES)) + #define mem2chunk(mem) ((mchunkptr)((char *)(mem)-TWO_SIZE_T_SIZES)) + /* chunk associated with aligned address A */ + #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) -/* Bounds on request (not chunk) sizes. */ -#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) -#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) + /* Bounds on request (not chunk) sizes. */ + #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) + #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) -/* pad request bytes into a usable size */ -#define pad_request(req) \ - (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) - -/* pad request, checking for minimum (but not maximum) */ -#define request2size(req) \ - (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) + /* pad request bytes into a usable size */ + #define pad_request(req) \ + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + /* pad request, checking for minimum (but not maximum) */ + #define request2size(req) \ + (((req) < MIN_REQUEST) ? 
MIN_CHUNK_SIZE : pad_request(req)) /* ------------------ Operations on head and foot fields ----------------- */ @@ -2297,61 +2381,60 @@ typedef unsigned int flag_t; /* The type of various bit flag sets */ FLAG4_BIT is not used by this malloc, but might be useful in extensions. */ -#define PINUSE_BIT (SIZE_T_ONE) -#define CINUSE_BIT (SIZE_T_TWO) -#define FLAG4_BIT (SIZE_T_FOUR) -#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) -#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT) + #define PINUSE_BIT (SIZE_T_ONE) + #define CINUSE_BIT (SIZE_T_TWO) + #define FLAG4_BIT (SIZE_T_FOUR) + #define INUSE_BITS (PINUSE_BIT | CINUSE_BIT) + #define FLAG_BITS (PINUSE_BIT | CINUSE_BIT | FLAG4_BIT) -/* Head value for fenceposts */ -#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) + /* Head value for fenceposts */ + #define FENCEPOST_HEAD (INUSE_BITS | SIZE_T_SIZE) -/* extraction of fields from head words */ -#define cinuse(p) ((p)->head & CINUSE_BIT) -#define pinuse(p) ((p)->head & PINUSE_BIT) -#define flag4inuse(p) ((p)->head & FLAG4_BIT) -#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) -#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) + /* extraction of fields from head words */ + #define cinuse(p) ((p)->head & CINUSE_BIT) + #define pinuse(p) ((p)->head & PINUSE_BIT) + #define flag4inuse(p) ((p)->head & FLAG4_BIT) + #define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) + #define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) -#define chunksize(p) ((p)->head & ~(FLAG_BITS)) + #define chunksize(p) ((p)->head & ~(FLAG_BITS)) -#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) -#define set_flag4(p) ((p)->head |= FLAG4_BIT) -#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) + #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) + #define set_flag4(p) ((p)->head |= FLAG4_BIT) + #define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) -/* Treat space at ptr +/- offset as a chunk */ -#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) -#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) + /* Treat space at ptr +/- offset as a chunk */ + #define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s))) + #define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s))) -/* Ptr to next or previous physical malloc_chunk. */ -#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS))) -#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) + /* Ptr to next or previous physical malloc_chunk. 
*/ + #define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~FLAG_BITS))) + #define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot))) -/* extract next chunk's pinuse bit */ -#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) + /* extract next chunk's pinuse bit */ + #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) -/* Get/set size at footer */ -#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) -#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) + /* Get/set size at footer */ + #define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot) + #define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s)) -/* Set size, pinuse bit, and foot */ -#define set_size_and_pinuse_of_free_chunk(p, s)\ - ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) + /* Set size, pinuse bit, and foot */ + #define set_size_and_pinuse_of_free_chunk(p, s) \ + ((p)->head = (s | PINUSE_BIT), set_foot(p, s)) -/* Set size, pinuse bit, foot, and clear next pinuse */ -#define set_free_with_pinuse(p, s, n)\ - (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) + /* Set size, pinuse bit, foot, and clear next pinuse */ + #define set_free_with_pinuse(p, s, n) \ + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) -/* Get the internal overhead associated with chunk p */ -#define overhead_for(p)\ - (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) + /* Get the internal overhead associated with chunk p */ + #define overhead_for(p) (is_mmapped(p) ? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) -/* Return true if malloced space is not necessarily cleared */ -#if MMAP_CLEARS -#define calloc_must_clear(p) (!is_mmapped(p)) -#else /* MMAP_CLEARS */ -#define calloc_must_clear(p) (1) -#endif /* MMAP_CLEARS */ + /* Return true if malloced space is not necessarily cleared */ + #if MMAP_CLEARS + #define calloc_must_clear(p) (!is_mmapped(p)) + #else /* MMAP_CLEARS */ + #define calloc_must_clear(p) (1) + #endif /* MMAP_CLEARS */ /* ---------------------- Overlaid data structures ----------------------- */ @@ -2445,23 +2528,25 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ size_t prev_foot; size_t head; - struct malloc_tree_chunk* fd; - struct malloc_tree_chunk* bk; + struct malloc_tree_chunk *fd; + struct malloc_tree_chunk *bk; - struct malloc_tree_chunk* child[2]; - struct malloc_tree_chunk* parent; + struct malloc_tree_chunk *child[2]; + struct malloc_tree_chunk *parent; bindex_t index; + }; typedef struct malloc_tree_chunk tchunk; -typedef struct malloc_tree_chunk* tchunkptr; -typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ +typedef struct malloc_tree_chunk *tchunkptr; +typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */ -/* A little helper macro for trees */ -#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) + /* A little helper macro for trees */ + #define leftmost_child(t) ((t)->child[0] != 0 ? 
(t)->child[0] : (t)->child[1]) /* ----------------------------- Segments -------------------------------- */ @@ -2521,141 +2606,145 @@ typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ */ struct malloc_segment { - char* base; /* base address */ - size_t size; /* allocated size */ - struct malloc_segment* next; /* ptr to next segment */ - flag_t sflags; /* mmap and extern flag */ + + char * base; /* base address */ + size_t size; /* allocated size */ + struct malloc_segment *next; /* ptr to next segment */ + flag_t sflags; /* mmap and extern flag */ + }; -#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) -#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) + #define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) + #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) typedef struct malloc_segment msegment; -typedef struct malloc_segment* msegmentptr; +typedef struct malloc_segment *msegmentptr; -/* ---------------------------- malloc_state ----------------------------- */ + /* ---------------------------- malloc_state ----------------------------- */ -/* - A malloc_state holds all of the bookkeeping for a space. - The main fields are: - - Top - The topmost chunk of the currently active segment. Its size is - cached in topsize. The actual size of topmost space is - topsize+TOP_FOOT_SIZE, which includes space reserved for adding - fenceposts and segment records if necessary when getting more - space from the system. The size at which to autotrim top is - cached from mparams in trim_check, except that it is disabled if - an autotrim fails. - - Designated victim (dv) - This is the preferred chunk for servicing small requests that - don't have exact fits. It is normally the chunk split off most - recently to service another small request. Its size is cached in - dvsize. The link fields of this chunk are not maintained since it - is not kept in a bin. - - SmallBins - An array of bin headers for free chunks. These bins hold chunks - with sizes less than MIN_LARGE_SIZE bytes. Each bin contains - chunks of all the same size, spaced 8 bytes apart. To simplify - use in double-linked lists, each bin header acts as a malloc_chunk - pointing to the real first node, if it exists (else pointing to - itself). This avoids special-casing for headers. But to avoid - waste, we allocate only the fd/bk pointers of bins, and then use - repositioning tricks to treat these as the fields of a chunk. - - TreeBins - Treebins are pointers to the roots of trees holding a range of - sizes. There are 2 equally spaced treebins for each power of two - from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything - larger. - - Bin maps - There is one bit map for small bins ("smallmap") and one for - treebins ("treemap). Each bin sets its bit when non-empty, and - clears the bit when empty. Bit operations are then used to avoid - bin-by-bin searching -- nearly all "search" is done without ever - looking at bins that won't be selected. The bit maps - conservatively use 32 bits per map word, even if on 64bit system. - For a good description of some of the bit-based techniques used - here, see Henry S. Warren Jr's book "Hacker's Delight" (and - supplement at http://hackersdelight.org/). Many of these are - intended to reduce the branchiness of paths through malloc etc, as - well as to reduce the number of memory locations read or written. - - Segments - A list of segments headed by an embedded malloc_segment record - representing the initial space. 
- - Address check support - The least_addr field is the least address ever obtained from - MORECORE or MMAP. Attempted frees and reallocs of any address less - than this are trapped (unless INSECURE is defined). - - Magic tag - A cross-check field that should always hold same value as mparams.magic. - - Max allowed footprint - The maximum allowed bytes to allocate from system (zero means no limit) - - Flags - Bits recording whether to use MMAP, locks, or contiguous MORECORE - - Statistics - Each space keeps track of current and maximum system memory - obtained via MORECORE or MMAP. - - Trim support - Fields holding the amount of unused topmost memory that should trigger - trimming, and a counter to force periodic scanning to release unused - non-topmost segments. - - Locking - If USE_LOCKS is defined, the "mutex" lock is acquired and released - around every public call using this mspace. - - Extension support - A void* pointer and a size_t field that can be used to help implement - extensions to this malloc. -*/ + /* + A malloc_state holds all of the bookkeeping for a space. + The main fields are: + + Top + The topmost chunk of the currently active segment. Its size is + cached in topsize. The actual size of topmost space is + topsize+TOP_FOOT_SIZE, which includes space reserved for adding + fenceposts and segment records if necessary when getting more + space from the system. The size at which to autotrim top is + cached from mparams in trim_check, except that it is disabled if + an autotrim fails. + + Designated victim (dv) + This is the preferred chunk for servicing small requests that + don't have exact fits. It is normally the chunk split off most + recently to service another small request. Its size is cached in + dvsize. The link fields of this chunk are not maintained since it + is not kept in a bin. + + SmallBins + An array of bin headers for free chunks. These bins hold chunks + with sizes less than MIN_LARGE_SIZE bytes. Each bin contains + chunks of all the same size, spaced 8 bytes apart. To simplify + use in double-linked lists, each bin header acts as a malloc_chunk + pointing to the real first node, if it exists (else pointing to + itself). This avoids special-casing for headers. But to avoid + waste, we allocate only the fd/bk pointers of bins, and then use + repositioning tricks to treat these as the fields of a chunk. + + TreeBins + Treebins are pointers to the roots of trees holding a range of + sizes. There are 2 equally spaced treebins for each power of two + from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything + larger. + + Bin maps + There is one bit map for small bins ("smallmap") and one for + treebins ("treemap). Each bin sets its bit when non-empty, and + clears the bit when empty. Bit operations are then used to avoid + bin-by-bin searching -- nearly all "search" is done without ever + looking at bins that won't be selected. The bit maps + conservatively use 32 bits per map word, even if on 64bit system. + For a good description of some of the bit-based techniques used + here, see Henry S. Warren Jr's book "Hacker's Delight" (and + supplement at http://hackersdelight.org/). Many of these are + intended to reduce the branchiness of paths through malloc etc, as + well as to reduce the number of memory locations read or written. + + Segments + A list of segments headed by an embedded malloc_segment record + representing the initial space. + + Address check support + The least_addr field is the least address ever obtained from + MORECORE or MMAP. 
Attempted frees and reallocs of any address less + than this are trapped (unless INSECURE is defined). + + Magic tag + A cross-check field that should always hold same value as mparams.magic. + + Max allowed footprint + The maximum allowed bytes to allocate from system (zero means no limit) + + Flags + Bits recording whether to use MMAP, locks, or contiguous MORECORE + + Statistics + Each space keeps track of current and maximum system memory + obtained via MORECORE or MMAP. + + Trim support + Fields holding the amount of unused topmost memory that should trigger + trimming, and a counter to force periodic scanning to release unused + non-topmost segments. + + Locking + If USE_LOCKS is defined, the "mutex" lock is acquired and released + around every public call using this mspace. + + Extension support + A void* pointer and a size_t field that can be used to help implement + extensions to this malloc. + */ -/* Bin types, widths and sizes */ -#define NSMALLBINS (32U) -#define NTREEBINS (32U) -#define SMALLBIN_SHIFT (3U) -#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) -#define TREEBIN_SHIFT (8U) -#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) -#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) -#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) + /* Bin types, widths and sizes */ + #define NSMALLBINS (32U) + #define NTREEBINS (32U) + #define SMALLBIN_SHIFT (3U) + #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) + #define TREEBIN_SHIFT (8U) + #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) + #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) + #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) struct malloc_state { - binmap_t smallmap; - binmap_t treemap; - size_t dvsize; - size_t topsize; - char* least_addr; - mchunkptr dv; - mchunkptr top; - size_t trim_check; - size_t release_checks; - size_t magic; - mchunkptr smallbins[(NSMALLBINS+1)*2]; - tbinptr treebins[NTREEBINS]; - size_t footprint; - size_t max_footprint; - size_t footprint_limit; /* zero means no limit */ - flag_t mflags; -#if USE_LOCKS - MLOCK_T mutex; /* locate lock among fields that rarely change */ -#endif /* USE_LOCKS */ - msegment seg; - void* extp; /* Unused but available for extensions */ - size_t exts; + + binmap_t smallmap; + binmap_t treemap; + size_t dvsize; + size_t topsize; + char * least_addr; + mchunkptr dv; + mchunkptr top; + size_t trim_check; + size_t release_checks; + size_t magic; + mchunkptr smallbins[(NSMALLBINS + 1) * 2]; + tbinptr treebins[NTREEBINS]; + size_t footprint; + size_t max_footprint; + size_t footprint_limit; /* zero means no limit */ + flag_t mflags; + #if USE_LOCKS + MLOCK_T mutex; /* locate lock among fields that rarely change */ + #endif /* USE_LOCKS */ + msegment seg; + void * extp; /* Unused but available for extensions */ + size_t exts; + }; -typedef struct malloc_state* mstate; +typedef struct malloc_state *mstate; /* ------------- Global malloc_state and malloc_params ------------------- */ @@ -2667,123 +2756,128 @@ typedef struct malloc_state* mstate; */ struct malloc_params { + size_t magic; size_t page_size; size_t granularity; size_t mmap_threshold; size_t trim_threshold; flag_t default_mflags; + }; static struct malloc_params mparams; -/* Ensure mparams initialized */ -#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams()) + /* Ensure mparams initialized */ + #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams()) -#if !ONLY_MSPACES + #if !ONLY_MSPACES /* 
The global malloc_state used for all non-"mspace" calls */ static struct malloc_state _gm_; -#define gm (&_gm_) -#define is_global(M) ((M) == &_gm_) + #define gm (&_gm_) + #define is_global(M) ((M) == &_gm_) -#endif /* !ONLY_MSPACES */ + #endif /* !ONLY_MSPACES */ -#define is_initialized(M) ((M)->top != 0) + #define is_initialized(M) ((M)->top != 0) /* -------------------------- system alloc setup ------------------------- */ /* Operations on mflags */ -#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) -#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) -#if USE_LOCKS -#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) -#else -#define disable_lock(M) -#endif - -#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) -#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) -#if HAVE_MMAP -#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) -#else -#define disable_mmap(M) -#endif - -#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) -#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) - -#define set_lock(M,L)\ - ((M)->mflags = (L)?\ - ((M)->mflags | USE_LOCK_BIT) :\ - ((M)->mflags & ~USE_LOCK_BIT)) - -/* page-align a size */ -#define page_align(S)\ - (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE)) - -/* granularity-align a size */ -#define granularity_align(S)\ - (((S) + (mparams.granularity - SIZE_T_ONE))\ - & ~(mparams.granularity - SIZE_T_ONE)) - - -/* For mmap, use granularity alignment on windows, else page-align */ -#ifdef WIN32 -#define mmap_align(S) granularity_align(S) -#else -#define mmap_align(S) page_align(S) -#endif - -/* For sys_alloc, enough padding to ensure can malloc request on success */ -#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) - -#define is_page_aligned(S)\ - (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) -#define is_granularity_aligned(S)\ - (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) - -/* True if segment S holds address A */ -#define segment_holds(S, A)\ - ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) + #define use_lock(M) ((M)->mflags & USE_LOCK_BIT) + #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) + #if USE_LOCKS + #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) + #else + #define disable_lock(M) + #endif + + #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) + #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) + #if HAVE_MMAP + #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) + #else + #define disable_mmap(M) + #endif + + #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) + #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) + + #define set_lock(M, L) \ + ((M)->mflags = \ + (L) ? 
((M)->mflags | USE_LOCK_BIT) : ((M)->mflags & ~USE_LOCK_BIT)) + + /* page-align a size */ + #define page_align(S) \ + (((S) + (mparams.page_size - SIZE_T_ONE)) & \ + ~(mparams.page_size - SIZE_T_ONE)) + + /* granularity-align a size */ + #define granularity_align(S) \ + (((S) + (mparams.granularity - SIZE_T_ONE)) & \ + ~(mparams.granularity - SIZE_T_ONE)) + + /* For mmap, use granularity alignment on windows, else page-align */ + #ifdef WIN32 + #define mmap_align(S) granularity_align(S) + #else + #define mmap_align(S) page_align(S) + #endif + + /* For sys_alloc, enough padding to ensure can malloc request on success */ + #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) + + #define is_page_aligned(S) \ + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) + #define is_granularity_aligned(S) \ + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) + + /* True if segment S holds address A */ + #define segment_holds(S, A) \ + ((char *)(A) >= S->base && (char *)(A) < S->base + S->size) /* Return segment holding given address */ -static msegmentptr segment_holding(mstate m, char* addr) { +static msegmentptr segment_holding(mstate m, char *addr) { + msegmentptr sp = &m->seg; for (;;) { - if (addr >= sp->base && addr < sp->base + sp->size) - return sp; - if ((sp = sp->next) == 0) - return 0; + + if (addr >= sp->base && addr < sp->base + sp->size) return sp; + if ((sp = sp->next) == 0) return 0; + } + } /* Return true if segment contains a segment link */ static int has_segment_link(mstate m, msegmentptr ss) { + msegmentptr sp = &m->seg; for (;;) { - if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) - return 1; - if ((sp = sp->next) == 0) - return 0; + + if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size) return 1; + if ((sp = sp->next) == 0) return 0; + } -} -#ifndef MORECORE_CANNOT_TRIM -#define should_trim(M,s) ((s) > (M)->trim_check) -#else /* MORECORE_CANNOT_TRIM */ -#define should_trim(M,s) (0) -#endif /* MORECORE_CANNOT_TRIM */ +} -/* - TOP_FOOT_SIZE is padding at the end of a segment, including space - that may be needed to place segment records and fenceposts when new - noncontiguous segments are added. -*/ -#define TOP_FOOT_SIZE\ - (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + #ifndef MORECORE_CANNOT_TRIM + #define should_trim(M, s) ((s) > (M)->trim_check) + #else /* MORECORE_CANNOT_TRIM */ + #define should_trim(M, s) (0) + #endif /* MORECORE_CANNOT_TRIM */ + /* + TOP_FOOT_SIZE is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. + */ + #define TOP_FOOT_SIZE \ + (align_offset(chunk2mem(0)) + pad_request(sizeof(struct malloc_segment)) + \ + MIN_CHUNK_SIZE) /* ------------------------------- Hooks -------------------------------- */ @@ -2793,20 +2887,25 @@ static int has_segment_link(mstate m, msegmentptr ss) { anything you like. */ -#if USE_LOCKS -#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) -#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } -#else /* USE_LOCKS */ + #if USE_LOCKS + #define PREACTION(M) ((use_lock(M)) ? 
ACQUIRE_LOCK(&(M)->mutex) : 0) + #define POSTACTION(M) \ + { \ + \ + if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); \ + \ + } + #else /* USE_LOCKS */ -#ifndef PREACTION -#define PREACTION(M) (0) -#endif /* PREACTION */ + #ifndef PREACTION + #define PREACTION(M) (0) + #endif /* PREACTION */ -#ifndef POSTACTION -#define POSTACTION(M) -#endif /* POSTACTION */ + #ifndef POSTACTION + #define POSTACTION(M) + #endif /* POSTACTION */ -#endif /* USE_LOCKS */ + #endif /* USE_LOCKS */ /* CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. @@ -2816,7 +2915,7 @@ static int has_segment_link(mstate m, msegmentptr ss) { useful in custom actions that try to help diagnose errors. */ -#if PROCEED_ON_ERROR + #if PROCEED_ON_ERROR /* A count of the number of corruption errors causing resets */ int malloc_corruption_error_count; @@ -2824,211 +2923,240 @@ int malloc_corruption_error_count; /* default corruption action */ static void reset_on_error(mstate m); -#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) -#define USAGE_ERROR_ACTION(m, p) - -#else /* PROCEED_ON_ERROR */ + #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) + #define USAGE_ERROR_ACTION(m, p) -#ifndef CORRUPTION_ERROR_ACTION -#define CORRUPTION_ERROR_ACTION(m) ABORT -#endif /* CORRUPTION_ERROR_ACTION */ + #else /* PROCEED_ON_ERROR */ -#ifndef USAGE_ERROR_ACTION -#define USAGE_ERROR_ACTION(m,p) ABORT -#endif /* USAGE_ERROR_ACTION */ + #ifndef CORRUPTION_ERROR_ACTION + #define CORRUPTION_ERROR_ACTION(m) ABORT + #endif /* CORRUPTION_ERROR_ACTION */ -#endif /* PROCEED_ON_ERROR */ + #ifndef USAGE_ERROR_ACTION + #define USAGE_ERROR_ACTION(m, p) ABORT + #endif /* USAGE_ERROR_ACTION */ + #endif /* PROCEED_ON_ERROR */ /* -------------------------- Debugging setup ---------------------------- */ -#if ! 
DEBUG + #if !DEBUG -#define check_free_chunk(M,P) -#define check_inuse_chunk(M,P) -#define check_malloced_chunk(M,P,N) -#define check_mmapped_chunk(M,P) -#define check_malloc_state(M) -#define check_top_chunk(M,P) + #define check_free_chunk(M, P) + #define check_inuse_chunk(M, P) + #define check_malloced_chunk(M, P, N) + #define check_mmapped_chunk(M, P) + #define check_malloc_state(M) + #define check_top_chunk(M, P) -#else /* DEBUG */ -#define check_free_chunk(M,P) do_check_free_chunk(M,P) -#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) -#define check_top_chunk(M,P) do_check_top_chunk(M,P) -#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) -#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) -#define check_malloc_state(M) do_check_malloc_state(M) + #else /* DEBUG */ + #define check_free_chunk(M, P) do_check_free_chunk(M, P) + #define check_inuse_chunk(M, P) do_check_inuse_chunk(M, P) + #define check_top_chunk(M, P) do_check_top_chunk(M, P) + #define check_malloced_chunk(M, P, N) do_check_malloced_chunk(M, P, N) + #define check_mmapped_chunk(M, P) do_check_mmapped_chunk(M, P) + #define check_malloc_state(M) do_check_malloc_state(M) static void do_check_any_chunk(mstate m, mchunkptr p); static void do_check_top_chunk(mstate m, mchunkptr p); static void do_check_mmapped_chunk(mstate m, mchunkptr p); static void do_check_inuse_chunk(mstate m, mchunkptr p); static void do_check_free_chunk(mstate m, mchunkptr p); -static void do_check_malloced_chunk(mstate m, void* mem, size_t s); +static void do_check_malloced_chunk(mstate m, void *mem, size_t s); static void do_check_tree(mstate m, tchunkptr t); static void do_check_treebin(mstate m, bindex_t i); static void do_check_smallbin(mstate m, bindex_t i); static void do_check_malloc_state(mstate m); static int bin_find(mstate m, mchunkptr x); static size_t traverse_and_check(mstate m); -#endif /* DEBUG */ + #endif /* DEBUG */ /* ---------------------------- Indexing Bins ---------------------------- */ -#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) -#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) -#define small_index2size(i) ((i) << SMALLBIN_SHIFT) -#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) - -/* addressing by index. See above about smallbin repositioning */ -#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) -#define treebin_at(M,i) (&((M)->treebins[i])) - -/* assign tree index for size S to variable I. Use x86 asm if possible */ -#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) -#define compute_tree_index(S, I)\ -{\ - unsigned int X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} - -#elif defined (__INTEL_COMPILER) -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K = _bit_scan_reverse (X); \ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} + #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) + #define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) + #define small_index2size(i) ((i) << SMALLBIN_SHIFT) + #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) + + /* addressing by index. 
See above about smallbin repositioning */ + #define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i) << 1]))) + #define treebin_at(M, i) (&((M)->treebins[i])) + + /* assign tree index for size S to variable I. Use x86 asm if possible */ + #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) + #define compute_tree_index(S, I) \ + { \ + \ + unsigned int X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int K = (unsigned)sizeof(X) * __CHAR_BIT__ - 1 - \ + (unsigned)__builtin_clz(X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ + \ + } \ + \ + } -#elif defined(_MSC_VER) && _MSC_VER>=1300 -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K;\ - _BitScanReverse((DWORD *) &K, (DWORD) X);\ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} + #elif defined(__INTEL_COMPILER) + #define compute_tree_index(S, I) \ + { \ + \ + size_t X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int K = _bit_scan_reverse(X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ + \ + } \ + \ + } -#else /* GNUC */ -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int Y = (unsigned int)X;\ - unsigned int N = ((Y - 0x100) >> 16) & 8;\ - unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ - N += K;\ - N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ - K = 14 - N + ((Y <<= K) >> 15);\ - I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ - }\ -} -#endif /* GNUC */ + #elif defined(_MSC_VER) && _MSC_VER >= 1300 + #define compute_tree_index(S, I) \ + { \ + \ + size_t X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int K; \ + _BitScanReverse((DWORD *)&K, (DWORD)X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ + \ + } \ + \ + } -/* Bit representing maximum resolved size in a treebin at i */ -#define bit_for_tree_index(i) \ - (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) + #else /* GNUC */ + #define compute_tree_index(S, I) \ + { \ + \ + size_t X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int Y = (unsigned int)X; \ + unsigned int N = ((Y - 0x100) >> 16) & 8; \ + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4; \ + N += K; \ + N += K = (((Y <<= K) - 0x4000) >> 16) & 2; \ + K = 14 - N + ((Y <<= K) >> 15); \ + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1)); \ + \ + } \ + \ + } + #endif /* GNUC */ -/* Shift placing maximum resolved bit in a treebin at i as sign bit */ -#define leftshift_for_tree_index(i) \ - ((i == NTREEBINS-1)? 0 : \ - ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + /* Bit representing maximum resolved size in a treebin at i */ + #define bit_for_tree_index(i) \ + (i == NTREEBINS - 1) ? 
(SIZE_T_BITSIZE - 1) \ + : (((i) >> 1) + TREEBIN_SHIFT - 2) -/* The size of the smallest chunk held in bin with index i */ -#define minsize_for_tree_index(i) \ - ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ - (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) + /* Shift placing maximum resolved bit in a treebin at i as sign bit */ + #define leftshift_for_tree_index(i) \ + ((i == NTREEBINS - 1) \ + ? 0 \ + : ((SIZE_T_BITSIZE - SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + /* The size of the smallest chunk held in bin with index i */ + #define minsize_for_tree_index(i) \ + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ + (((size_t)((i)&SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) -/* ------------------------ Operations on bin maps ----------------------- */ + /* ------------------------ Operations on bin maps ----------------------- */ -/* bit corresponding to given index */ -#define idx2bit(i) ((binmap_t)(1) << (i)) + /* bit corresponding to given index */ + #define idx2bit(i) ((binmap_t)(1) << (i)) -/* Mark/Clear bits with given index */ -#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) -#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) -#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) + /* Mark/Clear bits with given index */ + #define mark_smallmap(M, i) ((M)->smallmap |= idx2bit(i)) + #define clear_smallmap(M, i) ((M)->smallmap &= ~idx2bit(i)) + #define smallmap_is_marked(M, i) ((M)->smallmap & idx2bit(i)) -#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) -#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) -#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) + #define mark_treemap(M, i) ((M)->treemap |= idx2bit(i)) + #define clear_treemap(M, i) ((M)->treemap &= ~idx2bit(i)) + #define treemap_is_marked(M, i) ((M)->treemap & idx2bit(i)) -/* isolate the least set bit of a bitmap */ -#define least_bit(x) ((x) & -(x)) + /* isolate the least set bit of a bitmap */ + #define least_bit(x) ((x) & -(x)) -/* mask with all bits to left of least bit of x on */ -#define left_bits(x) ((x<<1) | -(x<<1)) + /* mask with all bits to left of least bit of x on */ + #define left_bits(x) ((x << 1) | -(x << 1)) -/* mask with all bits to left of or equal to least bit of x on */ -#define same_or_left_bits(x) ((x) | -(x)) + /* mask with all bits to left of or equal to least bit of x on */ + #define same_or_left_bits(x) ((x) | -(x)) /* index corresponding to given bit. 
Use x86 asm if possible */ -#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - J = __builtin_ctz(X); \ - I = (bindex_t)J;\ -} + #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int J; \ + J = __builtin_ctz(X); \ + I = (bindex_t)J; \ + \ + } -#elif defined (__INTEL_COMPILER) -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - J = _bit_scan_forward (X); \ - I = (bindex_t)J;\ -} + #elif defined(__INTEL_COMPILER) + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int J; \ + J = _bit_scan_forward(X); \ + I = (bindex_t)J; \ + \ + } -#elif defined(_MSC_VER) && _MSC_VER>=1300 -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - _BitScanForward((DWORD *) &J, X);\ - I = (bindex_t)J;\ -} + #elif defined(_MSC_VER) && _MSC_VER >= 1300 + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int J; \ + _BitScanForward((DWORD *)&J, X); \ + I = (bindex_t)J; \ + \ + } -#elif USE_BUILTIN_FFS -#define compute_bit2idx(X, I) I = ffs(X)-1 - -#else -#define compute_bit2idx(X, I)\ -{\ - unsigned int Y = X - 1;\ - unsigned int K = Y >> (16-4) & 16;\ - unsigned int N = K; Y >>= K;\ - N += K = Y >> (8-3) & 8; Y >>= K;\ - N += K = Y >> (4-2) & 4; Y >>= K;\ - N += K = Y >> (2-1) & 2; Y >>= K;\ - N += K = Y >> (1-0) & 1; Y >>= K;\ - I = (bindex_t)(N + Y);\ -} -#endif /* GNUC */ + #elif USE_BUILTIN_FFS + #define compute_bit2idx(X, I) I = ffs(X) - 1 + #else + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int Y = X - 1; \ + unsigned int K = Y >> (16 - 4) & 16; \ + unsigned int N = K; \ + Y >>= K; \ + N += K = Y >> (8 - 3) & 8; \ + Y >>= K; \ + N += K = Y >> (4 - 2) & 4; \ + Y >>= K; \ + N += K = Y >> (2 - 1) & 2; \ + Y >>= K; \ + N += K = Y >> (1 - 0) & 1; \ + Y >>= K; \ + I = (bindex_t)(N + Y); \ + \ + } + #endif /* GNUC */ /* ----------------------- Runtime Check Support ------------------------- */ @@ -3058,122 +3186,142 @@ static size_t traverse_and_check(mstate m); next, etc). This turns out to be cheaper than relying on hashes. 
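
As an aside on the bin-indexing machinery defined above, the following standalone C sketch mirrors the __GNUC__ branch of compute_tree_index and the least_bit/compute_bit2idx bin-map scan. It is illustrative only: the helper names tree_index_for_size and first_nonempty_bin are not part of dlmalloc, the macro copies are taken verbatim from the definitions above, GCC/Clang builtins are assumed (as in the __GNUC__ branches above), and TREEBIN_SHIFT == 8, NTREEBINS == 32 as defined earlier in this file.

    #include <assert.h>
    #include <stddef.h>

    typedef unsigned int binmap_t;

    // Copied from the definitions above.
    #define idx2bit(i) ((binmap_t)(1) << (i))
    #define left_bits(x) ((x << 1) | -(x << 1))
    #define least_bit(x) ((x) & -(x))

    // Mirrors the __GNUC__ branch of compute_tree_index: which treebin holds size s?
    static unsigned tree_index_for_size(size_t s) {
      size_t x = s >> 8;                      // TREEBIN_SHIFT
      if (x == 0) return 0;
      if (x > 0xFFFF) return 31;              // NTREEBINS - 1
      unsigned k = 31u - (unsigned)__builtin_clz((unsigned)x);
      return (k << 1) + (unsigned)((s >> (k + 7)) & 1);
    }

    // First non-empty bin at or above start, or -1 if none:
    // mask off lower bins, isolate the least set bit, convert bit to index.
    static int first_nonempty_bin(binmap_t map, unsigned start) {
      binmap_t candidates = map & (idx2bit(start) | left_bits(idx2bit(start)));
      if (candidates == 0) return -1;
      return (int)__builtin_ctz(least_bit(candidates));  // compute_bit2idx
    }

    int main(void) {
      // A 0x300-byte chunk lands in treebin 3 (minsize_for_tree_index(3) == 0x300).
      assert(tree_index_for_size(0x300) == 3);
      // With bins 4 and 9 marked, the first non-empty bin at or above 5 is bin 9.
      binmap_t smallmap = idx2bit(4) | idx2bit(9);
      assert(first_nonempty_bin(smallmap, 5) == 9);
      assert(first_nonempty_bin(smallmap, 10) == -1);
      return 0;
    }
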
*/ -#if !INSECURE -/* Check if address a is at least as high as any from MORECORE or MMAP */ -#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) -/* Check if address of next chunk n is higher than base chunk p */ -#define ok_next(p, n) ((char*)(p) < (char*)(n)) -/* Check if p has inuse status */ -#define ok_inuse(p) is_inuse(p) -/* Check if p has its pinuse bit on */ -#define ok_pinuse(p) pinuse(p) - -#else /* !INSECURE */ -#define ok_address(M, a) (1) -#define ok_next(b, n) (1) -#define ok_inuse(p) (1) -#define ok_pinuse(p) (1) -#endif /* !INSECURE */ - -#if (FOOTERS && !INSECURE) -/* Check if (alleged) mstate m has expected magic field */ -#define ok_magic(M) ((M)->magic == mparams.magic) -#else /* (FOOTERS && !INSECURE) */ -#define ok_magic(M) (1) -#endif /* (FOOTERS && !INSECURE) */ - -/* In gcc, use __builtin_expect to minimize impact of checks */ -#if !INSECURE -#if defined(__GNUC__) && __GNUC__ >= 3 -#define RTCHECK(e) __builtin_expect(e, 1) -#else /* GNUC */ -#define RTCHECK(e) (e) -#endif /* GNUC */ -#else /* !INSECURE */ -#define RTCHECK(e) (1) -#endif /* !INSECURE */ + #if !INSECURE + /* Check if address a is at least as high as any from MORECORE or MMAP */ + #define ok_address(M, a) ((char *)(a) >= (M)->least_addr) + /* Check if address of next chunk n is higher than base chunk p */ + #define ok_next(p, n) ((char *)(p) < (char *)(n)) + /* Check if p has inuse status */ + #define ok_inuse(p) is_inuse(p) + /* Check if p has its pinuse bit on */ + #define ok_pinuse(p) pinuse(p) + + #else /* !INSECURE */ + #define ok_address(M, a) (1) + #define ok_next(b, n) (1) + #define ok_inuse(p) (1) + #define ok_pinuse(p) (1) + #endif /* !INSECURE */ + + #if (FOOTERS && !INSECURE) + /* Check if (alleged) mstate m has expected magic field */ + #define ok_magic(M) ((M)->magic == mparams.magic) + #else /* (FOOTERS && !INSECURE) */ + #define ok_magic(M) (1) + #endif /* (FOOTERS && !INSECURE) */ + + /* In gcc, use __builtin_expect to minimize impact of checks */ + #if !INSECURE + #if defined(__GNUC__) && __GNUC__ >= 3 + #define RTCHECK(e) __builtin_expect(e, 1) + #else /* GNUC */ + #define RTCHECK(e) (e) + #endif /* GNUC */ + #else /* !INSECURE */ + #define RTCHECK(e) (1) + #endif /* !INSECURE */ /* macros to set up inuse chunks with or without footers */ -#if !FOOTERS + #if !FOOTERS -#define mark_inuse_foot(M,p,s) + #define mark_inuse_foot(M, p, s) -/* Macros for setting head/foot of non-mmapped chunks */ + /* Macros for setting head/foot of non-mmapped chunks */ -/* Set cinuse bit and pinuse bit of next chunk */ -#define set_inuse(M,p,s)\ - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ - ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + /* Set cinuse bit and pinuse bit of next chunk */ + #define set_inuse(M, p, s) \ + ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \ + ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) -/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ -#define set_inuse_and_pinuse(M,p,s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + /* Set cinuse and pinuse of this chunk and pinuse of next chunk */ + #define set_inuse_and_pinuse(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \ + ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) -/* Set size, cinuse and pinuse bit of this chunk */ -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) + /* Set size, cinuse and pinuse bit of this chunk */ + #define 
set_size_and_pinuse_of_inuse_chunk(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT)) -#else /* FOOTERS */ + #else /* FOOTERS */ -/* Set foot of inuse chunk to be xor of mstate and seed */ -#define mark_inuse_foot(M,p,s)\ - (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) + /* Set foot of inuse chunk to be xor of mstate and seed */ + #define mark_inuse_foot(M, p, s) \ + (((mchunkptr)((char *)(p) + (s)))->prev_foot = \ + ((size_t)(M) ^ mparams.magic)) -#define get_mstate_for(p)\ - ((mstate)(((mchunkptr)((char*)(p) +\ - (chunksize(p))))->prev_foot ^ mparams.magic)) + #define get_mstate_for(p) \ + ((mstate)(((mchunkptr)((char *)(p) + (chunksize(p))))->prev_foot ^ \ + mparams.magic)) -#define set_inuse(M,p,s)\ - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ - (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ - mark_inuse_foot(M,p,s)) + #define set_inuse(M, p, s) \ + ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \ + (((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M, p, s)) -#define set_inuse_and_pinuse(M,p,s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ - mark_inuse_foot(M,p,s)) + #define set_inuse_and_pinuse(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \ + (((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M, p, s)) -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - mark_inuse_foot(M, p, s)) + #define set_size_and_pinuse_of_inuse_chunk(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), mark_inuse_foot(M, p, s)) -#endif /* !FOOTERS */ + #endif /* !FOOTERS */ /* ---------------------------- setting mparams -------------------------- */ -#if LOCK_AT_FORK -static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); } -static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); } -static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); } -#endif /* LOCK_AT_FORK */ + #if LOCK_AT_FORK +static void pre_fork(void) { + + ACQUIRE_LOCK(&(gm)->mutex); + +} + +static void post_fork_parent(void) { + + RELEASE_LOCK(&(gm)->mutex); + +} + +static void post_fork_child(void) { + + INITIAL_LOCK(&(gm)->mutex); + +} + + #endif /* LOCK_AT_FORK */ /* Initialize mparams */ static int init_mparams(void) { -#ifdef NEED_GLOBAL_LOCK_INIT - if (malloc_global_mutex_status <= 0) - init_malloc_global_mutex(); -#endif + + #ifdef NEED_GLOBAL_LOCK_INIT + if (malloc_global_mutex_status <= 0) init_malloc_global_mutex(); + #endif ACQUIRE_MALLOC_GLOBAL_LOCK(); if (mparams.magic == 0) { + size_t magic; size_t psize; size_t gsize; -#ifndef WIN32 + #ifndef WIN32 psize = malloc_getpagesize; - gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize); -#else /* WIN32 */ + gsize = ((DEFAULT_GRANULARITY != 0) ? DEFAULT_GRANULARITY : psize); + #else /* WIN32 */ { + SYSTEM_INFO system_info; GetSystemInfo(&system_info); psize = system_info.dwPageSize; - gsize = ((DEFAULT_GRANULARITY != 0)? - DEFAULT_GRANULARITY : system_info.dwAllocationGranularity); + gsize = + ((DEFAULT_GRANULARITY != 0) ? DEFAULT_GRANULARITY + : system_info.dwAllocationGranularity); + } -#endif /* WIN32 */ + + #endif /* WIN32 */ /* Sanity-check configuration: size_t must be unsigned and as wide as pointer type. @@ -3181,187 +3329,216 @@ static int init_mparams(void) { alignment must be at least 8. Alignment, min chunk size, and page size must all be powers of 2. 
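
The power-of-two requirements below are checked with the usual single-bit test: a non-zero x is a power of two exactly when (x & (x - 1)) == 0, because subtracting one clears the lowest set bit and sets every bit below it, so the AND survives only if some higher bit was also set. A minimal standalone illustration (the helper is hypothetical, not part of this file):

    #include <assert.h>
    #include <stddef.h>

    // Non-zero power of two iff exactly one bit is set.
    static int is_power_of_two(size_t x) {
      return x != 0 && (x & (x - 1)) == 0;
    }

    int main(void) {
      assert(is_power_of_two(4096));            // a typical page size
      assert(!is_power_of_two(4097));
      assert(is_power_of_two(sizeof(void *)));  // 4 or 8 on common targets
      return 0;
    }
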
*/ - if ((sizeof(size_t) != sizeof(char*)) || - (MAX_SIZE_T < MIN_CHUNK_SIZE) || - (sizeof(int) < 4) || - (MALLOC_ALIGNMENT < (size_t)8U) || - ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || - ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || - ((gsize & (gsize-SIZE_T_ONE)) != 0) || - ((psize & (psize-SIZE_T_ONE)) != 0)) + if ((sizeof(size_t) != sizeof(char *)) || (MAX_SIZE_T < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || (MALLOC_ALIGNMENT < (size_t)8U) || + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) || + ((gsize & (gsize - SIZE_T_ONE)) != 0) || + ((psize & (psize - SIZE_T_ONE)) != 0)) ABORT; mparams.granularity = gsize; mparams.page_size = psize; mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; -#if MORECORE_CONTIGUOUS - mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; -#else /* MORECORE_CONTIGUOUS */ - mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; -#endif /* MORECORE_CONTIGUOUS */ - -#if !ONLY_MSPACES + #if MORECORE_CONTIGUOUS + mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT; + #else /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = + USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT; + #endif /* MORECORE_CONTIGUOUS */ + + #if !ONLY_MSPACES /* Set up lock for main malloc area */ gm->mflags = mparams.default_mflags; (void)INITIAL_LOCK(&gm->mutex); -#endif -#if LOCK_AT_FORK + #endif + #if LOCK_AT_FORK pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child); -#endif + #endif { -#if USE_DEV_RANDOM - int fd; + + #if USE_DEV_RANDOM + int fd; unsigned char buf[sizeof(size_t)]; /* Try to use /dev/urandom, else fall back on using time */ if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && read(fd, buf, sizeof(buf)) == sizeof(buf)) { - magic = *((size_t *) buf); + + magic = *((size_t *)buf); close(fd); - } - else -#endif /* USE_DEV_RANDOM */ -#ifdef WIN32 - magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); -#elif defined(LACKS_TIME_H) + + } else + + #endif /* USE_DEV_RANDOM */ + #ifdef WIN32 + magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); + #elif defined(LACKS_TIME_H) magic = (size_t)&magic ^ (size_t)0x55555555U; -#else + #else magic = (size_t)(time(0) ^ (size_t)0x55555555U); -#endif - magic |= (size_t)8U; /* ensure nonzero */ - magic &= ~(size_t)7U; /* improve chances of fault for bad values */ + #endif + magic |= (size_t)8U; /* ensure nonzero */ + magic &= ~(size_t)7U; /* improve chances of fault for bad values */ /* Until memory modes commonly available, use volatile-write */ (*(volatile size_t *)(&(mparams.magic))) = magic; + } + } RELEASE_MALLOC_GLOBAL_LOCK(); return 1; + } /* support for mallopt */ static int change_mparam(int param_number, int value) { + size_t val; ensure_initialization(); - val = (value == -1)? MAX_SIZE_T : (size_t)value; - switch(param_number) { - case M_TRIM_THRESHOLD: - mparams.trim_threshold = val; - return 1; - case M_GRANULARITY: - if (val >= mparams.page_size && ((val & (val-1)) == 0)) { - mparams.granularity = val; + val = (value == -1) ? 
MAX_SIZE_T : (size_t)value; + switch (param_number) { + + case M_TRIM_THRESHOLD: + mparams.trim_threshold = val; return 1; - } - else + case M_GRANULARITY: + if (val >= mparams.page_size && ((val & (val - 1)) == 0)) { + + mparams.granularity = val; + return 1; + + } else + + return 0; + case M_MMAP_THRESHOLD: + mparams.mmap_threshold = val; + return 1; + default: return 0; - case M_MMAP_THRESHOLD: - mparams.mmap_threshold = val; - return 1; - default: - return 0; + } + } -#if DEBUG + #if DEBUG /* ------------------------- Debugging Support --------------------------- */ /* Check properties of any chunk, whether free, inuse, mmapped etc */ static void do_check_any_chunk(mstate m, mchunkptr p) { + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); + } /* Check properties of top chunk */ static void do_check_top_chunk(mstate m, mchunkptr p) { - msegmentptr sp = segment_holding(m, (char*)p); - size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */ + + msegmentptr sp = segment_holding(m, (char *)p); + size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */ assert(sp != 0); assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); assert(sz == m->topsize); assert(sz > 0); - assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); + assert(sz == ((sp->base + sp->size) - (char *)p) - TOP_FOOT_SIZE); assert(pinuse(p)); assert(!pinuse(chunk_plus_offset(p, sz))); + } /* Check properties of (inuse) mmapped chunks */ static void do_check_mmapped_chunk(mstate m, mchunkptr p) { - size_t sz = chunksize(p); + + size_t sz = chunksize(p); size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD); assert(is_mmapped(p)); assert(use_mmap(m)); assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); assert(!is_small(sz)); - assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); + assert((len & (mparams.page_size - SIZE_T_ONE)) == 0); assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); - assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); + assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0); + } /* Check properties of inuse chunks */ static void do_check_inuse_chunk(mstate m, mchunkptr p) { + do_check_any_chunk(m, p); assert(is_inuse(p)); assert(next_pinuse(p)); /* If not pinuse and not mmapped, previous chunk has OK offset */ assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); - if (is_mmapped(p)) - do_check_mmapped_chunk(m, p); + if (is_mmapped(p)) do_check_mmapped_chunk(m, p); + } /* Check properties of free chunks */ static void do_check_free_chunk(mstate m, mchunkptr p) { - size_t sz = chunksize(p); + + size_t sz = chunksize(p); mchunkptr next = chunk_plus_offset(p, sz); do_check_any_chunk(m, p); assert(!is_inuse(p)); assert(!next_pinuse(p)); - assert (!is_mmapped(p)); + assert(!is_mmapped(p)); if (p != m->dv && p != m->top) { + if (sz >= MIN_CHUNK_SIZE) { + assert((sz & CHUNK_ALIGN_MASK) == 0); assert(is_aligned(chunk2mem(p))); assert(next->prev_foot == sz); assert(pinuse(p)); - assert (next == m->top || is_inuse(next)); + assert(next == m->top || is_inuse(next)); assert(p->fd->bk == p); assert(p->bk->fd == p); - } - else /* markers are always of size SIZE_T_SIZE */ + + } else /* markers are always of size SIZE_T_SIZE */ + assert(sz == SIZE_T_SIZE); + } + } /* Check properties of malloced chunks at the point they are malloced */ -static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { +static void 
do_check_malloced_chunk(mstate m, void *mem, size_t s) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); - size_t sz = p->head & ~INUSE_BITS; + size_t sz = p->head & ~INUSE_BITS; do_check_inuse_chunk(m, p); assert((sz & CHUNK_ALIGN_MASK) == 0); assert(sz >= MIN_CHUNK_SIZE); assert(sz >= s); /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); + } + } /* Check a tree and its subtrees. */ static void do_check_tree(mstate m, tchunkptr t) { + tchunkptr head = 0; tchunkptr u = t; - bindex_t tindex = t->index; - size_t tsize = chunksize(t); - bindex_t idx; + bindex_t tindex = t->index; + size_t tsize = chunksize(t); + bindex_t idx; compute_tree_index(tsize, idx); assert(tindex == idx); assert(tsize >= MIN_LARGE_SIZE); assert(tsize >= minsize_for_tree_index(idx)); - assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); + assert((idx == NTREEBINS - 1) || (tsize < minsize_for_tree_index((idx + 1)))); - do { /* traverse through chain of same-sized nodes */ + do { /* traverse through chain of same-sized nodes */ do_check_any_chunk(m, ((mchunkptr)u)); assert(u->index == tindex); assert(chunksize(u) == tsize); @@ -3370,56 +3547,72 @@ static void do_check_tree(mstate m, tchunkptr t) { assert(u->fd->bk == u); assert(u->bk->fd == u); if (u->parent == 0) { + assert(u->child[0] == 0); assert(u->child[1] == 0); - } - else { - assert(head == 0); /* only one node on chain has parent */ + + } else { + + assert(head == 0); /* only one node on chain has parent */ head = u; assert(u->parent != u); - assert (u->parent->child[0] == u || - u->parent->child[1] == u || - *((tbinptr*)(u->parent)) == u); + assert(u->parent->child[0] == u || u->parent->child[1] == u || + *((tbinptr *)(u->parent)) == u); if (u->child[0] != 0) { + assert(u->child[0]->parent == u); assert(u->child[0] != u); do_check_tree(m, u->child[0]); + } + if (u->child[1] != 0) { + assert(u->child[1]->parent == u); assert(u->child[1] != u); do_check_tree(m, u->child[1]); + } + if (u->child[0] != 0 && u->child[1] != 0) { + assert(chunksize(u->child[0]) < chunksize(u->child[1])); + } + } + u = u->fd; + } while (u != t); + assert(head != 0); + } /* Check all the chunks in a treebin. */ static void do_check_treebin(mstate m, bindex_t i) { - tbinptr* tb = treebin_at(m, i); + + tbinptr * tb = treebin_at(m, i); tchunkptr t = *tb; - int empty = (m->treemap & (1U << i)) == 0; - if (t == 0) - assert(empty); - if (!empty) - do_check_tree(m, t); + int empty = (m->treemap & (1U << i)) == 0; + if (t == 0) assert(empty); + if (!empty) do_check_tree(m, t); + } /* Check all the chunks in a smallbin. */ static void do_check_smallbin(mstate m, bindex_t i) { - sbinptr b = smallbin_at(m, i); - mchunkptr p = b->bk; + + sbinptr b = smallbin_at(m, i); + mchunkptr p = b->bk; unsigned int empty = (m->smallmap & (1U << i)) == 0; - if (p == b) - assert(empty); + if (p == b) assert(empty); if (!empty) { + for (; p != b; p = p->bk) { - size_t size = chunksize(p); + + size_t size = chunksize(p); mchunkptr q; /* each chunk claims to be free */ do_check_free_chunk(m, p); @@ -3428,324 +3621,434 @@ static void do_check_smallbin(mstate m, bindex_t i) { assert(p->bk == b || chunksize(p->bk) == chunksize(p)); /* chunk is followed by an inuse chunk */ q = next_chunk(p); - if (q->head != FENCEPOST_HEAD) - do_check_inuse_chunk(m, q); + if (q->head != FENCEPOST_HEAD) do_check_inuse_chunk(m, q); + } + } + } /* Find x in a bin. Used in other check functions. 
*/ static int bin_find(mstate m, mchunkptr x) { + size_t size = chunksize(x); if (is_small(size)) { + bindex_t sidx = small_index(size); - sbinptr b = smallbin_at(m, sidx); + sbinptr b = smallbin_at(m, sidx); if (smallmap_is_marked(m, sidx)) { + mchunkptr p = b; do { - if (p == x) - return 1; + + if (p == x) return 1; + } while ((p = p->fd) != b); + } - } - else { + + } else { + bindex_t tidx; compute_tree_index(size, tidx); if (treemap_is_marked(m, tidx)) { + tchunkptr t = *treebin_at(m, tidx); - size_t sizebits = size << leftshift_for_tree_index(tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); while (t != 0 && chunksize(t) != size) { - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + + t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]; sizebits <<= 1; + } + if (t != 0) { + tchunkptr u = t; do { - if (u == (tchunkptr)x) - return 1; + + if (u == (tchunkptr)x) return 1; + } while ((u = u->fd) != t); + } + } + } + return 0; + } /* Traverse each chunk and check it; return total */ static size_t traverse_and_check(mstate m) { + size_t sum = 0; if (is_initialized(m)) { + msegmentptr s = &m->seg; sum += m->topsize + TOP_FOOT_SIZE; while (s != 0) { + mchunkptr q = align_as_chunk(s->base); mchunkptr lastq = 0; assert(pinuse(q)); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { + while (segment_holds(s, q) && q != m->top && q->head != FENCEPOST_HEAD) { + sum += chunksize(q); if (is_inuse(q)) { + assert(!bin_find(m, q)); do_check_inuse_chunk(m, q); - } - else { + + } else { + assert(q == m->dv || bin_find(m, q)); - assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ + assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ do_check_free_chunk(m, q); + } + lastq = q; q = next_chunk(q); + } + s = s->next; + } + } + return sum; -} +} /* Check all properties of malloc_state. 
*/ static void do_check_malloc_state(mstate m) { + bindex_t i; - size_t total; + size_t total; /* check bins */ for (i = 0; i < NSMALLBINS; ++i) do_check_smallbin(m, i); for (i = 0; i < NTREEBINS; ++i) do_check_treebin(m, i); - if (m->dvsize != 0) { /* check dv chunk */ + if (m->dvsize != 0) { /* check dv chunk */ do_check_any_chunk(m, m->dv); assert(m->dvsize == chunksize(m->dv)); assert(m->dvsize >= MIN_CHUNK_SIZE); assert(bin_find(m, m->dv) == 0); + } - if (m->top != 0) { /* check top chunk */ + if (m->top != 0) { /* check top chunk */ do_check_top_chunk(m, m->top); /*assert(m->topsize == chunksize(m->top)); redundant */ assert(m->topsize > 0); assert(bin_find(m, m->top) == 0); + } total = traverse_and_check(m); assert(total <= m->footprint); assert(m->footprint <= m->max_footprint); + } -#endif /* DEBUG */ + + #endif /* DEBUG */ /* ----------------------------- statistics ------------------------------ */ -#if !NO_MALLINFO + #if !NO_MALLINFO static struct mallinfo internal_mallinfo(mstate m) { - struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + struct mallinfo nm = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; ensure_initialization(); if (!PREACTION(m)) { + check_malloc_state(m); if (is_initialized(m)) { - size_t nfree = SIZE_T_ONE; /* top always free */ - size_t mfree = m->topsize + TOP_FOOT_SIZE; - size_t sum = mfree; + + size_t nfree = SIZE_T_ONE; /* top always free */ + size_t mfree = m->topsize + TOP_FOOT_SIZE; + size_t sum = mfree; msegmentptr s = &m->seg; while (s != 0) { + mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { + while (segment_holds(s, q) && q != m->top && + q->head != FENCEPOST_HEAD) { + size_t sz = chunksize(q); sum += sz; if (!is_inuse(q)) { + mfree += sz; ++nfree; + } + q = next_chunk(q); + } + s = s->next; + } - nm.arena = sum; - nm.ordblks = nfree; - nm.hblkhd = m->footprint - sum; - nm.usmblks = m->max_footprint; + nm.arena = sum; + nm.ordblks = nfree; + nm.hblkhd = m->footprint - sum; + nm.usmblks = m->max_footprint; nm.uordblks = m->footprint - mfree; nm.fordblks = mfree; nm.keepcost = m->topsize; + } POSTACTION(m); + } + return nm; + } -#endif /* !NO_MALLINFO */ -#if !NO_MALLOC_STATS + #endif /* !NO_MALLINFO */ + + #if !NO_MALLOC_STATS static void internal_malloc_stats(mstate m) { + ensure_initialization(); if (!PREACTION(m)) { + size_t maxfp = 0; size_t fp = 0; size_t used = 0; check_malloc_state(m); if (is_initialized(m)) { + msegmentptr s = &m->seg; maxfp = m->max_footprint; fp = m->footprint; used = fp - (m->topsize + TOP_FOOT_SIZE); while (s != 0) { + mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { - if (!is_inuse(q)) - used -= chunksize(q); + while (segment_holds(s, q) && q != m->top && + q->head != FENCEPOST_HEAD) { + + if (!is_inuse(q)) used -= chunksize(q); q = next_chunk(q); + } + s = s->next; + } + } - POSTACTION(m); /* drop lock */ + + POSTACTION(m); /* drop lock */ fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); + } + } -#endif /* NO_MALLOC_STATS */ -/* ----------------------- Operations on smallbins ----------------------- */ + #endif /* NO_MALLOC_STATS */ -/* - Various forms of linking and unlinking are defined as macros. Even - the ones for trees, which are very long but have very short typical - paths. 
This is ugly but reduces reliance on inlining support of - compilers. -*/ + /* ----------------------- Operations on smallbins ----------------------- */ -/* Link a free chunk into a smallbin */ -#define insert_small_chunk(M, P, S) {\ - bindex_t I = small_index(S);\ - mchunkptr B = smallbin_at(M, I);\ - mchunkptr F = B;\ - assert(S >= MIN_CHUNK_SIZE);\ - if (!smallmap_is_marked(M, I))\ - mark_smallmap(M, I);\ - else if (RTCHECK(ok_address(M, B->fd)))\ - F = B->fd;\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - B->fd = P;\ - F->bk = P;\ - P->fd = F;\ - P->bk = B;\ -} + /* + Various forms of linking and unlinking are defined as macros. Even + the ones for trees, which are very long but have very short typical + paths. This is ugly but reduces reliance on inlining support of + compilers. + */ -/* Unlink a chunk from a smallbin */ -#define unlink_small_chunk(M, P, S) {\ - mchunkptr F = P->fd;\ - mchunkptr B = P->bk;\ - bindex_t I = small_index(S);\ - assert(P != B);\ - assert(P != F);\ - assert(chunksize(P) == small_index2size(I));\ - if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \ - if (B == F) {\ - clear_smallmap(M, I);\ - }\ - else if (RTCHECK(B == smallbin_at(M,I) ||\ - (ok_address(M, B) && B->fd == P))) {\ - F->bk = B;\ - B->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ -} + /* Link a free chunk into a smallbin */ + #define insert_small_chunk(M, P, S) \ + { \ + \ + bindex_t I = small_index(S); \ + mchunkptr B = smallbin_at(M, I); \ + mchunkptr F = B; \ + assert(S >= MIN_CHUNK_SIZE); \ + if (!smallmap_is_marked(M, I)) \ + mark_smallmap(M, I); \ + else if (RTCHECK(ok_address(M, B->fd))) \ + F = B->fd; \ + else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + B->fd = P; \ + F->bk = P; \ + P->fd = F; \ + P->bk = B; \ + \ + } -/* Unlink the first chunk from a smallbin */ -#define unlink_first_small_chunk(M, B, P, I) {\ - mchunkptr F = P->fd;\ - assert(P != B);\ - assert(P != F);\ - assert(chunksize(P) == small_index2size(I));\ - if (B == F) {\ - clear_smallmap(M, I);\ - }\ - else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\ - F->bk = B;\ - B->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ -} + /* Unlink a chunk from a smallbin */ + #define unlink_small_chunk(M, P, S) \ + { \ + \ + mchunkptr F = P->fd; \ + mchunkptr B = P->bk; \ + bindex_t I = small_index(S); \ + assert(P != B); \ + assert(P != F); \ + assert(chunksize(P) == small_index2size(I)); \ + if (RTCHECK(F == smallbin_at(M, I) || \ + (ok_address(M, F) && F->bk == P))) { \ + \ + if (B == F) { \ + \ + clear_smallmap(M, I); \ + \ + } else if (RTCHECK(B == smallbin_at(M, I) || \ + (ok_address(M, B) && B->fd == P))) { \ + \ + F->bk = B; \ + B->fd = F; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } -/* Replace dv node, binning the old one */ -/* Used only when dvsize known to be small */ -#define replace_dv(M, P, S) {\ - size_t DVS = M->dvsize;\ - assert(is_small(DVS));\ - if (DVS != 0) {\ - mchunkptr DV = M->dv;\ - insert_small_chunk(M, DV, DVS);\ - }\ - M->dvsize = S;\ - M->dv = P;\ -} + /* Unlink the first chunk from a smallbin */ + #define unlink_first_small_chunk(M, B, P, I) \ + { \ + \ + mchunkptr F = P->fd; \ + assert(P != B); \ + assert(P != F); \ + assert(chunksize(P) == small_index2size(I)); \ + if (B == F) { \ + \ + clear_smallmap(M, I); \ + \ + } else if (RTCHECK(ok_address(M, F) && F->bk == P)) { \ + \ + F->bk = B; \ + B->fd = F; 
\ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } -/* ------------------------- Operations on trees ------------------------- */ - -/* Insert chunk into tree */ -#define insert_large_chunk(M, X, S) {\ - tbinptr* H;\ - bindex_t I;\ - compute_tree_index(S, I);\ - H = treebin_at(M, I);\ - X->index = I;\ - X->child[0] = X->child[1] = 0;\ - if (!treemap_is_marked(M, I)) {\ - mark_treemap(M, I);\ - *H = X;\ - X->parent = (tchunkptr)H;\ - X->fd = X->bk = X;\ - }\ - else {\ - tchunkptr T = *H;\ - size_t K = S << leftshift_for_tree_index(I);\ - for (;;) {\ - if (chunksize(T) != S) {\ - tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ - K <<= 1;\ - if (*C != 0)\ - T = *C;\ - else if (RTCHECK(ok_address(M, C))) {\ - *C = X;\ - X->parent = T;\ - X->fd = X->bk = X;\ - break;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - break;\ - }\ - }\ - else {\ - tchunkptr F = T->fd;\ - if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ - T->fd = F->bk = X;\ - X->fd = F;\ - X->bk = T;\ - X->parent = 0;\ - break;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - break;\ - }\ - }\ - }\ - }\ -} + /* Replace dv node, binning the old one */ + /* Used only when dvsize known to be small */ + #define replace_dv(M, P, S) \ + { \ + \ + size_t DVS = M->dvsize; \ + assert(is_small(DVS)); \ + if (DVS != 0) { \ + \ + mchunkptr DV = M->dv; \ + insert_small_chunk(M, DV, DVS); \ + \ + } \ + M->dvsize = S; \ + M->dv = P; \ + \ + } + + /* ------------------------- Operations on trees ------------------------- */ + + /* Insert chunk into tree */ + #define insert_large_chunk(M, X, S) \ + { \ + \ + tbinptr *H; \ + bindex_t I; \ + compute_tree_index(S, I); \ + H = treebin_at(M, I); \ + X->index = I; \ + X->child[0] = X->child[1] = 0; \ + if (!treemap_is_marked(M, I)) { \ + \ + mark_treemap(M, I); \ + *H = X; \ + X->parent = (tchunkptr)H; \ + X->fd = X->bk = X; \ + \ + } else { \ + \ + tchunkptr T = *H; \ + size_t K = S << leftshift_for_tree_index(I); \ + for (;;) { \ + \ + if (chunksize(T) != S) { \ + \ + tchunkptr *C = \ + &(T->child[(K >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]); \ + K <<= 1; \ + if (*C != 0) \ + T = *C; \ + else if (RTCHECK(ok_address(M, C))) { \ + \ + *C = X; \ + X->parent = T; \ + X->fd = X->bk = X; \ + break; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + break; \ + \ + } \ + \ + } else { \ + \ + tchunkptr F = T->fd; \ + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) { \ + \ + T->fd = F->bk = X; \ + X->fd = F; \ + X->bk = T; \ + X->parent = 0; \ + break; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + break; \ + \ + } \ + \ + } \ + \ + } \ + \ + } \ + \ + } /* Unlink steps: @@ -3764,104 +4067,141 @@ static void internal_malloc_stats(mstate m) { x's parent and children to x's replacement (or null if none). 
*/ -#define unlink_large_chunk(M, X) {\ - tchunkptr XP = X->parent;\ - tchunkptr R;\ - if (X->bk != X) {\ - tchunkptr F = X->fd;\ - R = X->bk;\ - if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\ - F->bk = R;\ - R->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else {\ - tchunkptr* RP;\ - if (((R = *(RP = &(X->child[1]))) != 0) ||\ - ((R = *(RP = &(X->child[0]))) != 0)) {\ - tchunkptr* CP;\ - while ((*(CP = &(R->child[1])) != 0) ||\ - (*(CP = &(R->child[0])) != 0)) {\ - R = *(RP = CP);\ - }\ - if (RTCHECK(ok_address(M, RP)))\ - *RP = 0;\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - }\ - if (XP != 0) {\ - tbinptr* H = treebin_at(M, X->index);\ - if (X == *H) {\ - if ((*H = R) == 0) \ - clear_treemap(M, X->index);\ - }\ - else if (RTCHECK(ok_address(M, XP))) {\ - if (XP->child[0] == X) \ - XP->child[0] = R;\ - else \ - XP->child[1] = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - if (R != 0) {\ - if (RTCHECK(ok_address(M, R))) {\ - tchunkptr C0, C1;\ - R->parent = XP;\ - if ((C0 = X->child[0]) != 0) {\ - if (RTCHECK(ok_address(M, C0))) {\ - R->child[0] = C0;\ - C0->parent = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - if ((C1 = X->child[1]) != 0) {\ - if (RTCHECK(ok_address(M, C1))) {\ - R->child[1] = C1;\ - C1->parent = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ -} + #define unlink_large_chunk(M, X) \ + { \ + \ + tchunkptr XP = X->parent; \ + tchunkptr R; \ + if (X->bk != X) { \ + \ + tchunkptr F = X->fd; \ + R = X->bk; \ + if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) { \ + \ + F->bk = R; \ + R->fd = F; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } else { \ + \ + tchunkptr *RP; \ + if (((R = *(RP = &(X->child[1]))) != 0) || \ + ((R = *(RP = &(X->child[0]))) != 0)) { \ + \ + tchunkptr *CP; \ + while ((*(CP = &(R->child[1])) != 0) || \ + (*(CP = &(R->child[0])) != 0)) { \ + \ + R = *(RP = CP); \ + \ + } \ + if (RTCHECK(ok_address(M, RP))) \ + *RP = 0; \ + else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } \ + \ + } \ + if (XP != 0) { \ + \ + tbinptr *H = treebin_at(M, X->index); \ + if (X == *H) { \ + \ + if ((*H = R) == 0) clear_treemap(M, X->index); \ + \ + } else if (RTCHECK(ok_address(M, XP))) { \ + \ + if (XP->child[0] == X) \ + XP->child[0] = R; \ + else \ + XP->child[1] = R; \ + \ + } else \ + CORRUPTION_ERROR_ACTION(M); \ + if (R != 0) { \ + \ + if (RTCHECK(ok_address(M, R))) { \ + \ + tchunkptr C0, C1; \ + R->parent = XP; \ + if ((C0 = X->child[0]) != 0) { \ + \ + if (RTCHECK(ok_address(M, C0))) { \ + \ + R->child[0] = C0; \ + C0->parent = R; \ + \ + } else \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + if ((C1 = X->child[1]) != 0) { \ + \ + if (RTCHECK(ok_address(M, C1))) { \ + \ + R->child[1] = C1; \ + C1->parent = R; \ + \ + } else \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } else \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } \ + \ + } /* Relays to large vs small bin operations */ -#define insert_chunk(M, P, S)\ - if (is_small(S)) insert_small_chunk(M, P, S)\ - else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } - -#define unlink_chunk(M, P, S)\ - if (is_small(S)) unlink_small_chunk(M, P, S)\ - else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } + #define insert_chunk(M, P, S) \ + if (is_small(S)) insert_small_chunk(M, P, S) else { \ + \ + tchunkptr TP = (tchunkptr)(P); \ + insert_large_chunk(M, TP, S); \ + \ + } + #define unlink_chunk(M, P, S) \ + if (is_small(S)) 
unlink_small_chunk(M, P, S) else { \ + \ + tchunkptr TP = (tchunkptr)(P); \ + unlink_large_chunk(M, TP); \ + \ + } /* Relays to internal calls to malloc/free from realloc, memalign etc */ -#if ONLY_MSPACES -#define internal_malloc(m, b) mspace_malloc(m, b) -#define internal_free(m, mem) mspace_free(m,mem); -#else /* ONLY_MSPACES */ -#if MSPACES -#define internal_malloc(m, b)\ - ((m == gm)? dlmalloc(b) : mspace_malloc(m, b)) -#define internal_free(m, mem)\ - if (m == gm) dlfree(mem); else mspace_free(m,mem); -#else /* MSPACES */ -#define internal_malloc(m, b) dlmalloc(b) -#define internal_free(m, mem) dlfree(mem) -#endif /* MSPACES */ -#endif /* ONLY_MSPACES */ + #if ONLY_MSPACES + #define internal_malloc(m, b) mspace_malloc(m, b) + #define internal_free(m, mem) mspace_free(m, mem); + #else /* ONLY_MSPACES */ + #if MSPACES + #define internal_malloc(m, b) \ + ((m == gm) ? dlmalloc(b) : mspace_malloc(m, b)) + #define internal_free(m, mem) \ + if (m == gm) \ + dlfree(mem); \ + else \ + mspace_free(m, mem); + #else /* MSPACES */ + #define internal_malloc(m, b) dlmalloc(b) + #define internal_free(m, mem) dlfree(mem) + #endif /* MSPACES */ + #endif /* ONLY_MSPACES */ /* ----------------------- Direct-mmapping chunks ----------------------- */ @@ -3874,80 +4214,93 @@ static void internal_malloc_stats(mstate m) { */ /* Malloc using mmap */ -static void* mmap_alloc(mstate m, size_t nb) { +static void *mmap_alloc(mstate m, size_t nb) { + size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); if (m->footprint_limit != 0) { + size_t fp = m->footprint + mmsize; - if (fp <= m->footprint || fp > m->footprint_limit) - return 0; + if (fp <= m->footprint || fp > m->footprint_limit) return 0; + } - if (mmsize > nb) { /* Check for wrap around 0 */ - char* mm = (char*)(CALL_DIRECT_MMAP(mmsize)); + + if (mmsize > nb) { /* Check for wrap around 0 */ + char *mm = (char *)(CALL_DIRECT_MMAP(mmsize)); if (mm != CMFAIL) { - size_t offset = align_offset(chunk2mem(mm)); - size_t psize = mmsize - offset - MMAP_FOOT_PAD; + + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - MMAP_FOOT_PAD; mchunkptr p = (mchunkptr)(mm + offset); p->prev_foot = offset; p->head = psize; mark_inuse_foot(m, p, psize); chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; + chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0; - if (m->least_addr == 0 || mm < m->least_addr) - m->least_addr = mm; + if (m->least_addr == 0 || mm < m->least_addr) m->least_addr = mm; if ((m->footprint += mmsize) > m->max_footprint) m->max_footprint = m->footprint; assert(is_aligned(chunk2mem(p))); check_mmapped_chunk(m, p); return chunk2mem(p); + } + } + return 0; + } /* Realloc using mmap */ static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) { + size_t oldsize = chunksize(oldp); - (void)flags; /* placate people compiling -Wunused */ - if (is_small(nb)) /* Can't shrink mmap regions below small size */ + (void)flags; /* placate people compiling -Wunused */ + if (is_small(nb)) /* Can't shrink mmap regions below small size */ return 0; /* Keep old chunk if big enough but not too big */ if (oldsize >= nb + SIZE_T_SIZE && (oldsize - nb) <= (mparams.granularity << 1)) return oldp; else { + size_t offset = oldp->prev_foot; size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - char* cp = (char*)CALL_MREMAP((char*)oldp - offset, - oldmmsize, newmmsize, flags); + char * cp = + (char 
*)CALL_MREMAP((char *)oldp - offset, oldmmsize, newmmsize, flags); if (cp != CMFAIL) { + mchunkptr newp = (mchunkptr)(cp + offset); - size_t psize = newmmsize - offset - MMAP_FOOT_PAD; + size_t psize = newmmsize - offset - MMAP_FOOT_PAD; newp->head = psize; mark_inuse_foot(m, newp, psize); chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; + chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0; - if (cp < m->least_addr) - m->least_addr = cp; + if (cp < m->least_addr) m->least_addr = cp; if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) m->max_footprint = m->footprint; check_mmapped_chunk(m, newp); return newp; + } + } + return 0; -} +} /* -------------------------- mspace management -------------------------- */ /* Initialize top chunk and its size */ static void init_top(mstate m, mchunkptr p, size_t psize) { + /* Ensure alignment */ size_t offset = align_offset(chunk2mem(p)); - p = (mchunkptr)((char*)p + offset); + p = (mchunkptr)((char *)p + offset); psize -= offset; m->top = p; @@ -3955,23 +4308,29 @@ static void init_top(mstate m, mchunkptr p, size_t psize) { p->head = psize | PINUSE_BIT; /* set size of fake trailing chunk holding overhead space only once */ chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; - m->trim_check = mparams.trim_threshold; /* reset on each update */ + m->trim_check = mparams.trim_threshold; /* reset on each update */ + } /* Initialize bins for a new mstate that is otherwise zeroed out */ static void init_bins(mstate m) { + /* Establish circular links for smallbins */ bindex_t i; for (i = 0; i < NSMALLBINS; ++i) { - sbinptr bin = smallbin_at(m,i); + + sbinptr bin = smallbin_at(m, i); bin->fd = bin->bk = bin; + } + } -#if PROCEED_ON_ERROR + #if PROCEED_ON_ERROR /* default corruption action */ static void reset_on_error(mstate m) { + int i; ++malloc_corruption_error_count; /* Reinitialize fields to forget about all memory */ @@ -3984,67 +4343,78 @@ static void reset_on_error(mstate m) { for (i = 0; i < NTREEBINS; ++i) *treebin_at(m, i) = 0; init_bins(m); + } -#endif /* PROCEED_ON_ERROR */ + + #endif /* PROCEED_ON_ERROR */ /* Allocate chunk and prepend remainder with chunk in successor base. 
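(As a rough standalone sketch of the footprint-limit test performed by mmap_alloc() above; the helper name, the main() driver and the sample sizes are illustrative and not part of dlmalloc:

      #include <stddef.h>
      #include <stdio.h>

      // Refuse a new mapping of mmsize bytes if adding it would wrap the
      // footprint around zero or, when a nonzero limit is set, exceed it.
      static int footprint_would_exceed(size_t footprint, size_t limit,
                                        size_t mmsize) {
        size_t fp = footprint + mmsize;
        return fp <= footprint || (limit != 0 && fp > limit);
      }

      int main(void) {
        // 1 MiB already in use, 2 MiB limit: a 3 MiB mapping is refused.
        printf("%d\n", footprint_would_exceed((size_t)1 << 20, (size_t)2 << 20,
                                              (size_t)3 << 20));  // prints 1
        return 0;
      }

 The same shape of check reappears below in sys_alloc() before it grows the heap.)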
*/ -static void* prepend_alloc(mstate m, char* newbase, char* oldbase, - size_t nb) { +static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb) { + mchunkptr p = align_as_chunk(newbase); mchunkptr oldfirst = align_as_chunk(oldbase); - size_t psize = (char*)oldfirst - (char*)p; + size_t psize = (char *)oldfirst - (char *)p; mchunkptr q = chunk_plus_offset(p, nb); - size_t qsize = psize - nb; + size_t qsize = psize - nb; set_size_and_pinuse_of_inuse_chunk(m, p, nb); - assert((char*)oldfirst > (char*)q); + assert((char *)oldfirst > (char *)q); assert(pinuse(oldfirst)); assert(qsize >= MIN_CHUNK_SIZE); /* consolidate remainder with first chunk of old base */ if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; m->top = q; q->head = tsize | PINUSE_BIT; check_top_chunk(m, q); - } - else if (oldfirst == m->dv) { + + } else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; m->dv = q; set_size_and_pinuse_of_free_chunk(q, dsize); - } - else { + + } else { + if (!is_inuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); unlink_chunk(m, oldfirst, nsize); oldfirst = chunk_plus_offset(oldfirst, nsize); qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); insert_chunk(m, q, qsize); check_free_chunk(m, q); + } check_malloced_chunk(m, chunk2mem(p), nb); return chunk2mem(p); + } /* Add a segment to hold a new noncontiguous region */ -static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { +static void add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped) { + /* Determine locations and sizes of segment, fenceposts, old top */ - char* old_top = (char*)m->top; + char * old_top = (char *)m->top; msegmentptr oldsp = segment_holding(m, old_top); - char* old_end = oldsp->base + oldsp->size; - size_t ssize = pad_request(sizeof(struct malloc_segment)); - char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - size_t offset = align_offset(chunk2mem(rawsp)); - char* asp = rawsp + offset; - char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; - mchunkptr sp = (mchunkptr)csp; + char * old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char * rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char * asp = rawsp + offset; + char * csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? 
old_top : asp; + mchunkptr sp = (mchunkptr)csp; msegmentptr ss = (msegmentptr)(chunk2mem(sp)); - mchunkptr tnext = chunk_plus_offset(sp, ssize); - mchunkptr p = tnext; - int nfences = 0; + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + int nfences = 0; /* reset top to new space */ init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); @@ -4052,7 +4422,7 @@ static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { /* Set up segment record */ assert(is_aligned(ss)); set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); - *ss = m->seg; /* Push current record */ + *ss = m->seg; /* Push current record */ m->seg.base = tbase; m->seg.size = tsize; m->seg.sflags = mmapped; @@ -4060,53 +4430,61 @@ static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { /* Insert trailing fenceposts */ for (;;) { + mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); p->head = FENCEPOST_HEAD; ++nfences; - if ((char*)(&(nextp->head)) < old_end) + if ((char *)(&(nextp->head)) < old_end) p = nextp; else break; + } + assert(nfences >= 2); /* Insert the rest of old top into a bin as an ordinary free chunk */ if (csp != old_top) { + mchunkptr q = (mchunkptr)old_top; - size_t psize = csp - old_top; + size_t psize = csp - old_top; mchunkptr tn = chunk_plus_offset(q, psize); set_free_with_pinuse(q, psize, tn); insert_chunk(m, q, psize); + } check_top_chunk(m, m->top); + } /* -------------------------- System allocation -------------------------- */ /* Get memory from system using MORECORE or MMAP */ -static void* sys_alloc(mstate m, size_t nb) { - char* tbase = CMFAIL; +static void *sys_alloc(mstate m, size_t nb) { + + char * tbase = CMFAIL; size_t tsize = 0; flag_t mmap_flag = 0; - size_t asize; /* allocation size */ + size_t asize; /* allocation size */ ensure_initialization(); /* Directly map large chunks, but only if already initialized */ if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) { - void* mem = mmap_alloc(m, nb); - if (mem != 0) - return mem; + + void *mem = mmap_alloc(m, nb); + if (mem != 0) return mem; + } asize = granularity_align(nb + SYS_ALLOC_PADDING); - if (asize <= nb) - return 0; /* wraparound */ + if (asize <= nb) return 0; /* wraparound */ if (m->footprint_limit != 0) { + size_t fp = m->footprint + asize; - if (fp <= m->footprint || fp > m->footprint_limit) - return 0; + if (fp <= m->footprint || fp > m->footprint_limit) return 0; + } /* @@ -4132,91 +4510,119 @@ static void* sys_alloc(mstate m, size_t nb) { */ if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { - char* br = CMFAIL; - size_t ssize = asize; /* sbrk call size */ - msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); + + char * br = CMFAIL; + size_t ssize = asize; /* sbrk call size */ + msegmentptr ss = (m->top == 0) ? 
0 : segment_holding(m, (char *)m->top); ACQUIRE_MALLOC_GLOBAL_LOCK(); - if (ss == 0) { /* First time through or recovery */ - char* base = (char*)CALL_MORECORE(0); + if (ss == 0) { /* First time through or recovery */ + char *base = (char *)CALL_MORECORE(0); if (base != CMFAIL) { + size_t fp; /* Adjust to end on a page boundary */ if (!is_page_aligned(base)) ssize += (page_align((size_t)base) - (size_t)base); - fp = m->footprint + ssize; /* recheck limits */ + fp = m->footprint + ssize; /* recheck limits */ if (ssize > nb && ssize < HALF_MAX_SIZE_T && (m->footprint_limit == 0 || (fp > m->footprint && fp <= m->footprint_limit)) && - (br = (char*)(CALL_MORECORE(ssize))) == base) { + (br = (char *)(CALL_MORECORE(ssize))) == base) { + tbase = base; tsize = ssize; + } + } - } - else { + + } else { + /* Subtract out existing available top space from MORECORE request. */ ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING); /* Use mem here only if it did continuously extend old space */ if (ssize < HALF_MAX_SIZE_T && - (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) { + (br = (char *)(CALL_MORECORE(ssize))) == ss->base + ss->size) { + tbase = br; tsize = ssize; + } + } - if (tbase == CMFAIL) { /* Cope with partial failure */ - if (br != CMFAIL) { /* Try to use/extend the space we did get */ - if (ssize < HALF_MAX_SIZE_T && - ssize < nb + SYS_ALLOC_PADDING) { + if (tbase == CMFAIL) { /* Cope with partial failure */ + if (br != CMFAIL) { /* Try to use/extend the space we did get */ + if (ssize < HALF_MAX_SIZE_T && ssize < nb + SYS_ALLOC_PADDING) { + size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize); if (esize < HALF_MAX_SIZE_T) { - char* end = (char*)CALL_MORECORE(esize); + + char *end = (char *)CALL_MORECORE(esize); if (end != CMFAIL) ssize += esize; - else { /* Can't use; try to release */ - (void) CALL_MORECORE(-ssize); + else { /* Can't use; try to release */ + (void)CALL_MORECORE(-ssize); br = CMFAIL; + } + } + } + } - if (br != CMFAIL) { /* Use the space we did get */ + + if (br != CMFAIL) { /* Use the space we did get */ tbase = br; tsize = ssize; - } - else - disable_contiguous(m); /* Don't try contiguous path in the future */ + + } else + + disable_contiguous(m); /* Don't try contiguous path in the future */ + } RELEASE_MALLOC_GLOBAL_LOCK(); + } - if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ - char* mp = (char*)(CALL_MMAP(asize)); + if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ + char *mp = (char *)(CALL_MMAP(asize)); if (mp != CMFAIL) { + tbase = mp; tsize = asize; mmap_flag = USE_MMAP_BIT; + } + } - if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ + if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ if (asize < HALF_MAX_SIZE_T) { - char* br = CMFAIL; - char* end = CMFAIL; + + char *br = CMFAIL; + char *end = CMFAIL; ACQUIRE_MALLOC_GLOBAL_LOCK(); - br = (char*)(CALL_MORECORE(asize)); - end = (char*)(CALL_MORECORE(0)); + br = (char *)(CALL_MORECORE(asize)); + end = (char *)(CALL_MORECORE(0)); RELEASE_MALLOC_GLOBAL_LOCK(); if (br != CMFAIL && end != CMFAIL && br < end) { + size_t ssize = end - br; if (ssize > nb + TOP_FOOT_SIZE) { + tbase = br; tsize = ssize; + } + } + } + } if (tbase != CMFAIL) { @@ -4224,61 +4630,66 @@ static void* sys_alloc(mstate m, size_t nb) { if ((m->footprint += tsize) > m->max_footprint) m->max_footprint = m->footprint; - if (!is_initialized(m)) { /* first-time initialization */ - if (m->least_addr == 0 || tbase < m->least_addr) - m->least_addr = tbase; + if 
(!is_initialized(m)) { /* first-time initialization */ + if (m->least_addr == 0 || tbase < m->least_addr) m->least_addr = tbase; m->seg.base = tbase; m->seg.size = tsize; m->seg.sflags = mmap_flag; m->magic = mparams.magic; m->release_checks = MAX_RELEASE_CHECK_RATE; init_bins(m); -#if !ONLY_MSPACES + #if !ONLY_MSPACES if (is_global(m)) init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); else -#endif + #endif { + /* Offset top by embedded malloc_state */ mchunkptr mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); + init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE); + } + } else { + /* Try to merge with an existing segment */ msegmentptr sp = &m->seg; /* Only consider most recent segment if traversal suppressed */ while (sp != 0 && tbase != sp->base + sp->size) sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; - if (sp != 0 && - !is_extern_segment(sp) && + if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & USE_MMAP_BIT) == mmap_flag && - segment_holds(sp, m->top)) { /* append */ + segment_holds(sp, m->top)) { /* append */ sp->size += tsize; init_top(m, m->top, m->topsize + tsize); - } - else { - if (tbase < m->least_addr) - m->least_addr = tbase; + + } else { + + if (tbase < m->least_addr) m->least_addr = tbase; sp = &m->seg; while (sp != 0 && sp->base != tbase + tsize) sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; - if (sp != 0 && - !is_extern_segment(sp) && + if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & USE_MMAP_BIT) == mmap_flag) { - char* oldbase = sp->base; + + char *oldbase = sp->base; sp->base = tbase; sp->size += tsize; return prepend_alloc(m, tbase, oldbase, nb); - } - else + + } else + add_segment(m, tbase, tsize, mmap_flag); + } + } - if (nb < m->topsize) { /* Allocate from new or extended top space */ - size_t rsize = m->topsize -= nb; + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; mchunkptr p = m->top; mchunkptr r = m->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -4286,353 +4697,463 @@ static void* sys_alloc(mstate m, size_t nb) { check_top_chunk(m, m->top); check_malloced_chunk(m, chunk2mem(p), nb); return chunk2mem(p); + } + } MALLOC_FAILURE_ACTION; return 0; + } /* ----------------------- system deallocation -------------------------- */ /* Unmap and unlink any mmapped segments that don't contain used chunks */ static size_t release_unused_segments(mstate m) { - size_t released = 0; - int nsegs = 0; + + size_t released = 0; + int nsegs = 0; msegmentptr pred = &m->seg; msegmentptr sp = pred->next; while (sp != 0) { - char* base = sp->base; - size_t size = sp->size; + + char * base = sp->base; + size_t size = sp->size; msegmentptr next = sp->next; ++nsegs; if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { + mchunkptr p = align_as_chunk(base); - size_t psize = chunksize(p); + size_t psize = chunksize(p); /* Can unmap if first chunk holds entire segment and not pinned */ - if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { + if (!is_inuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) { + tchunkptr tp = (tchunkptr)p; - assert(segment_holds(sp, (char*)sp)); + assert(segment_holds(sp, (char *)sp)); if (p == m->dv) { + m->dv = 0; m->dvsize = 0; - } - else { + + } else { + unlink_large_chunk(m, tp); + } + if (CALL_MUNMAP(base, size) == 0) { + released += size; m->footprint -= size; /* unlink obsoleted record */ sp = pred; sp->next = next; - } - else { /* back out if cannot unmap */ + + } 
else { /* back out if cannot unmap */ + insert_large_chunk(m, tp, psize); + } + } + } - if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ + + if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ break; pred = sp; sp = next; + } + /* Reset check counter */ - m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)? - (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE); + m->release_checks = (((size_t)nsegs > (size_t)MAX_RELEASE_CHECK_RATE) + ? (size_t)nsegs + : (size_t)MAX_RELEASE_CHECK_RATE); return released; + } static int sys_trim(mstate m, size_t pad) { + size_t released = 0; ensure_initialization(); if (pad < MAX_REQUEST && is_initialized(m)) { - pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ if (m->topsize > pad) { + /* Shrink top space in granularity-size units, keeping at least one */ size_t unit = mparams.granularity; - size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - - SIZE_T_ONE) * unit; - msegmentptr sp = segment_holding(m, (char*)m->top); + size_t extra = + ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char *)m->top); if (!is_extern_segment(sp)) { + if (is_mmapped_segment(sp)) { - if (HAVE_MMAP && - sp->size >= extra && - !has_segment_link(m, sp)) { /* can't shrink if pinned */ + + if (HAVE_MMAP && sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ size_t newsize = sp->size - extra; - (void)newsize; /* placate people compiling -Wunused-variable */ + (void)newsize; /* placate people compiling -Wunused-variable */ /* Prefer mremap, fall back to munmap */ if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { + released = extra; + } + } - } - else if (HAVE_MORECORE) { - if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ + + } else if (HAVE_MORECORE) { + + if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; ACQUIRE_MALLOC_GLOBAL_LOCK(); { + /* Make sure end of memory is where we last set it. */ - char* old_br = (char*)(CALL_MORECORE(0)); + char *old_br = (char *)(CALL_MORECORE(0)); if (old_br == sp->base + sp->size) { - char* rel_br = (char*)(CALL_MORECORE(-extra)); - char* new_br = (char*)(CALL_MORECORE(0)); + + char *rel_br = (char *)(CALL_MORECORE(-extra)); + char *new_br = (char *)(CALL_MORECORE(0)); if (rel_br != CMFAIL && new_br < old_br) released = old_br - new_br; + } + } + RELEASE_MALLOC_GLOBAL_LOCK(); + } + } if (released != 0) { + sp->size -= released; m->footprint -= released; init_top(m, m->top, m->topsize - released); check_top_chunk(m, m->top); + } + } /* Unmap any unused mmapped segments */ - if (HAVE_MMAP) - released += release_unused_segments(m); + if (HAVE_MMAP) released += release_unused_segments(m); /* On failure, disable autotrim to avoid repeated failed future calls */ - if (released == 0 && m->topsize > m->trim_check) - m->trim_check = MAX_SIZE_T; + if (released == 0 && m->topsize > m->trim_check) m->trim_check = MAX_SIZE_T; + } - return (released != 0)? 1 : 0; + return (released != 0) ? 1 : 0; + } /* Consolidate and bin a chunk. Differs from exported versions of free mainly in that the chunk need not be marked as inuse. 
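(A small sketch of the trimming arithmetic used by sys_trim() above: it releases whole granularity units from the top chunk while always keeping at least one unit beyond the requested pad; sys_trim() first adds TOP_FOOT_SIZE to pad, which is omitted here, and the function and sample values below are illustrative only:

      #include <assert.h>
      #include <stddef.h>

      // extra = ((topsize - pad + (unit - 1)) / unit - 1) * unit, as in
      // sys_trim(): round the excess up to whole units, then hold one back.
      static size_t trim_extra(size_t topsize, size_t pad, size_t unit) {
        if (topsize <= pad) return 0;
        return ((topsize - pad + (unit - 1)) / unit - 1) * unit;
      }

      int main(void) {
        assert(trim_extra(10 * 4096, 4096, 4096) == 8 * 4096);  // 9 spare units, 8 released
        assert(trim_extra(2 * 4096, 4096, 4096) == 0);          // the single spare unit is kept
        return 0;
      }
)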
*/ static void dispose_chunk(mstate m, mchunkptr p, size_t psize) { + mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { + mchunkptr prev; - size_t prevsize = p->prev_foot; + size_t prevsize = p->prev_foot; if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) - m->footprint -= psize; + if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) m->footprint -= psize; return; + } + prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ if (p != m->dv) { + unlink_chunk(m, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { + + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + m->dvsize = psize; set_free_with_pinuse(p, psize, next); return; + } - } - else { + + } else { + CORRUPTION_ERROR_ACTION(m); return; + } + } + if (RTCHECK(ok_address(m, next))) { - if (!cinuse(next)) { /* consolidate forward */ + + if (!cinuse(next)) { /* consolidate forward */ if (next == m->top) { + size_t tsize = m->topsize += psize; m->top = p; p->head = tsize | PINUSE_BIT; if (p == m->dv) { + m->dv = 0; m->dvsize = 0; + } + return; - } - else if (next == m->dv) { + + } else if (next == m->dv) { + size_t dsize = m->dvsize += psize; m->dv = p; set_size_and_pinuse_of_free_chunk(p, dsize); return; - } - else { + + } else { + size_t nsize = chunksize(next); psize += nsize; unlink_chunk(m, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == m->dv) { + m->dvsize = psize; return; + } + } - } - else { + + } else { + set_free_with_pinuse(p, psize, next); + } + insert_chunk(m, p, psize); - } - else { + + } else { + CORRUPTION_ERROR_ACTION(m); + } + } /* ---------------------------- malloc --------------------------- */ /* allocate a large request from the best fitting chunk in a treebin */ -static void* tmalloc_large(mstate m, size_t nb) { +static void *tmalloc_large(mstate m, size_t nb) { + tchunkptr v = 0; - size_t rsize = -nb; /* Unsigned negation */ + size_t rsize = -nb; /* Unsigned negation */ tchunkptr t; - bindex_t idx; + bindex_t idx; compute_tree_index(nb, idx); if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ - size_t sizebits = nb << leftshift_for_tree_index(idx); - tchunkptr rst = 0; /* The deepest untaken right subtree */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ for (;;) { + tchunkptr rt; - size_t trem = chunksize(t) - nb; + size_t trem = chunksize(t) - nb; if (trem < rsize) { + v = t; - if ((rsize = trem) == 0) - break; + if ((rsize = trem) == 0) break; + } + rt = t->child[1]; - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; - if (rt != 0 && rt != t) - rst = rt; + t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) rst = rt; if (t == 0) { - t = rst; /* set t to least subtree holding sizes > nb */ + + t = rst; /* set t to least subtree holding sizes > nb */ break; + } + sizebits <<= 1; + } + } - if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; if (leftbits != 0) { + bindex_t i; binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); t = *treebin_at(m, i); + } + } - while (t != 0) { /* find smallest of tree or subtree */ + 
while (t != 0) { /* find smallest of tree or subtree */ size_t trem = chunksize(t) - nb; if (trem < rsize) { + rsize = trem; v = t; + } + t = leftmost_child(t); + } /* If dv is a better fit, return 0 so malloc will use it */ if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { - if (RTCHECK(ok_address(m, v))) { /* split */ + + if (RTCHECK(ok_address(m, v))) { /* split */ mchunkptr r = chunk_plus_offset(v, nb); assert(chunksize(v) == rsize + nb); if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); if (rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(m, v, (rsize + nb)); else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); set_size_and_pinuse_of_free_chunk(r, rsize); insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + } + CORRUPTION_ERROR_ACTION(m); + } + return 0; + } /* allocate a small request from the best fitting chunk in a treebin */ -static void* tmalloc_small(mstate m, size_t nb) { +static void *tmalloc_small(mstate m, size_t nb) { + tchunkptr t, v; - size_t rsize; - bindex_t i; - binmap_t leastbit = least_bit(m->treemap); + size_t rsize; + bindex_t i; + binmap_t leastbit = least_bit(m->treemap); compute_bit2idx(leastbit, i); v = t = *treebin_at(m, i); rsize = chunksize(t) - nb; while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; if (trem < rsize) { + rsize = trem; v = t; + } + } if (RTCHECK(ok_address(m, v))) { + mchunkptr r = chunk_plus_offset(v, nb); assert(chunksize(v) == rsize + nb); if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); if (rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(m, v, (rsize + nb)); else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(m, r, rsize); + } + return chunk2mem(v); + } + } CORRUPTION_ERROR_ACTION(m); return 0; -} -#if !ONLY_MSPACES +} -void* dlmalloc(size_t bytes) { - /* - Basic algorithm: - If a small request (< 256 bytes minus per-chunk overhead): - 1. If one exists, use a remainderless chunk in associated smallbin. - (Remainderless means that there are too few excess bytes to - represent as a chunk.) - 2. If it is big enough, use the dv chunk, which is normally the - chunk adjacent to the one used for the most recent small request. - 3. If one exists, split the smallest available chunk in a bin, - saving remainder in dv. - 4. If it is big enough, use the top chunk. - 5. If available, get memory from system and use it - Otherwise, for a large request: - 1. Find the smallest available binned chunk that fits, and use it - if it is better fitting than dv chunk, splitting if necessary. - 2. If better fitting than any binned chunk, use the dv chunk. - 3. If it is big enough, use the top chunk. - 4. If request size >= mmap threshold, try to directly mmap this chunk. - 5. If available, get memory from system and use it - - The ugly goto's here ensure that postaction occurs along all paths. - */ + #if !ONLY_MSPACES + +void *dlmalloc(size_t bytes) { + + /* + Basic algorithm: + If a small request (< 256 bytes minus per-chunk overhead): + 1. If one exists, use a remainderless chunk in associated smallbin. + (Remainderless means that there are too few excess bytes to + represent as a chunk.) + 2. If it is big enough, use the dv chunk, which is normally the + chunk adjacent to the one used for the most recent small request. + 3. If one exists, split the smallest available chunk in a bin, + saving remainder in dv. + 4. If it is big enough, use the top chunk. + 5. If available, get memory from system and use it + Otherwise, for a large request: + 1. 
Find the smallest available binned chunk that fits, and use it + if it is better fitting than dv chunk, splitting if necessary. + 2. If better fitting than any binned chunk, use the dv chunk. + 3. If it is big enough, use the top chunk. + 4. If request size >= mmap threshold, try to directly mmap this chunk. + 5. If available, get memory from system and use it + + The ugly goto's here ensure that postaction occurs along all paths. + */ -#if USE_LOCKS - ensure_initialization(); /* initialize in sys_alloc if not using locks */ -#endif + #if USE_LOCKS + ensure_initialization(); /* initialize in sys_alloc if not using locks */ + #endif if (!PREACTION(gm)) { - void* mem; + + void * mem; size_t nb; if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; binmap_t smallbits; - nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes); idx = small_index(nb); smallbits = gm->smallmap >> idx; - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ + idx += ~smallbits & 1; /* Uses next bin if idx empty */ b = smallbin_at(gm, idx); p = b->fd; assert(chunksize(p) == small_index2size(idx)); @@ -4641,15 +5162,17 @@ void* dlmalloc(size_t bytes) { mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; + } else if (nb > gm->dvsize) { - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ mchunkptr b, p, r; - size_t rsize; - bindex_t i; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - binmap_t leastbit = least_bit(leftbits); + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); b = smallbin_at(gm, i); p = b->fd; @@ -4660,54 +5183,71 @@ void* dlmalloc(size_t bytes) { if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(gm, p, small_index2size(i)); else { + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); r = chunk_plus_offset(p, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(gm, r, rsize); + } + mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; + } else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); goto postaction; + } + } - } - else if (bytes >= MAX_REQUEST) + + } else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. 
Force failure (in sys alloc) */ else { + nb = pad_request(bytes); if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); goto postaction; + } + } if (nb <= gm->dvsize) { - size_t rsize = gm->dvsize - nb; + + size_t rsize = gm->dvsize - nb; mchunkptr p = gm->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ mchunkptr r = gm->dv = chunk_plus_offset(p, nb); gm->dvsize = rsize; set_size_and_pinuse_of_free_chunk(r, rsize); set_size_and_pinuse_of_inuse_chunk(gm, p, nb); - } - else { /* exhaust dv */ + + } else { /* exhaust dv */ + size_t dvs = gm->dvsize; gm->dvsize = 0; gm->dv = 0; set_inuse_and_pinuse(gm, p, dvs); + } + mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; + } - else if (nb < gm->topsize) { /* Split top */ - size_t rsize = gm->topsize -= nb; + else if (nb < gm->topsize) { /* Split top */ + size_t rsize = gm->topsize -= nb; mchunkptr p = gm->top; mchunkptr r = gm->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -4716,6 +5256,7 @@ void* dlmalloc(size_t bytes) { check_top_chunk(gm, gm->top); check_malloced_chunk(gm, mem, nb); goto postaction; + } mem = sys_alloc(gm, nb); @@ -4723,14 +5264,17 @@ void* dlmalloc(size_t bytes) { postaction: POSTACTION(gm); return mem; + } return 0; + } /* ---------------------------- free --------------------------- */ -void dlfree(void* mem) { +void dlfree(void *mem) { + /* Consolidate freed chunks with preceeding or succeeding bordering free chunks, if they exist, and then place in a bin. Intermixed @@ -4738,164 +5282,216 @@ void dlfree(void* mem) { */ if (mem != 0) { - mchunkptr p = mem2chunk(mem); -#if FOOTERS + + mchunkptr p = mem2chunk(mem); + #if FOOTERS mstate fm = get_mstate_for(p); if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); return; + } -#else /* FOOTERS */ -#define fm gm -#endif /* FOOTERS */ + + #else /* FOOTERS */ + #define fm gm + #endif /* FOOTERS */ if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { - size_t psize = chunksize(p); + + size_t psize = chunksize(p); mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { + size_t prevsize = p->prev_foot; if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) fm->footprint -= psize; goto postaction; - } - else { + + } else { + mchunkptr prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { + + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; set_free_with_pinuse(p, psize, next); goto postaction; + } - } - else + + } else + goto erroraction; + } + } if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { - if (!cinuse(next)) { /* consolidate forward */ + + if (!cinuse(next)) { /* consolidate forward */ if (next == fm->top) { + size_t tsize = fm->topsize += psize; fm->top = p; p->head = tsize | PINUSE_BIT; if (p == fm->dv) { + fm->dv = 0; fm->dvsize = 0; + } - if (should_trim(fm, tsize)) - sys_trim(fm, 0); + + if (should_trim(fm, tsize)) sys_trim(fm, 0); goto postaction; - } - else if (next == fm->dv) { + + } else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; fm->dv = p; set_size_and_pinuse_of_free_chunk(p, 
dsize); goto postaction; - } - else { + + } else { + size_t nsize = chunksize(next); psize += nsize; unlink_chunk(fm, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == fm->dv) { + fm->dvsize = psize; goto postaction; + } + } - } - else + + } else + set_free_with_pinuse(p, psize, next); if (is_small(psize)) { + insert_small_chunk(fm, p, psize); check_free_chunk(fm, p); - } - else { + + } else { + tchunkptr tp = (tchunkptr)p; insert_large_chunk(fm, tp, psize); check_free_chunk(fm, p); - if (--fm->release_checks == 0) - release_unused_segments(fm); + if (--fm->release_checks == 0) release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: USAGE_ERROR_ACTION(fm, p); postaction: POSTACTION(fm); + } + } -#if !FOOTERS -#undef fm -#endif /* FOOTERS */ + + #if !FOOTERS + #undef fm + #endif /* FOOTERS */ + } -void* dlcalloc(size_t n_elements, size_t elem_size) { - void* mem; +void *dlcalloc(size_t n_elements, size_t elem_size) { + + void * mem; size_t req = 0; if (n_elements != 0) { + req = n_elements * elem_size; if (((n_elements | elem_size) & ~(size_t)0xffff) && (req / n_elements != elem_size)) - req = MAX_SIZE_T; /* force downstream failure on overflow */ + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = dlmalloc(req); if (mem != 0 && calloc_must_clear(mem2chunk(mem))) __builtin_memset(mem, 0, req); return mem; + } -#endif /* !ONLY_MSPACES */ + #endif /* !ONLY_MSPACES */ /* ------------ Internal support for realloc, memalign, etc -------------- */ /* Try to realloc; only in-place unless can_move true */ static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, int can_move) { + mchunkptr newp = 0; - size_t oldsize = chunksize(p); + size_t oldsize = chunksize(p); mchunkptr next = chunk_plus_offset(p, oldsize); - if (RTCHECK(ok_address(m, p) && ok_inuse(p) && - ok_next(p, next) && ok_pinuse(next))) { + if (RTCHECK(ok_address(m, p) && ok_inuse(p) && ok_next(p, next) && + ok_pinuse(next))) { + if (is_mmapped(p)) { + newp = mmap_resize(m, p, nb, can_move); - } - else if (oldsize >= nb) { /* already big enough */ + + } else if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; - if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ + if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ mchunkptr r = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, r, rsize); dispose_chunk(m, r, rsize); + } + newp = p; - } - else if (next == m->top) { /* extend into top */ + + } else if (next == m->top) { /* extend into top */ + if (oldsize + m->topsize > nb) { - size_t newsize = oldsize + m->topsize; - size_t newtopsize = newsize - nb; + + size_t newsize = oldsize + m->topsize; + size_t newtopsize = newsize - nb; mchunkptr newtop = chunk_plus_offset(p, nb); set_inuse(m, p, nb); - newtop->head = newtopsize |PINUSE_BIT; + newtop->head = newtopsize | PINUSE_BIT; m->top = newtop; m->topsize = newtopsize; newp = p; + } - } - else if (next == m->dv) { /* extend into dv */ + + } else if (next == m->dv) { /* extend into dv */ + size_t dvs = m->dvsize; if (oldsize + dvs >= nb) { + size_t dsize = oldsize + dvs - nb; if (dsize >= MIN_CHUNK_SIZE) { + mchunkptr r = chunk_plus_offset(p, nb); mchunkptr n = chunk_plus_offset(r, dsize); set_inuse(m, p, nb); @@ -4903,64 +5499,87 @@ static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, clear_pinuse(n); m->dvsize = dsize; m->dv = r; - } - else { /* exhaust dv */ + + } else { /* exhaust dv */ + size_t newsize = oldsize + dvs; set_inuse(m, p, newsize); m->dvsize = 
0; m->dv = 0; + } + newp = p; + } - } - else if (!cinuse(next)) { /* extend into next free chunk */ + + } else if (!cinuse(next)) { /* extend into next free chunk */ + size_t nextsize = chunksize(next); if (oldsize + nextsize >= nb) { + size_t rsize = oldsize + nextsize - nb; unlink_chunk(m, next, nextsize); if (rsize < MIN_CHUNK_SIZE) { + size_t newsize = oldsize + nextsize; set_inuse(m, p, newsize); - } - else { + + } else { + mchunkptr r = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, r, rsize); dispose_chunk(m, r, rsize); + } + newp = p; + } + } - } - else { + + } else { + USAGE_ERROR_ACTION(m, chunk2mem(p)); + } + return newp; + } -static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { - void* mem = 0; - if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ +static void *internal_memalign(mstate m, size_t alignment, size_t bytes) { + + void *mem = 0; + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ alignment = MIN_CHUNK_SIZE; - if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ + if ((alignment & (alignment - SIZE_T_ONE)) != 0) { /* Ensure a power of 2 */ size_t a = MALLOC_ALIGNMENT << 1; - while (a < alignment) a <<= 1; + while (a < alignment) + a <<= 1; alignment = a; + } + if (bytes >= MAX_REQUEST - alignment) { - if (m != 0) { /* Test isn't needed but avoids compiler warning */ + + if (m != 0) { /* Test isn't needed but avoids compiler warning */ MALLOC_FAILURE_ACTION; + } - } - else { + + } else { + size_t nb = request2size(bytes); size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; mem = internal_malloc(m, req); if (mem != 0) { + mchunkptr p = mem2chunk(mem); - if (PREACTION(m)) - return 0; - if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ + if (PREACTION(m)) return 0; + if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ /* Find an aligned spot inside chunk. Since we need to give back leading space in a chunk of at least MIN_CHUNK_SIZE, if @@ -4969,47 +5588,59 @@ static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { We've allocated enough total room so that this is always possible. */ - char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment - - SIZE_T_ONE)) & - -alignment)); - char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? - br : br+alignment; + char * br = (char *)mem2chunk((size_t)( + ((size_t)((char *)mem + alignment - SIZE_T_ONE)) & -alignment)); + char * pos = ((size_t)(br - (char *)(p)) >= MIN_CHUNK_SIZE) + ? 
br + : br + alignment; mchunkptr newp = (mchunkptr)pos; - size_t leadsize = pos - (char*)(p); - size_t newsize = chunksize(p) - leadsize; + size_t leadsize = pos - (char *)(p); + size_t newsize = chunksize(p) - leadsize; - if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ + if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ newp->prev_foot = p->prev_foot + leadsize; newp->head = newsize; - } - else { /* Otherwise, give back leader, use the rest */ + + } else { /* Otherwise, give back leader, use the rest */ + set_inuse(m, newp, newsize); set_inuse(m, p, leadsize); dispose_chunk(m, p, leadsize); + } + p = newp; + } /* Give back spare room at the end */ if (!is_mmapped(p)) { + size_t size = chunksize(p); if (size > nb + MIN_CHUNK_SIZE) { - size_t remainder_size = size - nb; + + size_t remainder_size = size - nb; mchunkptr remainder = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, remainder, remainder_size); dispose_chunk(m, remainder, remainder_size); + } + } mem = chunk2mem(p); - assert (chunksize(p) >= nb); + assert(chunksize(p) >= nb); assert(((size_t)mem & (alignment - 1)) == 0); check_inuse_chunk(m, p); POSTACTION(m); + } + } + return mem; + } /* @@ -5019,50 +5650,50 @@ static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { bit 0 set if all elements are same size (using sizes[0]) bit 1 set if elements should be zeroed */ -static void** ialloc(mstate m, - size_t n_elements, - size_t* sizes, - int opts, - void* chunks[]) { - - size_t element_size; /* chunksize of each element, if all same */ - size_t contents_size; /* total size of elements */ - size_t array_size; /* request size of pointer array */ - void* mem; /* malloced aggregate space */ - mchunkptr p; /* corresponding chunk */ - size_t remainder_size; /* remaining bytes while splitting */ - void** marray; /* either "chunks" or malloced ptr array */ - mchunkptr array_chunk; /* chunk for malloced ptr array */ - flag_t was_enabled; /* to disable mmap */ +static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts, + void *chunks[]) { + + size_t element_size; /* chunksize of each element, if all same */ + size_t contents_size; /* total size of elements */ + size_t array_size; /* request size of pointer array */ + void * mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + size_t remainder_size; /* remaining bytes while splitting */ + void ** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + flag_t was_enabled; /* to disable mmap */ size_t size; size_t i; ensure_initialization(); /* compute array length, if needed */ if (chunks != 0) { - if (n_elements == 0) - return chunks; /* nothing to do */ + + if (n_elements == 0) return chunks; /* nothing to do */ marray = chunks; array_size = 0; - } - else { + + } else { + /* if empty req, must still return chunk representing empty array */ - if (n_elements == 0) - return (void**)internal_malloc(m, 0); + if (n_elements == 0) return (void **)internal_malloc(m, 0); marray = 0; - array_size = request2size(n_elements * (sizeof(void*))); + array_size = request2size(n_elements * (sizeof(void *))); + } /* compute total element size */ - if (opts & 0x1) { /* all-same-size */ + if (opts & 0x1) { /* all-same-size */ element_size = request2size(*sizes); contents_size = n_elements * element_size; - } - else { /* add up all the sizes */ + + } else { /* add up all the sizes */ + element_size = 0; contents_size = 0; for (i = 0; i != n_elements; 
++i) contents_size += request2size(sizes[i]); + } size = contents_size + array_size; @@ -5075,10 +5706,8 @@ static void** ialloc(mstate m, was_enabled = use_mmap(m); disable_mmap(m); mem = internal_malloc(m, size - CHUNK_OVERHEAD); - if (was_enabled) - enable_mmap(m); - if (mem == 0) - return 0; + if (was_enabled) enable_mmap(m); + if (mem == 0) return 0; if (PREACTION(m)) return 0; p = mem2chunk(mem); @@ -5086,24 +5715,30 @@ static void** ialloc(mstate m, assert(!is_mmapped(p)); - if (opts & 0x2) { /* optionally clear the elements */ - __builtin_memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); + if (opts & 0x2) { /* optionally clear the elements */ + __builtin_memset((size_t *)mem, 0, + remainder_size - SIZE_T_SIZE - array_size); + } /* If not provided, allocate the pointer array as final part of chunk */ if (marray == 0) { - size_t array_chunk_size; + + size_t array_chunk_size; array_chunk = chunk_plus_offset(p, contents_size); array_chunk_size = remainder_size - contents_size; - marray = (void**) (chunk2mem(array_chunk)); + marray = (void **)(chunk2mem(array_chunk)); set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); remainder_size = contents_size; + } /* split out elements */ - for (i = 0; ; ++i) { + for (i = 0;; ++i) { + marray[i] = chunk2mem(p); - if (i != n_elements-1) { + if (i != n_elements - 1) { + if (element_size != 0) size = element_size; else @@ -5111,31 +5746,42 @@ static void** ialloc(mstate m, remainder_size -= size; set_size_and_pinuse_of_inuse_chunk(m, p, size); p = chunk_plus_offset(p, size); - } - else { /* the final element absorbs any overallocation slop */ + + } else { /* the final element absorbs any overallocation slop */ + set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); break; + } + } -#if DEBUG + #if DEBUG if (marray != chunks) { + /* final element must have exactly exhausted chunk */ if (element_size != 0) { + assert(remainder_size == element_size); - } - else { + + } else { + assert(remainder_size == request2size(sizes[i])); + } + check_inuse_chunk(m, mem2chunk(marray)); + } + for (i = 0; i != n_elements; ++i) check_inuse_chunk(m, mem2chunk(marray[i])); -#endif /* DEBUG */ + #endif /* DEBUG */ POSTACTION(m); return marray; + } /* Try to free all pointers in the given array. @@ -5145,316 +5791,431 @@ static void** ialloc(mstate m, chunks before freeing, which will occur often if allocated with ialloc or the array is sorted. 
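(A usage sketch for the bulk-free path defined below; it assumes the file is built with the default !ONLY_MSPACES so that dlmalloc() and dlbulk_free() are exported, and the prototypes are written out by hand instead of taken from malloc-2.8.6.h:

      #include <stddef.h>

      void  *dlmalloc(size_t bytes);
      size_t dlbulk_free(void *array[], size_t nelem);

      int main(void) {
        void  *ptrs[8];
        size_t i, unfreed;
        for (i = 0; i < 8; i++)
          ptrs[i] = dlmalloc(64);
        // One pass over the array: neighbouring chunks can be merged before
        // they are binned, which is cheaper than eight separate dlfree() calls.
        unfreed = dlbulk_free(ptrs, 8);
        (void)unfreed;  // count of pointers that could not be freed; 0 here
        return 0;
      }

 dlbulk_free() also clears each array slot as it frees it, so the array can be reused safely afterwards.)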
*/ -static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) { +static size_t internal_bulk_free(mstate m, void *array[], size_t nelem) { + size_t unfreed = 0; if (!PREACTION(m)) { - void** a; - void** fence = &(array[nelem]); + + void **a; + void **fence = &(array[nelem]); for (a = array; a != fence; ++a) { - void* mem = *a; + + void *mem = *a; if (mem != 0) { + mchunkptr p = mem2chunk(mem); - size_t psize = chunksize(p); -#if FOOTERS + size_t psize = chunksize(p); + #if FOOTERS if (get_mstate_for(p) != m) { + ++unfreed; continue; + } -#endif + + #endif check_inuse_chunk(m, p); *a = 0; if (RTCHECK(ok_address(m, p) && ok_inuse(p))) { - void ** b = a + 1; /* try to merge with next chunk */ + + void ** b = a + 1; /* try to merge with next chunk */ mchunkptr next = next_chunk(p); if (b != fence && *b == chunk2mem(next)) { + size_t newsize = chunksize(next) + psize; set_inuse(m, p, newsize); *b = chunk2mem(p); - } - else + + } else + dispose_chunk(m, p, psize); - } - else { + + } else { + CORRUPTION_ERROR_ACTION(m); break; + } + } + } - if (should_trim(m, m->topsize)) - sys_trim(m, 0); + + if (should_trim(m, m->topsize)) sys_trim(m, 0); POSTACTION(m); + } + return unfreed; + } -/* Traversal */ -#if MALLOC_INSPECT_ALL + /* Traversal */ + #if MALLOC_INSPECT_ALL static void internal_inspect_all(mstate m, - void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { + void (*handler)(void *start, void *end, + size_t used_bytes, + void * callback_arg), + void *arg) { + if (is_initialized(m)) { - mchunkptr top = m->top; + + mchunkptr top = m->top; msegmentptr s; for (s = &m->seg; s != 0; s = s->next) { + mchunkptr q = align_as_chunk(s->base); while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) { + mchunkptr next = next_chunk(q); - size_t sz = chunksize(q); - size_t used; - void* start; + size_t sz = chunksize(q); + size_t used; + void * start; if (is_inuse(q)) { - used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ + + used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ start = chunk2mem(q); - } - else { + + } else { + used = 0; - if (is_small(sz)) { /* offset by possible bookkeeping */ - start = (void*)((char*)q + sizeof(struct malloc_chunk)); - } - else { - start = (void*)((char*)q + sizeof(struct malloc_tree_chunk)); + if (is_small(sz)) { /* offset by possible bookkeeping */ + start = (void *)((char *)q + sizeof(struct malloc_chunk)); + + } else { + + start = (void *)((char *)q + sizeof(struct malloc_tree_chunk)); + } + } - if (start < (void*)next) /* skip if all space is bookkeeping */ + + if (start < (void *)next) /* skip if all space is bookkeeping */ handler(start, next, used, arg); - if (q == top) - break; + if (q == top) break; q = next; + } + } + } + } -#endif /* MALLOC_INSPECT_ALL */ + + #endif /* MALLOC_INSPECT_ALL */ /* ------------------ Exported realloc, memalign, etc -------------------- */ -#if !ONLY_MSPACES + #if !ONLY_MSPACES -void* dlrealloc(void* oldmem, size_t bytes) { - void* mem = 0; +void *dlrealloc(void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem == 0) { + mem = dlmalloc(bytes); - } - else if (bytes >= MAX_REQUEST) { + + } else if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } -#ifdef REALLOC_ZERO_BYTES_FREES + + #ifdef REALLOC_ZERO_BYTES_FREES else if (bytes == 0) { + dlfree(oldmem); + } -#endif /* REALLOC_ZERO_BYTES_FREES */ + + #endif /* REALLOC_ZERO_BYTES_FREES */ else { - size_t nb = request2size(bytes); + + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); -#if ! 
FOOTERS + #if !FOOTERS mstate m = gm; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); POSTACTION(m); if (newp != 0) { + check_inuse_chunk(m, newp); mem = chunk2mem(newp); - } - else { + + } else { + mem = internal_malloc(m, bytes); if (mem != 0) { + size_t oc = chunksize(oldp) - overhead_for(oldp); - __builtin_memcpy(mem, oldmem, (oc < bytes)? oc : bytes); + __builtin_memcpy(mem, oldmem, (oc < bytes) ? oc : bytes); internal_free(m, oldmem); + } + } + } + } + return mem; + } -void* dlrealloc_in_place(void* oldmem, size_t bytes) { - void* mem = 0; +void *dlrealloc_in_place(void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem != 0) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; - } - else { - size_t nb = request2size(bytes); + + } else { + + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); -#if ! FOOTERS + #if !FOOTERS mstate m = gm; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); POSTACTION(m); if (newp == oldp) { + check_inuse_chunk(m, newp); mem = oldmem; + } + } + } + } + return mem; + } -void* dlmemalign(size_t alignment, size_t bytes) { - if (alignment <= MALLOC_ALIGNMENT) { - return dlmalloc(bytes); - } +void *dlmemalign(size_t alignment, size_t bytes) { + + if (alignment <= MALLOC_ALIGNMENT) { return dlmalloc(bytes); } return internal_memalign(gm, alignment, bytes); + } -int dlposix_memalign(void** pp, size_t alignment, size_t bytes) { - void* mem = 0; +int dlposix_memalign(void **pp, size_t alignment, size_t bytes) { + + void *mem = 0; if (alignment == MALLOC_ALIGNMENT) mem = dlmalloc(bytes); else { - size_t d = alignment / sizeof(void*); - size_t r = alignment % sizeof(void*); - if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0) + + size_t d = alignment / sizeof(void *); + size_t r = alignment % sizeof(void *); + if (r != 0 || d == 0 || (d & (d - SIZE_T_ONE)) != 0) return EINVAL; else if (bytes <= MAX_REQUEST - alignment) { - if (alignment < MIN_CHUNK_SIZE) - alignment = MIN_CHUNK_SIZE; + + if (alignment < MIN_CHUNK_SIZE) alignment = MIN_CHUNK_SIZE; mem = internal_memalign(gm, alignment, bytes); + } + } + if (mem == 0) return ENOMEM; else { + *pp = mem; return 0; + } + } -void* dlvalloc(size_t bytes) { +void *dlvalloc(size_t bytes) { + size_t pagesz; ensure_initialization(); pagesz = mparams.page_size; return dlmemalign(pagesz, bytes); + } -void* dlpvalloc(size_t bytes) { +void *dlpvalloc(size_t bytes) { + size_t pagesz; ensure_initialization(); pagesz = mparams.page_size; - return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); + return dlmemalign(pagesz, + (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); + } -void** dlindependent_calloc(size_t n_elements, size_t elem_size, - void* chunks[]) { - size_t sz = elem_size; /* serves as 1-element array */ +void **dlindependent_calloc(size_t n_elements, size_t elem_size, + void *chunks[]) { + + size_t sz = elem_size; /* serves as 1-element array */ return ialloc(gm, n_elements, &sz, 3, chunks); + } -void** dlindependent_comalloc(size_t n_elements, size_t sizes[], - void* chunks[]) { +void **dlindependent_comalloc(size_t n_elements, size_t 
sizes[], + void *chunks[]) { + return ialloc(gm, n_elements, sizes, 0, chunks); + } -size_t dlbulk_free(void* array[], size_t nelem) { +size_t dlbulk_free(void *array[], size_t nelem) { + return internal_bulk_free(gm, array, nelem); + } -#if MALLOC_INSPECT_ALL -void dlmalloc_inspect_all(void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { + #if MALLOC_INSPECT_ALL +void dlmalloc_inspect_all(void (*handler)(void *start, void *end, + size_t used_bytes, + void * callback_arg), + void *arg) { + ensure_initialization(); if (!PREACTION(gm)) { + internal_inspect_all(gm, handler, arg); POSTACTION(gm); + } + } -#endif /* MALLOC_INSPECT_ALL */ + + #endif /* MALLOC_INSPECT_ALL */ int dlmalloc_trim(size_t pad) { + int result = 0; ensure_initialization(); if (!PREACTION(gm)) { + result = sys_trim(gm, pad); POSTACTION(gm); + } + return result; + } size_t dlmalloc_footprint(void) { + return gm->footprint; + } size_t dlmalloc_max_footprint(void) { + return gm->max_footprint; + } size_t dlmalloc_footprint_limit(void) { + size_t maf = gm->footprint_limit; return maf == 0 ? MAX_SIZE_T : maf; + } size_t dlmalloc_set_footprint_limit(size_t bytes) { - size_t result; /* invert sense of 0 */ - if (bytes == 0) - result = granularity_align(1); /* Use minimal size */ + + size_t result; /* invert sense of 0 */ + if (bytes == 0) result = granularity_align(1); /* Use minimal size */ if (bytes == MAX_SIZE_T) - result = 0; /* disable */ + result = 0; /* disable */ else result = granularity_align(bytes); return gm->footprint_limit = result; + } -#if !NO_MALLINFO + #if !NO_MALLINFO struct mallinfo dlmallinfo(void) { + return internal_mallinfo(gm); + } -#endif /* NO_MALLINFO */ -#if !NO_MALLOC_STATS + #endif /* NO_MALLINFO */ + + #if !NO_MALLOC_STATS void dlmalloc_stats() { + internal_malloc_stats(gm); + } -#endif /* NO_MALLOC_STATS */ + + #endif /* NO_MALLOC_STATS */ int dlmallopt(int param_number, int value) { + return change_mparam(param_number, value); + } -size_t dlmalloc_usable_size(void* mem) { +size_t dlmalloc_usable_size(void *mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); - if (is_inuse(p)) - return chunksize(p) - overhead_for(p); + if (is_inuse(p)) return chunksize(p) - overhead_for(p); + } + return 0; + } -#endif /* !ONLY_MSPACES */ + #endif /* !ONLY_MSPACES */ /* ----------------------------- user mspaces ---------------------------- */ -#if MSPACES + #if MSPACES + +static mstate init_user_mstate(char *tbase, size_t tsize) { -static mstate init_user_mstate(char* tbase, size_t tsize) { - size_t msize = pad_request(sizeof(struct malloc_state)); + size_t msize = pad_request(sizeof(struct malloc_state)); mchunkptr mn; mchunkptr msp = align_as_chunk(tbase); - mstate m = (mstate)(chunk2mem(msp)); + mstate m = (mstate)(chunk2mem(msp)); __builtin_memset(m, 0, msize); (void)INITIAL_LOCK(&m->mutex); - msp->head = (msize|INUSE_BITS); + msp->head = (msize | INUSE_BITS); m->seg.base = m->least_addr = tbase; m->seg.size = m->footprint = m->max_footprint = tsize; m->magic = mparams.magic; @@ -5465,82 +6226,111 @@ static mstate init_user_mstate(char* tbase, size_t tsize) { disable_contiguous(m); init_bins(m); mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); + init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE); check_top_chunk(m, m->top); return m; + } mspace create_mspace(size_t capacity, int locked) { + mstate m = 0; size_t msize; ensure_initialization(); msize = pad_request(sizeof(struct 
malloc_state)); - if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { - size_t rs = ((capacity == 0)? mparams.granularity : - (capacity + TOP_FOOT_SIZE + msize)); + if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) { + + size_t rs = ((capacity == 0) ? mparams.granularity + : (capacity + TOP_FOOT_SIZE + msize)); size_t tsize = granularity_align(rs); - char* tbase = (char*)(CALL_MMAP(tsize)); + char * tbase = (char *)(CALL_MMAP(tsize)); if (tbase != CMFAIL) { + m = init_user_mstate(tbase, tsize); m->seg.sflags = USE_MMAP_BIT; set_lock(m, locked); + } + } + return (mspace)m; + } -mspace create_mspace_with_base(void* base, size_t capacity, int locked) { +mspace create_mspace_with_base(void *base, size_t capacity, int locked) { + mstate m = 0; size_t msize; ensure_initialization(); msize = pad_request(sizeof(struct malloc_state)); if (capacity > msize + TOP_FOOT_SIZE && - capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { - m = init_user_mstate((char*)base, capacity); + capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) { + + m = init_user_mstate((char *)base, capacity); m->seg.sflags = EXTERN_BIT; set_lock(m, locked); + } + return (mspace)m; + } int mspace_track_large_chunks(mspace msp, int enable) { - int ret = 0; + + int ret = 0; mstate ms = (mstate)msp; if (!PREACTION(ms)) { - if (!use_mmap(ms)) { - ret = 1; - } + + if (!use_mmap(ms)) { ret = 1; } if (!enable) { + enable_mmap(ms); + } else { + disable_mmap(ms); + } + POSTACTION(ms); + } + return ret; + } size_t destroy_mspace(mspace msp) { + size_t freed = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + msegmentptr sp = &ms->seg; - (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ + (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ while (sp != 0) { - char* base = sp->base; + + char * base = sp->base; size_t size = sp->size; flag_t flag = sp->sflags; - (void)base; /* placate people compiling -Wunused-variable */ + (void)base; /* placate people compiling -Wunused-variable */ sp = sp->next; if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) && CALL_MUNMAP(base, size) == 0) freed += size; + } + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return freed; + } /* @@ -5548,25 +6338,31 @@ size_t destroy_mspace(mspace msp) { versions. This is not so nice but better than the alternatives. */ -void* mspace_malloc(mspace msp, size_t bytes) { +void *mspace_malloc(mspace msp, size_t bytes) { + mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + if (!PREACTION(ms)) { - void* mem; + + void * mem; size_t nb; if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; binmap_t smallbits; - nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes); idx = small_index(nb); smallbits = ms->smallmap >> idx; - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ + idx += ~smallbits & 1; /* Uses next bin if idx empty */ b = smallbin_at(ms, idx); p = b->fd; assert(chunksize(p) == small_index2size(idx)); @@ -5575,15 +6371,17 @@ void* mspace_malloc(mspace msp, size_t bytes) { mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; + } else if (nb > ms->dvsize) { - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ mchunkptr b, p, r; - size_t rsize; - bindex_t i; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - binmap_t leastbit = least_bit(leftbits); + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); b = smallbin_at(ms, i); p = b->fd; @@ -5594,54 +6392,71 @@ void* mspace_malloc(mspace msp, size_t bytes) { if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(ms, p, small_index2size(i)); else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); r = chunk_plus_offset(p, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; + } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); goto postaction; + } + } - } - else if (bytes >= MAX_REQUEST) + + } else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ else { + nb = pad_request(bytes); if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); goto postaction; + } + } if (nb <= ms->dvsize) { - size_t rsize = ms->dvsize - nb; + + size_t rsize = ms->dvsize - nb; mchunkptr p = ms->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ mchunkptr r = ms->dv = chunk_plus_offset(p, nb); ms->dvsize = rsize; set_size_and_pinuse_of_free_chunk(r, rsize); set_size_and_pinuse_of_inuse_chunk(ms, p, nb); - } - else { /* exhaust dv */ + + } else { /* exhaust dv */ + size_t dvs = ms->dvsize; ms->dvsize = 0; ms->dv = 0; set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; + } - else if (nb < ms->topsize) { /* Split top */ - size_t rsize = ms->topsize -= nb; + else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; mchunkptr p = ms->top; mchunkptr r = ms->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -5650,6 +6465,7 @@ void* mspace_malloc(mspace msp, size_t bytes) { check_top_chunk(ms, ms->top); check_malloced_chunk(ms, mem, nb); goto postaction; + } mem = sys_alloc(ms, nb); @@ -5657,372 +6473,519 @@ void* mspace_malloc(mspace msp, size_t bytes) { postaction: POSTACTION(ms); return mem; + } return 0; + } -void mspace_free(mspace msp, void* mem) { +void mspace_free(mspace msp, void *mem) { + if (mem != 0) { - mchunkptr p = mem2chunk(mem); -#if FOOTERS + + mchunkptr p = mem2chunk(mem); + #if FOOTERS mstate fm = get_mstate_for(p); - (void)msp; /* placate people compiling -Wunused */ -#else /* FOOTERS */ + (void)msp; /* placate people compiling -Wunused */ + #else /* FOOTERS */ mstate fm = (mstate)msp; -#endif /* FOOTERS */ + #endif /* FOOTERS */ if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); return; + } + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { - size_t psize = 
chunksize(p); + + size_t psize = chunksize(p); mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { + size_t prevsize = p->prev_foot; if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) fm->footprint -= psize; goto postaction; - } - else { + + } else { + mchunkptr prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { + + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; set_free_with_pinuse(p, psize, next); goto postaction; + } - } - else + + } else + goto erroraction; + } + } if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { - if (!cinuse(next)) { /* consolidate forward */ + + if (!cinuse(next)) { /* consolidate forward */ if (next == fm->top) { + size_t tsize = fm->topsize += psize; fm->top = p; p->head = tsize | PINUSE_BIT; if (p == fm->dv) { + fm->dv = 0; fm->dvsize = 0; + } - if (should_trim(fm, tsize)) - sys_trim(fm, 0); + + if (should_trim(fm, tsize)) sys_trim(fm, 0); goto postaction; - } - else if (next == fm->dv) { + + } else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; fm->dv = p; set_size_and_pinuse_of_free_chunk(p, dsize); goto postaction; - } - else { + + } else { + size_t nsize = chunksize(next); psize += nsize; unlink_chunk(fm, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == fm->dv) { + fm->dvsize = psize; goto postaction; + } + } - } - else + + } else + set_free_with_pinuse(p, psize, next); if (is_small(psize)) { + insert_small_chunk(fm, p, psize); check_free_chunk(fm, p); - } - else { + + } else { + tchunkptr tp = (tchunkptr)p; insert_large_chunk(fm, tp, psize); check_free_chunk(fm, p); - if (--fm->release_checks == 0) - release_unused_segments(fm); + if (--fm->release_checks == 0) release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: USAGE_ERROR_ACTION(fm, p); postaction: POSTACTION(fm); + } + } + } -void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { - void* mem; +void *mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { + + void * mem; size_t req = 0; mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + if (n_elements != 0) { + req = n_elements * elem_size; if (((n_elements | elem_size) & ~(size_t)0xffff) && (req / n_elements != elem_size)) - req = MAX_SIZE_T; /* force downstream failure on overflow */ + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = internal_malloc(ms, req); if (mem != 0 && calloc_must_clear(mem2chunk(mem))) __builtin_memset(mem, 0, req); return mem; + } -void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { - void* mem = 0; +void *mspace_realloc(mspace msp, void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem == 0) { + mem = mspace_malloc(msp, bytes); - } - else if (bytes >= MAX_REQUEST) { + + } else if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } -#ifdef REALLOC_ZERO_BYTES_FREES + + #ifdef REALLOC_ZERO_BYTES_FREES else if (bytes == 0) { + mspace_free(msp, oldmem); + } -#endif /* REALLOC_ZERO_BYTES_FREES */ + + #endif /* REALLOC_ZERO_BYTES_FREES */ else { - size_t nb = request2size(bytes); + + size_t nb = request2size(bytes); mchunkptr oldp = 
mem2chunk(oldmem); -#if ! FOOTERS + #if !FOOTERS mstate m = (mstate)msp; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); POSTACTION(m); if (newp != 0) { + check_inuse_chunk(m, newp); mem = chunk2mem(newp); - } - else { + + } else { + mem = mspace_malloc(m, bytes); if (mem != 0) { + size_t oc = chunksize(oldp) - overhead_for(oldp); - __builtin_memcpy(mem, oldmem, (oc < bytes)? oc : bytes); + __builtin_memcpy(mem, oldmem, (oc < bytes) ? oc : bytes); mspace_free(m, oldmem); + } + } + } + } + return mem; + } -void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) { - void* mem = 0; +void *mspace_realloc_in_place(mspace msp, void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem != 0) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; - } - else { - size_t nb = request2size(bytes); + + } else { + + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); -#if ! FOOTERS + #if !FOOTERS mstate m = (mstate)msp; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); - (void)msp; /* placate people compiling -Wunused */ + (void)msp; /* placate people compiling -Wunused */ if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); POSTACTION(m); if (newp == oldp) { + check_inuse_chunk(m, newp); mem = oldmem; + } + } + } + } + return mem; + } -void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { +void *mspace_memalign(mspace msp, size_t alignment, size_t bytes) { + mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } - if (alignment <= MALLOC_ALIGNMENT) - return mspace_malloc(msp, bytes); + + if (alignment <= MALLOC_ALIGNMENT) return mspace_malloc(msp, bytes); return internal_memalign(ms, alignment, bytes); + } -void** mspace_independent_calloc(mspace msp, size_t n_elements, - size_t elem_size, void* chunks[]) { - size_t sz = elem_size; /* serves as 1-element array */ +void **mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void *chunks[]) { + + size_t sz = elem_size; /* serves as 1-element array */ mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + return ialloc(ms, n_elements, &sz, 3, chunks); + } -void** mspace_independent_comalloc(mspace msp, size_t n_elements, - size_t sizes[], void* chunks[]) { +void **mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void *chunks[]) { + mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + return ialloc(ms, n_elements, sizes, 0, chunks); + } -size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) { +size_t mspace_bulk_free(mspace msp, void *array[], size_t nelem) { + return internal_bulk_free((mstate)msp, array, nelem); + } -#if MALLOC_INSPECT_ALL + #if MALLOC_INSPECT_ALL void mspace_inspect_all(mspace msp, - void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { + void (*handler)(void *start, void *end, + size_t used_bytes, void *callback_arg), + void *arg) { + mstate ms = (mstate)msp; if (ok_magic(ms)) { + if (!PREACTION(ms)) { + 
internal_inspect_all(ms, handler, arg); POSTACTION(ms); + } + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + } -#endif /* MALLOC_INSPECT_ALL */ + + #endif /* MALLOC_INSPECT_ALL */ int mspace_trim(mspace msp, size_t pad) { - int result = 0; + + int result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + if (!PREACTION(ms)) { + result = sys_trim(ms, pad); POSTACTION(ms); + } + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } -#if !NO_MALLOC_STATS + #if !NO_MALLOC_STATS void mspace_malloc_stats(mspace msp) { + mstate ms = (mstate)msp; if (ok_magic(ms)) { + internal_malloc_stats(ms); + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + } -#endif /* NO_MALLOC_STATS */ + + #endif /* NO_MALLOC_STATS */ size_t mspace_footprint(mspace msp) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + result = ms->footprint; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } size_t mspace_max_footprint(mspace msp) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + result = ms->max_footprint; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } size_t mspace_footprint_limit(mspace msp) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + size_t maf = ms->footprint_limit; result = (maf == 0) ? MAX_SIZE_T : maf; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } size_t mspace_set_footprint_limit(mspace msp, size_t bytes) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { - if (bytes == 0) - result = granularity_align(1); /* Use minimal size */ + + if (bytes == 0) result = granularity_align(1); /* Use minimal size */ if (bytes == MAX_SIZE_T) - result = 0; /* disable */ + result = 0; /* disable */ else result = granularity_align(bytes); ms->footprint_limit = result; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } -#if !NO_MALLINFO + #if !NO_MALLINFO struct mallinfo mspace_mallinfo(mspace msp) { + mstate ms = (mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - } + if (!ok_magic(ms)) { USAGE_ERROR_ACTION(ms, ms); } return internal_mallinfo(ms); + } -#endif /* NO_MALLINFO */ -size_t mspace_usable_size(const void* mem) { + #endif /* NO_MALLINFO */ + +size_t mspace_usable_size(const void *mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); - if (is_inuse(p)) - return chunksize(p) - overhead_for(p); + if (is_inuse(p)) return chunksize(p) - overhead_for(p); + } + return 0; + } int mspace_mallopt(int param_number, int value) { + return change_mparam(param_number, value); -} -#endif /* MSPACES */ +} + #endif /* MSPACES */ /* -------------------- Alternative MORECORE functions ------------------- */ @@ -6067,35 +7030,48 @@ int mspace_mallopt(int param_number, int value) { void *osMoreCore(int size) { + void *ptr = 0; static void *sbrk_top = 0; if (size > 0) { + if (size < MINIMUM_MORECORE_SIZE) size = MINIMUM_MORECORE_SIZE; if (CurrentExecutionLevel() == kTaskLevel) ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); if (ptr == 0) { + return (void *) MFAIL; + } + // save ptrs so they can be freed during cleanup our_os_pools[next_os_pool] = ptr; next_os_pool++; ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); sbrk_top = (char *) ptr 
+ size; return ptr; + } + else if (size < 0) { + // we don't currently support shrink behavior return (void *) MFAIL; + } + else { + return sbrk_top; + } + } // cleanup any allocated memory pools @@ -6103,19 +7079,22 @@ int mspace_mallopt(int param_number, int value) { void osCleanupMem(void) { + void **ptr; for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) if (*ptr) { + PoolDeallocate(*ptr); *ptr = 0; + } + } */ - /* ----------------------------------------------------------------------- History: v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea @@ -6335,4 +7314,5 @@ History: */ -#endif // __GLIBC__ +#endif // __GLIBC__ + diff --git a/qemu_mode/libqasan/malloc.c b/qemu_mode/libqasan/malloc.c index 54c1096a..5a2d2a0c 100644 --- a/qemu_mode/libqasan/malloc.c +++ b/qemu_mode/libqasan/malloc.c @@ -51,9 +51,9 @@ typedef struct { struct chunk_begin { size_t requested_size; - void* aligned_orig; // NULL if not aligned - struct chunk_begin* next; - struct chunk_begin* prev; + void * aligned_orig; // NULL if not aligned + struct chunk_begin *next; + struct chunk_begin *prev; char redzone[REDZONE_SIZE]; }; @@ -68,45 +68,45 @@ struct chunk_struct { #ifdef __GLIBC__ -void* (*__lq_libc_malloc)(size_t); -void (*__lq_libc_free)(void*); -#define backend_malloc __lq_libc_malloc -#define backend_free __lq_libc_free +void *(*__lq_libc_malloc)(size_t); +void (*__lq_libc_free)(void *); + #define backend_malloc __lq_libc_malloc + #define backend_free __lq_libc_free -#define TMP_ZONE_SIZE 4096 + #define TMP_ZONE_SIZE 4096 static int __tmp_alloc_zone_idx; static unsigned char __tmp_alloc_zone[TMP_ZONE_SIZE]; #else // From dlmalloc.c -void* dlmalloc(size_t); -void dlfree(void*); -#define backend_malloc dlmalloc -#define backend_free dlfree +void * dlmalloc(size_t); +void dlfree(void *); + #define backend_malloc dlmalloc + #define backend_free dlfree #endif int __libqasan_malloc_initialized; -static struct chunk_begin* quarantine_top; -static struct chunk_begin* quarantine_end; +static struct chunk_begin *quarantine_top; +static struct chunk_begin *quarantine_end; static size_t quarantine_bytes; #ifdef __BIONIC__ -static pthread_mutex_t quarantine_lock; -#define LOCK_TRY pthread_mutex_trylock -#define LOCK_INIT pthread_mutex_init -#define LOCK_UNLOCK pthread_mutex_unlock +static pthread_mutex_t quarantine_lock; + #define LOCK_TRY pthread_mutex_trylock + #define LOCK_INIT pthread_mutex_init + #define LOCK_UNLOCK pthread_mutex_unlock #else -static pthread_spinlock_t quarantine_lock; -#define LOCK_TRY pthread_spin_trylock -#define LOCK_INIT pthread_spin_init -#define LOCK_UNLOCK pthread_spin_unlock +static pthread_spinlock_t quarantine_lock; + #define LOCK_TRY pthread_spin_trylock + #define LOCK_INIT pthread_spin_init + #define LOCK_UNLOCK pthread_spin_unlock #endif // need qasan disabled -static int quanratine_push(struct chunk_begin* ck) { +static int quanratine_push(struct chunk_begin *ck) { if (ck->requested_size >= QUARANTINE_MAX_BYTES) return 0; @@ -114,7 +114,7 @@ static int quanratine_push(struct chunk_begin* ck) { while (ck->requested_size + quarantine_bytes >= QUARANTINE_MAX_BYTES) { - struct chunk_begin* tmp = quarantine_end; + struct chunk_begin *tmp = quarantine_end; quarantine_end = tmp->prev; quarantine_bytes -= tmp->requested_size; @@ -154,23 +154,23 @@ void __libqasan_init_malloc(void) { } -size_t __libqasan_malloc_usable_size(void* ptr) { +size_t __libqasan_malloc_usable_size(void *ptr) { - char* p = ptr; + char *p = ptr; p -= sizeof(struct chunk_begin); - return ((struct 
chunk_begin*)p)->requested_size; + return ((struct chunk_begin *)p)->requested_size; } -void* __libqasan_malloc(size_t size) { +void *__libqasan_malloc(size_t size) { if (!__libqasan_malloc_initialized) { - + __libqasan_init_malloc(); #ifdef __GLIBC__ - void* r = &__tmp_alloc_zone[__tmp_alloc_zone_idx]; + void *r = &__tmp_alloc_zone[__tmp_alloc_zone_idx]; if (size & (ALLOC_ALIGN_SIZE - 1)) __tmp_alloc_zone_idx += @@ -185,7 +185,7 @@ void* __libqasan_malloc(size_t size) { int state = QASAN_SWAP(QASAN_DISABLED); // disable qasan for this thread - struct chunk_begin* p = backend_malloc(sizeof(struct chunk_struct) + size); + struct chunk_begin *p = backend_malloc(sizeof(struct chunk_struct) + size); QASAN_SWAP(state); @@ -197,14 +197,14 @@ void* __libqasan_malloc(size_t size) { p->aligned_orig = NULL; p->next = p->prev = NULL; - QASAN_ALLOC(&p[1], (char*)&p[1] + size); + QASAN_ALLOC(&p[1], (char *)&p[1] + size); QASAN_POISON(p->redzone, REDZONE_SIZE, ASAN_HEAP_LEFT_RZ); if (size & (ALLOC_ALIGN_SIZE - 1)) - QASAN_POISON((char*)&p[1] + size, + QASAN_POISON((char *)&p[1] + size, (size & ~(ALLOC_ALIGN_SIZE - 1)) + 8 - size + REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ); else - QASAN_POISON((char*)&p[1] + size, REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ); + QASAN_POISON((char *)&p[1] + size, REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ); __builtin_memset(&p[1], 0xff, size); @@ -212,17 +212,17 @@ void* __libqasan_malloc(size_t size) { } -void __libqasan_free(void* ptr) { +void __libqasan_free(void *ptr) { if (!ptr) return; - + #ifdef __GLIBC__ - if (ptr >= (void*)__tmp_alloc_zone && - ptr < ((void*)__tmp_alloc_zone + TMP_ZONE_SIZE)) + if (ptr >= (void *)__tmp_alloc_zone && + ptr < ((void *)__tmp_alloc_zone + TMP_ZONE_SIZE)) return; #endif - struct chunk_begin* p = ptr; + struct chunk_begin *p = ptr; p -= 1; size_t n = p->requested_size; @@ -249,21 +249,22 @@ void __libqasan_free(void* ptr) { } -void* __libqasan_calloc(size_t nmemb, size_t size) { +void *__libqasan_calloc(size_t nmemb, size_t size) { size *= nmemb; #ifdef __GLIBC__ if (!__libqasan_malloc_initialized) { - void* r = &__tmp_alloc_zone[__tmp_alloc_zone_idx]; + void *r = &__tmp_alloc_zone[__tmp_alloc_zone_idx]; __tmp_alloc_zone_idx += size; return r; } + #endif - char* p = __libqasan_malloc(size); + char *p = __libqasan_malloc(size); if (!p) return NULL; __builtin_memset(p, 0, size); @@ -272,14 +273,14 @@ void* __libqasan_calloc(size_t nmemb, size_t size) { } -void* __libqasan_realloc(void* ptr, size_t size) { +void *__libqasan_realloc(void *ptr, size_t size) { - char* p = __libqasan_malloc(size); + char *p = __libqasan_malloc(size); if (!p) return NULL; if (!ptr) return p; - size_t n = ((struct chunk_begin*)ptr)[-1].requested_size; + size_t n = ((struct chunk_begin *)ptr)[-1].requested_size; if (size < n) n = size; __builtin_memcpy(p, ptr, n); @@ -289,9 +290,9 @@ void* __libqasan_realloc(void* ptr, size_t size) { } -int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) { +int __libqasan_posix_memalign(void **ptr, size_t align, size_t len) { - if ((align % 2) || (align % sizeof(void*))) return EINVAL; + if ((align % 2) || (align % sizeof(void *))) return EINVAL; if (len == 0) { *ptr = NULL; @@ -305,7 +306,7 @@ int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) { int state = QASAN_SWAP(QASAN_DISABLED); // disable qasan for this thread - char* orig = backend_malloc(sizeof(struct chunk_struct) + size); + char *orig = backend_malloc(sizeof(struct chunk_struct) + size); QASAN_SWAP(state); @@ -313,10 +314,10 @@ int 
__libqasan_posix_memalign(void** ptr, size_t align, size_t len) { QASAN_UNPOISON(orig, sizeof(struct chunk_struct) + size); - char* data = orig + sizeof(struct chunk_begin); + char *data = orig + sizeof(struct chunk_begin); data += align - ((uintptr_t)data % align); - struct chunk_begin* p = (struct chunk_begin*)data - 1; + struct chunk_begin *p = (struct chunk_begin *)data - 1; p->requested_size = len; p->aligned_orig = orig; @@ -339,9 +340,9 @@ int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) { } -void* __libqasan_memalign(size_t align, size_t len) { +void *__libqasan_memalign(size_t align, size_t len) { - void* ret = NULL; + void *ret = NULL; __libqasan_posix_memalign(&ret, align, len); @@ -349,9 +350,9 @@ void* __libqasan_memalign(size_t align, size_t len) { } -void* __libqasan_aligned_alloc(size_t align, size_t len) { +void *__libqasan_aligned_alloc(size_t align, size_t len) { - void* ret = NULL; + void *ret = NULL; if ((len % align)) return NULL; -- cgit 1.4.1 From 145c673a80878d92013882eda6ef56e6948c397b Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Mon, 15 Feb 2021 15:04:34 +0100 Subject: finished merge --- qemu_mode/libqasan/dlmalloc.c | 15 --------------- 1 file changed, 15 deletions(-) (limited to 'qemu_mode/libqasan/dlmalloc.c') diff --git a/qemu_mode/libqasan/dlmalloc.c b/qemu_mode/libqasan/dlmalloc.c index 74b05e15..aff58ad5 100644 --- a/qemu_mode/libqasan/dlmalloc.c +++ b/qemu_mode/libqasan/dlmalloc.c @@ -3916,11 +3916,8 @@ static void internal_malloc_stats(mstate m) { clear_smallmap(M, I); \ \ } else if (RTCHECK(B == smallbin_at(M, I) || \ -<<<<<<< HEAD -======= \ \ ->>>>>>> e3a5c31307f323452dc4b5288e0d19a02b596a33 (ok_address(M, B) && B->fd == P))) { \ \ F->bk = B; \ @@ -4131,11 +4128,8 @@ static void internal_malloc_stats(mstate m) { XP->child[1] = R; \ \ } else \ -<<<<<<< HEAD -======= \ \ ->>>>>>> e3a5c31307f323452dc4b5288e0d19a02b596a33 CORRUPTION_ERROR_ACTION(M); \ if (R != 0) { \ \ @@ -4151,11 +4145,8 @@ static void internal_malloc_stats(mstate m) { C0->parent = R; \ \ } else \ -<<<<<<< HEAD -======= \ \ ->>>>>>> e3a5c31307f323452dc4b5288e0d19a02b596a33 CORRUPTION_ERROR_ACTION(M); \ \ } \ @@ -4167,21 +4158,15 @@ static void internal_malloc_stats(mstate m) { C1->parent = R; \ \ } else \ -<<<<<<< HEAD -======= \ \ ->>>>>>> e3a5c31307f323452dc4b5288e0d19a02b596a33 CORRUPTION_ERROR_ACTION(M); \ \ } \ \ } else \ -<<<<<<< HEAD -======= \ \ ->>>>>>> e3a5c31307f323452dc4b5288e0d19a02b596a33 CORRUPTION_ERROR_ACTION(M); \ \ } \ -- cgit 1.4.1 From 6caec2169cef890ba8a62715c2c26cc0608626e3 Mon Sep 17 00:00:00 2001 From: Michael Rodler Date: Mon, 15 Feb 2021 19:14:28 +0100 Subject: Revert "llvm bug workaround for lto extint" This reverts commit e3a5c31307f323452dc4b5288e0d19a02b596a33. --- docs/Changelog.md | 1 - include/envs.h | 1 - instrumentation/cmplog-instructions-pass.cc | 39 +++-------------------------- qemu_mode/libqasan/dlmalloc.c | 5 ---- src/afl-cc.c | 2 -- src/afl-fuzz-redqueen.c | 1 - 6 files changed, 4 insertions(+), 45 deletions(-) (limited to 'qemu_mode/libqasan/dlmalloc.c') diff --git a/docs/Changelog.md b/docs/Changelog.md index e2482f8f..71ef4c2c 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -49,7 +49,6 @@ sending a mail to . CLANG for old afl-clang - fixed a potential crash in the LAF feature - workaround for llvm 13 - - workaround for llvm internal lto bug that lets not bitcast from _ExtInt() - qemuafl - QASan (address sanitizer for Qemu) ported to qemuafl! 
See qemu_mode/libqasan/README.md diff --git a/include/envs.h b/include/envs.h index 36667ebc..4313e053 100644 --- a/include/envs.h +++ b/include/envs.h @@ -16,7 +16,6 @@ static char *afl_environment_deprecated[] = { static char *afl_environment_variables[] = { - "_AFL_LTO_COMPILE", "AFL_ALIGNED_ALLOC", "AFL_ALLOW_TMP", "AFL_ANALYZE_HEX", diff --git a/instrumentation/cmplog-instructions-pass.cc b/instrumentation/cmplog-instructions-pass.cc index 9cd99f85..50ade9fd 100644 --- a/instrumentation/cmplog-instructions-pass.cc +++ b/instrumentation/cmplog-instructions-pass.cc @@ -114,8 +114,6 @@ bool CmpLogInstructions::hookInstrs(Module &M) { IntegerType *Int64Ty = IntegerType::getInt64Ty(C); IntegerType *Int128Ty = IntegerType::getInt128Ty(C); - char *is_lto = getenv("_AFL_LTO_COMPILE"); - #if LLVM_VERSION_MAJOR < 9 Constant * #else @@ -268,20 +266,10 @@ bool CmpLogInstructions::hookInstrs(Module &M) { unsigned int max_size = Val->getType()->getIntegerBitWidth(), cast_size; unsigned char do_cast = 0; - if (!SI->getNumCases() || max_size < 16) { continue; } - - if (max_size % 8) { - - if (is_lto) { + if (!SI->getNumCases() || max_size < 16 || max_size % 8) { - continue; // LTO cannot bitcast from _ExtInt() :( - - } else { - - max_size = (((max_size / 8) + 1) * 8); - do_cast = 1; - - } + // if (!be_quiet) errs() << "skip trivial switch..\n"; + continue; } @@ -298,7 +286,6 @@ bool CmpLogInstructions::hookInstrs(Module &M) { } - if (is_lto) { continue; } // LTO cannot bitcast _ExtInt() :( max_size = 128; do_cast = 1; @@ -315,7 +302,6 @@ bool CmpLogInstructions::hookInstrs(Module &M) { cast_size = max_size; break; default: - if (is_lto) { continue; } // LTO cannot bitcast _ExtInt() :( cast_size = 128; do_cast = 1; @@ -504,22 +490,7 @@ bool CmpLogInstructions::hookInstrs(Module &M) { } - if (!max_size || max_size < 16) { continue; } - - if (max_size % 8) { - - if (is_lto) { - - continue; // LTO cannot bitcast from _ExtInt() :( - - } else { - - max_size = (((max_size / 8) + 1) * 8); - do_cast = 1; - - } - - } + if (!max_size || max_size % 8 || max_size < 16) { continue; } if (max_size > 128) { @@ -531,7 +502,6 @@ bool CmpLogInstructions::hookInstrs(Module &M) { } - if (is_lto) { continue; } // LTO cannot bitcast from _ExtInt() :( max_size = 128; do_cast = 1; @@ -548,7 +518,6 @@ bool CmpLogInstructions::hookInstrs(Module &M) { cast_size = max_size; break; default: - if (is_lto) { continue; } // LTO cannot bitcast from _ExtInt() :( cast_size = 128; do_cast = 1; diff --git a/qemu_mode/libqasan/dlmalloc.c b/qemu_mode/libqasan/dlmalloc.c index aff58ad5..bace0ff6 100644 --- a/qemu_mode/libqasan/dlmalloc.c +++ b/qemu_mode/libqasan/dlmalloc.c @@ -3917,7 +3917,6 @@ static void internal_malloc_stats(mstate m) { \ } else if (RTCHECK(B == smallbin_at(M, I) || \ \ - \ (ok_address(M, B) && B->fd == P))) { \ \ F->bk = B; \ @@ -4129,7 +4128,6 @@ static void internal_malloc_stats(mstate m) { \ } else \ \ - \ CORRUPTION_ERROR_ACTION(M); \ if (R != 0) { \ \ @@ -4146,7 +4144,6 @@ static void internal_malloc_stats(mstate m) { \ } else \ \ - \ CORRUPTION_ERROR_ACTION(M); \ \ } \ @@ -4159,14 +4156,12 @@ static void internal_malloc_stats(mstate m) { \ } else \ \ - \ CORRUPTION_ERROR_ACTION(M); \ \ } \ \ } else \ \ - \ CORRUPTION_ERROR_ACTION(M); \ \ } \ diff --git a/src/afl-cc.c b/src/afl-cc.c index 959c9a6f..d41f79a2 100644 --- a/src/afl-cc.c +++ b/src/afl-cc.c @@ -1875,8 +1875,6 @@ int main(int argc, char **argv, char **envp) { edit_params(argc, argv, envp); - if (lto_mode) { setenv("_AFL_LTO_COMPILE", "1", 1); } - if 
(debug) { DEBUGF("cd '%s';", getthecwd()); diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index cf65d3c1..275af9c8 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -1547,7 +1547,6 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf, is_n = 1; } - #endif for (i = 0; i < loggeds; ++i) { -- cgit 1.4.1 From 89cf94f0e6afd4d360f7f139f16a6730a07b478d Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Fri, 19 Feb 2021 20:33:12 +0100 Subject: suggested env vars for lazy ppl --- include/common.h | 1 + qemu_mode/libqasan/dlmalloc.c | 5 ++ src/afl-common.c | 116 ++++++++++++++++++++++++++++++++++++++++++ src/afl-fuzz-state.c | 2 + 4 files changed, 124 insertions(+) (limited to 'qemu_mode/libqasan/dlmalloc.c') diff --git a/include/common.h b/include/common.h index bb8831f2..cd728536 100644 --- a/include/common.h +++ b/include/common.h @@ -39,6 +39,7 @@ #define STRINGIFY_VAL_SIZE_MAX (16) void detect_file_args(char **argv, u8 *prog_in, bool *use_stdin); +void print_suggested_envs(char *mispelled_env); void check_environment_vars(char **env); char **argv_cpy_dup(int argc, char **argv); diff --git a/qemu_mode/libqasan/dlmalloc.c b/qemu_mode/libqasan/dlmalloc.c index bace0ff6..aff58ad5 100644 --- a/qemu_mode/libqasan/dlmalloc.c +++ b/qemu_mode/libqasan/dlmalloc.c @@ -3917,6 +3917,7 @@ static void internal_malloc_stats(mstate m) { \ } else if (RTCHECK(B == smallbin_at(M, I) || \ \ + \ (ok_address(M, B) && B->fd == P))) { \ \ F->bk = B; \ @@ -4128,6 +4129,7 @@ static void internal_malloc_stats(mstate m) { \ } else \ \ + \ CORRUPTION_ERROR_ACTION(M); \ if (R != 0) { \ \ @@ -4144,6 +4146,7 @@ static void internal_malloc_stats(mstate m) { \ } else \ \ + \ CORRUPTION_ERROR_ACTION(M); \ \ } \ @@ -4156,12 +4159,14 @@ static void internal_malloc_stats(mstate m) { \ } else \ \ + \ CORRUPTION_ERROR_ACTION(M); \ \ } \ \ } else \ \ + \ CORRUPTION_ERROR_ACTION(M); \ \ } \ diff --git a/src/afl-common.c b/src/afl-common.c index 589aac71..0b38b222 100644 --- a/src/afl-common.c +++ b/src/afl-common.c @@ -518,6 +518,120 @@ int parse_afl_kill_signal_env(u8 *afl_kill_signal_env, int default_signal) { } +#define HELPER_MIN3(a, b, c) \ + ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c))) + +// from +// https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#C +static int string_distance_levenshtein(char *s1, char *s2) { + + unsigned int s1len, s2len, x, y, lastdiag, olddiag; + s1len = strlen(s1); + s2len = strlen(s2); + unsigned int column[s1len + 1]; + for (y = 1; y <= s1len; y++) + column[y] = y; + for (x = 1; x <= s2len; x++) { + + column[0] = x; + for (y = 1, lastdiag = x - 1; y <= s1len; y++) { + + olddiag = column[y]; + column[y] = HELPER_MIN3(column[y] + 1, column[y - 1] + 1, + lastdiag + (s1[y - 1] == s2[x - 1] ? 
0 : 1)); + lastdiag = olddiag; + + } + + } + + return column[s1len]; + +} + +#undef HELPER_MIN3 + +void print_suggested_envs(char *mispelled_env) { + + size_t env_name_len = + strcspn(mispelled_env, "=") - 4; // remove the AFL_prefix + char *env_name = ck_alloc(env_name_len + 1); + memcpy(env_name, mispelled_env + 4, env_name_len); + + char *seen = ck_alloc(sizeof(afl_environment_variables) / sizeof(char *)); + + int j; + for (j = 0; afl_environment_variables[j] != NULL; ++j) { + + char *afl_env = afl_environment_variables[j] + 4; + + int distance = string_distance_levenshtein(afl_env, env_name); + if (distance <= 3 && seen[j] == 0) { + + SAYF("Did you mean %s?\n", afl_environment_variables[j]); + seen[j] = 1; + + } + + size_t afl_env_len = strlen(afl_env); + char * reduced = ck_alloc(afl_env_len + 1); + + size_t start = 0; + while (start < afl_env_len) { + + size_t end = start + strcspn(afl_env + start, "_") + 1; + memcpy(reduced, afl_env, start); + if (end < afl_env_len) + memcpy(reduced + start, afl_env + end, afl_env_len - end); + reduced[afl_env_len - end + start] = 0; + + int distance = string_distance_levenshtein(reduced, env_name); + if (distance <= 3 && seen[j] == 0) { + + SAYF("Did you mean %s?\n", afl_environment_variables[j]); + seen[j] = 1; + + } + + start = end; + + }; + + } + + char * reduced = ck_alloc(env_name_len + 1); + size_t start = 0; + while (start < env_name_len) { + + size_t end = start + strcspn(env_name + start, "_") + 1; + memcpy(reduced, env_name, start); + if (end < env_name_len) + memcpy(reduced + start, env_name + end, env_name_len - end); + reduced[env_name_len - end + start] = 0; + + for (j = 0; afl_environment_variables[j] != NULL; ++j) { + + int distance = string_distance_levenshtein( + afl_environment_variables[j] + 4, reduced); + if (distance <= 3 && seen[j] == 0) { + + SAYF("Did you mean %s?\n", afl_environment_variables[j]); + seen[j] = 1; + + } + + } + + start = end; + + }; + + ck_free(env_name); + ck_free(reduced); + ck_free(seen); + +} + void check_environment_vars(char **envp) { if (be_quiet) { return; } @@ -587,6 +701,8 @@ void check_environment_vars(char **envp) { WARNF("Mistyped AFL environment variable: %s", env); issue_detected = 1; + print_suggested_envs(env); + } } diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index 5040e3ef..3d36e712 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -486,6 +486,8 @@ void read_afl_environment(afl_state_t *afl, char **envp) { WARNF("Mistyped AFL environment variable: %s", env); issue_detected = 1; + print_suggested_envs(env); + } } -- cgit 1.4.1