path: root/qemu_mode/libqasan/dlmalloc.c
author    Andrea Fioraldi <andreafioraldi@gmail.com>  2021-02-10 15:15:16 +0100
committer Andrea Fioraldi <andreafioraldi@gmail.com>  2021-02-10 15:15:16 +0100
commit    267b085f80074e61bdacf1e85e99014b6b2cdad2 (patch)
tree      fd5cb057e7cc2fe620c36943cbfab28421a3ce74 /qemu_mode/libqasan/dlmalloc.c
parent    b6643743d6ccba1cc299daf2e9b5272cb4cdd53c (diff)
download  afl++-267b085f80074e61bdacf1e85e99014b6b2cdad2.tar.gz
dlmalloc only for non glibc qasan and AFL_QEMU_FORCE_DFL
Diffstat (limited to 'qemu_mode/libqasan/dlmalloc.c')
-rw-r--r--  qemu_mode/libqasan/dlmalloc.c | 4769
1 file changed, 1895 insertions(+), 2874 deletions(-)
diff --git a/qemu_mode/libqasan/dlmalloc.c b/qemu_mode/libqasan/dlmalloc.c
index 3c7dcea8..7e3cb159 100644
--- a/qemu_mode/libqasan/dlmalloc.c
+++ b/qemu_mode/libqasan/dlmalloc.c
@@ -1,3 +1,7 @@
+#include <features.h>
+
+#ifndef __GLIBC__
+
 /*
   This is a version (aka dlmalloc) of malloc/free/realloc written by
   Doug Lea and released to the public domain, as explained at
@@ -203,12 +207,9 @@
   mspaces as thread-locals. For example:
     static __thread mspace tlms = 0;
     void*  tlmalloc(size_t bytes) {
-
       if (tlms == 0) tlms = create_mspace(0, 0);
       return mspace_malloc(tlms, bytes);
-
     }
-
     void  tlfree(void* mem) { mspace_free(tlms, mem); }
 
   Unless FOOTERS is defined, each mspace is completely independent.
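
  A self-contained sketch of calling the tlmalloc/tlfree pair defined
  above (assuming the file is built with MSPACES enabled; worker() is a
  hypothetical caller):

    void worker(void) {
      char* buf = (char*)tlmalloc(128);  /* draws from this thread's private mspace */
      /* ... no contention on a global heap lock ... */
      tlfree(buf);                       /* free from the owning thread; cross-space
                                            frees need FOOTERS */
    }
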
@@ -528,198 +529,197 @@ MAX_RELEASE_CHECK_RATE   default: 4095 unless not HAVE_MMAP
 
 /* Version identifier to allow people to support multiple versions */
 #ifndef DLMALLOC_VERSION
-  #define DLMALLOC_VERSION 20806
-#endif                                                  /* DLMALLOC_VERSION */
+#define DLMALLOC_VERSION 20806
+#endif /* DLMALLOC_VERSION */
 
 #ifndef DLMALLOC_EXPORT
-  #define DLMALLOC_EXPORT extern
+#define DLMALLOC_EXPORT extern
 #endif
 
 #ifndef WIN32
-  #ifdef _WIN32
-    #define WIN32 1
-  #endif                                                          /* _WIN32 */
-  #ifdef _WIN32_WCE
-    #define LACKS_FCNTL_H
-    #define WIN32 1
-  #endif                                                      /* _WIN32_WCE */
-#endif                                                             /* WIN32 */
+#ifdef _WIN32
+#define WIN32 1
+#endif  /* _WIN32 */
+#ifdef _WIN32_WCE
+#define LACKS_FCNTL_H
+#define WIN32 1
+#endif /* _WIN32_WCE */
+#endif  /* WIN32 */
 #ifdef WIN32
-  #define WIN32_LEAN_AND_MEAN
-  #include <windows.h>
-  #include <tchar.h>
-  #define HAVE_MMAP 1
-  #define HAVE_MORECORE 0
-  #define LACKS_UNISTD_H
-  #define LACKS_SYS_PARAM_H
-  #define LACKS_SYS_MMAN_H
-  #define LACKS_STRING_H
-  #define LACKS_STRINGS_H
-  #define LACKS_SYS_TYPES_H
-  #define LACKS_ERRNO_H
-  #define LACKS_SCHED_H
-  #ifndef MALLOC_FAILURE_ACTION
-    #define MALLOC_FAILURE_ACTION
-  #endif                                           /* MALLOC_FAILURE_ACTION */
-  #ifndef MMAP_CLEARS
-    #ifdef _WIN32_WCE                    /* WINCE reportedly does not clear */
-      #define MMAP_CLEARS 0
-    #else
-      #define MMAP_CLEARS 1
-    #endif                                                    /* _WIN32_WCE */
-  #endif                                                      /*MMAP_CLEARS */
-#endif                                                             /* WIN32 */
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <tchar.h>
+#define HAVE_MMAP 1
+#define HAVE_MORECORE 0
+#define LACKS_UNISTD_H
+#define LACKS_SYS_PARAM_H
+#define LACKS_SYS_MMAN_H
+#define LACKS_STRING_H
+#define LACKS_STRINGS_H
+#define LACKS_SYS_TYPES_H
+#define LACKS_ERRNO_H
+#define LACKS_SCHED_H
+#ifndef MALLOC_FAILURE_ACTION
+#define MALLOC_FAILURE_ACTION
+#endif /* MALLOC_FAILURE_ACTION */
+#ifndef MMAP_CLEARS
+#ifdef _WIN32_WCE /* WINCE reportedly does not clear */
+#define MMAP_CLEARS 0
+#else
+#define MMAP_CLEARS 1
+#endif /* _WIN32_WCE */
+#endif /*MMAP_CLEARS */
+#endif  /* WIN32 */
 
 #if defined(DARWIN) || defined(_DARWIN)
-  /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
-  #ifndef HAVE_MORECORE
-    #define HAVE_MORECORE 0
-    #define HAVE_MMAP 1
-    /* OSX allocators provide 16 byte alignment */
-    #ifndef MALLOC_ALIGNMENT
-      #define MALLOC_ALIGNMENT ((size_t)16U)
-    #endif
-  #endif                                                   /* HAVE_MORECORE */
-#endif                                                            /* DARWIN */
+/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
+#ifndef HAVE_MORECORE
+#define HAVE_MORECORE 0
+#define HAVE_MMAP 1
+/* OSX allocators provide 16 byte alignment */
+#ifndef MALLOC_ALIGNMENT
+#define MALLOC_ALIGNMENT ((size_t)16U)
+#endif
+#endif  /* HAVE_MORECORE */
+#endif  /* DARWIN */
 
 #ifndef LACKS_SYS_TYPES_H
-  #include <sys/types.h>                                      /* For size_t */
-#endif                                                 /* LACKS_SYS_TYPES_H */
+#include <sys/types.h>  /* For size_t */
+#endif  /* LACKS_SYS_TYPES_H */
 
 /* The maximum possible size_t value has all bits set */
-#define MAX_SIZE_T (~(size_t)0)
-
-#ifndef USE_LOCKS             /* ensure true if spin or recursive locks set */
-  #define USE_LOCKS                                      \
-    ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
-     (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
-#endif                                                         /* USE_LOCKS */
-
-#if USE_LOCKS   /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */
-  #if ((defined(__GNUC__) &&                                         \
-        ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \
-         defined(__i386__) || defined(__x86_64__))) ||               \
-       (defined(_MSC_VER) && _MSC_VER >= 1310))
-    #ifndef USE_SPIN_LOCKS
-      #define USE_SPIN_LOCKS 1
-    #endif                                                /* USE_SPIN_LOCKS */
-  #elif USE_SPIN_LOCKS
-    #error "USE_SPIN_LOCKS defined without implementation"
-  #endif                                          /* ... locks available... */
+#define MAX_SIZE_T           (~(size_t)0)
+
+#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */
+#define USE_LOCKS  ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
+                    (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
+#endif /* USE_LOCKS */
+
+#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */
+#if ((defined(__GNUC__) &&                                              \
+      ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) ||      \
+       defined(__i386__) || defined(__x86_64__))) ||                    \
+     (defined(_MSC_VER) && _MSC_VER>=1310))
+#ifndef USE_SPIN_LOCKS
+#define USE_SPIN_LOCKS 1
+#endif /* USE_SPIN_LOCKS */
+#elif USE_SPIN_LOCKS
+#error "USE_SPIN_LOCKS defined without implementation"
+#endif /* ... locks available... */
 #elif !defined(USE_SPIN_LOCKS)
-  #define USE_SPIN_LOCKS 0
-#endif                                                         /* USE_LOCKS */
+#define USE_SPIN_LOCKS 0
+#endif /* USE_LOCKS */
 
 #ifndef ONLY_MSPACES
-  #define ONLY_MSPACES 0
-#endif                                                      /* ONLY_MSPACES */
+#define ONLY_MSPACES 0
+#endif  /* ONLY_MSPACES */
 #ifndef MSPACES
-  #if ONLY_MSPACES
-    #define MSPACES 1
-  #else                                                     /* ONLY_MSPACES */
-    #define MSPACES 0
-  #endif                                                    /* ONLY_MSPACES */
-#endif                                                           /* MSPACES */
+#if ONLY_MSPACES
+#define MSPACES 1
+#else   /* ONLY_MSPACES */
+#define MSPACES 0
+#endif  /* ONLY_MSPACES */
+#endif  /* MSPACES */
 #ifndef MALLOC_ALIGNMENT
-  #define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))
-#endif                                                  /* MALLOC_ALIGNMENT */
+#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))
+#endif  /* MALLOC_ALIGNMENT */
 #ifndef FOOTERS
-  #define FOOTERS 0
-#endif                                                           /* FOOTERS */
+#define FOOTERS 0
+#endif  /* FOOTERS */
 #ifndef ABORT
-  #define ABORT abort()
-#endif                                                             /* ABORT */
+#define ABORT  abort()
+#endif  /* ABORT */
 #ifndef ABORT_ON_ASSERT_FAILURE
-  #define ABORT_ON_ASSERT_FAILURE 1
-#endif                                           /* ABORT_ON_ASSERT_FAILURE */
+#define ABORT_ON_ASSERT_FAILURE 1
+#endif  /* ABORT_ON_ASSERT_FAILURE */
 #ifndef PROCEED_ON_ERROR
-  #define PROCEED_ON_ERROR 0
-#endif                                                  /* PROCEED_ON_ERROR */
+#define PROCEED_ON_ERROR 0
+#endif  /* PROCEED_ON_ERROR */
 
 #ifndef INSECURE
-  #define INSECURE 0
-#endif                                                          /* INSECURE */
+#define INSECURE 0
+#endif  /* INSECURE */
 #ifndef MALLOC_INSPECT_ALL
-  #define MALLOC_INSPECT_ALL 0
-#endif                                                /* MALLOC_INSPECT_ALL */
+#define MALLOC_INSPECT_ALL 0
+#endif  /* MALLOC_INSPECT_ALL */
 #ifndef HAVE_MMAP
-  #define HAVE_MMAP 1
-#endif                                                         /* HAVE_MMAP */
+#define HAVE_MMAP 1
+#endif  /* HAVE_MMAP */
 #ifndef MMAP_CLEARS
-  #define MMAP_CLEARS 1
-#endif                                                       /* MMAP_CLEARS */
+#define MMAP_CLEARS 1
+#endif  /* MMAP_CLEARS */
 #ifndef HAVE_MREMAP
-  #ifdef linux
-    #define HAVE_MREMAP 1
-    #define _GNU_SOURCE                     /* Turns on mremap() definition */
-  #else                                                            /* linux */
-    #define HAVE_MREMAP 0
-  #endif                                                           /* linux */
-#endif                                                       /* HAVE_MREMAP */
+#ifdef linux
+#define HAVE_MREMAP 1
+#define _GNU_SOURCE /* Turns on mremap() definition */
+#else   /* linux */
+#define HAVE_MREMAP 0
+#endif  /* linux */
+#endif  /* HAVE_MREMAP */
 #ifndef MALLOC_FAILURE_ACTION
-  #define MALLOC_FAILURE_ACTION errno = ENOMEM;
-#endif                                             /* MALLOC_FAILURE_ACTION */
+#define MALLOC_FAILURE_ACTION  errno = ENOMEM;
+#endif  /* MALLOC_FAILURE_ACTION */
 #ifndef HAVE_MORECORE
-  #if ONLY_MSPACES
-    #define HAVE_MORECORE 0
-  #else                                                     /* ONLY_MSPACES */
-    #define HAVE_MORECORE 1
-  #endif                                                    /* ONLY_MSPACES */
-#endif                                                     /* HAVE_MORECORE */
+#if ONLY_MSPACES
+#define HAVE_MORECORE 0
+#else   /* ONLY_MSPACES */
+#define HAVE_MORECORE 1
+#endif  /* ONLY_MSPACES */
+#endif  /* HAVE_MORECORE */
 #if !HAVE_MORECORE
-  #define MORECORE_CONTIGUOUS 0
-#else                                                     /* !HAVE_MORECORE */
-  #define MORECORE_DEFAULT sbrk
-  #ifndef MORECORE_CONTIGUOUS
-    #define MORECORE_CONTIGUOUS 1
-  #endif                                             /* MORECORE_CONTIGUOUS */
-#endif                                                     /* HAVE_MORECORE */
+#define MORECORE_CONTIGUOUS 0
+#else   /* !HAVE_MORECORE */
+#define MORECORE_DEFAULT sbrk
+#ifndef MORECORE_CONTIGUOUS
+#define MORECORE_CONTIGUOUS 1
+#endif  /* MORECORE_CONTIGUOUS */
+#endif  /* HAVE_MORECORE */
 #ifndef DEFAULT_GRANULARITY
-  #if (MORECORE_CONTIGUOUS || defined(WIN32))
-    #define DEFAULT_GRANULARITY (0)   /* 0 means to compute in init_mparams */
-  #else                                              /* MORECORE_CONTIGUOUS */
-    #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
-  #endif                                             /* MORECORE_CONTIGUOUS */
-#endif                                               /* DEFAULT_GRANULARITY */
+#if (MORECORE_CONTIGUOUS || defined(WIN32))
+#define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
+#else   /* MORECORE_CONTIGUOUS */
+#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
+#endif  /* MORECORE_CONTIGUOUS */
+#endif  /* DEFAULT_GRANULARITY */
 #ifndef DEFAULT_TRIM_THRESHOLD
-  #ifndef MORECORE_CANNOT_TRIM
-    #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
-  #else                                             /* MORECORE_CANNOT_TRIM */
-    #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
-  #endif                                            /* MORECORE_CANNOT_TRIM */
-#endif                                            /* DEFAULT_TRIM_THRESHOLD */
+#ifndef MORECORE_CANNOT_TRIM
+#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
+#else   /* MORECORE_CANNOT_TRIM */
+#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
+#endif  /* MORECORE_CANNOT_TRIM */
+#endif  /* DEFAULT_TRIM_THRESHOLD */
 #ifndef DEFAULT_MMAP_THRESHOLD
-  #if HAVE_MMAP
-    #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
-  #else                                                        /* HAVE_MMAP */
-    #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
-  #endif                                                       /* HAVE_MMAP */
-#endif                                            /* DEFAULT_MMAP_THRESHOLD */
+#if HAVE_MMAP
+#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
+#else   /* HAVE_MMAP */
+#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
+#endif  /* HAVE_MMAP */
+#endif  /* DEFAULT_MMAP_THRESHOLD */
 #ifndef MAX_RELEASE_CHECK_RATE
-  #if HAVE_MMAP
-    #define MAX_RELEASE_CHECK_RATE 4095
-  #else
-    #define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
-  #endif                                                       /* HAVE_MMAP */
-#endif                                            /* MAX_RELEASE_CHECK_RATE */
+#if HAVE_MMAP
+#define MAX_RELEASE_CHECK_RATE 4095
+#else
+#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
+#endif /* HAVE_MMAP */
+#endif /* MAX_RELEASE_CHECK_RATE */
 #ifndef USE_BUILTIN_FFS
-  #define USE_BUILTIN_FFS 0
-#endif                                                   /* USE_BUILTIN_FFS */
+#define USE_BUILTIN_FFS 0
+#endif  /* USE_BUILTIN_FFS */
 #ifndef USE_DEV_RANDOM
-  #define USE_DEV_RANDOM 0
-#endif                                                    /* USE_DEV_RANDOM */
+#define USE_DEV_RANDOM 0
+#endif  /* USE_DEV_RANDOM */
 #ifndef NO_MALLINFO
-  #define NO_MALLINFO 0
-#endif                                                       /* NO_MALLINFO */
+#define NO_MALLINFO 0
+#endif  /* NO_MALLINFO */
 #ifndef MALLINFO_FIELD_TYPE
-  #define MALLINFO_FIELD_TYPE size_t
-#endif                                               /* MALLINFO_FIELD_TYPE */
+#define MALLINFO_FIELD_TYPE size_t
+#endif  /* MALLINFO_FIELD_TYPE */
 #ifndef NO_MALLOC_STATS
-  #define NO_MALLOC_STATS 0
-#endif                                                   /* NO_MALLOC_STATS */
+#define NO_MALLOC_STATS 0
+#endif  /* NO_MALLOC_STATS */
 #ifndef NO_SEGMENT_TRAVERSAL
-  #define NO_SEGMENT_TRAVERSAL 0
-#endif                                              /* NO_SEGMENT_TRAVERSAL */
+#define NO_SEGMENT_TRAVERSAL 0
+#endif /* NO_SEGMENT_TRAVERSAL */
 
 /*
   mallopt tuning options.  SVID/XPG defines four standard parameter
@@ -731,9 +731,9 @@ MAX_RELEASE_CHECK_RATE   default: 4095 unless not HAVE_MMAP
 #undef M_TRIM_THRESHOLD
 #undef M_GRANULARITY
 #undef M_MMAP_THRESHOLD
-#define M_TRIM_THRESHOLD (-1)
-#define M_GRANULARITY (-2)
-#define M_MMAP_THRESHOLD (-3)
+#define M_TRIM_THRESHOLD     (-1)
+#define M_GRANULARITY        (-2)
+#define M_MMAP_THRESHOLD     (-3)
 
 /* ------------------------ Mallinfo declarations ------------------------ */
 
@@ -762,32 +762,28 @@ MAX_RELEASE_CHECK_RATE   default: 4095 unless not HAVE_MMAP
 
 /* #define HAVE_USR_INCLUDE_MALLOC_H */
 
-  #ifdef HAVE_USR_INCLUDE_MALLOC_H
-    #include "/usr/include/malloc.h"
-  #else                                        /* HAVE_USR_INCLUDE_MALLOC_H */
-    #ifndef STRUCT_MALLINFO_DECLARED
-      /* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is
-       * defined */
-      #define _STRUCT_MALLINFO
-      #define STRUCT_MALLINFO_DECLARED 1
+#ifdef HAVE_USR_INCLUDE_MALLOC_H
+#include "/usr/include/malloc.h"
+#else /* HAVE_USR_INCLUDE_MALLOC_H */
+#ifndef STRUCT_MALLINFO_DECLARED
+/* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */
+#define _STRUCT_MALLINFO
+#define STRUCT_MALLINFO_DECLARED 1
 struct mallinfo {
-
-  MALLINFO_FIELD_TYPE arena;     /* non-mmapped space allocated from system */
-  MALLINFO_FIELD_TYPE ordblks;                     /* number of free chunks */
-  MALLINFO_FIELD_TYPE smblks;                                   /* always 0 */
-  MALLINFO_FIELD_TYPE hblks;                                    /* always 0 */
-  MALLINFO_FIELD_TYPE hblkhd;                   /* space in mmapped regions */
-  MALLINFO_FIELD_TYPE usmblks;             /* maximum total allocated space */
-  MALLINFO_FIELD_TYPE fsmblks;                                  /* always 0 */
-  MALLINFO_FIELD_TYPE uordblks;                    /* total allocated space */
-  MALLINFO_FIELD_TYPE fordblks;                         /* total free space */
-  MALLINFO_FIELD_TYPE keepcost;       /* releasable (via malloc_trim) space */
-
+  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
+  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
+  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
+  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
+  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
+  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
+  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
+  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
+  MALLINFO_FIELD_TYPE fordblks; /* total free space */
+  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
 };
-
-    #endif                                      /* STRUCT_MALLINFO_DECLARED */
-  #endif                                       /* HAVE_USR_INCLUDE_MALLOC_H */
-#endif                                                       /* NO_MALLINFO */
+#endif /* STRUCT_MALLINFO_DECLARED */
+#endif /* HAVE_USR_INCLUDE_MALLOC_H */
+#endif /* NO_MALLINFO */
 
 /*
   Try to persuade compilers to inline. The most critical functions for
@@ -796,14 +792,14 @@ struct mallinfo {
 
 #ifndef FORCEINLINE
   #if defined(__GNUC__)
-    #define FORCEINLINE __inline __attribute__((always_inline))
+#define FORCEINLINE __inline __attribute__ ((always_inline))
   #elif defined(_MSC_VER)
     #define FORCEINLINE __forceinline
   #endif
 #endif
 #ifndef NOINLINE
   #if defined(__GNUC__)
-    #define NOINLINE __attribute__((noinline))
+    #define NOINLINE __attribute__ ((noinline))
   #elif defined(_MSC_VER)
     #define NOINLINE __declspec(noinline)
   #else
@@ -813,43 +809,42 @@ struct mallinfo {
 
 #ifdef __cplusplus
 extern "C" {
-
-  #ifndef FORCEINLINE
-    #define FORCEINLINE inline
-  #endif
-#endif                                                       /* __cplusplus */
 #ifndef FORCEINLINE
-  #define FORCEINLINE
+ #define FORCEINLINE inline
+#endif
+#endif /* __cplusplus */
+#ifndef FORCEINLINE
+ #define FORCEINLINE
 #endif
 
 #if !ONLY_MSPACES
 
 /* ------------------- Declarations of public routines ------------------- */
 
-  #ifndef USE_DL_PREFIX
-    #define dlcalloc calloc
-    #define dlfree free
-    #define dlmalloc malloc
-    #define dlmemalign memalign
-    #define dlposix_memalign posix_memalign
-    #define dlrealloc realloc
-    #define dlrealloc_in_place realloc_in_place
-    #define dlvalloc valloc
-    #define dlpvalloc pvalloc
-    #define dlmallinfo mallinfo
-    #define dlmallopt mallopt
-    #define dlmalloc_trim malloc_trim
-    #define dlmalloc_stats malloc_stats
-    #define dlmalloc_usable_size malloc_usable_size
-    #define dlmalloc_footprint malloc_footprint
-    #define dlmalloc_max_footprint malloc_max_footprint
-    #define dlmalloc_footprint_limit malloc_footprint_limit
-    #define dlmalloc_set_footprint_limit malloc_set_footprint_limit
-    #define dlmalloc_inspect_all malloc_inspect_all
-    #define dlindependent_calloc independent_calloc
-    #define dlindependent_comalloc independent_comalloc
-    #define dlbulk_free bulk_free
-  #endif                                                   /* USE_DL_PREFIX */
+#ifndef USE_DL_PREFIX
+#define dlcalloc               calloc
+#define dlfree                 free
+#define dlmalloc               malloc
+#define dlmemalign             memalign
+#define dlposix_memalign       posix_memalign
+#define dlrealloc              realloc
+#define dlrealloc_in_place     realloc_in_place
+#define dlvalloc               valloc
+#define dlpvalloc              pvalloc
+#define dlmallinfo             mallinfo
+#define dlmallopt              mallopt
+#define dlmalloc_trim          malloc_trim
+#define dlmalloc_stats         malloc_stats
+#define dlmalloc_usable_size   malloc_usable_size
+#define dlmalloc_footprint     malloc_footprint
+#define dlmalloc_max_footprint malloc_max_footprint
+#define dlmalloc_footprint_limit malloc_footprint_limit
+#define dlmalloc_set_footprint_limit malloc_set_footprint_limit
+#define dlmalloc_inspect_all   malloc_inspect_all
+#define dlindependent_calloc   independent_calloc
+#define dlindependent_comalloc independent_comalloc
+#define dlbulk_free            bulk_free
+#endif /* USE_DL_PREFIX */
 
 /*
   malloc(size_t n)
@@ -865,7 +860,7 @@ extern "C" {
   maximum supported value of n differs across systems, but is in all
   cases less than the maximum representable value of a size_t.
 */
-DLMALLOC_EXPORT void *dlmalloc(size_t);
+DLMALLOC_EXPORT void* dlmalloc(size_t);
 
 /*
   free(void* p)
@@ -874,14 +869,14 @@ DLMALLOC_EXPORT void *dlmalloc(size_t);
   It has no effect if p is null. If p was not malloced or already
   freed, free(p) will by default cause the current program to abort.
 */
-DLMALLOC_EXPORT void dlfree(void *);
+DLMALLOC_EXPORT void  dlfree(void*);
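
A minimal usage sketch: when USE_DL_PREFIX is not defined, the dl-prefixed
names above are #defined to the standard names, so this is ordinary
malloc/free:

  void demo(void) {
    char* p = (char*)dlmalloc(64);  /* null on failure (MALLOC_FAILURE_ACTION
                                       sets errno = ENOMEM by default) */
    if (p == 0) return;
    p[0] = 'x';
    dlfree(p);                      /* dlfree(0) would be a harmless no-op */
  }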
 
 /*
   calloc(size_t n_elements, size_t element_size);
   Returns a pointer to n_elements * element_size bytes, with all locations
   set to zero.
 */
-DLMALLOC_EXPORT void *dlcalloc(size_t, size_t);
+DLMALLOC_EXPORT void* dlcalloc(size_t, size_t);
 
 /*
   realloc(void* p, size_t n)
@@ -905,7 +900,7 @@ DLMALLOC_EXPORT void *dlcalloc(size_t, size_t);
   The old unix realloc convention of allowing the last-free'd chunk
   to be used as an argument to realloc is not supported.
 */
-DLMALLOC_EXPORT void *dlrealloc(void *, size_t);
+DLMALLOC_EXPORT void* dlrealloc(void*, size_t);
 
 /*
   realloc_in_place(void* p, size_t n)
@@ -920,7 +915,7 @@ DLMALLOC_EXPORT void *dlrealloc(void *, size_t);
 
   Returns p if successful; otherwise null.
 */
-DLMALLOC_EXPORT void *dlrealloc_in_place(void *, size_t);
+DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t);
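
A sketch of the intended calling pattern: attempt the resize in place
first, then fall back to ordinary realloc (which may move the chunk):

  void* grow(void* p, size_t n) {
    void* q = dlrealloc_in_place(p, n);
    if (q != 0) return q;         /* success: same address, new size */
    return dlrealloc(p, n);       /* may move; p stays valid if this fails */
  }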
 
 /*
   memalign(size_t alignment, size_t n);
@@ -934,7 +929,7 @@ DLMALLOC_EXPORT void *dlrealloc_in_place(void *, size_t);
 
   Overreliance on memalign is a sure way to fragment space.
 */
-DLMALLOC_EXPORT void *dlmemalign(size_t, size_t);
+DLMALLOC_EXPORT void* dlmemalign(size_t, size_t);
 
 /*
   int posix_memalign(void** pp, size_t alignment, size_t n);
@@ -944,14 +939,14 @@ DLMALLOC_EXPORT void *dlmemalign(size_t, size_t);
   returns EINVAL if the alignment is not a power of two (3) fails and
   returns ENOMEM if memory cannot be allocated.
 */
-DLMALLOC_EXPORT int dlposix_memalign(void **, size_t, size_t);
+DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t);
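
A sketch of checked use per the contract above (0 on success, EINVAL for
a bad alignment, ENOMEM when memory cannot be allocated):

  void* page_aligned(size_t n) {
    void* mem = 0;
    int rc = dlposix_memalign(&mem, 4096, n);  /* alignment: a power of two,
                                                  multiple of sizeof(void*) */
    return rc == 0 ? mem : 0;
  }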
 
 /*
   valloc(size_t n);
   Equivalent to memalign(pagesize, n), where pagesize is the page
   size of the system. If the pagesize is unknown, 4096 is used.
 */
-DLMALLOC_EXPORT void *dlvalloc(size_t);
+DLMALLOC_EXPORT void* dlvalloc(size_t);
 
 /*
   mallopt(int parameter_number, int parameter_value)
@@ -1026,7 +1021,7 @@ DLMALLOC_EXPORT size_t dlmalloc_footprint_limit();
 */
 DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes);
 
-  #if MALLOC_INSPECT_ALL
+#if MALLOC_INSPECT_ALL
 /*
   malloc_inspect_all(void(*handler)(void *start,
                                     void *end,
@@ -1048,23 +1043,19 @@ DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes);
   than 1000, you could write:
   static int count = 0;
   void count_chunks(void* start, void* end, size_t used, void* arg) {
-
     if (used >= 1000) ++count;
-
   }
-
   then:
     malloc_inspect_all(count_chunks, NULL);
 
   malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
 */
-DLMALLOC_EXPORT void dlmalloc_inspect_all(void (*handler)(void *, void *,
-                                                          size_t, void *),
-                                          void *arg);
+DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
+                           void* arg);
 
-  #endif                                              /* MALLOC_INSPECT_ALL */
+#endif /* MALLOC_INSPECT_ALL */
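
Putting the count_chunks fragment above together, a complete sketch
(only available when built with MALLOC_INSPECT_ALL):

  static int count = 0;
  static void count_chunks(void* start, void* end, size_t used, void* arg) {
    (void)start; (void)end; (void)arg;
    if (used >= 1000) ++count;
  }
  void count_large_chunks(void) {
    count = 0;
    dlmalloc_inspect_all(count_chunks, 0);  /* handler should not re-enter
                                               malloc/free */
  }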
 
-  #if !NO_MALLINFO
+#if !NO_MALLINFO
 /*
   mallinfo()
   Returns (by copy) a struct containing various summary statistics:
@@ -1088,7 +1079,7 @@ DLMALLOC_EXPORT void dlmalloc_inspect_all(void (*handler)(void *, void *,
   thus be inaccurate.
 */
 DLMALLOC_EXPORT struct mallinfo dlmallinfo(void);
-  #endif                                                     /* NO_MALLINFO */
+#endif /* NO_MALLINFO */
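
A sketch of reading the summary counters (field meanings as in the struct
above; MALLINFO_FIELD_TYPE defaults to size_t, and NO_MALLINFO must be 0,
its default):

  #include <stdio.h>
  void print_heap_stats(void) {
    struct mallinfo mi = dlmallinfo();
    printf("arena=%zu in-use=%zu free=%zu\n",
           (size_t)mi.arena, (size_t)mi.uordblks, (size_t)mi.fordblks);
  }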
 
 /*
   independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
@@ -1126,7 +1117,6 @@ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void);
   struct Node { int item; struct Node* next; };
 
   struct Node* build_list() {
-
     struct Node** pool;
     int n = read_number_of_nodes_needed();
     if (n <= 0) return 0;
@@ -1138,11 +1128,9 @@ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void);
       pool[i]->next = pool[i+1];
     free(pool);     // Can now free the array (or not, if it is needed later)
     return first;
-
   }
-
 */
-DLMALLOC_EXPORT void **dlindependent_calloc(size_t, size_t, void **);
+DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**);
 
 /*
   independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
@@ -1181,7 +1169,6 @@ DLMALLOC_EXPORT void **dlindependent_calloc(size_t, size_t, void **);
   struct Foot { ... }
 
   void send_message(char* msg) {
-
     int msglen = strlen(msg);
     size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
     void* chunks[3];
@@ -1191,7 +1178,6 @@ DLMALLOC_EXPORT void **dlindependent_calloc(size_t, size_t, void **);
     char*        body = (char*)(chunks[1]);
     struct Foot* foot = (struct Foot*)(chunks[2]);
     // ...
-
   }
 
   In general though, independent_comalloc is worth using only for
@@ -1202,7 +1188,7 @@ DLMALLOC_EXPORT void **dlindependent_calloc(size_t, size_t, void **);
   since it cannot reuse existing noncontiguous small chunks that
   might be available for some of the elements.
 */
-DLMALLOC_EXPORT void **dlindependent_comalloc(size_t, size_t *, void **);
+DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**);
 
 /*
   bulk_free(void* array[], size_t n_elements)
@@ -1213,14 +1199,14 @@ DLMALLOC_EXPORT void **dlindependent_comalloc(size_t, size_t *, void **);
   is returned.  For large arrays of pointers with poor locality, it
   may be worthwhile to sort this array before calling bulk_free.
 */
-DLMALLOC_EXPORT size_t dlbulk_free(void **, size_t n_elements);
+DLMALLOC_EXPORT size_t  dlbulk_free(void**, size_t n_elements);
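
A sketch of releasing a batch in one call; the return value counts the
pointers that could not be freed:

  void free_batch(void* ptrs[], size_t n) {
    size_t unfreed = dlbulk_free(ptrs, n);
    /* 0 means every element was released; otherwise some entries remain */
    (void)unfreed;
  }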
 
 /*
   pvalloc(size_t n);
   Equivalent to valloc(minimum-page-that-holds(n)), that is,
   round up n to nearest pagesize.
  */
-DLMALLOC_EXPORT void *dlpvalloc(size_t);
+DLMALLOC_EXPORT void*  dlpvalloc(size_t);
 
 /*
   malloc_trim(size_t pad);
@@ -1243,7 +1229,7 @@ DLMALLOC_EXPORT void *dlpvalloc(size_t);
 
   Malloc_trim returns 1 if it actually released any memory, else 0.
 */
-DLMALLOC_EXPORT int dlmalloc_trim(size_t);
+DLMALLOC_EXPORT int  dlmalloc_trim(size_t);
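
A sketch of the typical call after a burst of frees, keeping some headroom
for near-future allocations:

  void release_unused(void) {
    int released = dlmalloc_trim((size_t)64 * 1024);  /* keep ~64K of pad */
    (void)released;  /* 1 only if memory actually went back to the system */
  }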
 
 /*
   malloc_stats();
@@ -1264,7 +1250,7 @@ DLMALLOC_EXPORT int dlmalloc_trim(size_t);
   malloc_stats prints only the most commonly interesting statistics.
   More information can be obtained by calling mallinfo.
 */
-DLMALLOC_EXPORT void dlmalloc_stats(void);
+DLMALLOC_EXPORT void  dlmalloc_stats(void);
 
 /*
   malloc_usable_size(void* p);
@@ -1280,9 +1266,9 @@ DLMALLOC_EXPORT void dlmalloc_stats(void);
   p = malloc(n);
   assert(malloc_usable_size(p) >= 256);
 */
-size_t dlmalloc_usable_size(void *);
+size_t dlmalloc_usable_size(void*);
 
-#endif                                                      /* ONLY_MSPACES */
+#endif /* ONLY_MSPACES */
 
 #if MSPACES
 
@@ -1290,7 +1276,7 @@ size_t dlmalloc_usable_size(void *);
   mspace is an opaque type representing an independent
   region of space that supports mspace_malloc, etc.
 */
-typedef void *mspace;
+typedef void* mspace;
 
 /*
   create_mspace creates and returns a new independent space with the
@@ -1322,8 +1308,7 @@ DLMALLOC_EXPORT size_t destroy_mspace(mspace msp);
   Destroying this space will deallocate all additionally allocated
   space (if possible) but not the initial base.
 */
-DLMALLOC_EXPORT mspace create_mspace_with_base(void *base, size_t capacity,
-                                               int locked);
+DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked);
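
A hedged sketch of carving an mspace out of a caller-owned buffer (arena
here is an assumed static pool, which must outlive the mspace):

  static char arena[1 << 20];  /* 1 MiB base region */
  void arena_demo(void) {
    mspace ms = create_mspace_with_base(arena, sizeof(arena), 0);
    if (ms == 0) return;       /* capacity too small for bookkeeping */
    void* p = mspace_malloc(ms, 256);
    mspace_free(ms, p);
    destroy_mspace(ms);        /* releases added segments, not arena itself */
  }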
 
 /*
   mspace_track_large_chunks controls whether requests for large chunks
@@ -1338,11 +1323,12 @@ DLMALLOC_EXPORT mspace create_mspace_with_base(void *base, size_t capacity,
 */
 DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable);
 
+
 /*
   mspace_malloc behaves as malloc, but operates within
   the given space.
 */
-DLMALLOC_EXPORT void *mspace_malloc(mspace msp, size_t bytes);
+DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes);
 
 /*
   mspace_free behaves as free, but operates within
@@ -1352,7 +1338,7 @@ DLMALLOC_EXPORT void *mspace_malloc(mspace msp, size_t bytes);
   free may be called instead of mspace_free because freed chunks from
   any space are handled by their originating spaces.
 */
-DLMALLOC_EXPORT void mspace_free(mspace msp, void *mem);
+DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem);
 
 /*
   mspace_realloc behaves as realloc, but operates within
@@ -1363,38 +1349,33 @@ DLMALLOC_EXPORT void mspace_free(mspace msp, void *mem);
   realloced chunks from any space are handled by their originating
   spaces.
 */
-DLMALLOC_EXPORT void *mspace_realloc(mspace msp, void *mem, size_t newsize);
+DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize);
 
 /*
   mspace_calloc behaves as calloc, but operates within
   the given space.
 */
-DLMALLOC_EXPORT void *mspace_calloc(mspace msp, size_t n_elements,
-                                    size_t elem_size);
+DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
 
 /*
   mspace_memalign behaves as memalign, but operates within
   the given space.
 */
-DLMALLOC_EXPORT void *mspace_memalign(mspace msp, size_t alignment,
-                                      size_t bytes);
+DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
 
 /*
   mspace_independent_calloc behaves as independent_calloc, but
   operates within the given space.
 */
-DLMALLOC_EXPORT void **mspace_independent_calloc(mspace msp, size_t n_elements,
-                                                 size_t elem_size,
-                                                 void * chunks[]);
+DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements,
+                                 size_t elem_size, void* chunks[]);
 
 /*
   mspace_independent_comalloc behaves as independent_comalloc, but
   operates within the given space.
 */
-DLMALLOC_EXPORT void **mspace_independent_comalloc(mspace msp,
-                                                   size_t n_elements,
-                                                   size_t sizes[],
-                                                   void * chunks[]);
+DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+                                   size_t sizes[], void* chunks[]);
 
 /*
   mspace_footprint() returns the number of bytes obtained from the
@@ -1408,18 +1389,19 @@ DLMALLOC_EXPORT size_t mspace_footprint(mspace msp);
 */
 DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp);
 
-  #if !NO_MALLINFO
+
+#if !NO_MALLINFO
 /*
   mspace_mallinfo behaves as mallinfo, but reports properties of
   the given space.
 */
 DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp);
-  #endif                                                     /* NO_MALLINFO */
+#endif /* NO_MALLINFO */
 
 /*
   malloc_usable_size(void* p) behaves the same as malloc_usable_size;
 */
-DLMALLOC_EXPORT size_t mspace_usable_size(const void *mem);
+DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem);
 
 /*
   mspace_malloc_stats behaves as malloc_stats, but reports
@@ -1438,13 +1420,11 @@ DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad);
 */
 DLMALLOC_EXPORT int mspace_mallopt(int, int);
 
-#endif                                                           /* MSPACES */
+#endif /* MSPACES */
 
 #ifdef __cplusplus
-
-}                                                      /* end of extern "C" */
-
-#endif                                                       /* __cplusplus */
+}  /* end of extern "C" */
+#endif /* __cplusplus */
 
 /*
   ========================================================================
@@ -1459,207 +1439,195 @@ DLMALLOC_EXPORT int mspace_mallopt(int, int);
 /*------------------------------ internal #includes ---------------------- */
 
 #ifdef _MSC_VER
-  #pragma warning(disable : 4146)                 /* no "unsigned" warnings */
-#endif                                                          /* _MSC_VER */
+#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
+#endif /* _MSC_VER */
 #if !NO_MALLOC_STATS
-  #include <stdio.h>                        /* for printing in malloc_stats */
-#endif                                                   /* NO_MALLOC_STATS */
+#include <stdio.h>       /* for printing in malloc_stats */
+#endif /* NO_MALLOC_STATS */
 #ifndef LACKS_ERRNO_H
-  #include <errno.h>                           /* for MALLOC_FAILURE_ACTION */
-#endif                                                     /* LACKS_ERRNO_H */
+#include <errno.h>       /* for MALLOC_FAILURE_ACTION */
+#endif /* LACKS_ERRNO_H */
 #ifdef DEBUG
-  #if ABORT_ON_ASSERT_FAILURE
-    #undef assert
-    #define assert(x) \
-      if (!(x)) ABORT
-  #else                                          /* ABORT_ON_ASSERT_FAILURE */
-    #include <assert.h>
-  #endif                                         /* ABORT_ON_ASSERT_FAILURE */
-#else                                                              /* DEBUG */
-  #ifndef assert
-    #define assert(x)
-  #endif
-  #define DEBUG 0
-#endif                                                             /* DEBUG */
+#if ABORT_ON_ASSERT_FAILURE
+#undef assert
+#define assert(x) if(!(x)) ABORT
+#else /* ABORT_ON_ASSERT_FAILURE */
+#include <assert.h>
+#endif /* ABORT_ON_ASSERT_FAILURE */
+#else  /* DEBUG */
+#ifndef assert
+#define assert(x)
+#endif
+#define DEBUG 0
+#endif /* DEBUG */
 #if !defined(WIN32) && !defined(LACKS_TIME_H)
-  #include <time.h>                             /* for magic initialization */
-#endif                                                             /* WIN32 */
+#include <time.h>        /* for magic initialization */
+#endif /* WIN32 */
 #ifndef LACKS_STDLIB_H
-  #include <stdlib.h>                                        /* for abort() */
-#endif                                                    /* LACKS_STDLIB_H */
+#include <stdlib.h>      /* for abort() */
+#endif /* LACKS_STDLIB_H */
 #ifndef LACKS_STRING_H
-  #include <string.h>                                     /* for memset etc */
-#endif                                                    /* LACKS_STRING_H */
+#include <string.h>      /* for memset etc */
+#endif  /* LACKS_STRING_H */
 #if USE_BUILTIN_FFS
-  #ifndef LACKS_STRINGS_H
-    #include <strings.h>                                         /* for ffs */
-  #endif                                                 /* LACKS_STRINGS_H */
-#endif                                                   /* USE_BUILTIN_FFS */
+#ifndef LACKS_STRINGS_H
+#include <strings.h>     /* for ffs */
+#endif /* LACKS_STRINGS_H */
+#endif /* USE_BUILTIN_FFS */
 #if HAVE_MMAP
-  #ifndef LACKS_SYS_MMAN_H
-    /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
-    #if (defined(linux) && !defined(__USE_GNU))
-      #define __USE_GNU 1
-      #include <sys/mman.h>                                     /* for mmap */
-      #undef __USE_GNU
-    #else
-      #include <sys/mman.h>                                     /* for mmap */
-    #endif                                                         /* linux */
-  #endif                                                /* LACKS_SYS_MMAN_H */
-  #ifndef LACKS_FCNTL_H
-    #include <fcntl.h>
-  #endif                                                   /* LACKS_FCNTL_H */
-#endif                                                         /* HAVE_MMAP */
+#ifndef LACKS_SYS_MMAN_H
+/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
+#if (defined(linux) && !defined(__USE_GNU))
+#define __USE_GNU 1
+#include <sys/mman.h>    /* for mmap */
+#undef __USE_GNU
+#else
+#include <sys/mman.h>    /* for mmap */
+#endif /* linux */
+#endif /* LACKS_SYS_MMAN_H */
+#ifndef LACKS_FCNTL_H
+#include <fcntl.h>
+#endif /* LACKS_FCNTL_H */
+#endif /* HAVE_MMAP */
 #ifndef LACKS_UNISTD_H
-  #include <unistd.h>                                  /* for sbrk, sysconf */
-#else                                                     /* LACKS_UNISTD_H */
-  #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
-extern void *sbrk(ptrdiff_t);
-  #endif                                                     /* FreeBSD etc */
-#endif                                                    /* LACKS_UNISTD_H */
+#include <unistd.h>     /* for sbrk, sysconf */
+#else /* LACKS_UNISTD_H */
+#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
+extern void*     sbrk(ptrdiff_t);
+#endif /* FreeBSD etc */
+#endif /* LACKS_UNISTD_H */
 
 /* Declarations for locking */
 #if USE_LOCKS
-  #ifndef WIN32
-    #if defined(__SVR4) && defined(__sun)                        /* solaris */
-      #include <thread.h>
-    #elif !defined(LACKS_SCHED_H)
-      #include <sched.h>
-    #endif                                      /* solaris or LACKS_SCHED_H */
-    #if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || \
-        !USE_SPIN_LOCKS
-      #include <pthread.h>
-    #endif                                       /* USE_RECURSIVE_LOCKS ... */
-  #elif defined(_MSC_VER)
-    #ifndef _M_AMD64
-      /* These are already defined on AMD64 builds */
-      #ifdef __cplusplus
+#ifndef WIN32
+#if defined (__SVR4) && defined (__sun)  /* solaris */
+#include <thread.h>
+#elif !defined(LACKS_SCHED_H)
+#include <sched.h>
+#endif /* solaris or LACKS_SCHED_H */
+#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
+#include <pthread.h>
+#endif /* USE_RECURSIVE_LOCKS ... */
+#elif defined(_MSC_VER)
+#ifndef _M_AMD64
+/* These are already defined on AMD64 builds */
+#ifdef __cplusplus
 extern "C" {
-
-      #endif                                                 /* __cplusplus */
-LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange,
-                                         LONG Comp);
+#endif /* __cplusplus */
+LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
 LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
-      #ifdef __cplusplus
-
+#ifdef __cplusplus
 }
-
-      #endif                                                 /* __cplusplus */
-    #endif                                                      /* _M_AMD64 */
-    #pragma intrinsic(_InterlockedCompareExchange)
-    #pragma intrinsic(_InterlockedExchange)
-    #define interlockedcompareexchange _InterlockedCompareExchange
-    #define interlockedexchange _InterlockedExchange
-  #elif defined(WIN32) && defined(__GNUC__)
-    #define interlockedcompareexchange(a, b, c) \
-      __sync_val_compare_and_swap(a, c, b)
-    #define interlockedexchange __sync_lock_test_and_set
-  #endif                                                           /* Win32 */
-#else                                                          /* USE_LOCKS */
-#endif                                                         /* USE_LOCKS */
+#endif /* __cplusplus */
+#endif /* _M_AMD64 */
+#pragma intrinsic (_InterlockedCompareExchange)
+#pragma intrinsic (_InterlockedExchange)
+#define interlockedcompareexchange _InterlockedCompareExchange
+#define interlockedexchange _InterlockedExchange
+#elif defined(WIN32) && defined(__GNUC__)
+#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
+#define interlockedexchange __sync_lock_test_and_set
+#endif /* Win32 */
+#else /* USE_LOCKS */
+#endif /* USE_LOCKS */
 
 #ifndef LOCK_AT_FORK
-  #define LOCK_AT_FORK 0
+#define LOCK_AT_FORK 0
 #endif
 
 /* Declarations for bit scanning on win32 */
-#if defined(_MSC_VER) && _MSC_VER >= 1300
-  #ifndef BitScanForward                 /* Try to avoid pulling in WinNT.h */
-    #ifdef __cplusplus
+#if defined(_MSC_VER) && _MSC_VER>=1300
+#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
+#ifdef __cplusplus
 extern "C" {
-
-    #endif                                                   /* __cplusplus */
+#endif /* __cplusplus */
 unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
 unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
-    #ifdef __cplusplus
-
+#ifdef __cplusplus
 }
+#endif /* __cplusplus */
 
-    #endif                                                   /* __cplusplus */
-
-    #define BitScanForward _BitScanForward
-    #define BitScanReverse _BitScanReverse
-    #pragma intrinsic(_BitScanForward)
-    #pragma intrinsic(_BitScanReverse)
-  #endif                                                  /* BitScanForward */
-#endif                               /* defined(_MSC_VER) && _MSC_VER>=1300 */
+#define BitScanForward _BitScanForward
+#define BitScanReverse _BitScanReverse
+#pragma intrinsic(_BitScanForward)
+#pragma intrinsic(_BitScanReverse)
+#endif /* BitScanForward */
+#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
 
 #ifndef WIN32
-  #ifndef malloc_getpagesize
-    #ifdef _SC_PAGESIZE             /* some SVR4 systems omit an underscore */
-      #ifndef _SC_PAGE_SIZE
-        #define _SC_PAGE_SIZE _SC_PAGESIZE
-      #endif
-    #endif
-    #ifdef _SC_PAGE_SIZE
-      #define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
-    #else
-      #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
-extern size_t getpagesize();
-        #define malloc_getpagesize getpagesize()
-      #else
-        #ifdef WIN32               /* use supplied emulation of getpagesize */
-          #define malloc_getpagesize getpagesize()
-        #else
-          #ifndef LACKS_SYS_PARAM_H
-            #include <sys/param.h>
-          #endif
-          #ifdef EXEC_PAGESIZE
-            #define malloc_getpagesize EXEC_PAGESIZE
-          #else
-            #ifdef NBPG
-              #ifndef CLSIZE
-                #define malloc_getpagesize NBPG
-              #else
-                #define malloc_getpagesize (NBPG * CLSIZE)
-              #endif
-            #else
-              #ifdef NBPC
-                #define malloc_getpagesize NBPC
-              #else
-                #ifdef PAGESIZE
-                  #define malloc_getpagesize PAGESIZE
-                #else                                         /* just guess */
-                  #define malloc_getpagesize ((size_t)4096U)
-                #endif
-              #endif
-            #endif
-          #endif
-        #endif
-      #endif
-    #endif
-  #endif
+#ifndef malloc_getpagesize
+#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
+#    ifndef _SC_PAGE_SIZE
+#      define _SC_PAGE_SIZE _SC_PAGESIZE
+#    endif
+#  endif
+#  ifdef _SC_PAGE_SIZE
+#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
+#  else
+#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
+       extern size_t getpagesize();
+#      define malloc_getpagesize getpagesize()
+#    else
+#      ifdef WIN32 /* use supplied emulation of getpagesize */
+#        define malloc_getpagesize getpagesize()
+#      else
+#        ifndef LACKS_SYS_PARAM_H
+#          include <sys/param.h>
+#        endif
+#        ifdef EXEC_PAGESIZE
+#          define malloc_getpagesize EXEC_PAGESIZE
+#        else
+#          ifdef NBPG
+#            ifndef CLSIZE
+#              define malloc_getpagesize NBPG
+#            else
+#              define malloc_getpagesize (NBPG * CLSIZE)
+#            endif
+#          else
+#            ifdef NBPC
+#              define malloc_getpagesize NBPC
+#            else
+#              ifdef PAGESIZE
+#                define malloc_getpagesize PAGESIZE
+#              else /* just guess */
+#                define malloc_getpagesize ((size_t)4096U)
+#              endif
+#            endif
+#          endif
+#        endif
+#      endif
+#    endif
+#  endif
+#endif
 #endif
 
 /* ------------------- size_t and alignment properties -------------------- */
 
 /* The byte and bit size of a size_t */
-#define SIZE_T_SIZE (sizeof(size_t))
-#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
+#define SIZE_T_SIZE         (sizeof(size_t))
+#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
 
 /* Some constants coerced to size_t */
 /* Annoying but necessary to avoid errors on some platforms */
-#define SIZE_T_ZERO ((size_t)0)
-#define SIZE_T_ONE ((size_t)1)
-#define SIZE_T_TWO ((size_t)2)
-#define SIZE_T_FOUR ((size_t)4)
-#define TWO_SIZE_T_SIZES (SIZE_T_SIZE << 1)
-#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE << 2)
-#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES + TWO_SIZE_T_SIZES)
-#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
+#define SIZE_T_ZERO         ((size_t)0)
+#define SIZE_T_ONE          ((size_t)1)
+#define SIZE_T_TWO          ((size_t)2)
+#define SIZE_T_FOUR         ((size_t)4)
+#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
+#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
+#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
+#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
 
 /* The bit mask value corresponding to MALLOC_ALIGNMENT */
-#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
+#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
 
 /* True if address a has acceptable alignment */
-#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
+#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
 
 /* the number of bytes to offset an address to align it */
-#define align_offset(A)                                         \
-  ((((size_t)(A)&CHUNK_ALIGN_MASK) == 0)                        \
-       ? 0                                                      \
-       : ((MALLOC_ALIGNMENT - ((size_t)(A)&CHUNK_ALIGN_MASK)) & \
-          CHUNK_ALIGN_MASK))
+#define align_offset(A)\
+ ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
+  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
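
As a worked instance of the two macros above, assume MALLOC_ALIGNMENT is
16, so CHUNK_ALIGN_MASK is 15:

  /* is_aligned((void*)0x1000) -> 1
     is_aligned((void*)0x1009) -> 0   (0x1009 & 15 == 9)
     align_offset((void*)0x1009) -> (16 - 9) & 15 == 7,
     and 0x1009 + 7 == 0x1010, which is 16-byte aligned. */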
 
 /* -------------------------- MMAP preliminaries ------------------------- */
 
@@ -1669,202 +1637,193 @@ extern size_t getpagesize();
    using so many "#if"s.
 */
 
+
 /* MORECORE and MMAP must return MFAIL on failure */
-#define MFAIL ((void *)(MAX_SIZE_T))
-#define CMFAIL ((char *)(MFAIL))                 /* defined for convenience */
+#define MFAIL                ((void*)(MAX_SIZE_T))
+#define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
 
 #if HAVE_MMAP
 
-  #ifndef WIN32
-    #define MMAP_PROT (PROT_READ | PROT_WRITE)
-    #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
-      #define MAP_ANONYMOUS MAP_ANON
-    #endif                                                      /* MAP_ANON */
-    #ifdef MAP_ANONYMOUS
-
-      #define MMAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS)
+#ifndef WIN32
+#define MMAP_PROT            (PROT_READ|PROT_WRITE)
+#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+#define MAP_ANONYMOUS        MAP_ANON
+#endif /* MAP_ANON */
+#ifdef MAP_ANONYMOUS
 
-static FORCEINLINE void *unixmmap(size_t size) {
+#define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
 
-  void *result;
+static FORCEINLINE void* unixmmap(size_t size) {
+  void* result;
 
   result = mmap(0, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
-  if (result == MFAIL) return MFAIL;
+  if (result == MFAIL)
+    return MFAIL;
 
   return result;
-
 }
 
-static FORCEINLINE int unixmunmap(void *ptr, size_t size) {
-
+static FORCEINLINE int unixmunmap(void* ptr, size_t size) {
   int result;
 
   result = munmap(ptr, size);
-  if (result != 0) return result;
+  if (result != 0)
+    return result;
 
   return result;
-
 }
 
-      #define MMAP_DEFAULT(s) unixmmap(s)
-      #define MUNMAP_DEFAULT(a, s) unixmunmap((a), (s))
+#define MMAP_DEFAULT(s)       unixmmap(s)
+#define MUNMAP_DEFAULT(a, s)  unixmunmap((a), (s))
 
-    #else                                                  /* MAP_ANONYMOUS */
-      /*
-         Nearly all versions of mmap support MAP_ANONYMOUS, so the following
-         is unlikely to be needed, but is supplied just in case.
-      */
-      #define MMAP_FLAGS (MAP_PRIVATE)
-static int dev_zero_fd = -1;       /* Cached file descriptor for /dev/zero. */
-      #define MMAP_DEFAULT(s)                                        \
-        ((dev_zero_fd < 0)                                           \
-             ? (dev_zero_fd = open("/dev/zero", O_RDWR),             \
-                mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) \
-             : mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
-      #define MUNMAP_DEFAULT(a, s) munmap((a), (s))
-    #endif                                                 /* MAP_ANONYMOUS */
+#else /* MAP_ANONYMOUS */
+/*
+   Nearly all versions of mmap support MAP_ANONYMOUS, so the following
+   is unlikely to be needed, but is supplied just in case.
+*/
+#define MMAP_FLAGS           (MAP_PRIVATE)
+static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
+#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
+           (dev_zero_fd = open("/dev/zero", O_RDWR), \
+            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
+            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
+#define MUNMAP_DEFAULT(a, s)  munmap((a), (s))
+#endif /* MAP_ANONYMOUS */
 
-    #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
+#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
 
-  #else                                                            /* WIN32 */
+#else /* WIN32 */
 
 /* Win32 MMAP via VirtualAlloc */
-static FORCEINLINE void *win32mmap(size_t size) {
+static FORCEINLINE void* win32mmap(size_t size) {
+  void* ptr;
 
-  void *ptr;
-
-  ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
-  if (ptr == 0) return MFAIL;
+  ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+  if (ptr == 0)
+    return MFAIL;
 
   return ptr;
-
 }
 
 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
-static FORCEINLINE void *win32direct_mmap(size_t size) {
-
-  void *ptr;
+static FORCEINLINE void* win32direct_mmap(size_t size) {
+  void* ptr;
 
-  ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
-                     PAGE_READWRITE);
-  if (ptr == 0) return MFAIL;
+  ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
+                              PAGE_READWRITE);
+  if (ptr == 0)
+    return MFAIL;
 
   return ptr;
-
 }
 
 /* This function supports releasing coalesced segments */
-static FORCEINLINE int win32munmap(void *ptr, size_t size) {
-
+static FORCEINLINE int win32munmap(void* ptr, size_t size) {
   MEMORY_BASIC_INFORMATION minfo;
-  char *cptr = (char *)ptr;
+  char* cptr = (char*)ptr;
 
   while (size) {
-
-    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) return -1;
+    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
+      return -1;
     if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
         minfo.State != MEM_COMMIT || minfo.RegionSize > size)
       return -1;
-    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) return -1;
+    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
+      return -1;
     cptr += minfo.RegionSize;
     size -= minfo.RegionSize;
-
   }
 
   return 0;
-
 }
 
-    #define MMAP_DEFAULT(s) win32mmap(s)
-    #define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
-    #define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
-  #endif                                                           /* WIN32 */
-#endif                                                         /* HAVE_MMAP */
+#define MMAP_DEFAULT(s)             win32mmap(s)
+#define MUNMAP_DEFAULT(a, s)        win32munmap((a), (s))
+#define DIRECT_MMAP_DEFAULT(s)      win32direct_mmap(s)
+#endif /* WIN32 */
+#endif /* HAVE_MMAP */
 
 #if HAVE_MREMAP
-  #ifndef WIN32
-
-static FORCEINLINE void *dlmremap(void *old_address, size_t old_size,
-                                  size_t new_size, int flags) {
+#ifndef WIN32
 
-  void *result;
+static FORCEINLINE void* dlmremap(void* old_address, size_t old_size, size_t new_size, int flags) {
+  void* result;
 
   result = mremap(old_address, old_size, new_size, flags);
-  if (result == MFAIL) return MFAIL;
+  if (result == MFAIL)
+    return MFAIL;
 
   return result;
-
 }
 
-    #define MREMAP_DEFAULT(addr, osz, nsz, mv) \
-      dlmremap((addr), (osz), (nsz), (mv))
-  #endif                                                           /* WIN32 */
-#endif                                                       /* HAVE_MREMAP */
+#define MREMAP_DEFAULT(addr, osz, nsz, mv) dlmremap((addr), (osz), (nsz), (mv))
+#endif /* WIN32 */
+#endif /* HAVE_MREMAP */
 
 /**
  * Define CALL_MORECORE
  */
 #if HAVE_MORECORE
-  #ifdef MORECORE
-    #define CALL_MORECORE(S) MORECORE(S)
-  #else                                                         /* MORECORE */
-    #define CALL_MORECORE(S) MORECORE_DEFAULT(S)
-  #endif                                                        /* MORECORE */
-#else                                                      /* HAVE_MORECORE */
-  #define CALL_MORECORE(S) MFAIL
-#endif                                                     /* HAVE_MORECORE */
+    #ifdef MORECORE
+        #define CALL_MORECORE(S)    MORECORE(S)
+    #else  /* MORECORE */
+        #define CALL_MORECORE(S)    MORECORE_DEFAULT(S)
+    #endif /* MORECORE */
+#else  /* HAVE_MORECORE */
+    #define CALL_MORECORE(S)        MFAIL
+#endif /* HAVE_MORECORE */
 
 /**
  * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
  */
 #if HAVE_MMAP
-  #define USE_MMAP_BIT (SIZE_T_ONE)
-
-  #ifdef MMAP
-    #define CALL_MMAP(s) MMAP(s)
-  #else                                                             /* MMAP */
-    #define CALL_MMAP(s) MMAP_DEFAULT(s)
-  #endif                                                            /* MMAP */
-  #ifdef MUNMAP
-    #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
-  #else                                                           /* MUNMAP */
-    #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
-  #endif                                                          /* MUNMAP */
-  #ifdef DIRECT_MMAP
-    #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
-  #else                                                      /* DIRECT_MMAP */
-    #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
-  #endif                                                     /* DIRECT_MMAP */
-#else                                                          /* HAVE_MMAP */
-  #define USE_MMAP_BIT (SIZE_T_ZERO)
-
-  #define MMAP(s) MFAIL
-  #define MUNMAP(a, s) (-1)
-  #define DIRECT_MMAP(s) MFAIL
-  #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
-  #define CALL_MMAP(s) MMAP(s)
-  #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
-#endif                                                         /* HAVE_MMAP */
+    #define USE_MMAP_BIT            (SIZE_T_ONE)
+
+    #ifdef MMAP
+        #define CALL_MMAP(s)        MMAP(s)
+    #else /* MMAP */
+        #define CALL_MMAP(s)        MMAP_DEFAULT(s)
+    #endif /* MMAP */
+    #ifdef MUNMAP
+        #define CALL_MUNMAP(a, s)   MUNMAP((a), (s))
+    #else /* MUNMAP */
+        #define CALL_MUNMAP(a, s)   MUNMAP_DEFAULT((a), (s))
+    #endif /* MUNMAP */
+    #ifdef DIRECT_MMAP
+        #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
+    #else /* DIRECT_MMAP */
+        #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
+    #endif /* DIRECT_MMAP */
+#else  /* HAVE_MMAP */
+    #define USE_MMAP_BIT            (SIZE_T_ZERO)
+
+    #define MMAP(s)                 MFAIL
+    #define MUNMAP(a, s)            (-1)
+    #define DIRECT_MMAP(s)          MFAIL
+    #define CALL_DIRECT_MMAP(s)     DIRECT_MMAP(s)
+    #define CALL_MMAP(s)            MMAP(s)
+    #define CALL_MUNMAP(a, s)       MUNMAP((a), (s))
+#endif /* HAVE_MMAP */
 
 /**
  * Define CALL_MREMAP
  */
 #if HAVE_MMAP && HAVE_MREMAP
-  #ifdef MREMAP
-    #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
-  #else                                                           /* MREMAP */
-    #define CALL_MREMAP(addr, osz, nsz, mv) \
-      MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
-  #endif                                                          /* MREMAP */
-#else                                           /* HAVE_MMAP && HAVE_MREMAP */
-  #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
-#endif                                          /* HAVE_MMAP && HAVE_MREMAP */
+    #ifdef MREMAP
+        #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
+    #else /* MREMAP */
+        #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
+    #endif /* MREMAP */
+#else  /* HAVE_MMAP && HAVE_MREMAP */
+    #define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
+#endif /* HAVE_MMAP && HAVE_MREMAP */
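
The CALL_MMAP/CALL_MUNMAP/CALL_MREMAP indirection above exists so an
embedding project can substitute its own system primitives by defining
the hook macros before this point. A minimal sketch under that
assumption; qasan_mmap/qasan_munmap are hypothetical names used for
illustration, not part of this file:

    /* Hypothetical sketch: define the hook macros so the CALL_*
       wrappers route through custom primitives instead of the
       *_DEFAULT implementations. */
    #include <stddef.h>
    #include <sys/mman.h>

    #define SKETCH_MFAIL ((void *)(~(size_t)0))  /* stands in for MFAIL */

    static void *qasan_mmap(size_t s) {
      void *p = mmap(0, s, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return (p != MAP_FAILED) ? p : SKETCH_MFAIL;
    }

    static int qasan_munmap(void *a, size_t s) { return munmap(a, s); }

    #define MMAP(s)      qasan_mmap(s)
    #define MUNMAP(a, s) qasan_munmap((a), (s))
    /* CALL_MMAP(s) now expands to qasan_mmap(s). */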
 
 /* mstate bit set if contiguous morecore disabled or failed */
 #define USE_NONCONTIGUOUS_BIT (4U)
 
 /* segment bit set in create_mspace_with_base */
-#define EXTERN_BIT (8U)
+#define EXTERN_BIT            (8U)
+
 
 /* --------------------------- Lock preliminaries ------------------------ */
 
@@ -1897,284 +1856,247 @@ static FORCEINLINE void *dlmremap(void *old_address, size_t old_size,
 */
 
 #if !USE_LOCKS
-  #define USE_LOCK_BIT (0U)
-  #define INITIAL_LOCK(l) (0)
-  #define DESTROY_LOCK(l) (0)
-  #define ACQUIRE_MALLOC_GLOBAL_LOCK()
-  #define RELEASE_MALLOC_GLOBAL_LOCK()
+#define USE_LOCK_BIT               (0U)
+#define INITIAL_LOCK(l)            (0)
+#define DESTROY_LOCK(l)            (0)
+#define ACQUIRE_MALLOC_GLOBAL_LOCK()
+#define RELEASE_MALLOC_GLOBAL_LOCK()
 
 #else
-  #if USE_LOCKS > 1
-  /* -----------------------  User-defined locks ------------------------ */
-  /* Define your own lock implementation here */
-  /* #define INITIAL_LOCK(lk)  ... */
-  /* #define DESTROY_LOCK(lk)  ... */
-  /* #define ACQUIRE_LOCK(lk)  ... */
-  /* #define RELEASE_LOCK(lk)  ... */
-  /* #define TRY_LOCK(lk) ... */
-  /* static MLOCK_T malloc_global_mutex = ... */
-
-  #elif USE_SPIN_LOCKS
-
-  /* First, define CAS_LOCK and CLEAR_LOCK on ints */
-  /* Note CAS_LOCK defined to return 0 on success */
-
-    #if defined(__GNUC__) && \
-        (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
-      #define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)
-      #define CLEAR_LOCK(sl) __sync_lock_release(sl)
-
-    #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
+#if USE_LOCKS > 1
+/* -----------------------  User-defined locks ------------------------ */
+/* Define your own lock implementation here */
+/* #define INITIAL_LOCK(lk)  ... */
+/* #define DESTROY_LOCK(lk)  ... */
+/* #define ACQUIRE_LOCK(lk)  ... */
+/* #define RELEASE_LOCK(lk)  ... */
+/* #define TRY_LOCK(lk) ... */
+/* static MLOCK_T malloc_global_mutex = ... */
+
+#elif USE_SPIN_LOCKS
+
+/* First, define CAS_LOCK and CLEAR_LOCK on ints */
+/* Note CAS_LOCK defined to return 0 on success */
+
+#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
+#define CAS_LOCK(sl)     __sync_lock_test_and_set(sl, 1)
+#define CLEAR_LOCK(sl)   __sync_lock_release(sl)
+
+#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
 /* Custom spin locks for older gcc on x86 */
 static FORCEINLINE int x86_cas_lock(int *sl) {
-
   int ret;
   int val = 1;
   int cmp = 0;
-  __asm__ __volatile__("lock; cmpxchgl %1, %2"
-                       : "=a"(ret)
-                       : "r"(val), "m"(*(sl)), "0"(cmp)
-                       : "memory", "cc");
+  __asm__ __volatile__  ("lock; cmpxchgl %1, %2"
+                         : "=a" (ret)
+                         : "r" (val), "m" (*(sl)), "0"(cmp)
+                         : "memory", "cc");
   return ret;
-
 }
 
-static FORCEINLINE void x86_clear_lock(int *sl) {
-
+static FORCEINLINE void x86_clear_lock(int* sl) {
   assert(*sl != 0);
   int prev = 0;
   int ret;
-  __asm__ __volatile__("lock; xchgl %0, %1"
-                       : "=r"(ret)
-                       : "m"(*(sl)), "0"(prev)
-                       : "memory");
-
+  __asm__ __volatile__ ("lock; xchgl %0, %1"
+                        : "=r" (ret)
+                        : "m" (*(sl)), "0"(prev)
+                        : "memory");
 }
 
-      #define CAS_LOCK(sl) x86_cas_lock(sl)
-      #define CLEAR_LOCK(sl) x86_clear_lock(sl)
-
-    #else                                                      /* Win32 MSC */
-      #define CAS_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)1)
-      #define CLEAR_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)0)
-
-    #endif                                       /* ... gcc spins locks ... */
-
-    /* How to yield for a spin lock */
-    #define SPINS_PER_YIELD 63
-    #if defined(_MSC_VER)
-      #define SLEEP_EX_DURATION 50                 /* delay for yield/sleep */
-      #define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE)
-    #elif defined(__SVR4) && defined(__sun)                      /* solaris */
-      #define SPIN_LOCK_YIELD thr_yield();
-    #elif !defined(LACKS_SCHED_H)
-      #define SPIN_LOCK_YIELD sched_yield();
-    #else
-      #define SPIN_LOCK_YIELD
-    #endif                                                 /* ... yield ... */
-
-    #if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
+#define CAS_LOCK(sl)     x86_cas_lock(sl)
+#define CLEAR_LOCK(sl)   x86_clear_lock(sl)
+
+#else /* Win32 MSC */
+#define CAS_LOCK(sl)     interlockedexchange((volatile LONG *)sl, (LONG)1)
+#define CLEAR_LOCK(sl)   interlockedexchange((volatile LONG *)sl, (LONG)0)
+
+#endif /* ... gcc spin locks ... */
+
+/* How to yield for a spin lock */
+#define SPINS_PER_YIELD       63
+#if defined(_MSC_VER)
+#define SLEEP_EX_DURATION     50 /* delay for yield/sleep */
+#define SPIN_LOCK_YIELD  SleepEx(SLEEP_EX_DURATION, FALSE)
+#elif defined (__SVR4) && defined (__sun) /* solaris */
+#define SPIN_LOCK_YIELD   thr_yield();
+#elif !defined(LACKS_SCHED_H)
+#define SPIN_LOCK_YIELD   sched_yield();
+#else
+#define SPIN_LOCK_YIELD
+#endif /* ... yield ... */
+
+#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
 /* Plain spin locks use single word (embedded in malloc_states) */
 static int spin_acquire_lock(int *sl) {
-
   int spins = 0;
   while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
-
-    if ((++spins & SPINS_PER_YIELD) == 0) { SPIN_LOCK_YIELD; }
-
+    if ((++spins & SPINS_PER_YIELD) == 0) {
+      SPIN_LOCK_YIELD;
+    }
   }
-
   return 0;
-
 }
 
-      #define MLOCK_T int
-      #define TRY_LOCK(sl) !CAS_LOCK(sl)
-      #define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
-      #define ACQUIRE_LOCK(sl) (CAS_LOCK(sl) ? spin_acquire_lock(sl) : 0)
-      #define INITIAL_LOCK(sl) (*sl = 0)
-      #define DESTROY_LOCK(sl) (0)
+#define MLOCK_T               int
+#define TRY_LOCK(sl)          !CAS_LOCK(sl)
+#define RELEASE_LOCK(sl)      CLEAR_LOCK(sl)
+#define ACQUIRE_LOCK(sl)      (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
+#define INITIAL_LOCK(sl)      (*sl = 0)
+#define DESTROY_LOCK(sl)      (0)
 static MLOCK_T malloc_global_mutex = 0;
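
As a standalone illustration of the plain spin-lock protocol, here is a
hedged sketch of what ACQUIRE_LOCK/RELEASE_LOCK reduce to in the GCC
builtins branch above; demo_lock stands in for the int embedded in
malloc_state:

    /* Sketch: test-and-test-and-set spinning, as spin_acquire_lock
       does (minus the SPIN_LOCK_YIELD every SPINS_PER_YIELD spins). */
    static int demo_lock = 0;              /* an MLOCK_T in this config */

    static void demo_critical_section(void) {
      while (__sync_lock_test_and_set(&demo_lock, 1)) {     /* CAS_LOCK */
        while (*(volatile int *)&demo_lock != 0) { /* spin until free */ }
      }
      /* ... critical section ... */
      __sync_lock_release(&demo_lock);                    /* CLEAR_LOCK */
    }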
 
-    #else                                            /* USE_RECURSIVE_LOCKS */
-      /* types for lock owners */
-      #ifdef WIN32
-        #define THREAD_ID_T DWORD
-        #define CURRENT_THREAD GetCurrentThreadId()
-        #define EQ_OWNER(X, Y) ((X) == (Y))
-      #else
-        /*
-          Note: the following assume that pthread_t is a type that can be
-          initialized to (casted) zero. If this is not the case, you will need
-          to somehow redefine these or not use spin locks.
-        */
-        #define THREAD_ID_T pthread_t
-        #define CURRENT_THREAD pthread_self()
-        #define EQ_OWNER(X, Y) pthread_equal(X, Y)
-      #endif
+#else /* USE_RECURSIVE_LOCKS */
+/* types for lock owners */
+#ifdef WIN32
+#define THREAD_ID_T           DWORD
+#define CURRENT_THREAD        GetCurrentThreadId()
+#define EQ_OWNER(X,Y)         ((X) == (Y))
+#else
+/*
+  Note: the following assume that pthread_t is a type that can be
+  initialized to (casted) zero. If this is not the case, you will need to
+  somehow redefine these or not use spin locks.
+*/
+#define THREAD_ID_T           pthread_t
+#define CURRENT_THREAD        pthread_self()
+#define EQ_OWNER(X,Y)         pthread_equal(X, Y)
+#endif
 
 struct malloc_recursive_lock {
-
-  int          sl;
+  int sl;
   unsigned int c;
-  THREAD_ID_T  threadid;
-
+  THREAD_ID_T threadid;
 };
 
-      #define MLOCK_T struct malloc_recursive_lock
-static MLOCK_T malloc_global_mutex = {0, 0, (THREAD_ID_T)0};
+#define MLOCK_T  struct malloc_recursive_lock
+static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
 
 static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
-
   assert(lk->sl != 0);
-  if (--lk->c == 0) { CLEAR_LOCK(&lk->sl); }
-
+  if (--lk->c == 0) {
+    CLEAR_LOCK(&lk->sl);
+  }
 }
 
 static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
-
   THREAD_ID_T mythreadid = CURRENT_THREAD;
-  int         spins = 0;
+  int spins = 0;
   for (;;) {
-
     if (*((volatile int *)(&lk->sl)) == 0) {
-
       if (!CAS_LOCK(&lk->sl)) {
-
         lk->threadid = mythreadid;
         lk->c = 1;
         return 0;
-
       }
-
-    } else if (EQ_OWNER(lk->threadid, mythreadid)) {
-
+    }
+    else if (EQ_OWNER(lk->threadid, mythreadid)) {
       ++lk->c;
       return 0;
-
     }
-
-    if ((++spins & SPINS_PER_YIELD) == 0) { SPIN_LOCK_YIELD; }
-
+    if ((++spins & SPINS_PER_YIELD) == 0) {
+      SPIN_LOCK_YIELD;
+    }
   }
-
 }
 
 static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
-
   THREAD_ID_T mythreadid = CURRENT_THREAD;
   if (*((volatile int *)(&lk->sl)) == 0) {
-
     if (!CAS_LOCK(&lk->sl)) {
-
       lk->threadid = mythreadid;
       lk->c = 1;
       return 1;
-
     }
-
-  } else if (EQ_OWNER(lk->threadid, mythreadid)) {
-
+  }
+  else if (EQ_OWNER(lk->threadid, mythreadid)) {
     ++lk->c;
     return 1;
-
   }
-
   return 0;
-
 }
 
-      #define RELEASE_LOCK(lk) recursive_release_lock(lk)
-      #define TRY_LOCK(lk) recursive_try_lock(lk)
-      #define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)
-      #define INITIAL_LOCK(lk) \
-        ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
-      #define DESTROY_LOCK(lk) (0)
-    #endif                                           /* USE_RECURSIVE_LOCKS */
-
-  #elif defined(WIN32)                           /* Win32 critical sections */
-    #define MLOCK_T CRITICAL_SECTION
-    #define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)
-    #define RELEASE_LOCK(lk) LeaveCriticalSection(lk)
-    #define TRY_LOCK(lk) TryEnterCriticalSection(lk)
-    #define INITIAL_LOCK(lk) \
-      (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000 | 4000))
-    #define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)
-    #define NEED_GLOBAL_LOCK_INIT
-
-static MLOCK_T       malloc_global_mutex;
+#define RELEASE_LOCK(lk)      recursive_release_lock(lk)
+#define TRY_LOCK(lk)          recursive_try_lock(lk)
+#define ACQUIRE_LOCK(lk)      recursive_acquire_lock(lk)
+#define INITIAL_LOCK(lk)      ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
+#define DESTROY_LOCK(lk)      (0)
+#endif /* USE_RECURSIVE_LOCKS */
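
The recursive variant clears the underlying spin word only when the
ownership count returns to zero, so the owning thread may nest
acquisitions freely. A sketch of the resulting semantics, assuming the
recursive_* helpers above are in scope; demo_lk and the two functions
are illustrative:

    /* Sketch: nested acquisition with the recursive lock. */
    static MLOCK_T demo_lk = {0, 0, (THREAD_ID_T)0};

    static void inner(void) {
      recursive_acquire_lock(&demo_lk);  /* same owner: just ++c */
      /* ... */
      recursive_release_lock(&demo_lk);  /* c: 2 -> 1, lock still held */
    }

    static void outer(void) {
      recursive_acquire_lock(&demo_lk);  /* CAS on sl wins: c = 1 */
      inner();
      recursive_release_lock(&demo_lk);  /* c: 1 -> 0, CLEAR_LOCK runs */
    }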
+
+#elif defined(WIN32) /* Win32 critical sections */
+#define MLOCK_T               CRITICAL_SECTION
+#define ACQUIRE_LOCK(lk)      (EnterCriticalSection(lk), 0)
+#define RELEASE_LOCK(lk)      LeaveCriticalSection(lk)
+#define TRY_LOCK(lk)          TryEnterCriticalSection(lk)
+#define INITIAL_LOCK(lk)      (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
+#define DESTROY_LOCK(lk)      (DeleteCriticalSection(lk), 0)
+#define NEED_GLOBAL_LOCK_INIT
+
+static MLOCK_T malloc_global_mutex;
 static volatile LONG malloc_global_mutex_status;
 
 /* Use spin loop to initialize global lock */
 static void init_malloc_global_mutex() {
-
   for (;;) {
-
     long stat = malloc_global_mutex_status;
-    if (stat > 0) return;
+    if (stat > 0)
+      return;
     /* transition to < 0 while initializing, then to > 0 */
-    if (stat == 0 && interlockedcompareexchange(&malloc_global_mutex_status,
-                                                (LONG)-1, (LONG)0) == 0) {
-
+    if (stat == 0 &&
+        interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
       InitializeCriticalSection(&malloc_global_mutex);
       interlockedexchange(&malloc_global_mutex_status, (LONG)1);
       return;
-
     }
-
     SleepEx(0, FALSE);
-
   }
-
 }
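
The three-state status word (0 = uninitialized, < 0 = initializing,
> 0 = ready) is a general once-initialization idiom. For comparison, a
hedged sketch of the same state machine in portable C11 atomics (not
part of this file; demo_status and demo_once are illustrative):

    #include <stdatomic.h>

    static atomic_long demo_status;          /* 0 / -1 / 1, as above */

    static void demo_once(void (*init_fn)(void)) {
      for (;;) {
        long s = atomic_load(&demo_status);
        if (s > 0) return;                   /* already initialized */
        long expected = 0;
        if (s == 0 &&
            atomic_compare_exchange_strong(&demo_status, &expected, -1)) {
          init_fn();                         /* this thread won the race */
          atomic_store(&demo_status, 1);
          return;
        }
        /* another thread is initializing: retry (the real code sleeps) */
      }
    }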
 
-  #else                                             /* pthreads-based locks */
-    #define MLOCK_T pthread_mutex_t
-    #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
-    #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
-    #define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))
-    #define INITIAL_LOCK(lk) pthread_init_lock(lk)
-    #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
+#else /* pthreads-based locks */
+#define MLOCK_T               pthread_mutex_t
+#define ACQUIRE_LOCK(lk)      pthread_mutex_lock(lk)
+#define RELEASE_LOCK(lk)      pthread_mutex_unlock(lk)
+#define TRY_LOCK(lk)          (!pthread_mutex_trylock(lk))
+#define INITIAL_LOCK(lk)      pthread_init_lock(lk)
+#define DESTROY_LOCK(lk)      pthread_mutex_destroy(lk)
 
-    #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && \
-        defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
+#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
 /* Cope with old-style linux recursive lock initialization by adding */
 /* skipped internal declaration from pthread.h */
-extern int pthread_mutexattr_setkind_np __P((pthread_mutexattr_t * __attr,
-                                             int __kind));
-      #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
-      #define pthread_mutexattr_settype(x, y) pthread_mutexattr_setkind_np(x, y)
-    #endif                                       /* USE_RECURSIVE_LOCKS ... */
+extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
+                                              int __kind));
+#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
+#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
+#endif /* USE_RECURSIVE_LOCKS ... */
 
 static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
 
-static int pthread_init_lock(MLOCK_T *lk) {
-
+static int pthread_init_lock (MLOCK_T *lk) {
   pthread_mutexattr_t attr;
   if (pthread_mutexattr_init(&attr)) return 1;
-    #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
+#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
   if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
-    #endif
+#endif
   if (pthread_mutex_init(lk, &attr)) return 1;
   if (pthread_mutexattr_destroy(&attr)) return 1;
   return 0;
-
 }
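
In the pthreads configuration the lock macros map one-to-one onto the
mutex API. A minimal usage sketch (demo_mutex and demo are illustrative
names):

    static MLOCK_T demo_mutex;

    static int demo(void) {
      if (INITIAL_LOCK(&demo_mutex)) return 1; /* pthread_init_lock */
      ACQUIRE_LOCK(&demo_mutex);               /* pthread_mutex_lock */
      /* ... critical section ... */
      RELEASE_LOCK(&demo_mutex);               /* pthread_mutex_unlock */
      return DESTROY_LOCK(&demo_mutex);        /* pthread_mutex_destroy */
    }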
 
-  #endif                                              /* ... lock types ... */
+#endif /* ... lock types ... */
 
-  /* Common code for all lock types */
-  #define USE_LOCK_BIT (2U)
+/* Common code for all lock types */
+#define USE_LOCK_BIT               (2U)
 
-  #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
-    #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
-  #endif
+#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
+#define ACQUIRE_MALLOC_GLOBAL_LOCK()  ACQUIRE_LOCK(&malloc_global_mutex);
+#endif
 
-  #ifndef RELEASE_MALLOC_GLOBAL_LOCK
-    #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
-  #endif
+#ifndef RELEASE_MALLOC_GLOBAL_LOCK
+#define RELEASE_MALLOC_GLOBAL_LOCK()  RELEASE_LOCK(&malloc_global_mutex);
+#endif
 
-#endif                                                         /* USE_LOCKS */
+#endif /* USE_LOCKS */
 
 /* -----------------------  Chunk representations ------------------------ */
 
@@ -2314,56 +2236,56 @@ static int pthread_init_lock(MLOCK_T *lk) {
 */
 
 struct malloc_chunk {
-
-  size_t               prev_foot;     /* Size of previous chunk (if free).  */
-  size_t               head;                        /* Size and inuse bits. */
-  struct malloc_chunk *fd;            /* double links -- used only if free. */
-  struct malloc_chunk *bk;
-
+  size_t               prev_foot;  /* Size of previous chunk (if free).  */
+  size_t               head;       /* Size and inuse bits. */
+  struct malloc_chunk* fd;         /* double links -- used only if free. */
+  struct malloc_chunk* bk;
 };
 
 typedef struct malloc_chunk  mchunk;
-typedef struct malloc_chunk *mchunkptr;
-typedef struct malloc_chunk *sbinptr;         /* The type of bins of chunks */
-typedef unsigned int         bindex_t;                   /* Described below */
-typedef unsigned int         binmap_t;                   /* Described below */
-typedef unsigned int         flag_t;   /* The type of various bit flag sets */
+typedef struct malloc_chunk* mchunkptr;
+typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
+typedef unsigned int bindex_t;         /* Described below */
+typedef unsigned int binmap_t;         /* Described below */
+typedef unsigned int flag_t;           /* The type of various bit flag sets */
 
 /* ------------------- Chunks sizes and alignments ----------------------- */
 
-#define MCHUNK_SIZE (sizeof(mchunk))
+#define MCHUNK_SIZE         (sizeof(mchunk))
 
 #if FOOTERS
-  #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
-#else                                                            /* FOOTERS */
-  #define CHUNK_OVERHEAD (SIZE_T_SIZE)
-#endif                                                           /* FOOTERS */
+#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
+#else /* FOOTERS */
+#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
+#endif /* FOOTERS */
 
 /* MMapped chunks need a second word of overhead ... */
 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
 /* ... and additional padding for fake next-chunk at foot */
-#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
+#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
 
 /* The smallest size we can malloc is an aligned minimal chunk */
-#define MIN_CHUNK_SIZE ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+#define MIN_CHUNK_SIZE\
+  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
 
 /* conversion from malloc headers to user pointers, and back */
-#define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES))
-#define mem2chunk(mem) ((mchunkptr)((char *)(mem)-TWO_SIZE_T_SIZES))
+#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
+#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
 /* chunk associated with aligned address A */
-#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
+#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
 
 /* Bounds on request (not chunk) sizes. */
-#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
-#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
+#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
+#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
 
 /* pad request bytes into a usable size */
 #define pad_request(req) \
-  (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
 
 /* pad request, checking for minimum (but not maximum) */
 #define request2size(req) \
-  (((req) < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(req))
+  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
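
To make the padding concrete: on a typical LP64 build without FOOTERS
(SIZE_T_SIZE == 8, CHUNK_OVERHEAD == 8, CHUNK_ALIGN_MASK == 15, hence
MCHUNK_SIZE == 32, MIN_CHUNK_SIZE == 32 and MIN_REQUEST == 23), the
macros work out as:

    request2size(1)  -> 1 < MIN_REQUEST, so MIN_CHUNK_SIZE     == 32
    request2size(24) -> pad_request(24) == (24 + 8 + 15) & ~15 == 32
    request2size(33) -> pad_request(33) == (33 + 8 + 15) & ~15 == 48

so every usable chunk size is a 16-byte multiple of at least 32 bytes.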
+
 
 /* ------------------ Operations on head and foot fields ----------------- */
 
@@ -2375,60 +2297,61 @@ typedef unsigned int         flag_t;   /* The type of various bit flag sets */
   FLAG4_BIT is not used by this malloc, but might be useful in extensions.
 */
 
-#define PINUSE_BIT (SIZE_T_ONE)
-#define CINUSE_BIT (SIZE_T_TWO)
-#define FLAG4_BIT (SIZE_T_FOUR)
-#define INUSE_BITS (PINUSE_BIT | CINUSE_BIT)
-#define FLAG_BITS (PINUSE_BIT | CINUSE_BIT | FLAG4_BIT)
+#define PINUSE_BIT          (SIZE_T_ONE)
+#define CINUSE_BIT          (SIZE_T_TWO)
+#define FLAG4_BIT           (SIZE_T_FOUR)
+#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
+#define FLAG_BITS           (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
 
 /* Head value for fenceposts */
-#define FENCEPOST_HEAD (INUSE_BITS | SIZE_T_SIZE)
+#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
 
 /* extraction of fields from head words */
-#define cinuse(p) ((p)->head & CINUSE_BIT)
-#define pinuse(p) ((p)->head & PINUSE_BIT)
-#define flag4inuse(p) ((p)->head & FLAG4_BIT)
-#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
-#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
+#define cinuse(p)           ((p)->head & CINUSE_BIT)
+#define pinuse(p)           ((p)->head & PINUSE_BIT)
+#define flag4inuse(p)       ((p)->head & FLAG4_BIT)
+#define is_inuse(p)         (((p)->head & INUSE_BITS) != PINUSE_BIT)
+#define is_mmapped(p)       (((p)->head & INUSE_BITS) == 0)
 
-#define chunksize(p) ((p)->head & ~(FLAG_BITS))
+#define chunksize(p)        ((p)->head & ~(FLAG_BITS))
 
-#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
-#define set_flag4(p) ((p)->head |= FLAG4_BIT)
-#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)
+#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
+#define set_flag4(p)        ((p)->head |= FLAG4_BIT)
+#define clear_flag4(p)      ((p)->head &= ~FLAG4_BIT)
 
 /* Treat space at ptr +/- offset as a chunk */
-#define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s)))
-#define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s)))
+#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
+#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
 
 /* Ptr to next or previous physical malloc_chunk. */
-#define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~FLAG_BITS)))
-#define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot)))
+#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
+#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
 
 /* extract next chunk's pinuse bit */
-#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
+#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
 
 /* Get/set size at footer */
-#define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot)
-#define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))
+#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
+#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
 
 /* Set size, pinuse bit, and foot */
-#define set_size_and_pinuse_of_free_chunk(p, s) \
-  ((p)->head = (s | PINUSE_BIT), set_foot(p, s))
+#define set_size_and_pinuse_of_free_chunk(p, s)\
+  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
 
 /* Set size, pinuse bit, foot, and clear next pinuse */
-#define set_free_with_pinuse(p, s, n) \
+#define set_free_with_pinuse(p, s, n)\
   (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
 
 /* Get the internal overhead associated with chunk p */
-#define overhead_for(p) (is_mmapped(p) ? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
+#define overhead_for(p)\
+ (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
 
 /* Return true if malloced space is not necessarily cleared */
 #if MMAP_CLEARS
-  #define calloc_must_clear(p) (!is_mmapped(p))
-#else                                                        /* MMAP_CLEARS */
-  #define calloc_must_clear(p) (1)
-#endif                                                       /* MMAP_CLEARS */
+#define calloc_must_clear(p) (!is_mmapped(p))
+#else /* MMAP_CLEARS */
+#define calloc_must_clear(p) (1)
+#endif /* MMAP_CLEARS */
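
As a quick illustration of the head-word encoding, a hedged sketch that
assumes the chunk macros above are in scope (show_chunk is an
illustrative helper, not part of dlmalloc):

    #include <stdio.h>

    /* Decode a chunk's head word with the extraction macros; e.g. a
       head of (48 | PINUSE_BIT | CINUSE_BIT) prints
       "size=48 cinuse=1 pinuse=1 mmapped=0". */
    static void show_chunk(mchunkptr p) {
      printf("size=%zu cinuse=%d pinuse=%d mmapped=%d\n",
             chunksize(p), !!cinuse(p), !!pinuse(p), !!is_mmapped(p));
    }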
 
 /* ---------------------- Overlaid data structures ----------------------- */
 
@@ -2522,25 +2445,23 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
 
 struct malloc_tree_chunk {
-
   /* The first four fields must be compatible with malloc_chunk */
   size_t                    prev_foot;
   size_t                    head;
-  struct malloc_tree_chunk *fd;
-  struct malloc_tree_chunk *bk;
+  struct malloc_tree_chunk* fd;
+  struct malloc_tree_chunk* bk;
 
-  struct malloc_tree_chunk *child[2];
-  struct malloc_tree_chunk *parent;
+  struct malloc_tree_chunk* child[2];
+  struct malloc_tree_chunk* parent;
   bindex_t                  index;
-
 };
 
 typedef struct malloc_tree_chunk  tchunk;
-typedef struct malloc_tree_chunk *tchunkptr;
-typedef struct malloc_tree_chunk *tbinptr;     /* The type of bins of trees */
+typedef struct malloc_tree_chunk* tchunkptr;
+typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
 
 /* A little helper macro for trees */
-#define leftmost_child(t) ((t)->child[0] != 0 ? (t)->child[0] : (t)->child[1])
+#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
 
 /* ----------------------------- Segments -------------------------------- */
 
@@ -2600,19 +2521,17 @@ typedef struct malloc_tree_chunk *tbinptr;     /* The type of bins of trees */
 */
 
 struct malloc_segment {
-
-  char *                 base;                              /* base address */
-  size_t                 size;                            /* allocated size */
-  struct malloc_segment *next;                       /* ptr to next segment */
-  flag_t                 sflags;                    /* mmap and extern flag */
-
+  char*        base;             /* base address */
+  size_t       size;             /* allocated size */
+  struct malloc_segment* next;   /* ptr to next segment */
+  flag_t       sflags;           /* mmap and extern flag */
 };
 
-#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
-#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
+#define is_mmapped_segment(S)  ((S)->sflags & USE_MMAP_BIT)
+#define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
 
 typedef struct malloc_segment  msegment;
-typedef struct malloc_segment *msegmentptr;
+typedef struct malloc_segment* msegmentptr;
 
 /* ---------------------------- malloc_state ----------------------------- */
 
@@ -2702,43 +2621,41 @@ typedef struct malloc_segment *msegmentptr;
 */
 
 /* Bin types, widths and sizes */
-#define NSMALLBINS (32U)
-#define NTREEBINS (32U)
-#define SMALLBIN_SHIFT (3U)
-#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
-#define TREEBIN_SHIFT (8U)
-#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
-#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
+#define NSMALLBINS        (32U)
+#define NTREEBINS         (32U)
+#define SMALLBIN_SHIFT    (3U)
+#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
+#define TREEBIN_SHIFT     (8U)
+#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
+#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
 
 struct malloc_state {
-
-  binmap_t  smallmap;
-  binmap_t  treemap;
-  size_t    dvsize;
-  size_t    topsize;
-  char *    least_addr;
-  mchunkptr dv;
-  mchunkptr top;
-  size_t    trim_check;
-  size_t    release_checks;
-  size_t    magic;
-  mchunkptr smallbins[(NSMALLBINS + 1) * 2];
-  tbinptr   treebins[NTREEBINS];
-  size_t    footprint;
-  size_t    max_footprint;
-  size_t    footprint_limit;                         /* zero means no limit */
-  flag_t    mflags;
+  binmap_t   smallmap;
+  binmap_t   treemap;
+  size_t     dvsize;
+  size_t     topsize;
+  char*      least_addr;
+  mchunkptr  dv;
+  mchunkptr  top;
+  size_t     trim_check;
+  size_t     release_checks;
+  size_t     magic;
+  mchunkptr  smallbins[(NSMALLBINS+1)*2];
+  tbinptr    treebins[NTREEBINS];
+  size_t     footprint;
+  size_t     max_footprint;
+  size_t     footprint_limit; /* zero means no limit */
+  flag_t     mflags;
 #if USE_LOCKS
-  MLOCK_T mutex;             /* locate lock among fields that rarely change */
-#endif                                                         /* USE_LOCKS */
-  msegment seg;
-  void *   extp;                     /* Unused but available for extensions */
-  size_t   exts;
-
+  MLOCK_T    mutex;     /* locate lock among fields that rarely change */
+#endif /* USE_LOCKS */
+  msegment   seg;
+  void*      extp;      /* Unused but available for extensions */
+  size_t     exts;
 };
 
-typedef struct malloc_state *mstate;
+typedef struct malloc_state*    mstate;
 
 /* ------------- Global malloc_state and malloc_params ------------------- */
 
@@ -2750,14 +2667,12 @@ typedef struct malloc_state *mstate;
 */
 
 struct malloc_params {
-
   size_t magic;
   size_t page_size;
   size_t granularity;
   size_t mmap_threshold;
   size_t trim_threshold;
   flag_t default_mflags;
-
 };
 
 static struct malloc_params mparams;
@@ -2769,108 +2684,106 @@ static struct malloc_params mparams;
 
 /* The global malloc_state used for all non-"mspace" calls */
 static struct malloc_state _gm_;
-  #define gm (&_gm_)
-  #define is_global(M) ((M) == &_gm_)
+#define gm                 (&_gm_)
+#define is_global(M)       ((M) == &_gm_)
 
-#endif                                                     /* !ONLY_MSPACES */
+#endif /* !ONLY_MSPACES */
 
-#define is_initialized(M) ((M)->top != 0)
+#define is_initialized(M)  ((M)->top != 0)
 
 /* -------------------------- system alloc setup ------------------------- */
 
 /* Operations on mflags */
 
-#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
-#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
+#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
+#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
 #if USE_LOCKS
-  #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
+#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
 #else
-  #define disable_lock(M)
+#define disable_lock(M)
 #endif
 
-#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
-#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
+#define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
+#define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
 #if HAVE_MMAP
-  #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
+#define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
 #else
-  #define disable_mmap(M)
+#define disable_mmap(M)
 #endif
 
-#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
-#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
+#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
+#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
 
-#define set_lock(M, L) \
-  ((M)->mflags =       \
-       (L) ? ((M)->mflags | USE_LOCK_BIT) : ((M)->mflags & ~USE_LOCK_BIT))
+#define set_lock(M,L)\
+ ((M)->mflags = (L)?\
+  ((M)->mflags | USE_LOCK_BIT) :\
+  ((M)->mflags & ~USE_LOCK_BIT))
 
 /* page-align a size */
-#define page_align(S) \
-  (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
+#define page_align(S)\
+ (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
 
 /* granularity-align a size */
-#define granularity_align(S)                    \
-  (((S) + (mparams.granularity - SIZE_T_ONE)) & \
-   ~(mparams.granularity - SIZE_T_ONE))
+#define granularity_align(S)\
+  (((S) + (mparams.granularity - SIZE_T_ONE))\
+   & ~(mparams.granularity - SIZE_T_ONE))
+
 
 /* For mmap, use granularity alignment on windows, else page-align */
 #ifdef WIN32
-  #define mmap_align(S) granularity_align(S)
+#define mmap_align(S) granularity_align(S)
 #else
-  #define mmap_align(S) page_align(S)
+#define mmap_align(S) page_align(S)
 #endif
 
 /* For sys_alloc, enough padding to ensure can malloc request on success */
 #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
 
-#define is_page_aligned(S) \
-  (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
-#define is_granularity_aligned(S) \
-  (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
+#define is_page_aligned(S)\
+   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
+#define is_granularity_aligned(S)\
+   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
 
 /*  True if segment S holds address A */
-#define segment_holds(S, A) \
-  ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)
+#define segment_holds(S, A)\
+  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
 
 /* Return segment holding given address */
-static msegmentptr segment_holding(mstate m, char *addr) {
-
+static msegmentptr segment_holding(mstate m, char* addr) {
   msegmentptr sp = &m->seg;
   for (;;) {
-
-    if (addr >= sp->base && addr < sp->base + sp->size) return sp;
-    if ((sp = sp->next) == 0) return 0;
-
+    if (addr >= sp->base && addr < sp->base + sp->size)
+      return sp;
+    if ((sp = sp->next) == 0)
+      return 0;
   }
-
 }
 
 /* Return true if segment contains a segment link */
 static int has_segment_link(mstate m, msegmentptr ss) {
-
   msegmentptr sp = &m->seg;
   for (;;) {
-
-    if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size) return 1;
-    if ((sp = sp->next) == 0) return 0;
-
+    if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
+      return 1;
+    if ((sp = sp->next) == 0)
+      return 0;
   }
-
 }
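
Tying the two helpers together, a hedged sketch of checking which
segment owns a user pointer (owns_mem is an illustrative name):

    /* Locate the segment holding a malloc'd pointer, if any. */
    static int owns_mem(mstate m, void *mem) {
      msegmentptr sp = segment_holding(m, (char *)mem2chunk(mem));
      return sp != 0 && segment_holds(sp, mem);
    }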
 
 #ifndef MORECORE_CANNOT_TRIM
-  #define should_trim(M, s) ((s) > (M)->trim_check)
-#else                                               /* MORECORE_CANNOT_TRIM */
-  #define should_trim(M, s) (0)
-#endif                                              /* MORECORE_CANNOT_TRIM */
+#define should_trim(M,s)  ((s) > (M)->trim_check)
+#else  /* MORECORE_CANNOT_TRIM */
+#define should_trim(M,s)  (0)
+#endif /* MORECORE_CANNOT_TRIM */
 
 /*
   TOP_FOOT_SIZE is padding at the end of a segment, including space
   that may be needed to place segment records and fenceposts when new
   noncontiguous segments are added.
 */
-#define TOP_FOOT_SIZE                                                        \
-  (align_offset(chunk2mem(0)) + pad_request(sizeof(struct malloc_segment)) + \
-   MIN_CHUNK_SIZE)
+#define TOP_FOOT_SIZE\
+  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
+
 
 /* -------------------------------  Hooks -------------------------------- */
 
@@ -2881,24 +2794,19 @@ static int has_segment_link(mstate m, msegmentptr ss) {
 */
 
 #if USE_LOCKS
-  #define PREACTION(M) ((use_lock(M)) ? ACQUIRE_LOCK(&(M)->mutex) : 0)
-  #define POSTACTION(M)                           \
-    {                                             \
-                                                  \
-      if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); \
-                                                  \
-    }
-#else                                                          /* USE_LOCKS */
+#define PREACTION(M)  ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
+#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
+#else /* USE_LOCKS */
 
-  #ifndef PREACTION
-    #define PREACTION(M) (0)
-  #endif                                                       /* PREACTION */
+#ifndef PREACTION
+#define PREACTION(M) (0)
+#endif  /* PREACTION */
 
-  #ifndef POSTACTION
-    #define POSTACTION(M)
-  #endif                                                      /* POSTACTION */
+#ifndef POSTACTION
+#define POSTACTION(M)
+#endif  /* POSTACTION */
 
-#endif                                                         /* USE_LOCKS */
+#endif /* USE_LOCKS */
 
 /*
   CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
@@ -2916,180 +2824,164 @@ int malloc_corruption_error_count;
 /* default corruption action */
 static void reset_on_error(mstate m);
 
-  #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
-  #define USAGE_ERROR_ACTION(m, p)
+#define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
+#define USAGE_ERROR_ACTION(m, p)
 
-#else                                                   /* PROCEED_ON_ERROR */
+#else /* PROCEED_ON_ERROR */
 
-  #ifndef CORRUPTION_ERROR_ACTION
-    #define CORRUPTION_ERROR_ACTION(m) ABORT
-  #endif                                         /* CORRUPTION_ERROR_ACTION */
+#ifndef CORRUPTION_ERROR_ACTION
+#define CORRUPTION_ERROR_ACTION(m) ABORT
+#endif /* CORRUPTION_ERROR_ACTION */
 
-  #ifndef USAGE_ERROR_ACTION
-    #define USAGE_ERROR_ACTION(m, p) ABORT
-  #endif                                              /* USAGE_ERROR_ACTION */
+#ifndef USAGE_ERROR_ACTION
+#define USAGE_ERROR_ACTION(m,p) ABORT
+#endif /* USAGE_ERROR_ACTION */
+
+#endif /* PROCEED_ON_ERROR */
 
-#endif                                                  /* PROCEED_ON_ERROR */
 
 /* -------------------------- Debugging setup ---------------------------- */
 
-#if !DEBUG
+#if ! DEBUG
 
-  #define check_free_chunk(M, P)
-  #define check_inuse_chunk(M, P)
-  #define check_malloced_chunk(M, P, N)
-  #define check_mmapped_chunk(M, P)
-  #define check_malloc_state(M)
-  #define check_top_chunk(M, P)
+#define check_free_chunk(M,P)
+#define check_inuse_chunk(M,P)
+#define check_malloced_chunk(M,P,N)
+#define check_mmapped_chunk(M,P)
+#define check_malloc_state(M)
+#define check_top_chunk(M,P)
 
-#else                                                              /* DEBUG */
-  #define check_free_chunk(M, P) do_check_free_chunk(M, P)
-  #define check_inuse_chunk(M, P) do_check_inuse_chunk(M, P)
-  #define check_top_chunk(M, P) do_check_top_chunk(M, P)
-  #define check_malloced_chunk(M, P, N) do_check_malloced_chunk(M, P, N)
-  #define check_mmapped_chunk(M, P) do_check_mmapped_chunk(M, P)
-  #define check_malloc_state(M) do_check_malloc_state(M)
+#else /* DEBUG */
+#define check_free_chunk(M,P)       do_check_free_chunk(M,P)
+#define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
+#define check_top_chunk(M,P)        do_check_top_chunk(M,P)
+#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
+#define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
+#define check_malloc_state(M)       do_check_malloc_state(M)
 
 static void   do_check_any_chunk(mstate m, mchunkptr p);
 static void   do_check_top_chunk(mstate m, mchunkptr p);
 static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
 static void   do_check_inuse_chunk(mstate m, mchunkptr p);
 static void   do_check_free_chunk(mstate m, mchunkptr p);
-static void   do_check_malloced_chunk(mstate m, void *mem, size_t s);
+static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
 static void   do_check_tree(mstate m, tchunkptr t);
 static void   do_check_treebin(mstate m, bindex_t i);
 static void   do_check_smallbin(mstate m, bindex_t i);
 static void   do_check_malloc_state(mstate m);
 static int    bin_find(mstate m, mchunkptr x);
 static size_t traverse_and_check(mstate m);
-#endif                                                             /* DEBUG */
+#endif /* DEBUG */
 
 /* ---------------------------- Indexing Bins ---------------------------- */
 
-#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
-#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)
-#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
-#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
+#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
+#define small_index(s)      (bindex_t)((s)  >> SMALLBIN_SHIFT)
+#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
+#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
 
 /* addressing by index. See above about smallbin repositioning */
-#define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i) << 1])))
-#define treebin_at(M, i) (&((M)->treebins[i]))
+#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
+#define treebin_at(M,i)     (&((M)->treebins[i]))
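
With SMALLBIN_SHIFT == 3 and NSMALLBINS == 32, the small-bin macros
quantize sizes in 8-byte steps below MIN_LARGE_SIZE == 256; for
example:

    is_small(16)        -> (16 >> 3) == 2, 2 < 32, so true
    small_index(16)     -> 2
    small_index2size(2) -> 2 << 3 == 16
    is_small(256)       -> (256 >> 3) == 32, not < 32, so false
                           (256-byte chunks go to the tree bins)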
 
 /* assign tree index for size S to variable I. Use x86 asm if possible  */
 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
-  #define compute_tree_index(S, I)                                         \
-    {                                                                      \
-                                                                           \
-      unsigned int X = S >> TREEBIN_SHIFT;                                 \
-      if (X == 0)                                                          \
-        I = 0;                                                             \
-      else if (X > 0xFFFF)                                                 \
-        I = NTREEBINS - 1;                                                 \
-      else {                                                               \
-                                                                           \
-        unsigned int K = (unsigned)sizeof(X) * __CHAR_BIT__ - 1 -          \
-                         (unsigned)__builtin_clz(X);                       \
-        I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \
-                                                                           \
-      }                                                                    \
-                                                                           \
-    }
+#define compute_tree_index(S, I)\
+{\
+  unsigned int X = S >> TREEBIN_SHIFT;\
+  if (X == 0)\
+    I = 0;\
+  else if (X > 0xFFFF)\
+    I = NTREEBINS-1;\
+  else {\
+    unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
+    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
+  }\
+}
 
-#elif defined(__INTEL_COMPILER)
-  #define compute_tree_index(S, I)                                         \
-    {                                                                      \
-                                                                           \
-      size_t X = S >> TREEBIN_SHIFT;                                       \
-      if (X == 0)                                                          \
-        I = 0;                                                             \
-      else if (X > 0xFFFF)                                                 \
-        I = NTREEBINS - 1;                                                 \
-      else {                                                               \
-                                                                           \
-        unsigned int K = _bit_scan_reverse(X);                             \
-        I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \
-                                                                           \
-      }                                                                    \
-                                                                           \
-    }
+#elif defined (__INTEL_COMPILER)
+#define compute_tree_index(S, I)\
+{\
+  size_t X = S >> TREEBIN_SHIFT;\
+  if (X == 0)\
+    I = 0;\
+  else if (X > 0xFFFF)\
+    I = NTREEBINS-1;\
+  else {\
+    unsigned int K = _bit_scan_reverse (X); \
+    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
+  }\
+}
 
-#elif defined(_MSC_VER) && _MSC_VER >= 1300
-  #define compute_tree_index(S, I)                                         \
-    {                                                                      \
-                                                                           \
-      size_t X = S >> TREEBIN_SHIFT;                                       \
-      if (X == 0)                                                          \
-        I = 0;                                                             \
-      else if (X > 0xFFFF)                                                 \
-        I = NTREEBINS - 1;                                                 \
-      else {                                                               \
-                                                                           \
-        unsigned int K;                                                    \
-        _BitScanReverse((DWORD *)&K, (DWORD)X);                            \
-        I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \
-                                                                           \
-      }                                                                    \
-                                                                           \
-    }
+#elif defined(_MSC_VER) && _MSC_VER>=1300
+#define compute_tree_index(S, I)\
+{\
+  size_t X = S >> TREEBIN_SHIFT;\
+  if (X == 0)\
+    I = 0;\
+  else if (X > 0xFFFF)\
+    I = NTREEBINS-1;\
+  else {\
+    unsigned int K;\
+    _BitScanReverse((DWORD *) &K, (DWORD) X);\
+    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
+  }\
+}
 
-#else                                                               /* GNUC */
-  #define compute_tree_index(S, I)                             \
-    {                                                          \
-                                                               \
-      size_t X = S >> TREEBIN_SHIFT;                           \
-      if (X == 0)                                              \
-        I = 0;                                                 \
-      else if (X > 0xFFFF)                                     \
-        I = NTREEBINS - 1;                                     \
-      else {                                                   \
-                                                               \
-        unsigned int Y = (unsigned int)X;                      \
-        unsigned int N = ((Y - 0x100) >> 16) & 8;              \
-        unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;     \
-        N += K;                                                \
-        N += K = (((Y <<= K) - 0x4000) >> 16) & 2;             \
-        K = 14 - N + ((Y <<= K) >> 15);                        \
-        I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1)); \
-                                                               \
-      }                                                        \
-                                                               \
-    }
-#endif                                                              /* GNUC */
+#else /* GNUC */
+#define compute_tree_index(S, I)\
+{\
+  size_t X = S >> TREEBIN_SHIFT;\
+  if (X == 0)\
+    I = 0;\
+  else if (X > 0xFFFF)\
+    I = NTREEBINS-1;\
+  else {\
+    unsigned int Y = (unsigned int)X;\
+    unsigned int N = ((Y - 0x100) >> 16) & 8;\
+    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
+    N += K;\
+    N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
+    K = 14 - N + ((Y <<= K) >> 15);\
+    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
+  }\
+}
+#endif /* GNUC */
 
 /* Bit representing maximum resolved size in a treebin at i */
 #define bit_for_tree_index(i) \
-  (i == NTREEBINS - 1) ? (SIZE_T_BITSIZE - 1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
+   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
 
 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
 #define leftshift_for_tree_index(i) \
-  ((i == NTREEBINS - 1)             \
-       ? 0                          \
-       : ((SIZE_T_BITSIZE - SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
+   ((i == NTREEBINS-1)? 0 : \
+    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
 
 /* The size of the smallest chunk held in bin with index i */
-#define minsize_for_tree_index(i)                 \
-  ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
-   (((size_t)((i)&SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
+#define minsize_for_tree_index(i) \
+   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
+   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
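
A worked example of the tree indexing, using the GCC branch above with
S == 768:

    X = 768 >> TREEBIN_SHIFT = 3
    K = 31 - __builtin_clz(3) = 1
    I = (K << 1) + ((768 >> (K + 7)) & 1) = 2 + 1 = 3

and, consistently, minsize_for_tree_index(3) == (1 << 9) | (1 << 8)
== 768, so 768 is exactly the smallest size held in treebin 3.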
+
 
 /* ------------------------ Operations on bin maps ----------------------- */
 
 /* bit corresponding to given index */
-#define idx2bit(i) ((binmap_t)(1) << (i))
+#define idx2bit(i)              ((binmap_t)(1) << (i))
 
 /* Mark/Clear bits with given index */
-#define mark_smallmap(M, i) ((M)->smallmap |= idx2bit(i))
-#define clear_smallmap(M, i) ((M)->smallmap &= ~idx2bit(i))
-#define smallmap_is_marked(M, i) ((M)->smallmap & idx2bit(i))
+#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
+#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
+#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
 
-#define mark_treemap(M, i) ((M)->treemap |= idx2bit(i))
-#define clear_treemap(M, i) ((M)->treemap &= ~idx2bit(i))
-#define treemap_is_marked(M, i) ((M)->treemap & idx2bit(i))
+#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
+#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
+#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
 
 /* isolate the least set bit of a bitmap */
-#define least_bit(x) ((x) & -(x))
+#define least_bit(x)         ((x) & -(x))
 
 /* mask with all bits to left of least bit of x on */
-#define left_bits(x) ((x << 1) | -(x << 1))
+#define left_bits(x)         ((x<<1) | -(x<<1))
 
 /* mask with all bits to left of or equal to least bit of x on */
 #define same_or_left_bits(x) ((x) | -(x))
@@ -3097,58 +2989,46 @@ static size_t traverse_and_check(mstate m);
 /* index corresponding to given bit. Use x86 asm if possible */
 
 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
-  #define compute_bit2idx(X, I) \
-    {                           \
-                                \
-      unsigned int J;           \
-      J = __builtin_ctz(X);     \
-      I = (bindex_t)J;          \
-                                \
-    }
+#define compute_bit2idx(X, I)\
+{\
+  unsigned int J;\
+  J = __builtin_ctz(X); \
+  I = (bindex_t)J;\
+}
 
-#elif defined(__INTEL_COMPILER)
-  #define compute_bit2idx(X, I) \
-    {                           \
-                                \
-      unsigned int J;           \
-      J = _bit_scan_forward(X); \
-      I = (bindex_t)J;          \
-                                \
-    }
+#elif defined (__INTEL_COMPILER)
+#define compute_bit2idx(X, I)\
+{\
+  unsigned int J;\
+  J = _bit_scan_forward (X); \
+  I = (bindex_t)J;\
+}
 
-#elif defined(_MSC_VER) && _MSC_VER >= 1300
-  #define compute_bit2idx(X, I)        \
-    {                                  \
-                                       \
-      unsigned int J;                  \
-      _BitScanForward((DWORD *)&J, X); \
-      I = (bindex_t)J;                 \
-                                       \
-    }
+#elif defined(_MSC_VER) && _MSC_VER>=1300
+#define compute_bit2idx(X, I)\
+{\
+  unsigned int J;\
+  _BitScanForward((DWORD *) &J, X);\
+  I = (bindex_t)J;\
+}
 
 #elif USE_BUILTIN_FFS
-  #define compute_bit2idx(X, I) I = ffs(X) - 1
+#define compute_bit2idx(X, I) I = ffs(X)-1
 
 #else
-  #define compute_bit2idx(X, I)            \
-    {                                      \
-                                           \
-      unsigned int Y = X - 1;              \
-      unsigned int K = Y >> (16 - 4) & 16; \
-      unsigned int N = K;                  \
-      Y >>= K;                             \
-      N += K = Y >> (8 - 3) & 8;           \
-      Y >>= K;                             \
-      N += K = Y >> (4 - 2) & 4;           \
-      Y >>= K;                             \
-      N += K = Y >> (2 - 1) & 2;           \
-      Y >>= K;                             \
-      N += K = Y >> (1 - 0) & 1;           \
-      Y >>= K;                             \
-      I = (bindex_t)(N + Y);               \
-                                           \
-    }
-#endif                                                              /* GNUC */
+#define compute_bit2idx(X, I)\
+{\
+  unsigned int Y = X - 1;\
+  unsigned int K = Y >> (16-4) & 16;\
+  unsigned int N = K;        Y >>= K;\
+  N += K = Y >> (8-3) &  8;  Y >>= K;\
+  N += K = Y >> (4-2) &  4;  Y >>= K;\
+  N += K = Y >> (2-1) &  2;  Y >>= K;\
+  N += K = Y >> (1-0) &  1;  Y >>= K;\
+  I = (bindex_t)(N + Y);\
+}
+#endif /* GNUC */
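
The bin-map helpers compose as follows: given a map x with bits 2 and 4
set (bins 2 and 4 non-empty), least_bit isolates the lowest set bit and
compute_bit2idx recovers its index:

    x = 0b10100
    least_bit(x)                -> x & -x == 0b00100
    compute_bit2idx(0b00100, I) -> I = 2   (__builtin_ctz(4) == 2)
    left_bits(0b00100)          -> (0b00100 << 1) | -(0b00100 << 1)
                                   == ...11111000 (all bins at index >= 3)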
+
 
 /* ----------------------- Runtime Check Support ------------------------- */
 
@@ -3179,141 +3059,121 @@ static size_t traverse_and_check(mstate m);
 */
 
 #if !INSECURE
-  /* Check if address a is at least as high as any from MORECORE or MMAP */
-  #define ok_address(M, a) ((char *)(a) >= (M)->least_addr)
-  /* Check if address of next chunk n is higher than base chunk p */
-  #define ok_next(p, n) ((char *)(p) < (char *)(n))
-  /* Check if p has inuse status */
-  #define ok_inuse(p) is_inuse(p)
-  /* Check if p has its pinuse bit on */
-  #define ok_pinuse(p) pinuse(p)
-
-#else                                                          /* !INSECURE */
-  #define ok_address(M, a) (1)
-  #define ok_next(b, n) (1)
-  #define ok_inuse(p) (1)
-  #define ok_pinuse(p) (1)
-#endif                                                         /* !INSECURE */
+/* Check if address a is at least as high as any from MORECORE or MMAP */
+#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
+/* Check if address of next chunk n is higher than base chunk p */
+#define ok_next(p, n)    ((char*)(p) < (char*)(n))
+/* Check if p has inuse status */
+#define ok_inuse(p)     is_inuse(p)
+/* Check if p has its pinuse bit on */
+#define ok_pinuse(p)     pinuse(p)
+
+#else /* !INSECURE */
+#define ok_address(M, a) (1)
+#define ok_next(b, n)    (1)
+#define ok_inuse(p)      (1)
+#define ok_pinuse(p)     (1)
+#endif /* !INSECURE */
 
 #if (FOOTERS && !INSECURE)
-  /* Check if (alleged) mstate m has expected magic field */
-  #define ok_magic(M) ((M)->magic == mparams.magic)
-#else                                             /* (FOOTERS && !INSECURE) */
-  #define ok_magic(M) (1)
-#endif                                            /* (FOOTERS && !INSECURE) */
+/* Check if (alleged) mstate m has expected magic field */
+#define ok_magic(M)      ((M)->magic == mparams.magic)
+#else  /* (FOOTERS && !INSECURE) */
+#define ok_magic(M)      (1)
+#endif /* (FOOTERS && !INSECURE) */
 
 /* In gcc, use __builtin_expect to minimize impact of checks */
 #if !INSECURE
-  #if defined(__GNUC__) && __GNUC__ >= 3
-    #define RTCHECK(e) __builtin_expect(e, 1)
-  #else                                                             /* GNUC */
-    #define RTCHECK(e) (e)
-  #endif                                                            /* GNUC */
-#else                                                          /* !INSECURE */
-  #define RTCHECK(e) (1)
-#endif                                                         /* !INSECURE */
+#if defined(__GNUC__) && __GNUC__ >= 3
+#define RTCHECK(e)  __builtin_expect(e, 1)
+#else /* GNUC */
+#define RTCHECK(e)  (e)
+#endif /* GNUC */
+#else /* !INSECURE */
+#define RTCHECK(e)  (1)
+#endif /* !INSECURE */
 
 /* macros to set up inuse chunks with or without footers */
 
 #if !FOOTERS
 
-  #define mark_inuse_foot(M, p, s)
+#define mark_inuse_foot(M,p,s)
 
-  /* Macros for setting head/foot of non-mmapped chunks */
+/* Macros for setting head/foot of non-mmapped chunks */
 
-  /* Set cinuse bit and pinuse bit of next chunk */
-  #define set_inuse(M, p, s)                                  \
-    ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \
-     ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
+/* Set cinuse bit and pinuse bit of next chunk */
+#define set_inuse(M,p,s)\
+  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
+  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
 
-  /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
-  #define set_inuse_and_pinuse(M, p, s)         \
-    ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \
-     ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
+/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
+#define set_inuse_and_pinuse(M,p,s)\
+  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
 
-  /* Set size, cinuse and pinuse bit of this chunk */
-  #define set_size_and_pinuse_of_inuse_chunk(M, p, s) \
-    ((p)->head = (s | PINUSE_BIT | CINUSE_BIT))
+/* Set size, cinuse and pinuse bit of this chunk */
+#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
+  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
 
-#else                                                            /* FOOTERS */
+#else /* FOOTERS */
 
-  /* Set foot of inuse chunk to be xor of mstate and seed */
-  #define mark_inuse_foot(M, p, s)                 \
-    (((mchunkptr)((char *)(p) + (s)))->prev_foot = \
-         ((size_t)(M) ^ mparams.magic))
+/* Set foot of inuse chunk to be xor of mstate and seed */
+#define mark_inuse_foot(M,p,s)\
+  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
 
-  #define get_mstate_for(p)                                            \
-    ((mstate)(((mchunkptr)((char *)(p) + (chunksize(p))))->prev_foot ^ \
-              mparams.magic))
+#define get_mstate_for(p)\
+  ((mstate)(((mchunkptr)((char*)(p) +\
+    (chunksize(p))))->prev_foot ^ mparams.magic))
 
-  #define set_inuse(M, p, s)                                   \
-    ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT),  \
-     (((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT), \
-     mark_inuse_foot(M, p, s))
+#define set_inuse(M,p,s)\
+  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
+  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
+  mark_inuse_foot(M,p,s))
 
-  #define set_inuse_and_pinuse(M, p, s)                        \
-    ((p)->head = (s | PINUSE_BIT | CINUSE_BIT),                \
-     (((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT), \
-     mark_inuse_foot(M, p, s))
+#define set_inuse_and_pinuse(M,p,s)\
+  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
+  mark_inuse_foot(M,p,s))

 
-  #define set_size_and_pinuse_of_inuse_chunk(M, p, s) \
-    ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), mark_inuse_foot(M, p, s))
+#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
+  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+  mark_inuse_foot(M, p, s))
 
-#endif                                                          /* !FOOTERS */
+#endif /* !FOOTERS */
 
 /* ---------------------------- setting mparams -------------------------- */
 
 #if LOCK_AT_FORK
-static void pre_fork(void) {
-
-  ACQUIRE_LOCK(&(gm)->mutex);
-
-}
-
-static void post_fork_parent(void) {
-
-  RELEASE_LOCK(&(gm)->mutex);
-
-}
-
-static void post_fork_child(void) {
-
-  INITIAL_LOCK(&(gm)->mutex);
-
-}
-
-#endif                                                      /* LOCK_AT_FORK */
+static void pre_fork(void)         { ACQUIRE_LOCK(&(gm)->mutex); }
+static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
+static void post_fork_child(void)  { INITIAL_LOCK(&(gm)->mutex); }
+#endif /* LOCK_AT_FORK */
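+/* A sketch of the intended registration, assuming pthreads is the
+   threading layer in use:
+     pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
+   so the global lock is held across fork() and reinitialized in the
+   child instead of being inherited in a locked state. */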
 
 /* Initialize mparams */
 static int init_mparams(void) {
-
 #ifdef NEED_GLOBAL_LOCK_INIT
-  if (malloc_global_mutex_status <= 0) init_malloc_global_mutex();
+  if (malloc_global_mutex_status <= 0)
+    init_malloc_global_mutex();
 #endif
 
   ACQUIRE_MALLOC_GLOBAL_LOCK();
   if (mparams.magic == 0) {
-
     size_t magic;
     size_t psize;
     size_t gsize;
 
 #ifndef WIN32
     psize = malloc_getpagesize;
-    gsize = ((DEFAULT_GRANULARITY != 0) ? DEFAULT_GRANULARITY : psize);
-#else                                                              /* WIN32 */
+    gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
+#else /* WIN32 */
     {
-
       SYSTEM_INFO system_info;
       GetSystemInfo(&system_info);
       psize = system_info.dwPageSize;
-      gsize =
-          ((DEFAULT_GRANULARITY != 0) ? DEFAULT_GRANULARITY
-                                      : system_info.dwAllocationGranularity);
-
+      gsize = ((DEFAULT_GRANULARITY != 0)?
+               DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
     }
-
-#endif                                                             /* WIN32 */
+#endif /* WIN32 */
 
     /* Sanity-check configuration:
        size_t must be unsigned and as wide as pointer type.
@@ -3321,23 +3181,24 @@ static int init_mparams(void) {
        alignment must be at least 8.
        Alignment, min chunk size, and page size must all be powers of 2.
     */
-    if ((sizeof(size_t) != sizeof(char *)) || (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
-        (sizeof(int) < 4) || (MALLOC_ALIGNMENT < (size_t)8U) ||
-        ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) ||
-        ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) ||
-        ((gsize & (gsize - SIZE_T_ONE)) != 0) ||
-        ((psize & (psize - SIZE_T_ONE)) != 0))
+    if ((sizeof(size_t) != sizeof(char*)) ||
+        (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
+        (sizeof(int) < 4)  ||
+        (MALLOC_ALIGNMENT < (size_t)8U) ||
+        ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
+        ((MCHUNK_SIZE      & (MCHUNK_SIZE-SIZE_T_ONE))      != 0) ||
+        ((gsize            & (gsize-SIZE_T_ONE))            != 0) ||
+        ((psize            & (psize-SIZE_T_ONE))            != 0))
       ABORT;
     mparams.granularity = gsize;
     mparams.page_size = psize;
     mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
     mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
 #if MORECORE_CONTIGUOUS
-    mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT;
-#else                                                /* MORECORE_CONTIGUOUS */
-    mparams.default_mflags =
-        USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT;
-#endif                                               /* MORECORE_CONTIGUOUS */
+    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
+#else  /* MORECORE_CONTIGUOUS */
+    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
+#endif /* MORECORE_CONTIGUOUS */
 
 #if !ONLY_MSPACES
     /* Set up lock for main malloc area */
@@ -3349,69 +3210,57 @@ static int init_mparams(void) {
 #endif
 
     {
-
 #if USE_DEV_RANDOM
-      int           fd;
+      int fd;
       unsigned char buf[sizeof(size_t)];
       /* Try to use /dev/urandom, else fall back on using time */
       if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
           read(fd, buf, sizeof(buf)) == sizeof(buf)) {
-
-        magic = *((size_t *)buf);
+        magic = *((size_t *) buf);
         close(fd);
-
-      } else
-
-#endif                                                    /* USE_DEV_RANDOM */
+      }
+      else
+#endif /* USE_DEV_RANDOM */
 #ifdef WIN32
-        magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
+      magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
 #elif defined(LACKS_TIME_H)
       magic = (size_t)&magic ^ (size_t)0x55555555U;
 #else
       magic = (size_t)(time(0) ^ (size_t)0x55555555U);
 #endif
-      magic |= (size_t)8U;                                /* ensure nonzero */
-      magic &= ~(size_t)7U;      /* improve chances of fault for bad values */
+      magic |= (size_t)8U;    /* ensure nonzero */
+      magic &= ~(size_t)7U;   /* improve chances of fault for bad values */
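+      /* Net effect: magic is now a nonzero multiple of 8. Nonzero
+         matters because mparams.magic == 0 is the "not initialized"
+         sentinel behind ensure_initialization(); clearing the low bits
+         means a corrupted FOOTERS footer tends to decode, via XOR, to a
+         misaligned mstate pointer that faults quickly. */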
       /* Until memory modes are commonly available, use volatile-write */
       (*(volatile size_t *)(&(mparams.magic))) = magic;
-
     }
-
   }
 
   RELEASE_MALLOC_GLOBAL_LOCK();
   return 1;
-
 }
 
 /* support for mallopt */
 static int change_mparam(int param_number, int value) {
-
   size_t val;
   ensure_initialization();
-  val = (value == -1) ? MAX_SIZE_T : (size_t)value;
-  switch (param_number) {
-
-    case M_TRIM_THRESHOLD:
-      mparams.trim_threshold = val;
-      return 1;
-    case M_GRANULARITY:
-      if (val >= mparams.page_size && ((val & (val - 1)) == 0)) {
-
-        mparams.granularity = val;
-        return 1;
-
-      } else
-
-        return 0;
-    case M_MMAP_THRESHOLD:
-      mparams.mmap_threshold = val;
+  val = (value == -1)? MAX_SIZE_T : (size_t)value;
+  switch(param_number) {
+  case M_TRIM_THRESHOLD:
+    mparams.trim_threshold = val;
+    return 1;
+  case M_GRANULARITY:
+    if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
+      mparams.granularity = val;
       return 1;
-    default:
+    }
+    else
       return 0;
-
+  case M_MMAP_THRESHOLD:
+    mparams.mmap_threshold = val;
+    return 1;
+  default:
+    return 0;
   }
-
 }
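+/* Usage sketch via the exported mallopt wrapper (dlmallopt when the dl
+   prefix is in use; the value below is only an illustration):
+     dlmallopt(M_GRANULARITY, 64 * 1024);
+   returns 1 only when the value is at least the page size and a power
+   of two, while passing -1 to the threshold parameters maps to
+   MAX_SIZE_T, effectively disabling trimming or mmap. */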
 
 #if DEBUG
@@ -3419,118 +3268,100 @@ static int change_mparam(int param_number, int value) {
 
 /* Check properties of any chunk, whether free, inuse, mmapped etc  */
 static void do_check_any_chunk(mstate m, mchunkptr p) {
-
   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
   assert(ok_address(m, p));
-
 }
 
 /* Check properties of top chunk */
 static void do_check_top_chunk(mstate m, mchunkptr p) {
-
-  msegmentptr sp = segment_holding(m, (char *)p);
-  size_t      sz = p->head & ~INUSE_BITS;   /* third-lowest bit can be set! */
+  msegmentptr sp = segment_holding(m, (char*)p);
+  size_t  sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
   assert(sp != 0);
   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
   assert(ok_address(m, p));
   assert(sz == m->topsize);
   assert(sz > 0);
-  assert(sz == ((sp->base + sp->size) - (char *)p) - TOP_FOOT_SIZE);
+  assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
   assert(pinuse(p));
   assert(!pinuse(chunk_plus_offset(p, sz)));
-
 }
 
 /* Check properties of (inuse) mmapped chunks */
 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
-
-  size_t sz = chunksize(p);
+  size_t  sz = chunksize(p);
   size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
   assert(is_mmapped(p));
   assert(use_mmap(m));
   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
   assert(ok_address(m, p));
   assert(!is_small(sz));
-  assert((len & (mparams.page_size - SIZE_T_ONE)) == 0);
+  assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
   assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
-  assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0);
-
+  assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
 }
 
 /* Check properties of inuse chunks */
 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
-
   do_check_any_chunk(m, p);
   assert(is_inuse(p));
   assert(next_pinuse(p));
   /* If not pinuse and not mmapped, previous chunk has OK offset */
   assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
-  if (is_mmapped(p)) do_check_mmapped_chunk(m, p);
-
+  if (is_mmapped(p))
+    do_check_mmapped_chunk(m, p);
 }
 
 /* Check properties of free chunks */
 static void do_check_free_chunk(mstate m, mchunkptr p) {
-
-  size_t    sz = chunksize(p);
+  size_t sz = chunksize(p);
   mchunkptr next = chunk_plus_offset(p, sz);
   do_check_any_chunk(m, p);
   assert(!is_inuse(p));
   assert(!next_pinuse(p));
-  assert(!is_mmapped(p));
+  assert(!is_mmapped(p));
   if (p != m->dv && p != m->top) {
-
     if (sz >= MIN_CHUNK_SIZE) {
-
       assert((sz & CHUNK_ALIGN_MASK) == 0);
       assert(is_aligned(chunk2mem(p)));
       assert(next->prev_foot == sz);
       assert(pinuse(p));
-      assert(next == m->top || is_inuse(next));
+      assert(next == m->top || is_inuse(next));
       assert(p->fd->bk == p);
       assert(p->bk->fd == p);
-
-    } else                        /* markers are always of size SIZE_T_SIZE */
-
+    }
+    else  /* markers are always of size SIZE_T_SIZE */
       assert(sz == SIZE_T_SIZE);
-
   }
-
 }
 
 /* Check properties of malloced chunks at the point they are malloced */
-static void do_check_malloced_chunk(mstate m, void *mem, size_t s) {
-
+static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
   if (mem != 0) {
-
     mchunkptr p = mem2chunk(mem);
-    size_t    sz = p->head & ~INUSE_BITS;
+    size_t sz = p->head & ~INUSE_BITS;
     do_check_inuse_chunk(m, p);
     assert((sz & CHUNK_ALIGN_MASK) == 0);
     assert(sz >= MIN_CHUNK_SIZE);
     assert(sz >= s);
     /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
     assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
-
   }
-
 }
 
 /* Check a tree and its subtrees.  */
 static void do_check_tree(mstate m, tchunkptr t) {
-
   tchunkptr head = 0;
   tchunkptr u = t;
-  bindex_t  tindex = t->index;
-  size_t    tsize = chunksize(t);
-  bindex_t  idx;
+  bindex_t tindex = t->index;
+  size_t tsize = chunksize(t);
+  bindex_t idx;
   compute_tree_index(tsize, idx);
   assert(tindex == idx);
   assert(tsize >= MIN_LARGE_SIZE);
   assert(tsize >= minsize_for_tree_index(idx));
-  assert((idx == NTREEBINS - 1) || (tsize < minsize_for_tree_index((idx + 1))));
+  assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
 
-  do {                        /* traverse through chain of same-sized nodes */
+  do { /* traverse through chain of same-sized nodes */
     do_check_any_chunk(m, ((mchunkptr)u));
     assert(u->index == tindex);
     assert(chunksize(u) == tsize);
@@ -3539,72 +3370,56 @@ static void do_check_tree(mstate m, tchunkptr t) {
     assert(u->fd->bk == u);
     assert(u->bk->fd == u);
     if (u->parent == 0) {
-
       assert(u->child[0] == 0);
       assert(u->child[1] == 0);
-
-    } else {
-
-      assert(head == 0);               /* only one node on chain has parent */
+    }
+    else {
+      assert(head == 0); /* only one node on chain has parent */
       head = u;
       assert(u->parent != u);
-      assert(u->parent->child[0] == u || u->parent->child[1] == u ||
-             *((tbinptr *)(u->parent)) == u);
+      assert(u->parent->child[0] == u ||
+             u->parent->child[1] == u ||
+             *((tbinptr*)(u->parent)) == u);
       if (u->child[0] != 0) {
-
         assert(u->child[0]->parent == u);
         assert(u->child[0] != u);
         do_check_tree(m, u->child[0]);
-
       }
-
       if (u->child[1] != 0) {
-
         assert(u->child[1]->parent == u);
         assert(u->child[1] != u);
         do_check_tree(m, u->child[1]);
-
       }
-
       if (u->child[0] != 0 && u->child[1] != 0) {
-
         assert(chunksize(u->child[0]) < chunksize(u->child[1]));
-
       }
-
     }
-
     u = u->fd;
-
   } while (u != t);
-
   assert(head != 0);
-
 }
 
 /*  Check all the chunks in a treebin.  */
 static void do_check_treebin(mstate m, bindex_t i) {
-
-  tbinptr * tb = treebin_at(m, i);
+  tbinptr* tb = treebin_at(m, i);
   tchunkptr t = *tb;
-  int       empty = (m->treemap & (1U << i)) == 0;
-  if (t == 0) assert(empty);
-  if (!empty) do_check_tree(m, t);
-
+  int empty = (m->treemap & (1U << i)) == 0;
+  if (t == 0)
+    assert(empty);
+  if (!empty)
+    do_check_tree(m, t);
 }
 
 /*  Check all the chunks in a smallbin.  */
 static void do_check_smallbin(mstate m, bindex_t i) {
-
-  sbinptr      b = smallbin_at(m, i);
-  mchunkptr    p = b->bk;
+  sbinptr b = smallbin_at(m, i);
+  mchunkptr p = b->bk;
   unsigned int empty = (m->smallmap & (1U << i)) == 0;
-  if (p == b) assert(empty);
+  if (p == b)
+    assert(empty);
   if (!empty) {
-
     for (; p != b; p = p->bk) {
-
-      size_t    size = chunksize(p);
+      size_t size = chunksize(p);
       mchunkptr q;
       /* each chunk claims to be free */
       do_check_free_chunk(m, p);
@@ -3613,249 +3428,185 @@ static void do_check_smallbin(mstate m, bindex_t i) {
       assert(p->bk == b || chunksize(p->bk) == chunksize(p));
       /* chunk is followed by an inuse chunk */
       q = next_chunk(p);
-      if (q->head != FENCEPOST_HEAD) do_check_inuse_chunk(m, q);
-
+      if (q->head != FENCEPOST_HEAD)
+        do_check_inuse_chunk(m, q);
     }
-
   }
-
 }
 
 /* Find x in a bin. Used in other check functions. */
 static int bin_find(mstate m, mchunkptr x) {
-
   size_t size = chunksize(x);
   if (is_small(size)) {
-
     bindex_t sidx = small_index(size);
-    sbinptr  b = smallbin_at(m, sidx);
+    sbinptr b = smallbin_at(m, sidx);
     if (smallmap_is_marked(m, sidx)) {
-
       mchunkptr p = b;
       do {
-
-        if (p == x) return 1;
-
+        if (p == x)
+          return 1;
       } while ((p = p->fd) != b);
-
     }
-
-  } else {
-
+  }
+  else {
     bindex_t tidx;
     compute_tree_index(size, tidx);
     if (treemap_is_marked(m, tidx)) {
-
       tchunkptr t = *treebin_at(m, tidx);
-      size_t    sizebits = size << leftshift_for_tree_index(tidx);
+      size_t sizebits = size << leftshift_for_tree_index(tidx);
       while (t != 0 && chunksize(t) != size) {
-
-        t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
+        t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
         sizebits <<= 1;
-
       }
-
       if (t != 0) {
-
         tchunkptr u = t;
         do {
-
-          if (u == (tchunkptr)x) return 1;
-
+          if (u == (tchunkptr)x)
+            return 1;
         } while ((u = u->fd) != t);
-
       }
-
     }
-
   }
-
   return 0;
-
 }
 
 /* Traverse each chunk and check it; return total */
 static size_t traverse_and_check(mstate m) {
-
   size_t sum = 0;
   if (is_initialized(m)) {
-
     msegmentptr s = &m->seg;
     sum += m->topsize + TOP_FOOT_SIZE;
     while (s != 0) {
-
       mchunkptr q = align_as_chunk(s->base);
       mchunkptr lastq = 0;
       assert(pinuse(q));
-      while (segment_holds(s, q) && q != m->top && q->head != FENCEPOST_HEAD) {
-
+      while (segment_holds(s, q) &&
+             q != m->top && q->head != FENCEPOST_HEAD) {
         sum += chunksize(q);
         if (is_inuse(q)) {
-
           assert(!bin_find(m, q));
           do_check_inuse_chunk(m, q);
-
-        } else {
-
+        }
+        else {
           assert(q == m->dv || bin_find(m, q));
-          assert(lastq == 0 || is_inuse(lastq));  /* Not 2 consecutive free */
+          assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
           do_check_free_chunk(m, q);
-
         }
-
         lastq = q;
         q = next_chunk(q);
-
       }
-
       s = s->next;
-
     }
-
   }
-
   return sum;
-
 }
 
+
 /* Check all properties of malloc_state. */
 static void do_check_malloc_state(mstate m) {
-
   bindex_t i;
-  size_t   total;
+  size_t total;
   /* check bins */
   for (i = 0; i < NSMALLBINS; ++i)
     do_check_smallbin(m, i);
   for (i = 0; i < NTREEBINS; ++i)
     do_check_treebin(m, i);
 
-  if (m->dvsize != 0) {                                   /* check dv chunk */
+  if (m->dvsize != 0) { /* check dv chunk */
     do_check_any_chunk(m, m->dv);
     assert(m->dvsize == chunksize(m->dv));
     assert(m->dvsize >= MIN_CHUNK_SIZE);
     assert(bin_find(m, m->dv) == 0);
-
   }
 
-  if (m->top != 0) {                                     /* check top chunk */
+  if (m->top != 0) {   /* check top chunk */
     do_check_top_chunk(m, m->top);
     /*assert(m->topsize == chunksize(m->top)); redundant */
     assert(m->topsize > 0);
     assert(bin_find(m, m->top) == 0);
-
   }
 
   total = traverse_and_check(m);
   assert(total <= m->footprint);
   assert(m->footprint <= m->max_footprint);
-
 }
-
-#endif                                                             /* DEBUG */
+#endif /* DEBUG */
 
 /* ----------------------------- statistics ------------------------------ */
 
 #if !NO_MALLINFO
 static struct mallinfo internal_mallinfo(mstate m) {
-
-  struct mallinfo nm = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
   ensure_initialization();
   if (!PREACTION(m)) {
-
     check_malloc_state(m);
     if (is_initialized(m)) {
-
-      size_t      nfree = SIZE_T_ONE;                    /* top always free */
-      size_t      mfree = m->topsize + TOP_FOOT_SIZE;
-      size_t      sum = mfree;
+      size_t nfree = SIZE_T_ONE; /* top always free */
+      size_t mfree = m->topsize + TOP_FOOT_SIZE;
+      size_t sum = mfree;
       msegmentptr s = &m->seg;
       while (s != 0) {
-
         mchunkptr q = align_as_chunk(s->base);
-        while (segment_holds(s, q) && q != m->top &&
-               q->head != FENCEPOST_HEAD) {
-
+        while (segment_holds(s, q) &&
+               q != m->top && q->head != FENCEPOST_HEAD) {
           size_t sz = chunksize(q);
           sum += sz;
           if (!is_inuse(q)) {
-
             mfree += sz;
             ++nfree;
-
           }
-
           q = next_chunk(q);
-
         }
-
         s = s->next;
-
       }
 
-      nm.arena = sum;
-      nm.ordblks = nfree;
-      nm.hblkhd = m->footprint - sum;
-      nm.usmblks = m->max_footprint;
+      nm.arena    = sum;
+      nm.ordblks  = nfree;
+      nm.hblkhd   = m->footprint - sum;
+      nm.usmblks  = m->max_footprint;
       nm.uordblks = m->footprint - mfree;
       nm.fordblks = mfree;
       nm.keepcost = m->topsize;
-
     }
 
     POSTACTION(m);
-
   }
-
   return nm;
-
 }
-
-#endif                                                      /* !NO_MALLINFO */
+#endif /* !NO_MALLINFO */
 
 #if !NO_MALLOC_STATS
 static void internal_malloc_stats(mstate m) {
-
   ensure_initialization();
   if (!PREACTION(m)) {
-
     size_t maxfp = 0;
     size_t fp = 0;
     size_t used = 0;
     check_malloc_state(m);
     if (is_initialized(m)) {
-
       msegmentptr s = &m->seg;
       maxfp = m->max_footprint;
       fp = m->footprint;
       used = fp - (m->topsize + TOP_FOOT_SIZE);
 
       while (s != 0) {
-
         mchunkptr q = align_as_chunk(s->base);
-        while (segment_holds(s, q) && q != m->top &&
-               q->head != FENCEPOST_HEAD) {
-
-          if (!is_inuse(q)) used -= chunksize(q);
+        while (segment_holds(s, q) &&
+               q != m->top && q->head != FENCEPOST_HEAD) {
+          if (!is_inuse(q))
+            used -= chunksize(q);
           q = next_chunk(q);
-
         }
-
         s = s->next;
-
       }
-
     }
-
-    POSTACTION(m);                                             /* drop lock */
+    POSTACTION(m); /* drop lock */
     fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
     fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
     fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
-
   }
-
 }
-
-#endif                                                   /* NO_MALLOC_STATS */
+#endif /* !NO_MALLOC_STATS */
 
 /* ----------------------- Operations on smallbins ----------------------- */
 
@@ -3867,181 +3618,134 @@ static void internal_malloc_stats(mstate m) {
 */
 
 /* Link a free chunk into a smallbin  */
-#define insert_small_chunk(M, P, S)         \
-  {                                         \
-                                            \
-    bindex_t  I = small_index(S);           \
-    mchunkptr B = smallbin_at(M, I);        \
-    mchunkptr F = B;                        \
-    assert(S >= MIN_CHUNK_SIZE);            \
-    if (!smallmap_is_marked(M, I))          \
-      mark_smallmap(M, I);                  \
-    else if (RTCHECK(ok_address(M, B->fd))) \
-      F = B->fd;                            \
-    else {                                  \
-                                            \
-      CORRUPTION_ERROR_ACTION(M);           \
-                                            \
-    }                                       \
-    B->fd = P;                              \
-    F->bk = P;                              \
-    P->fd = F;                              \
-    P->bk = B;                              \
-                                            \
-  }
+#define insert_small_chunk(M, P, S) {\
+  bindex_t I  = small_index(S);\
+  mchunkptr B = smallbin_at(M, I);\
+  mchunkptr F = B;\
+  assert(S >= MIN_CHUNK_SIZE);\
+  if (!smallmap_is_marked(M, I))\
+    mark_smallmap(M, I);\
+  else if (RTCHECK(ok_address(M, B->fd)))\
+    F = B->fd;\
+  else {\
+    CORRUPTION_ERROR_ACTION(M);\
+  }\
+  B->fd = P;\
+  F->bk = P;\
+  P->fd = F;\
+  P->bk = B;\
+}
 
 /* Unlink a chunk from a smallbin  */
-#define unlink_small_chunk(M, P, S)                                            \
-  {                                                                            \
-                                                                               \
-    mchunkptr F = P->fd;                                                       \
-    mchunkptr B = P->bk;                                                       \
-    bindex_t  I = small_index(S);                                              \
-    assert(P != B);                                                            \
-    assert(P != F);                                                            \
-    assert(chunksize(P) == small_index2size(I));                               \
-    if (RTCHECK(F == smallbin_at(M, I) || (ok_address(M, F) && F->bk == P))) { \
-                                                                               \
-      if (B == F) {                                                            \
-                                                                               \
-        clear_smallmap(M, I);                                                  \
-                                                                               \
-      } else if (RTCHECK(B == smallbin_at(M, I) ||                             \
-                                                                               \
-                                                                               \
-                         (ok_address(M, B) && B->fd == P))) {                  \
-                                                                               \
-        F->bk = B;                                                             \
-        B->fd = F;                                                             \
-                                                                               \
-      } else {                                                                 \
-                                                                               \
-        CORRUPTION_ERROR_ACTION(M);                                            \
-                                                                               \
-      }                                                                        \
-                                                                               \
-    } else {                                                                   \
-                                                                               \
-      CORRUPTION_ERROR_ACTION(M);                                              \
-                                                                               \
-    }                                                                          \
-                                                                               \
-  }
+#define unlink_small_chunk(M, P, S) {\
+  mchunkptr F = P->fd;\
+  mchunkptr B = P->bk;\
+  bindex_t I = small_index(S);\
+  assert(P != B);\
+  assert(P != F);\
+  assert(chunksize(P) == small_index2size(I));\
+  if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
+    if (B == F) {\
+      clear_smallmap(M, I);\
+    }\
+    else if (RTCHECK(B == smallbin_at(M,I) ||\
+                     (ok_address(M, B) && B->fd == P))) {\
+      F->bk = B;\
+      B->fd = F;\
+    }\
+    else {\
+      CORRUPTION_ERROR_ACTION(M);\
+    }\
+  }\
+  else {\
+    CORRUPTION_ERROR_ACTION(M);\
+  }\
+}
 
 /* Unlink the first chunk from a smallbin */
-#define unlink_first_small_chunk(M, B, P, I)              \
-  {                                                       \
-                                                          \
-    mchunkptr F = P->fd;                                  \
-    assert(P != B);                                       \
-    assert(P != F);                                       \
-    assert(chunksize(P) == small_index2size(I));          \
-    if (B == F) {                                         \
-                                                          \
-      clear_smallmap(M, I);                               \
-                                                          \
-    } else if (RTCHECK(ok_address(M, F) && F->bk == P)) { \
-                                                          \
-      F->bk = B;                                          \
-      B->fd = F;                                          \
-                                                          \
-    } else {                                              \
-                                                          \
-      CORRUPTION_ERROR_ACTION(M);                         \
-                                                          \
-    }                                                     \
-                                                          \
-  }
+#define unlink_first_small_chunk(M, B, P, I) {\
+  mchunkptr F = P->fd;\
+  assert(P != B);\
+  assert(P != F);\
+  assert(chunksize(P) == small_index2size(I));\
+  if (B == F) {\
+    clear_smallmap(M, I);\
+  }\
+  else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
+    F->bk = B;\
+    B->fd = F;\
+  }\
+  else {\
+    CORRUPTION_ERROR_ACTION(M);\
+  }\
+}
 
 /* Replace dv node, binning the old one */
 /* Used only when dvsize known to be small */
-#define replace_dv(M, P, S)           \
-  {                                   \
-                                      \
-    size_t DVS = M->dvsize;           \
-    assert(is_small(DVS));            \
-    if (DVS != 0) {                   \
-                                      \
-      mchunkptr DV = M->dv;           \
-      insert_small_chunk(M, DV, DVS); \
-                                      \
-    }                                 \
-    M->dvsize = S;                    \
-    M->dv = P;                        \
-                                      \
-  }
+#define replace_dv(M, P, S) {\
+  size_t DVS = M->dvsize;\
+  assert(is_small(DVS));\
+  if (DVS != 0) {\
+    mchunkptr DV = M->dv;\
+    insert_small_chunk(M, DV, DVS);\
+  }\
+  M->dvsize = S;\
+  M->dv = P;\
+}
 
 /* ------------------------- Operations on trees ------------------------- */
 
 /* Insert chunk into tree */
-#define insert_large_chunk(M, X, S)                                  \
-  {                                                                  \
-                                                                     \
-    tbinptr *H;                                                      \
-    bindex_t I;                                                      \
-    compute_tree_index(S, I);                                        \
-    H = treebin_at(M, I);                                            \
-    X->index = I;                                                    \
-    X->child[0] = X->child[1] = 0;                                   \
-    if (!treemap_is_marked(M, I)) {                                  \
-                                                                     \
-      mark_treemap(M, I);                                            \
-      *H = X;                                                        \
-      X->parent = (tchunkptr)H;                                      \
-      X->fd = X->bk = X;                                             \
-                                                                     \
-    } else {                                                         \
-                                                                     \
-      tchunkptr T = *H;                                              \
-      size_t    K = S << leftshift_for_tree_index(I);                \
-      for (;;) {                                                     \
-                                                                     \
-        if (chunksize(T) != S) {                                     \
-                                                                     \
-          tchunkptr *C =                                             \
-              &(T->child[(K >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]); \
-          K <<= 1;                                                   \
-          if (*C != 0)                                               \
-            T = *C;                                                  \
-          else if (RTCHECK(ok_address(M, C))) {                      \
-                                                                     \
-            *C = X;                                                  \
-            X->parent = T;                                           \
-            X->fd = X->bk = X;                                       \
-            break;                                                   \
-                                                                     \
-          } else {                                                   \
-                                                                     \
-            CORRUPTION_ERROR_ACTION(M);                              \
-            break;                                                   \
-                                                                     \
-          }                                                          \
-                                                                     \
-        } else {                                                     \
-                                                                     \
-          tchunkptr F = T->fd;                                       \
-          if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {       \
-                                                                     \
-            T->fd = F->bk = X;                                       \
-            X->fd = F;                                               \
-            X->bk = T;                                               \
-            X->parent = 0;                                           \
-            break;                                                   \
-                                                                     \
-          } else {                                                   \
-                                                                     \
-            CORRUPTION_ERROR_ACTION(M);                              \
-            break;                                                   \
-                                                                     \
-          }                                                          \
-                                                                     \
-        }                                                            \
-                                                                     \
-      }                                                              \
-                                                                     \
-    }                                                                \
-                                                                     \
-  }
+#define insert_large_chunk(M, X, S) {\
+  tbinptr* H;\
+  bindex_t I;\
+  compute_tree_index(S, I);\
+  H = treebin_at(M, I);\
+  X->index = I;\
+  X->child[0] = X->child[1] = 0;\
+  if (!treemap_is_marked(M, I)) {\
+    mark_treemap(M, I);\
+    *H = X;\
+    X->parent = (tchunkptr)H;\
+    X->fd = X->bk = X;\
+  }\
+  else {\
+    tchunkptr T = *H;\
+    size_t K = S << leftshift_for_tree_index(I);\
+    for (;;) {\
+      if (chunksize(T) != S) {\
+        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
+        K <<= 1;\
+        if (*C != 0)\
+          T = *C;\
+        else if (RTCHECK(ok_address(M, C))) {\
+          *C = X;\
+          X->parent = T;\
+          X->fd = X->bk = X;\
+          break;\
+        }\
+        else {\
+          CORRUPTION_ERROR_ACTION(M);\
+          break;\
+        }\
+      }\
+      else {\
+        tchunkptr F = T->fd;\
+        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
+          T->fd = F->bk = X;\
+          X->fd = F;\
+          X->bk = T;\
+          X->parent = 0;\
+          break;\
+        }\
+        else {\
+          CORRUPTION_ERROR_ACTION(M);\
+          break;\
+        }\
+      }\
+    }\
+  }\
+}
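+/* The for(;;) walk above treats each treebin as a bitwise trie keyed on
+   chunk size: K carries the size pre-shifted so that its top bit picks
+   child[0] or child[1] at each level via
+   (K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1, while chunks of exactly equal
+   size are chained on the circular fd/bk list of the first node instead
+   of deepening the tree. */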
 
 /*
   Unlink steps:
@@ -4060,149 +3764,104 @@ static void internal_malloc_stats(mstate m) {
      x's parent and children to x's replacement (or null if none).
 */
 
-#define unlink_large_chunk(M, X)                                   \
-  {                                                                \
-                                                                   \
-    tchunkptr XP = X->parent;                                      \
-    tchunkptr R;                                                   \
-    if (X->bk != X) {                                              \
-                                                                   \
-      tchunkptr F = X->fd;                                         \
-      R = X->bk;                                                   \
-      if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) { \
-                                                                   \
-        F->bk = R;                                                 \
-        R->fd = F;                                                 \
-                                                                   \
-      } else {                                                     \
-                                                                   \
-        CORRUPTION_ERROR_ACTION(M);                                \
-                                                                   \
-      }                                                            \
-                                                                   \
-    } else {                                                       \
-                                                                   \
-      tchunkptr *RP;                                               \
-      if (((R = *(RP = &(X->child[1]))) != 0) ||                   \
-          ((R = *(RP = &(X->child[0]))) != 0)) {                   \
-                                                                   \
-        tchunkptr *CP;                                             \
-        while ((*(CP = &(R->child[1])) != 0) ||                    \
-               (*(CP = &(R->child[0])) != 0)) {                    \
-                                                                   \
-          R = *(RP = CP);                                          \
-                                                                   \
-        }                                                          \
-        if (RTCHECK(ok_address(M, RP)))                            \
-          *RP = 0;                                                 \
-        else {                                                     \
-                                                                   \
-          CORRUPTION_ERROR_ACTION(M);                              \
-                                                                   \
-        }                                                          \
-                                                                   \
-      }                                                            \
-                                                                   \
-    }                                                              \
-    if (XP != 0) {                                                 \
-                                                                   \
-      tbinptr *H = treebin_at(M, X->index);                        \
-      if (X == *H) {                                               \
-                                                                   \
-        if ((*H = R) == 0) clear_treemap(M, X->index);             \
-                                                                   \
-      } else if (RTCHECK(ok_address(M, XP))) {                     \
-                                                                   \
-        if (XP->child[0] == X)                                     \
-          XP->child[0] = R;                                        \
-        else                                                       \
-          XP->child[1] = R;                                        \
-                                                                   \
-      } else                                                       \
-                                                                   \
-                                                                   \
-        CORRUPTION_ERROR_ACTION(M);                                \
-      if (R != 0) {                                                \
-                                                                   \
-        if (RTCHECK(ok_address(M, R))) {                           \
-                                                                   \
-          tchunkptr C0, C1;                                        \
-          R->parent = XP;                                          \
-          if ((C0 = X->child[0]) != 0) {                           \
-                                                                   \
-            if (RTCHECK(ok_address(M, C0))) {                      \
-                                                                   \
-              R->child[0] = C0;                                    \
-              C0->parent = R;                                      \
-                                                                   \
-            } else                                                 \
-                                                                   \
-                                                                   \
-              CORRUPTION_ERROR_ACTION(M);                          \
-                                                                   \
-          }                                                        \
-          if ((C1 = X->child[1]) != 0) {                           \
-                                                                   \
-            if (RTCHECK(ok_address(M, C1))) {                      \
-                                                                   \
-              R->child[1] = C1;                                    \
-              C1->parent = R;                                      \
-                                                                   \
-            } else                                                 \
-                                                                   \
-                                                                   \
-              CORRUPTION_ERROR_ACTION(M);                          \
-                                                                   \
-          }                                                        \
-                                                                   \
-        } else                                                     \
-                                                                   \
-                                                                   \
-          CORRUPTION_ERROR_ACTION(M);                              \
-                                                                   \
-      }                                                            \
-                                                                   \
-    }                                                              \
-                                                                   \
-  }
+#define unlink_large_chunk(M, X) {\
+  tchunkptr XP = X->parent;\
+  tchunkptr R;\
+  if (X->bk != X) {\
+    tchunkptr F = X->fd;\
+    R = X->bk;\
+    if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
+      F->bk = R;\
+      R->fd = F;\
+    }\
+    else {\
+      CORRUPTION_ERROR_ACTION(M);\
+    }\
+  }\
+  else {\
+    tchunkptr* RP;\
+    if (((R = *(RP = &(X->child[1]))) != 0) ||\
+        ((R = *(RP = &(X->child[0]))) != 0)) {\
+      tchunkptr* CP;\
+      while ((*(CP = &(R->child[1])) != 0) ||\
+             (*(CP = &(R->child[0])) != 0)) {\
+        R = *(RP = CP);\
+      }\
+      if (RTCHECK(ok_address(M, RP)))\
+        *RP = 0;\
+      else {\
+        CORRUPTION_ERROR_ACTION(M);\
+      }\
+    }\
+  }\
+  if (XP != 0) {\
+    tbinptr* H = treebin_at(M, X->index);\
+    if (X == *H) {\
+      if ((*H = R) == 0) \
+        clear_treemap(M, X->index);\
+    }\
+    else if (RTCHECK(ok_address(M, XP))) {\
+      if (XP->child[0] == X) \
+        XP->child[0] = R;\
+      else \
+        XP->child[1] = R;\
+    }\
+    else\
+      CORRUPTION_ERROR_ACTION(M);\
+    if (R != 0) {\
+      if (RTCHECK(ok_address(M, R))) {\
+        tchunkptr C0, C1;\
+        R->parent = XP;\
+        if ((C0 = X->child[0]) != 0) {\
+          if (RTCHECK(ok_address(M, C0))) {\
+            R->child[0] = C0;\
+            C0->parent = R;\
+          }\
+          else\
+            CORRUPTION_ERROR_ACTION(M);\
+        }\
+        if ((C1 = X->child[1]) != 0) {\
+          if (RTCHECK(ok_address(M, C1))) {\
+            R->child[1] = C1;\
+            C1->parent = R;\
+          }\
+          else\
+            CORRUPTION_ERROR_ACTION(M);\
+        }\
+      }\
+      else\
+        CORRUPTION_ERROR_ACTION(M);\
+    }\
+  }\
+}
 
 /* Relays to large vs small bin operations */
 
-#define insert_chunk(M, P, S)                         \
-  if (is_small(S)) insert_small_chunk(M, P, S) else { \
-                                                      \
-      tchunkptr TP = (tchunkptr)(P);                  \
-      insert_large_chunk(M, TP, S);                   \
-                                                      \
-    }
+#define insert_chunk(M, P, S)\
+  if (is_small(S)) insert_small_chunk(M, P, S)\
+  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
+
+#define unlink_chunk(M, P, S)\
+  if (is_small(S)) unlink_small_chunk(M, P, S)\
+  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
 
-#define unlink_chunk(M, P, S)                         \
-  if (is_small(S)) unlink_small_chunk(M, P, S) else { \
-                                                      \
-      tchunkptr TP = (tchunkptr)(P);                  \
-      unlink_large_chunk(M, TP);                      \
-                                                      \
-    }
 
 /* Relays to internal calls to malloc/free from realloc, memalign etc */
 
 #if ONLY_MSPACES
-  #define internal_malloc(m, b) mspace_malloc(m, b)
-  #define internal_free(m, mem) mspace_free(m, mem);
-#else                                                       /* ONLY_MSPACES */
-  #if MSPACES
-    #define internal_malloc(m, b) \
-      ((m == gm) ? dlmalloc(b) : mspace_malloc(m, b))
-    #define internal_free(m, mem) \
-      if (m == gm)                \
-        dlfree(mem);              \
-      else                        \
-        mspace_free(m, mem);
-  #else                                                          /* MSPACES */
-    #define internal_malloc(m, b) dlmalloc(b)
-    #define internal_free(m, mem) dlfree(mem)
-  #endif                                                         /* MSPACES */
-#endif                                                      /* ONLY_MSPACES */
+#define internal_malloc(m, b) mspace_malloc(m, b)
+#define internal_free(m, mem) mspace_free(m,mem);
+#else /* ONLY_MSPACES */
+#if MSPACES
+#define internal_malloc(m, b)\
+  ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
+#define internal_free(m, mem)\
+   if (m == gm) dlfree(mem); else mspace_free(m,mem);
+#else /* MSPACES */
+#define internal_malloc(m, b) dlmalloc(b)
+#define internal_free(m, mem) dlfree(mem)
+#endif /* MSPACES */
+#endif /* ONLY_MSPACES */
 
 /* -----------------------  Direct-mmapping chunks ----------------------- */
 
@@ -4215,93 +3874,80 @@ static void internal_malloc_stats(mstate m) {
 */
 
 /* Malloc using mmap */
-static void *mmap_alloc(mstate m, size_t nb) {
-
+static void* mmap_alloc(mstate m, size_t nb) {
   size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
   if (m->footprint_limit != 0) {
-
     size_t fp = m->footprint + mmsize;
-    if (fp <= m->footprint || fp > m->footprint_limit) return 0;
-
+    if (fp <= m->footprint || fp > m->footprint_limit)
+      return 0;
   }
-
-  if (mmsize > nb) {                             /* Check for wrap around 0 */
-    char *mm = (char *)(CALL_DIRECT_MMAP(mmsize));
+  if (mmsize > nb) {     /* Check for wrap around 0 */
+    char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
     if (mm != CMFAIL) {
-
-      size_t    offset = align_offset(chunk2mem(mm));
-      size_t    psize = mmsize - offset - MMAP_FOOT_PAD;
+      size_t offset = align_offset(chunk2mem(mm));
+      size_t psize = mmsize - offset - MMAP_FOOT_PAD;
       mchunkptr p = (mchunkptr)(mm + offset);
       p->prev_foot = offset;
       p->head = psize;
       mark_inuse_foot(m, p, psize);
       chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
-      chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0;
+      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
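+      /* Layout at this point (a sketch): the alignment slack is
+         recorded in p->prev_foot, p->head holds psize with both inuse
+         bits clear (which is how is_mmapped() recognizes such chunks),
+         and FENCEPOST_HEAD plus a zero head terminate the region. */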
 
-      if (m->least_addr == 0 || mm < m->least_addr) m->least_addr = mm;
+      if (m->least_addr == 0 || mm < m->least_addr)
+        m->least_addr = mm;
       if ((m->footprint += mmsize) > m->max_footprint)
         m->max_footprint = m->footprint;
       assert(is_aligned(chunk2mem(p)));
       check_mmapped_chunk(m, p);
       return chunk2mem(p);
-
     }
-
   }
-
   return 0;
-
 }
 
 /* Realloc using mmap */
 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
-
   size_t oldsize = chunksize(oldp);
-  (void)flags;                         /* placate people compiling -Wunused */
-  if (is_small(nb))           /* Can't shrink mmap regions below small size */
+  (void)flags; /* placate people compiling -Wunused */
+  if (is_small(nb)) /* Can't shrink mmap regions below small size */
     return 0;
   /* Keep old chunk if big enough but not too big */
   if (oldsize >= nb + SIZE_T_SIZE &&
       (oldsize - nb) <= (mparams.granularity << 1))
     return oldp;
   else {
-
     size_t offset = oldp->prev_foot;
     size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
     size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
-    char * cp =
-        (char *)CALL_MREMAP((char *)oldp - offset, oldmmsize, newmmsize, flags);
+    char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
+                                  oldmmsize, newmmsize, flags);
     if (cp != CMFAIL) {
-
       mchunkptr newp = (mchunkptr)(cp + offset);
-      size_t    psize = newmmsize - offset - MMAP_FOOT_PAD;
+      size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
       newp->head = psize;
       mark_inuse_foot(m, newp, psize);
       chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
-      chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0;
+      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
 
-      if (cp < m->least_addr) m->least_addr = cp;
+      if (cp < m->least_addr)
+        m->least_addr = cp;
       if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
         m->max_footprint = m->footprint;
       check_mmapped_chunk(m, newp);
       return newp;
-
     }
-
   }
-
   return 0;
-
 }
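+/* In short: an existing mapping is kept when it already fits the
+   request with at most two granularity units of slack; otherwise
+   CALL_MREMAP is asked to resize it, and a CMFAIL result leaves the
+   caller to fall back on an allocate-and-copy realloc. */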
 
+
 /* -------------------------- mspace management -------------------------- */
 
 /* Initialize top chunk and its size */
 static void init_top(mstate m, mchunkptr p, size_t psize) {
-
   /* Ensure alignment */
   size_t offset = align_offset(chunk2mem(p));
-  p = (mchunkptr)((char *)p + offset);
+  p = (mchunkptr)((char*)p + offset);
   psize -= offset;
 
   m->top = p;
@@ -4309,29 +3955,23 @@ static void init_top(mstate m, mchunkptr p, size_t psize) {
   p->head = psize | PINUSE_BIT;
   /* set size of fake trailing chunk holding overhead space only once */
   chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
-  m->trim_check = mparams.trim_threshold;           /* reset on each update */
-
+  m->trim_check = mparams.trim_threshold; /* reset on each update */
 }
 
 /* Initialize bins for a new mstate that is otherwise zeroed out */
 static void init_bins(mstate m) {
-
   /* Establish circular links for smallbins */
   bindex_t i;
   for (i = 0; i < NSMALLBINS; ++i) {
-
-    sbinptr bin = smallbin_at(m, i);
+    sbinptr bin = smallbin_at(m,i);
     bin->fd = bin->bk = bin;
-
   }
-
 }
 
 #if PROCEED_ON_ERROR
 
 /* default corruption action */
 static void reset_on_error(mstate m) {
-
   int i;
   ++malloc_corruption_error_count;
   /* Reinitialize fields to forget about all memory */
@@ -4344,78 +3984,67 @@ static void reset_on_error(mstate m) {
   for (i = 0; i < NTREEBINS; ++i)
     *treebin_at(m, i) = 0;
   init_bins(m);
-
 }
-
-#endif                                                  /* PROCEED_ON_ERROR */
+#endif /* PROCEED_ON_ERROR */
 
 /* Allocate chunk and prepend remainder with chunk in successor base. */
-static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb) {
-
+static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
+                           size_t nb) {
   mchunkptr p = align_as_chunk(newbase);
   mchunkptr oldfirst = align_as_chunk(oldbase);
-  size_t    psize = (char *)oldfirst - (char *)p;
+  size_t psize = (char*)oldfirst - (char*)p;
   mchunkptr q = chunk_plus_offset(p, nb);
-  size_t    qsize = psize - nb;
+  size_t qsize = psize - nb;
   set_size_and_pinuse_of_inuse_chunk(m, p, nb);
 
-  assert((char *)oldfirst > (char *)q);
+  assert((char*)oldfirst > (char*)q);
   assert(pinuse(oldfirst));
   assert(qsize >= MIN_CHUNK_SIZE);
 
   /* consolidate remainder with first chunk of old base */
   if (oldfirst == m->top) {
-
     size_t tsize = m->topsize += qsize;
     m->top = q;
     q->head = tsize | PINUSE_BIT;
     check_top_chunk(m, q);
-
-  } else if (oldfirst == m->dv) {
-
+  }
+  else if (oldfirst == m->dv) {
     size_t dsize = m->dvsize += qsize;
     m->dv = q;
     set_size_and_pinuse_of_free_chunk(q, dsize);
-
-  } else {
-
+  }
+  else {
     if (!is_inuse(oldfirst)) {
-
       size_t nsize = chunksize(oldfirst);
       unlink_chunk(m, oldfirst, nsize);
       oldfirst = chunk_plus_offset(oldfirst, nsize);
       qsize += nsize;
-
     }
-
     set_free_with_pinuse(q, qsize, oldfirst);
     insert_chunk(m, q, qsize);
     check_free_chunk(m, q);
-
   }
 
   check_malloced_chunk(m, chunk2mem(p), nb);
   return chunk2mem(p);
-
 }
 
 /* Add a segment to hold a new noncontiguous region */
-static void add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped) {
-
+static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
   /* Determine locations and sizes of segment, fenceposts, old top */
-  char *      old_top = (char *)m->top;
+  char* old_top = (char*)m->top;
   msegmentptr oldsp = segment_holding(m, old_top);
-  char *      old_end = oldsp->base + oldsp->size;
-  size_t      ssize = pad_request(sizeof(struct malloc_segment));
-  char *      rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
-  size_t      offset = align_offset(chunk2mem(rawsp));
-  char *      asp = rawsp + offset;
-  char *      csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp;
-  mchunkptr   sp = (mchunkptr)csp;
+  char* old_end = oldsp->base + oldsp->size;
+  size_t ssize = pad_request(sizeof(struct malloc_segment));
+  char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
+  size_t offset = align_offset(chunk2mem(rawsp));
+  char* asp = rawsp + offset;
+  char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
+  mchunkptr sp = (mchunkptr)csp;
   msegmentptr ss = (msegmentptr)(chunk2mem(sp));
-  mchunkptr   tnext = chunk_plus_offset(sp, ssize);
-  mchunkptr   p = tnext;
-  int         nfences = 0;
+  mchunkptr tnext = chunk_plus_offset(sp, ssize);
+  mchunkptr p = tnext;
+  int nfences = 0;
 
   /* reset top to new space */
   init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
@@ -4423,7 +4052,7 @@ static void add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped) {
   /* Set up segment record */
   assert(is_aligned(ss));
   set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
-  *ss = m->seg;                                      /* Push current record */
+  *ss = m->seg; /* Push current record */
   m->seg.base = tbase;
   m->seg.size = tsize;
   m->seg.sflags = mmapped;
@@ -4431,61 +4060,53 @@ static void add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped) {
 
   /* Insert trailing fenceposts */
   for (;;) {
-
     mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
     p->head = FENCEPOST_HEAD;
     ++nfences;
-    if ((char *)(&(nextp->head)) < old_end)
+    if ((char*)(&(nextp->head)) < old_end)
       p = nextp;
     else
       break;
-
   }
-
   assert(nfences >= 2);
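+  /* The fenceposts written above cap the old segment so chunk traversal
+     never walks past its end; the segment record itself lives inside
+     the old top space as an ordinary inuse chunk pushed onto m->seg. */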
 
   /* Insert the rest of old top into a bin as an ordinary free chunk */
   if (csp != old_top) {
-
     mchunkptr q = (mchunkptr)old_top;
-    size_t    psize = csp - old_top;
+    size_t psize = csp - old_top;
     mchunkptr tn = chunk_plus_offset(q, psize);
     set_free_with_pinuse(q, psize, tn);
     insert_chunk(m, q, psize);
-
   }
 
   check_top_chunk(m, m->top);
-
 }
 
 /* -------------------------- System allocation -------------------------- */
 
 /* Get memory from system using MORECORE or MMAP */
-static void *sys_alloc(mstate m, size_t nb) {
-
-  char * tbase = CMFAIL;
+static void* sys_alloc(mstate m, size_t nb) {
+  char* tbase = CMFAIL;
   size_t tsize = 0;
   flag_t mmap_flag = 0;
-  size_t asize;                                          /* allocation size */
+  size_t asize; /* allocation size */
 
   ensure_initialization();
 
   /* Directly map large chunks, but only if already initialized */
   if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
-
-    void *mem = mmap_alloc(m, nb);
-    if (mem != 0) return mem;
-
+    void* mem = mmap_alloc(m, nb);
+    if (mem != 0)
+      return mem;
   }
 
   asize = granularity_align(nb + SYS_ALLOC_PADDING);
-  if (asize <= nb) return 0;                                  /* wraparound */
+  if (asize <= nb)
+    return 0; /* wraparound */
   if (m->footprint_limit != 0) {
-
     size_t fp = m->footprint + asize;
-    if (fp <= m->footprint || fp > m->footprint_limit) return 0;
-
+    if (fp <= m->footprint || fp > m->footprint_limit)
+      return 0;
   }
 
   /*
@@ -4511,119 +4132,91 @@ static void *sys_alloc(mstate m, size_t nb) {
   */
 
   if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
-
-    char *      br = CMFAIL;
-    size_t      ssize = asize;                            /* sbrk call size */
-    msegmentptr ss = (m->top == 0) ? 0 : segment_holding(m, (char *)m->top);
+    char* br = CMFAIL;
+    size_t ssize = asize; /* sbrk call size */
+    msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
     ACQUIRE_MALLOC_GLOBAL_LOCK();
 
-    if (ss == 0) {                        /* First time through or recovery */
-      char *base = (char *)CALL_MORECORE(0);
+    if (ss == 0) {  /* First time through or recovery */
+      char* base = (char*)CALL_MORECORE(0);
       if (base != CMFAIL) {
-
         size_t fp;
         /* Adjust to end on a page boundary */
         if (!is_page_aligned(base))
           ssize += (page_align((size_t)base) - (size_t)base);
-        fp = m->footprint + ssize;                        /* recheck limits */
+        fp = m->footprint + ssize; /* recheck limits */
         if (ssize > nb && ssize < HALF_MAX_SIZE_T &&
             (m->footprint_limit == 0 ||
              (fp > m->footprint && fp <= m->footprint_limit)) &&
-            (br = (char *)(CALL_MORECORE(ssize))) == base) {
-
+            (br = (char*)(CALL_MORECORE(ssize))) == base) {
           tbase = base;
           tsize = ssize;
-
         }
-
       }
-
-    } else {
-
+    }
+    else {
       /* Subtract out existing available top space from MORECORE request. */
       ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
       /* Use mem here only if it did continuously extend old space */
       if (ssize < HALF_MAX_SIZE_T &&
-          (br = (char *)(CALL_MORECORE(ssize))) == ss->base + ss->size) {
-
+          (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {
         tbase = br;
         tsize = ssize;
-
       }
-
     }
 
-    if (tbase == CMFAIL) {                     /* Cope with partial failure */
-      if (br != CMFAIL) {         /* Try to use/extend the space we did get */
-        if (ssize < HALF_MAX_SIZE_T && ssize < nb + SYS_ALLOC_PADDING) {
-
+    if (tbase == CMFAIL) {    /* Cope with partial failure */
+      if (br != CMFAIL) {    /* Try to use/extend the space we did get */
+        if (ssize < HALF_MAX_SIZE_T &&
+            ssize < nb + SYS_ALLOC_PADDING) {
           size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);
           if (esize < HALF_MAX_SIZE_T) {
-
-            char *end = (char *)CALL_MORECORE(esize);
+            char* end = (char*)CALL_MORECORE(esize);
             if (end != CMFAIL)
               ssize += esize;
-            else {                             /* Can't use; try to release */
-              (void)CALL_MORECORE(-ssize);
+            else {            /* Can't use; try to release */
+              (void) CALL_MORECORE(-ssize);
               br = CMFAIL;
-
             }
-
           }
-
         }
-
       }
-
-      if (br != CMFAIL) {                       /* Use the space we did get */
+      if (br != CMFAIL) {    /* Use the space we did get */
         tbase = br;
         tsize = ssize;
-
-      } else
-
-        disable_contiguous(m);   /* Don't try contiguous path in the future */
-
+      }
+      else
+        disable_contiguous(m); /* Don't try contiguous path in the future */
     }
 
     RELEASE_MALLOC_GLOBAL_LOCK();
-
   }
 
-  if (HAVE_MMAP && tbase == CMFAIL) {                           /* Try MMAP */
-    char *mp = (char *)(CALL_MMAP(asize));
+  if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
+    char* mp = (char*)(CALL_MMAP(asize));
     if (mp != CMFAIL) {
-
       tbase = mp;
       tsize = asize;
       mmap_flag = USE_MMAP_BIT;
-
     }
-
   }
 
-  if (HAVE_MORECORE && tbase == CMFAIL) {     /* Try noncontiguous MORECORE */
+  if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
     if (asize < HALF_MAX_SIZE_T) {
-
-      char *br = CMFAIL;
-      char *end = CMFAIL;
+      char* br = CMFAIL;
+      char* end = CMFAIL;
       ACQUIRE_MALLOC_GLOBAL_LOCK();
-      br = (char *)(CALL_MORECORE(asize));
-      end = (char *)(CALL_MORECORE(0));
+      br = (char*)(CALL_MORECORE(asize));
+      end = (char*)(CALL_MORECORE(0));
       RELEASE_MALLOC_GLOBAL_LOCK();
       if (br != CMFAIL && end != CMFAIL && br < end) {
-
         size_t ssize = end - br;
         if (ssize > nb + TOP_FOOT_SIZE) {
-
           tbase = br;
           tsize = ssize;
-
         }
-
       }
-
     }
-
   }
 
   if (tbase != CMFAIL) {
@@ -4631,8 +4224,9 @@ static void *sys_alloc(mstate m, size_t nb) {
     if ((m->footprint += tsize) > m->max_footprint)
       m->max_footprint = m->footprint;
 
-    if (!is_initialized(m)) {                  /* first-time initialization */
-      if (m->least_addr == 0 || tbase < m->least_addr) m->least_addr = tbase;
+    if (!is_initialized(m)) { /* first-time initialization */
+      if (m->least_addr == 0 || tbase < m->least_addr)
+        m->least_addr = tbase;
       m->seg.base = tbase;
       m->seg.size = tsize;
       m->seg.sflags = mmap_flag;
@@ -4645,52 +4239,46 @@ static void *sys_alloc(mstate m, size_t nb) {
       else
 #endif
       {
-
         /* Offset top by embedded malloc_state */
         mchunkptr mn = next_chunk(mem2chunk(m));
-        init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
-
+        init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
       }
-
     }
 
     else {
-
       /* Try to merge with an existing segment */
       msegmentptr sp = &m->seg;
       /* Only consider most recent segment if traversal suppressed */
       while (sp != 0 && tbase != sp->base + sp->size)
         sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
-      if (sp != 0 && !is_extern_segment(sp) &&
+      if (sp != 0 &&
+          !is_extern_segment(sp) &&
           (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
-          segment_holds(sp, m->top)) {                            /* append */
+          segment_holds(sp, m->top)) { /* append */
         sp->size += tsize;
         init_top(m, m->top, m->topsize + tsize);
-
-      } else {
-
-        if (tbase < m->least_addr) m->least_addr = tbase;
+      }
+      else {
+        if (tbase < m->least_addr)
+          m->least_addr = tbase;
         sp = &m->seg;
         while (sp != 0 && sp->base != tbase + tsize)
           sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
-        if (sp != 0 && !is_extern_segment(sp) &&
+        if (sp != 0 &&
+            !is_extern_segment(sp) &&
             (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
-
-          char *oldbase = sp->base;
+          char* oldbase = sp->base;
           sp->base = tbase;
           sp->size += tsize;
           return prepend_alloc(m, tbase, oldbase, nb);
-
-        } else
-
+        }
+        else
           add_segment(m, tbase, tsize, mmap_flag);
-
       }
-
     }
 
-    if (nb < m->topsize) {       /* Allocate from new or extended top space */
-      size_t    rsize = m->topsize -= nb;
+    if (nb < m->topsize) { /* Allocate from new or extended top space */
+      size_t rsize = m->topsize -= nb;
       mchunkptr p = m->top;
       mchunkptr r = m->top = chunk_plus_offset(p, nb);
       r->head = rsize | PINUSE_BIT;
@@ -4698,421 +4286,313 @@ static void *sys_alloc(mstate m, size_t nb) {
       check_top_chunk(m, m->top);
       check_malloced_chunk(m, chunk2mem(p), nb);
       return chunk2mem(p);
-
     }
-
   }
 
   MALLOC_FAILURE_ACTION;
   return 0;
-
 }
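
sys_alloc above tries three sources in order: contiguous MORECORE, then MMAP, then noncontiguous MORECORE. A much-simplified POSIX sketch of that fallback order (illustration only; get_system_memory is a hypothetical helper, and the real code additionally handles wraparound, footprint limits, and partial sbrk failures):

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/mman.h>

    static void *get_system_memory(size_t size) {
      void *p = sbrk((intptr_t)size);               /* MORECORE path */
      if (p != (void *)-1) return p;
      p = mmap(NULL, size, PROT_READ | PROT_WRITE,  /* MMAP fallback */
               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return (p == MAP_FAILED) ? NULL : p;
    }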
 
 /* -----------------------  system deallocation -------------------------- */
 
 /* Unmap and unlink any mmapped segments that don't contain used chunks */
 static size_t release_unused_segments(mstate m) {
-
-  size_t      released = 0;
-  int         nsegs = 0;
+  size_t released = 0;
+  int nsegs = 0;
   msegmentptr pred = &m->seg;
   msegmentptr sp = pred->next;
   while (sp != 0) {
-
-    char *      base = sp->base;
-    size_t      size = sp->size;
+    char* base = sp->base;
+    size_t size = sp->size;
     msegmentptr next = sp->next;
     ++nsegs;
     if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
-
       mchunkptr p = align_as_chunk(base);
-      size_t    psize = chunksize(p);
+      size_t psize = chunksize(p);
       /* Can unmap if first chunk holds entire segment and not pinned */
-      if (!is_inuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
-
+      if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
         tchunkptr tp = (tchunkptr)p;
-        assert(segment_holds(sp, (char *)sp));
+        assert(segment_holds(sp, (char*)sp));
         if (p == m->dv) {
-
           m->dv = 0;
           m->dvsize = 0;
-
-        } else {
-
+        }
+        else {
           unlink_large_chunk(m, tp);
-
         }
-
         if (CALL_MUNMAP(base, size) == 0) {
-
           released += size;
           m->footprint -= size;
           /* unlink obsoleted record */
           sp = pred;
           sp->next = next;
-
-        } else {                                /* back out if cannot unmap */
-
+        }
+        else { /* back out if cannot unmap */
           insert_large_chunk(m, tp, psize);
-
         }
-
       }
-
     }
-
-    if (NO_SEGMENT_TRAVERSAL)                    /* scan only first segment */
+    if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
       break;
     pred = sp;
     sp = next;
-
   }
-
   /* Reset check counter */
-  m->release_checks = (((size_t)nsegs > (size_t)MAX_RELEASE_CHECK_RATE)
-                           ? (size_t)nsegs
-                           : (size_t)MAX_RELEASE_CHECK_RATE);
+  m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?
+                       (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);
   return released;
-
 }
 
 static int sys_trim(mstate m, size_t pad) {
-
   size_t released = 0;
   ensure_initialization();
   if (pad < MAX_REQUEST && is_initialized(m)) {
-
-    pad += TOP_FOOT_SIZE;        /* ensure enough room for segment overhead */
+    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
 
     if (m->topsize > pad) {
-
       /* Shrink top space in granularity-size units, keeping at least one */
       size_t unit = mparams.granularity;
-      size_t extra =
-          ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - SIZE_T_ONE) * unit;
-      msegmentptr sp = segment_holding(m, (char *)m->top);
+      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
+                      SIZE_T_ONE) * unit;
+      msegmentptr sp = segment_holding(m, (char*)m->top);
 
       if (!is_extern_segment(sp)) {
-
         if (is_mmapped_segment(sp)) {
-
-          if (HAVE_MMAP && sp->size >= extra &&
-              !has_segment_link(m, sp)) {         /* can't shrink if pinned */
+          if (HAVE_MMAP &&
+              sp->size >= extra &&
+              !has_segment_link(m, sp)) { /* can't shrink if pinned */
             size_t newsize = sp->size - extra;
-            (void)newsize;    /* placate people compiling -Wunused-variable */
+            (void)newsize; /* placate people compiling -Wunused-variable */
             /* Prefer mremap, fall back to munmap */
             if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
                 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
-
               released = extra;
-
             }
-
           }
-
-        } else if (HAVE_MORECORE) {
-
-          if (extra >= HALF_MAX_SIZE_T)          /* Avoid wrapping negative */
+        }
+        else if (HAVE_MORECORE) {
+          if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
             extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
           ACQUIRE_MALLOC_GLOBAL_LOCK();
           {
-
             /* Make sure end of memory is where we last set it. */
-            char *old_br = (char *)(CALL_MORECORE(0));
+            char* old_br = (char*)(CALL_MORECORE(0));
             if (old_br == sp->base + sp->size) {
-
-              char *rel_br = (char *)(CALL_MORECORE(-extra));
-              char *new_br = (char *)(CALL_MORECORE(0));
+              char* rel_br = (char*)(CALL_MORECORE(-extra));
+              char* new_br = (char*)(CALL_MORECORE(0));
               if (rel_br != CMFAIL && new_br < old_br)
                 released = old_br - new_br;
-
             }
-
           }
-
           RELEASE_MALLOC_GLOBAL_LOCK();
-
         }
-
       }
 
       if (released != 0) {
-
         sp->size -= released;
         m->footprint -= released;
         init_top(m, m->top, m->topsize - released);
         check_top_chunk(m, m->top);
-
       }
-
     }
 
     /* Unmap any unused mmapped segments */
-    if (HAVE_MMAP) released += release_unused_segments(m);
+    if (HAVE_MMAP)
+      released += release_unused_segments(m);
 
     /* On failure, disable autotrim to avoid repeated failed future calls */
-    if (released == 0 && m->topsize > m->trim_check) m->trim_check = MAX_SIZE_T;
-
+    if (released == 0 && m->topsize > m->trim_check)
+      m->trim_check = MAX_SIZE_T;
   }
 
-  return (released != 0) ? 1 : 0;
-
+  return (released != 0)? 1 : 0;
 }
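
From the caller's side, trimming is exposed through dlmalloc_trim, defined further below. A usage sketch, assuming the default build where the dl* entry points exist (trim_example is a hypothetical helper):

    #include <stddef.h>

    extern void *dlmalloc(size_t);
    extern void dlfree(void *);
    extern int dlmalloc_trim(size_t);

    static void trim_example(void) {
      void *p = dlmalloc(1 << 20);
      dlfree(p);               /* chunk consolidates into top */
      (void)dlmalloc_trim(0);  /* pad 0: release as much as possible */
    }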
 
 /* Consolidate and bin a chunk. Differs from exported versions
    of free mainly in that the chunk need not be marked as inuse.
 */
 static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
-
   mchunkptr next = chunk_plus_offset(p, psize);
   if (!pinuse(p)) {
-
     mchunkptr prev;
-    size_t    prevsize = p->prev_foot;
+    size_t prevsize = p->prev_foot;
     if (is_mmapped(p)) {
-
       psize += prevsize + MMAP_FOOT_PAD;
-      if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) m->footprint -= psize;
+      if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
+        m->footprint -= psize;
       return;
-
     }
-
     prev = chunk_minus_offset(p, prevsize);
     psize += prevsize;
     p = prev;
-    if (RTCHECK(ok_address(m, prev))) {             /* consolidate backward */
+    if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */
       if (p != m->dv) {
-
         unlink_chunk(m, p, prevsize);
-
-      } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
-
+      }
+      else if ((next->head & INUSE_BITS) == INUSE_BITS) {
         m->dvsize = psize;
         set_free_with_pinuse(p, psize, next);
         return;
-
       }
-
-    } else {
-
+    }
+    else {
       CORRUPTION_ERROR_ACTION(m);
       return;
-
     }
-
   }
-
   if (RTCHECK(ok_address(m, next))) {
-
-    if (!cinuse(next)) {                             /* consolidate forward */
+    if (!cinuse(next)) {  /* consolidate forward */
       if (next == m->top) {
-
         size_t tsize = m->topsize += psize;
         m->top = p;
         p->head = tsize | PINUSE_BIT;
         if (p == m->dv) {
-
           m->dv = 0;
           m->dvsize = 0;
-
         }
-
         return;
-
-      } else if (next == m->dv) {
-
+      }
+      else if (next == m->dv) {
         size_t dsize = m->dvsize += psize;
         m->dv = p;
         set_size_and_pinuse_of_free_chunk(p, dsize);
         return;
-
-      } else {
-
+      }
+      else {
         size_t nsize = chunksize(next);
         psize += nsize;
         unlink_chunk(m, next, nsize);
         set_size_and_pinuse_of_free_chunk(p, psize);
         if (p == m->dv) {
-
           m->dvsize = psize;
           return;
-
         }
-
       }
-
-    } else {
-
+    }
+    else {
       set_free_with_pinuse(p, psize, next);
-
     }
-
     insert_chunk(m, p, psize);
-
-  } else {
-
+  }
+  else {
     CORRUPTION_ERROR_ACTION(m);
-
   }
-
 }
 
 /* ---------------------------- malloc --------------------------- */
 
 /* allocate a large request from the best fitting chunk in a treebin */
-static void *tmalloc_large(mstate m, size_t nb) {
-
+static void* tmalloc_large(mstate m, size_t nb) {
   tchunkptr v = 0;
-  size_t    rsize = -nb;                               /* Unsigned negation */
+  size_t rsize = -nb; /* Unsigned negation */
   tchunkptr t;
-  bindex_t  idx;
+  bindex_t idx;
   compute_tree_index(nb, idx);
   if ((t = *treebin_at(m, idx)) != 0) {
-
     /* Traverse tree for this bin looking for node with size == nb */
-    size_t    sizebits = nb << leftshift_for_tree_index(idx);
-    tchunkptr rst = 0;                 /* The deepest untaken right subtree */
+    size_t sizebits = nb << leftshift_for_tree_index(idx);
+    tchunkptr rst = 0;  /* The deepest untaken right subtree */
     for (;;) {
-
       tchunkptr rt;
-      size_t    trem = chunksize(t) - nb;
+      size_t trem = chunksize(t) - nb;
       if (trem < rsize) {
-
         v = t;
-        if ((rsize = trem) == 0) break;
-
+        if ((rsize = trem) == 0)
+          break;
       }
-
       rt = t->child[1];
-      t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
-      if (rt != 0 && rt != t) rst = rt;
+      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
+      if (rt != 0 && rt != t)
+        rst = rt;
       if (t == 0) {
-
-        t = rst;               /* set t to least subtree holding sizes > nb */
+        t = rst; /* set t to least subtree holding sizes > nb */
         break;
-
       }
-
       sizebits <<= 1;
-
     }
-
   }
-
-  if (t == 0 && v == 0) {        /* set t to root of next non-empty treebin */
+  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
     binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
     if (leftbits != 0) {
-
       bindex_t i;
       binmap_t leastbit = least_bit(leftbits);
       compute_bit2idx(leastbit, i);
       t = *treebin_at(m, i);
-
     }
-
   }
 
-  while (t != 0) {                      /* find smallest of tree or subtree */
+  while (t != 0) { /* find smallest of tree or subtree */
     size_t trem = chunksize(t) - nb;
     if (trem < rsize) {
-
       rsize = trem;
       v = t;
-
     }
-
     t = leftmost_child(t);
-
   }
 
   /*  If dv is a better fit, return 0 so malloc will use it */
   if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
-
-    if (RTCHECK(ok_address(m, v))) {                               /* split */
+    if (RTCHECK(ok_address(m, v))) { /* split */
       mchunkptr r = chunk_plus_offset(v, nb);
       assert(chunksize(v) == rsize + nb);
       if (RTCHECK(ok_next(v, r))) {
-
         unlink_large_chunk(m, v);
         if (rsize < MIN_CHUNK_SIZE)
           set_inuse_and_pinuse(m, v, (rsize + nb));
         else {
-
           set_size_and_pinuse_of_inuse_chunk(m, v, nb);
           set_size_and_pinuse_of_free_chunk(r, rsize);
           insert_chunk(m, r, rsize);
-
         }
-
         return chunk2mem(v);
-
       }
-
     }
-
     CORRUPTION_ERROR_ACTION(m);
-
   }
-
   return 0;
-
 }
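
The bin scans above lean on two bit tricks used throughout this file: least_bit(x) isolates the lowest set bit as x & -x, and left_bits() masks away bins below the current index. A tiny self-contained demo of the first identity:

    #include <stdio.h>

    int main(void) {
      unsigned x = 0x28;             /* binary 101000 */
      unsigned least = x & (0U - x); /* x & -x isolates the lowest set bit */
      printf("0x%x\n", least);       /* prints 0x8 */
      return 0;
    }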
 
 /* allocate a small request from the best fitting chunk in a treebin */
-static void *tmalloc_small(mstate m, size_t nb) {
-
+static void* tmalloc_small(mstate m, size_t nb) {
   tchunkptr t, v;
-  size_t    rsize;
-  bindex_t  i;
-  binmap_t  leastbit = least_bit(m->treemap);
+  size_t rsize;
+  bindex_t i;
+  binmap_t leastbit = least_bit(m->treemap);
   compute_bit2idx(leastbit, i);
   v = t = *treebin_at(m, i);
   rsize = chunksize(t) - nb;
 
   while ((t = leftmost_child(t)) != 0) {
-
     size_t trem = chunksize(t) - nb;
     if (trem < rsize) {
-
       rsize = trem;
       v = t;
-
     }
-
   }
 
   if (RTCHECK(ok_address(m, v))) {
-
     mchunkptr r = chunk_plus_offset(v, nb);
     assert(chunksize(v) == rsize + nb);
     if (RTCHECK(ok_next(v, r))) {
-
       unlink_large_chunk(m, v);
       if (rsize < MIN_CHUNK_SIZE)
         set_inuse_and_pinuse(m, v, (rsize + nb));
       else {
-
         set_size_and_pinuse_of_inuse_chunk(m, v, nb);
         set_size_and_pinuse_of_free_chunk(r, rsize);
         replace_dv(m, r, rsize);
-
       }
-
       return chunk2mem(v);
-
     }
-
   }
 
   CORRUPTION_ERROR_ACTION(m);
   return 0;
-
 }
 
 #if !ONLY_MSPACES
 
-void *dlmalloc(size_t bytes) {
-
+void* dlmalloc(size_t bytes) {
   /*
      Basic algorithm:
      If a small request (< 256 bytes minus per-chunk overhead):
@@ -5136,25 +4616,23 @@ void *dlmalloc(size_t bytes) {
      The ugly goto's here ensure that postaction occurs along all paths.
   */
 
-  #if USE_LOCKS
-  ensure_initialization();    /* initialize in sys_alloc if not using locks */
-  #endif
+#if USE_LOCKS
+  ensure_initialization(); /* initialize in sys_alloc if not using locks */
+#endif
 
   if (!PREACTION(gm)) {
-
-    void * mem;
+    void* mem;
     size_t nb;
     if (bytes <= MAX_SMALL_REQUEST) {
-
       bindex_t idx;
       binmap_t smallbits;
-      nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
+      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
       idx = small_index(nb);
       smallbits = gm->smallmap >> idx;
 
-      if ((smallbits & 0x3U) != 0) {    /* Remainderless fit to a smallbin. */
+      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
         mchunkptr b, p;
-        idx += ~smallbits & 1;                /* Uses next bin if idx empty */
+        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
         b = smallbin_at(gm, idx);
         p = b->fd;
         assert(chunksize(p) == small_index2size(idx));
@@ -5163,17 +4641,15 @@ void *dlmalloc(size_t bytes) {
         mem = chunk2mem(p);
         check_malloced_chunk(gm, mem, nb);
         goto postaction;
-
       }
 
       else if (nb > gm->dvsize) {
-
-        if (smallbits != 0) {        /* Use chunk in next nonempty smallbin */
+        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
           mchunkptr b, p, r;
-          size_t    rsize;
-          bindex_t  i;
-          binmap_t  leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
-          binmap_t  leastbit = least_bit(leftbits);
+          size_t rsize;
+          bindex_t i;
+          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
+          binmap_t leastbit = least_bit(leftbits);
           compute_bit2idx(leastbit, i);
           b = smallbin_at(gm, i);
           p = b->fd;
@@ -5184,71 +4660,54 @@ void *dlmalloc(size_t bytes) {
           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
             set_inuse_and_pinuse(gm, p, small_index2size(i));
           else {
-
             set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
             r = chunk_plus_offset(p, nb);
             set_size_and_pinuse_of_free_chunk(r, rsize);
             replace_dv(gm, r, rsize);
-
           }
-
           mem = chunk2mem(p);
           check_malloced_chunk(gm, mem, nb);
           goto postaction;
-
         }
 
         else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
-
           check_malloced_chunk(gm, mem, nb);
           goto postaction;
-
         }
-
       }
-
-    } else if (bytes >= MAX_REQUEST)
-
+    }
+    else if (bytes >= MAX_REQUEST)
       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
     else {
-
       nb = pad_request(bytes);
       if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
-
         check_malloced_chunk(gm, mem, nb);
         goto postaction;
-
       }
-
     }
 
     if (nb <= gm->dvsize) {
-
-      size_t    rsize = gm->dvsize - nb;
+      size_t rsize = gm->dvsize - nb;
       mchunkptr p = gm->dv;
-      if (rsize >= MIN_CHUNK_SIZE) {                            /* split dv */
+      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
         mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
         gm->dvsize = rsize;
         set_size_and_pinuse_of_free_chunk(r, rsize);
         set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
-
-      } else {                                                /* exhaust dv */
-
+      }
+      else { /* exhaust dv */
         size_t dvs = gm->dvsize;
         gm->dvsize = 0;
         gm->dv = 0;
         set_inuse_and_pinuse(gm, p, dvs);
-
       }
-
       mem = chunk2mem(p);
       check_malloced_chunk(gm, mem, nb);
       goto postaction;
-
     }
 
-    else if (nb < gm->topsize) {                               /* Split top */
-      size_t    rsize = gm->topsize -= nb;
+    else if (nb < gm->topsize) { /* Split top */
+      size_t rsize = gm->topsize -= nb;
       mchunkptr p = gm->top;
       mchunkptr r = gm->top = chunk_plus_offset(p, nb);
       r->head = rsize | PINUSE_BIT;
@@ -5257,7 +4716,6 @@ void *dlmalloc(size_t bytes) {
       check_top_chunk(gm, gm->top);
       check_malloced_chunk(gm, mem, nb);
       goto postaction;
-
     }
 
     mem = sys_alloc(gm, nb);
@@ -5265,17 +4723,14 @@ void *dlmalloc(size_t bytes) {
   postaction:
     POSTACTION(gm);
     return mem;
-
   }
 
   return 0;
-
 }
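
Callers use the familiar malloc contract under the dl prefix. A minimal usage sketch, assuming the default (non-ONLY_MSPACES) build; demo is a hypothetical helper:

    #include <stddef.h>
    #include <string.h>

    extern void *dlmalloc(size_t);
    extern void dlfree(void *);

    static int demo(void) {
      char *buf = dlmalloc(64); /* small request: smallbin/dv/top paths above */
      if (buf == 0) return -1;
      memset(buf, 'x', 64);
      dlfree(buf);
      return 0;
    }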
 
 /* ---------------------------- free --------------------------- */
 
-void dlfree(void *mem) {
-
+void dlfree(void* mem) {
   /*
     Consolidate freed chunks with preceding or succeeding bordering

      free chunks, if they exist, and then place in a bin.  Intermixed
@@ -5283,216 +4738,164 @@ void dlfree(void *mem) {
   */
 
   if (mem != 0) {
-
-    mchunkptr p = mem2chunk(mem);
-  #if FOOTERS
+    mchunkptr p  = mem2chunk(mem);
+#if FOOTERS
     mstate fm = get_mstate_for(p);
     if (!ok_magic(fm)) {
-
       USAGE_ERROR_ACTION(fm, p);
       return;
-
     }
-
-  #else                                                          /* FOOTERS */
-    #define fm gm
-  #endif                                                         /* FOOTERS */
+#else /* FOOTERS */
+#define fm gm
+#endif /* FOOTERS */
     if (!PREACTION(fm)) {
-
       check_inuse_chunk(fm, p);
       if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
-
-        size_t    psize = chunksize(p);
+        size_t psize = chunksize(p);
         mchunkptr next = chunk_plus_offset(p, psize);
         if (!pinuse(p)) {
-
           size_t prevsize = p->prev_foot;
           if (is_mmapped(p)) {
-
             psize += prevsize + MMAP_FOOT_PAD;
-            if (CALL_MUNMAP((char *)p - prevsize, psize) == 0)
+            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
               fm->footprint -= psize;
             goto postaction;
-
-          } else {
-
+          }
+          else {
             mchunkptr prev = chunk_minus_offset(p, prevsize);
             psize += prevsize;
             p = prev;
-            if (RTCHECK(ok_address(fm, prev))) {    /* consolidate backward */
+            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
               if (p != fm->dv) {
-
                 unlink_chunk(fm, p, prevsize);
-
-              } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
-
+              }
+              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                 fm->dvsize = psize;
                 set_free_with_pinuse(p, psize, next);
                 goto postaction;
-
               }
-
-            } else
-
+            }
+            else
               goto erroraction;
-
           }
-
         }
 
         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
-
-          if (!cinuse(next)) {                       /* consolidate forward */
+          if (!cinuse(next)) {  /* consolidate forward */
             if (next == fm->top) {
-
               size_t tsize = fm->topsize += psize;
               fm->top = p;
               p->head = tsize | PINUSE_BIT;
               if (p == fm->dv) {
-
                 fm->dv = 0;
                 fm->dvsize = 0;
-
               }
-
-              if (should_trim(fm, tsize)) sys_trim(fm, 0);
+              if (should_trim(fm, tsize))
+                sys_trim(fm, 0);
               goto postaction;
-
-            } else if (next == fm->dv) {
-
+            }
+            else if (next == fm->dv) {
               size_t dsize = fm->dvsize += psize;
               fm->dv = p;
               set_size_and_pinuse_of_free_chunk(p, dsize);
               goto postaction;
-
-            } else {
-
+            }
+            else {
               size_t nsize = chunksize(next);
               psize += nsize;
               unlink_chunk(fm, next, nsize);
               set_size_and_pinuse_of_free_chunk(p, psize);
               if (p == fm->dv) {
-
                 fm->dvsize = psize;
                 goto postaction;
-
               }
-
             }
-
-          } else
-
+          }
+          else
             set_free_with_pinuse(p, psize, next);
 
           if (is_small(psize)) {
-
             insert_small_chunk(fm, p, psize);
             check_free_chunk(fm, p);
-
-          } else {
-
+          }
+          else {
             tchunkptr tp = (tchunkptr)p;
             insert_large_chunk(fm, tp, psize);
             check_free_chunk(fm, p);
-            if (--fm->release_checks == 0) release_unused_segments(fm);
-
+            if (--fm->release_checks == 0)
+              release_unused_segments(fm);
           }
-
           goto postaction;
-
         }
-
       }
-
     erroraction:
       USAGE_ERROR_ACTION(fm, p);
     postaction:
       POSTACTION(fm);
-
     }
-
   }
-
-  #if !FOOTERS
-    #undef fm
-  #endif                                                         /* FOOTERS */
-
+#if !FOOTERS
+#undef fm
+#endif /* FOOTERS */
 }
 
-void *dlcalloc(size_t n_elements, size_t elem_size) {
-
-  void * mem;
+void* dlcalloc(size_t n_elements, size_t elem_size) {
+  void* mem;
   size_t req = 0;
   if (n_elements != 0) {
-
     req = n_elements * elem_size;
     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
         (req / n_elements != elem_size))
-      req = MAX_SIZE_T;             /* force downstream failure on overflow */
-
+      req = MAX_SIZE_T; /* force downstream failure on overflow */
   }
-
   mem = dlmalloc(req);
   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
     __builtin_memset(mem, 0, req);
   return mem;
-
 }
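
The overflow guard in dlcalloc is worth unpacking: req / n_elements != elem_size detects a wrapped multiplication, while the (n_elements | elem_size) & ~0xffff mask skips the division entirely when both operands are below 2^16, since their product then fits in any 32-bit-or-wider size_t. A worked example for a 64-bit size_t:

    #include <stdio.h>
    #include <stddef.h>

    int main(void) {
      size_t n  = (size_t)1 << 33;
      size_t sz = (size_t)1 << 33;
      size_t req = n * sz;           /* 2^66 wraps to 0 on 64-bit */
      printf("%d\n", req / n != sz); /* prints 1: overflow detected */
      return 0;
    }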
 
-#endif                                                     /* !ONLY_MSPACES */
+#endif /* !ONLY_MSPACES */
 
 /* ------------ Internal support for realloc, memalign, etc -------------- */
 
 /* Try to realloc; only in-place unless can_move true */
 static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
                                    int can_move) {
-
   mchunkptr newp = 0;
-  size_t    oldsize = chunksize(p);
+  size_t oldsize = chunksize(p);
   mchunkptr next = chunk_plus_offset(p, oldsize);
-  if (RTCHECK(ok_address(m, p) && ok_inuse(p) && ok_next(p, next) &&
-              ok_pinuse(next))) {
-
+  if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
+              ok_next(p, next) && ok_pinuse(next))) {
     if (is_mmapped(p)) {
-
       newp = mmap_resize(m, p, nb, can_move);
-
-    } else if (oldsize >= nb) {                       /* already big enough */
-
+    }
+    else if (oldsize >= nb) {             /* already big enough */
       size_t rsize = oldsize - nb;
-      if (rsize >= MIN_CHUNK_SIZE) {                 /* split off remainder */
+      if (rsize >= MIN_CHUNK_SIZE) {      /* split off remainder */
         mchunkptr r = chunk_plus_offset(p, nb);
         set_inuse(m, p, nb);
         set_inuse(m, r, rsize);
         dispose_chunk(m, r, rsize);
-
       }
-
       newp = p;
-
-    } else if (next == m->top) {                         /* extend into top */
-
+    }
+    else if (next == m->top) {  /* extend into top */
       if (oldsize + m->topsize > nb) {
-
-        size_t    newsize = oldsize + m->topsize;
-        size_t    newtopsize = newsize - nb;
+        size_t newsize = oldsize + m->topsize;
+        size_t newtopsize = newsize - nb;
         mchunkptr newtop = chunk_plus_offset(p, nb);
         set_inuse(m, p, nb);
-        newtop->head = newtopsize | PINUSE_BIT;
+        newtop->head = newtopsize |PINUSE_BIT;
         m->top = newtop;
         m->topsize = newtopsize;
         newp = p;
-
       }
-
-    } else if (next == m->dv) {                           /* extend into dv */
-
+    }
+    else if (next == m->dv) { /* extend into dv */
       size_t dvs = m->dvsize;
       if (oldsize + dvs >= nb) {
-
         size_t dsize = oldsize + dvs - nb;
         if (dsize >= MIN_CHUNK_SIZE) {
-
           mchunkptr r = chunk_plus_offset(p, nb);
           mchunkptr n = chunk_plus_offset(r, dsize);
           set_inuse(m, p, nb);
@@ -5500,87 +4903,64 @@ static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
           clear_pinuse(n);
           m->dvsize = dsize;
           m->dv = r;
-
-        } else {                                              /* exhaust dv */
-
+        }
+        else { /* exhaust dv */
           size_t newsize = oldsize + dvs;
           set_inuse(m, p, newsize);
           m->dvsize = 0;
           m->dv = 0;
-
         }
-
         newp = p;
-
       }
-
-    } else if (!cinuse(next)) {              /* extend into next free chunk */
-
+    }
+    else if (!cinuse(next)) { /* extend into next free chunk */
       size_t nextsize = chunksize(next);
       if (oldsize + nextsize >= nb) {
-
         size_t rsize = oldsize + nextsize - nb;
         unlink_chunk(m, next, nextsize);
         if (rsize < MIN_CHUNK_SIZE) {
-
           size_t newsize = oldsize + nextsize;
           set_inuse(m, p, newsize);
-
-        } else {
-
+        }
+        else {
           mchunkptr r = chunk_plus_offset(p, nb);
           set_inuse(m, p, nb);
           set_inuse(m, r, rsize);
           dispose_chunk(m, r, rsize);
-
         }
-
         newp = p;
-
       }
-
     }
-
-  } else {
-
+  }
+  else {
     USAGE_ERROR_ACTION(m, chunk2mem(p));
-
   }
-
   return newp;
-
 }
 
-static void *internal_memalign(mstate m, size_t alignment, size_t bytes) {
-
-  void *mem = 0;
-  if (alignment < MIN_CHUNK_SIZE)  /* must be at least a minimum chunk size */
+static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
+  void* mem = 0;
+  if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
     alignment = MIN_CHUNK_SIZE;
-  if ((alignment & (alignment - SIZE_T_ONE)) != 0) { /* Ensure a power of 2 */
+  if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
     size_t a = MALLOC_ALIGNMENT << 1;
-    while (a < alignment)
-      a <<= 1;
+    while (a < alignment) a <<= 1;
     alignment = a;
-
   }
-
   if (bytes >= MAX_REQUEST - alignment) {
-
-    if (m != 0) {          /* Test isn't needed but avoids compiler warning */
+    if (m != 0)  { /* Test isn't needed but avoids compiler warning */
       MALLOC_FAILURE_ACTION;
-
     }
-
-  } else {
-
+  }
+  else {
     size_t nb = request2size(bytes);
     size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
     mem = internal_malloc(m, req);
     if (mem != 0) {
-
       mchunkptr p = mem2chunk(mem);
-      if (PREACTION(m)) return 0;
-      if ((((size_t)(mem)) & (alignment - 1)) != 0) {         /* misaligned */
+      if (PREACTION(m))
+        return 0;
+      if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */
         /*
           Find an aligned spot inside chunk.  Since we need to give
           back leading space in a chunk of at least MIN_CHUNK_SIZE, if
@@ -5589,59 +4969,47 @@ static void *internal_memalign(mstate m, size_t alignment, size_t bytes) {
           We've allocated enough total room so that this is always
           possible.
         */
-        char *    br = (char *)mem2chunk((size_t)(
-            ((size_t)((char *)mem + alignment - SIZE_T_ONE)) & -alignment));
-        char *    pos = ((size_t)(br - (char *)(p)) >= MIN_CHUNK_SIZE)
-                            ? br
-                            : br + alignment;
+        char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -
+                                                       SIZE_T_ONE)) &
+                                             -alignment));
+        char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
+          br : br+alignment;
         mchunkptr newp = (mchunkptr)pos;
-        size_t    leadsize = pos - (char *)(p);
-        size_t    newsize = chunksize(p) - leadsize;
+        size_t leadsize = pos - (char*)(p);
+        size_t newsize = chunksize(p) - leadsize;
 
-        if (is_mmapped(p)) {      /* For mmapped chunks, just adjust offset */
+        if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
           newp->prev_foot = p->prev_foot + leadsize;
           newp->head = newsize;
-
-        } else {               /* Otherwise, give back leader, use the rest */
-
+        }
+        else { /* Otherwise, give back leader, use the rest */
           set_inuse(m, newp, newsize);
           set_inuse(m, p, leadsize);
           dispose_chunk(m, p, leadsize);
-
         }
-
         p = newp;
-
       }
 
       /* Give back spare room at the end */
       if (!is_mmapped(p)) {
-
         size_t size = chunksize(p);
         if (size > nb + MIN_CHUNK_SIZE) {
-
-          size_t    remainder_size = size - nb;
+          size_t remainder_size = size - nb;
           mchunkptr remainder = chunk_plus_offset(p, nb);
           set_inuse(m, p, nb);
           set_inuse(m, remainder, remainder_size);
           dispose_chunk(m, remainder, remainder_size);
-
         }
-
       }
 
       mem = chunk2mem(p);
-      assert(chunksize(p) >= nb);
+      assert (chunksize(p) >= nb);
       assert(((size_t)mem & (alignment - 1)) == 0);
       check_inuse_chunk(m, p);
       POSTACTION(m);
-
     }
-
   }
-
   return mem;
-
 }
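
In the misaligned branch above, the payload address is rounded up with (mem + alignment - 1) & -alignment; for a power-of-two alignment, -alignment is exactly the mask that clears the low bits. A quick check of that identity:

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
      uintptr_t mem = 0x1234, alignment = 0x100; /* power of two */
      uintptr_t up = (mem + alignment - 1) & -alignment;
      printf("0x%lx\n", (unsigned long)up);      /* prints 0x1300 */
      return 0;
    }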
 
 /*
@@ -5651,50 +5019,50 @@ static void *internal_memalign(mstate m, size_t alignment, size_t bytes) {
     bit 0 set if all elements are same size (using sizes[0])
     bit 1 set if elements should be zeroed
 */
-static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts,
-                     void *chunks[]) {
-
-  size_t    element_size;         /* chunksize of each element, if all same */
-  size_t    contents_size;                        /* total size of elements */
-  size_t    array_size;                    /* request size of pointer array */
-  void *    mem;                                /* malloced aggregate space */
-  mchunkptr p;                                       /* corresponding chunk */
-  size_t    remainder_size;              /* remaining bytes while splitting */
-  void **   marray;                /* either "chunks" or malloced ptr array */
-  mchunkptr array_chunk;                    /* chunk for malloced ptr array */
-  flag_t    was_enabled;                                 /* to disable mmap */
+static void** ialloc(mstate m,
+                     size_t n_elements,
+                     size_t* sizes,
+                     int opts,
+                     void* chunks[]) {
+
+  size_t    element_size;   /* chunksize of each element, if all same */
+  size_t    contents_size;  /* total size of elements */
+  size_t    array_size;     /* request size of pointer array */
+  void*     mem;            /* malloced aggregate space */
+  mchunkptr p;              /* corresponding chunk */
+  size_t    remainder_size; /* remaining bytes while splitting */
+  void**    marray;         /* either "chunks" or malloced ptr array */
+  mchunkptr array_chunk;    /* chunk for malloced ptr array */
+  flag_t    was_enabled;    /* to disable mmap */
   size_t    size;
   size_t    i;
 
   ensure_initialization();
   /* compute array length, if needed */
   if (chunks != 0) {
-
-    if (n_elements == 0) return chunks;                    /* nothing to do */
+    if (n_elements == 0)
+      return chunks; /* nothing to do */
     marray = chunks;
     array_size = 0;
-
-  } else {
-
+  }
+  else {
     /* if empty req, must still return chunk representing empty array */
-    if (n_elements == 0) return (void **)internal_malloc(m, 0);
+    if (n_elements == 0)
+      return (void**)internal_malloc(m, 0);
     marray = 0;
-    array_size = request2size(n_elements * (sizeof(void *)));
-
+    array_size = request2size(n_elements * (sizeof(void*)));
   }
 
   /* compute total element size */
-  if (opts & 0x1) {                                        /* all-same-size */
+  if (opts & 0x1) { /* all-same-size */
     element_size = request2size(*sizes);
     contents_size = n_elements * element_size;
-
-  } else {                                          /* add up all the sizes */
-
+  }
+  else { /* add up all the sizes */
     element_size = 0;
     contents_size = 0;
     for (i = 0; i != n_elements; ++i)
       contents_size += request2size(sizes[i]);
-
   }
 
   size = contents_size + array_size;
@@ -5707,8 +5075,10 @@ static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts,
   was_enabled = use_mmap(m);
   disable_mmap(m);
   mem = internal_malloc(m, size - CHUNK_OVERHEAD);
-  if (was_enabled) enable_mmap(m);
-  if (mem == 0) return 0;
+  if (was_enabled)
+    enable_mmap(m);
+  if (mem == 0)
+    return 0;
 
   if (PREACTION(m)) return 0;
   p = mem2chunk(mem);
@@ -5716,30 +5086,24 @@ static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts,
 
   assert(!is_mmapped(p));
 
-  if (opts & 0x2) {                        /* optionally clear the elements */
-    __builtin_memset((size_t *)mem, 0,
-                     remainder_size - SIZE_T_SIZE - array_size);
-
+  if (opts & 0x2) {       /* optionally clear the elements */
+    __builtin_memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
   }
 
   /* If not provided, allocate the pointer array as final part of chunk */
   if (marray == 0) {
-
-    size_t array_chunk_size;
+    size_t  array_chunk_size;
     array_chunk = chunk_plus_offset(p, contents_size);
     array_chunk_size = remainder_size - contents_size;
-    marray = (void **)(chunk2mem(array_chunk));
+    marray = (void**) (chunk2mem(array_chunk));
     set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
     remainder_size = contents_size;
-
   }
 
   /* split out elements */
-  for (i = 0;; ++i) {
-
+  for (i = 0; ; ++i) {
     marray[i] = chunk2mem(p);
-    if (i != n_elements - 1) {
-
+    if (i != n_elements-1) {
       if (element_size != 0)
         size = element_size;
       else
@@ -5747,42 +5111,31 @@ static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts,
       remainder_size -= size;
       set_size_and_pinuse_of_inuse_chunk(m, p, size);
       p = chunk_plus_offset(p, size);
-
-    } else {           /* the final element absorbs any overallocation slop */
-
+    }
+    else { /* the final element absorbs any overallocation slop */
       set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
       break;
-
     }
-
   }
 
 #if DEBUG
   if (marray != chunks) {
-
     /* final element must have exactly exhausted chunk */
     if (element_size != 0) {
-
       assert(remainder_size == element_size);
-
-    } else {
-
+    }
+    else {
       assert(remainder_size == request2size(sizes[i]));
-
     }
-
     check_inuse_chunk(m, mem2chunk(marray));
-
   }
-
   for (i = 0; i != n_elements; ++i)
     check_inuse_chunk(m, mem2chunk(marray[i]));
 
-#endif                                                             /* DEBUG */
+#endif /* DEBUG */
 
   POSTACTION(m);
   return marray;
-
 }
 
 /* Try to free all pointers in the given array.
@@ -5792,431 +5145,316 @@ static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts,
    chunks before freeing, which will occur often if allocated
    with ialloc or the array is sorted.
 */
-static size_t internal_bulk_free(mstate m, void *array[], size_t nelem) {
-
+static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {
   size_t unfreed = 0;
   if (!PREACTION(m)) {
-
-    void **a;
-    void **fence = &(array[nelem]);
+    void** a;
+    void** fence = &(array[nelem]);
     for (a = array; a != fence; ++a) {
-
-      void *mem = *a;
+      void* mem = *a;
       if (mem != 0) {
-
         mchunkptr p = mem2chunk(mem);
-        size_t    psize = chunksize(p);
+        size_t psize = chunksize(p);
 #if FOOTERS
         if (get_mstate_for(p) != m) {
-
           ++unfreed;
           continue;
-
         }
-
 #endif
         check_inuse_chunk(m, p);
         *a = 0;
         if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {
-
-          void **   b = a + 1;              /* try to merge with next chunk */
+          void ** b = a + 1; /* try to merge with next chunk */
           mchunkptr next = next_chunk(p);
           if (b != fence && *b == chunk2mem(next)) {
-
             size_t newsize = chunksize(next) + psize;
             set_inuse(m, p, newsize);
             *b = chunk2mem(p);
-
-          } else
-
+          }
+          else
             dispose_chunk(m, p, psize);
-
-        } else {
-
+        }
+        else {
           CORRUPTION_ERROR_ACTION(m);
           break;
-
         }
-
       }
-
     }
-
-    if (should_trim(m, m->topsize)) sys_trim(m, 0);
+    if (should_trim(m, m->topsize))
+      sys_trim(m, 0);
     POSTACTION(m);
-
   }
-
   return unfreed;
-
 }
 
 /* Traversal */
 #if MALLOC_INSPECT_ALL
 static void internal_inspect_all(mstate m,
-                                 void (*handler)(void *start, void *end,
-                                                 size_t used_bytes,
-                                                 void * callback_arg),
-                                 void *arg) {
-
+                                 void(*handler)(void *start,
+                                                void *end,
+                                                size_t used_bytes,
+                                                void* callback_arg),
+                                 void* arg) {
   if (is_initialized(m)) {
-
-    mchunkptr   top = m->top;
+    mchunkptr top = m->top;
     msegmentptr s;
     for (s = &m->seg; s != 0; s = s->next) {
-
       mchunkptr q = align_as_chunk(s->base);
       while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
-
         mchunkptr next = next_chunk(q);
-        size_t    sz = chunksize(q);
-        size_t    used;
-        void *    start;
+        size_t sz = chunksize(q);
+        size_t used;
+        void* start;
         if (is_inuse(q)) {
-
-          used = sz - CHUNK_OVERHEAD;                /* must not be mmapped */
+          used = sz - CHUNK_OVERHEAD; /* must not be mmapped */
           start = chunk2mem(q);
-
-        } else {
-
+        }
+        else {
           used = 0;
-          if (is_small(sz)) {             /* offset by possible bookkeeping */
-            start = (void *)((char *)q + sizeof(struct malloc_chunk));
-
-          } else {
-
-            start = (void *)((char *)q + sizeof(struct malloc_tree_chunk));
-
+          if (is_small(sz)) {     /* offset by possible bookkeeping */
+            start = (void*)((char*)q + sizeof(struct malloc_chunk));
+          }
+          else {
+            start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
           }
-
         }
-
-        if (start < (void *)next)       /* skip if all space is bookkeeping */
+        if (start < (void*)next)  /* skip if all space is bookkeeping */
           handler(start, next, used, arg);
-        if (q == top) break;
+        if (q == top)
+          break;
         q = next;
-
       }
-
     }
-
   }
-
 }
-
-#endif                                                /* MALLOC_INSPECT_ALL */
+#endif /* MALLOC_INSPECT_ALL */
 
 /* ------------------ Exported realloc, memalign, etc -------------------- */
 
 #if !ONLY_MSPACES
 
-void *dlrealloc(void *oldmem, size_t bytes) {
-
-  void *mem = 0;
+void* dlrealloc(void* oldmem, size_t bytes) {
+  void* mem = 0;
   if (oldmem == 0) {
-
     mem = dlmalloc(bytes);
-
-  } else if (bytes >= MAX_REQUEST) {
-
+  }
+  else if (bytes >= MAX_REQUEST) {
     MALLOC_FAILURE_ACTION;
-
   }
-
-  #ifdef REALLOC_ZERO_BYTES_FREES
+#ifdef REALLOC_ZERO_BYTES_FREES
   else if (bytes == 0) {
-
     dlfree(oldmem);
-
   }
-
-  #endif                                        /* REALLOC_ZERO_BYTES_FREES */
+#endif /* REALLOC_ZERO_BYTES_FREES */
   else {
-
-    size_t    nb = request2size(bytes);
+    size_t nb = request2size(bytes);
     mchunkptr oldp = mem2chunk(oldmem);
-  #if !FOOTERS
+#if ! FOOTERS
     mstate m = gm;
-  #else                                                          /* FOOTERS */
+#else /* FOOTERS */
     mstate m = get_mstate_for(oldp);
     if (!ok_magic(m)) {
-
       USAGE_ERROR_ACTION(m, oldmem);
       return 0;
-
     }
-
-  #endif                                                         /* FOOTERS */
+#endif /* FOOTERS */
     if (!PREACTION(m)) {
-
       mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
       POSTACTION(m);
       if (newp != 0) {
-
         check_inuse_chunk(m, newp);
         mem = chunk2mem(newp);
-
-      } else {
-
+      }
+      else {
         mem = internal_malloc(m, bytes);
         if (mem != 0) {
-
           size_t oc = chunksize(oldp) - overhead_for(oldp);
-          __builtin_memcpy(mem, oldmem, (oc < bytes) ? oc : bytes);
+          __builtin_memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
           internal_free(m, oldmem);
-
         }
-
       }
-
     }
-
   }
-
   return mem;
-
 }
 
-void *dlrealloc_in_place(void *oldmem, size_t bytes) {
-
-  void *mem = 0;
+void* dlrealloc_in_place(void* oldmem, size_t bytes) {
+  void* mem = 0;
   if (oldmem != 0) {
-
     if (bytes >= MAX_REQUEST) {
-
       MALLOC_FAILURE_ACTION;
-
-    } else {
-
-      size_t    nb = request2size(bytes);
+    }
+    else {
+      size_t nb = request2size(bytes);
       mchunkptr oldp = mem2chunk(oldmem);
-  #if !FOOTERS
+#if ! FOOTERS
       mstate m = gm;
-  #else                                                          /* FOOTERS */
+#else /* FOOTERS */
       mstate m = get_mstate_for(oldp);
       if (!ok_magic(m)) {
-
         USAGE_ERROR_ACTION(m, oldmem);
         return 0;
-
       }
-
-  #endif                                                         /* FOOTERS */
+#endif /* FOOTERS */
       if (!PREACTION(m)) {
-
         mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
         POSTACTION(m);
         if (newp == oldp) {
-
           check_inuse_chunk(m, newp);
           mem = oldmem;
-
         }
-
       }
-
     }
-
   }
-
   return mem;
-
 }
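
Because dlrealloc_in_place never moves the block, a 0 return leaves the original allocation intact and the caller chooses the fallback. A usage sketch (try_grow is a hypothetical helper):

    #include <stddef.h>

    extern void *dlrealloc_in_place(void *, size_t);

    /* Grow buf to want bytes only if no copy is needed; 1 on success. */
    static int try_grow(void *buf, size_t want) {
      return dlrealloc_in_place(buf, want) != 0;
    }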
 
-void *dlmemalign(size_t alignment, size_t bytes) {
-
-  if (alignment <= MALLOC_ALIGNMENT) { return dlmalloc(bytes); }
+void* dlmemalign(size_t alignment, size_t bytes) {
+  if (alignment <= MALLOC_ALIGNMENT) {
+    return dlmalloc(bytes);
+  }
   return internal_memalign(gm, alignment, bytes);
-
 }
 
-int dlposix_memalign(void **pp, size_t alignment, size_t bytes) {
-
-  void *mem = 0;
+int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
+  void* mem = 0;
   if (alignment == MALLOC_ALIGNMENT)
     mem = dlmalloc(bytes);
   else {
-
-    size_t d = alignment / sizeof(void *);
-    size_t r = alignment % sizeof(void *);
-    if (r != 0 || d == 0 || (d & (d - SIZE_T_ONE)) != 0)
+    size_t d = alignment / sizeof(void*);
+    size_t r = alignment % sizeof(void*);
+    if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
       return EINVAL;
     else if (bytes <= MAX_REQUEST - alignment) {
-
-      if (alignment < MIN_CHUNK_SIZE) alignment = MIN_CHUNK_SIZE;
+      if (alignment <  MIN_CHUNK_SIZE)
+        alignment = MIN_CHUNK_SIZE;
       mem = internal_memalign(gm, alignment, bytes);
-
     }
-
   }
-
   if (mem == 0)
     return ENOMEM;
   else {
-
     *pp = mem;
     return 0;
-
   }
-
 }
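
As with posix_memalign, the result comes back through a pointer argument and failures are reported as error codes, not via errno. A usage sketch (aligned_demo is a hypothetical helper):

    #include <stddef.h>
    #include <errno.h>

    extern int dlposix_memalign(void **, size_t, size_t);
    extern void dlfree(void *);

    static int aligned_demo(void) {
      void *p = 0;
      int rc = dlposix_memalign(&p, 64, 1024); /* 64-byte-aligned KiB */
      if (rc == EINVAL || rc == ENOMEM) return -1;
      dlfree(p);
      return 0;
    }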
 
-void *dlvalloc(size_t bytes) {
-
+void* dlvalloc(size_t bytes) {
   size_t pagesz;
   ensure_initialization();
   pagesz = mparams.page_size;
   return dlmemalign(pagesz, bytes);
-
 }
 
-void *dlpvalloc(size_t bytes) {
-
+void* dlpvalloc(size_t bytes) {
   size_t pagesz;
   ensure_initialization();
   pagesz = mparams.page_size;
-  return dlmemalign(pagesz,
-                    (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
-
+  return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
 }
 
-void **dlindependent_calloc(size_t n_elements, size_t elem_size,
-                            void *chunks[]) {
-
-  size_t sz = elem_size;                       /* serves as 1-element array */
+void** dlindependent_calloc(size_t n_elements, size_t elem_size,
+                            void* chunks[]) {
+  size_t sz = elem_size; /* serves as 1-element array */
   return ialloc(gm, n_elements, &sz, 3, chunks);
-
 }
 
-void **dlindependent_comalloc(size_t n_elements, size_t sizes[],
-                              void *chunks[]) {
-
+void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
+                              void* chunks[]) {
   return ialloc(gm, n_elements, sizes, 0, chunks);
-
 }
 
-size_t dlbulk_free(void *array[], size_t nelem) {
-
+size_t dlbulk_free(void* array[], size_t nelem) {
   return internal_bulk_free(gm, array, nelem);
-
 }
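
A usage sketch for the bulk interface (bulk_demo is a hypothetical helper): freed slots in the array are zeroed by internal_bulk_free, and the return value counts pointers that could not be freed:

    #include <stddef.h>

    extern void *dlmalloc(size_t);
    extern size_t dlbulk_free(void *[], size_t);

    static void bulk_demo(void) {
      void *ptrs[4];
      size_t i;
      for (i = 0; i < 4; i++) ptrs[i] = dlmalloc(32);
      (void)dlbulk_free(ptrs, 4); /* array entries are nulled as freed */
    }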
 
-  #if MALLOC_INSPECT_ALL
-void dlmalloc_inspect_all(void (*handler)(void *start, void *end,
-                                          size_t used_bytes,
-                                          void * callback_arg),
-                          void *arg) {
-
+#if MALLOC_INSPECT_ALL
+void dlmalloc_inspect_all(void(*handler)(void *start,
+                                         void *end,
+                                         size_t used_bytes,
+                                         void* callback_arg),
+                          void* arg) {
   ensure_initialization();
   if (!PREACTION(gm)) {
-
     internal_inspect_all(gm, handler, arg);
     POSTACTION(gm);
-
   }
-
 }
-
-  #endif                                              /* MALLOC_INSPECT_ALL */
+#endif /* MALLOC_INSPECT_ALL */
 
 int dlmalloc_trim(size_t pad) {
-
   int result = 0;
   ensure_initialization();
   if (!PREACTION(gm)) {
-
     result = sys_trim(gm, pad);
     POSTACTION(gm);
-
   }
-
   return result;
-
 }
 
 size_t dlmalloc_footprint(void) {
-
   return gm->footprint;
-
 }
 
 size_t dlmalloc_max_footprint(void) {
-
   return gm->max_footprint;
-
 }
 
 size_t dlmalloc_footprint_limit(void) {
-
   size_t maf = gm->footprint_limit;
   return maf == 0 ? MAX_SIZE_T : maf;
-
 }
 
 size_t dlmalloc_set_footprint_limit(size_t bytes) {
-
-  size_t result;                                       /* invert sense of 0 */
-  if (bytes == 0) result = granularity_align(1);        /* Use minimal size */
+  size_t result;  /* invert sense of 0 */
+  if (bytes == 0)
+    result = granularity_align(1); /* Use minimal size */
   if (bytes == MAX_SIZE_T)
-    result = 0;                                                  /* disable */
+    result = 0;                    /* disable */
   else
     result = granularity_align(bytes);
   return gm->footprint_limit = result;
-
 }
 
-  #if !NO_MALLINFO
+#if !NO_MALLINFO
 struct mallinfo dlmallinfo(void) {
-
   return internal_mallinfo(gm);
-
 }
+#endif /* NO_MALLINFO */
 
-  #endif                                                     /* NO_MALLINFO */
-
-  #if !NO_MALLOC_STATS
+#if !NO_MALLOC_STATS
 void dlmalloc_stats() {
-
   internal_malloc_stats(gm);
-
 }
-
-  #endif                                                 /* NO_MALLOC_STATS */
+#endif /* NO_MALLOC_STATS */
 
 int dlmallopt(int param_number, int value) {
-
   return change_mparam(param_number, value);
-
 }
 
-size_t dlmalloc_usable_size(void *mem) {
-
+size_t dlmalloc_usable_size(void* mem) {
   if (mem != 0) {
-
     mchunkptr p = mem2chunk(mem);
-    if (is_inuse(p)) return chunksize(p) - overhead_for(p);
-
+    if (is_inuse(p))
+      return chunksize(p) - overhead_for(p);
   }
-
   return 0;
-
 }
 
-#endif                                                     /* !ONLY_MSPACES */
+#endif /* !ONLY_MSPACES */
 
 /* ----------------------------- user mspaces ---------------------------- */
 
 #if MSPACES
 
-static mstate init_user_mstate(char *tbase, size_t tsize) {
-
-  size_t    msize = pad_request(sizeof(struct malloc_state));
+static mstate init_user_mstate(char* tbase, size_t tsize) {
+  size_t msize = pad_request(sizeof(struct malloc_state));
   mchunkptr mn;
   mchunkptr msp = align_as_chunk(tbase);
-  mstate    m = (mstate)(chunk2mem(msp));
+  mstate m = (mstate)(chunk2mem(msp));
   __builtin_memset(m, 0, msize);
   (void)INITIAL_LOCK(&m->mutex);
-  msp->head = (msize | INUSE_BITS);
+  msp->head = (msize|INUSE_BITS);
   m->seg.base = m->least_addr = tbase;
   m->seg.size = m->footprint = m->max_footprint = tsize;
   m->magic = mparams.magic;
@@ -6227,111 +5465,82 @@ static mstate init_user_mstate(char *tbase, size_t tsize) {
   disable_contiguous(m);
   init_bins(m);
   mn = next_chunk(mem2chunk(m));
-  init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
+  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
   check_top_chunk(m, m->top);
   return m;
-
 }
 
 mspace create_mspace(size_t capacity, int locked) {
-
   mstate m = 0;
   size_t msize;
   ensure_initialization();
   msize = pad_request(sizeof(struct malloc_state));
-  if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
-
-    size_t rs = ((capacity == 0) ? mparams.granularity
-                                 : (capacity + TOP_FOOT_SIZE + msize));
+  if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
+    size_t rs = ((capacity == 0)? mparams.granularity :
+                 (capacity + TOP_FOOT_SIZE + msize));
     size_t tsize = granularity_align(rs);
-    char * tbase = (char *)(CALL_MMAP(tsize));
+    char* tbase = (char*)(CALL_MMAP(tsize));
     if (tbase != CMFAIL) {
-
       m = init_user_mstate(tbase, tsize);
       m->seg.sflags = USE_MMAP_BIT;
       set_lock(m, locked);
-
     }
-
   }
-
   return (mspace)m;
-
 }
 
-mspace create_mspace_with_base(void *base, size_t capacity, int locked) {
-
+mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
   mstate m = 0;
   size_t msize;
   ensure_initialization();
   msize = pad_request(sizeof(struct malloc_state));
   if (capacity > msize + TOP_FOOT_SIZE &&
-      capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
-
-    m = init_user_mstate((char *)base, capacity);
+      capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
+    m = init_user_mstate((char*)base, capacity);
     m->seg.sflags = EXTERN_BIT;
     set_lock(m, locked);
-
   }
-
   return (mspace)m;
-
 }
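
Unlike create_mspace, create_mspace_with_base manages caller-supplied storage: the capacity must leave room for the bookkeeping header (msize + TOP_FOOT_SIZE), and the buffer must outlive the mspace. A sketch, assuming a build with MSPACES enabled:

  static char arena[64 * 1024];        /* caller-owned backing store */
  mspace ms = create_mspace_with_base(arena, sizeof(arena), 0 /* unlocked */);
  /* Allocations are carved out of `arena`. Its segment carries EXTERN_BIT,
     so destroy_mspace() will not unmap it: ownership stays with the caller. */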
 
 int mspace_track_large_chunks(mspace msp, int enable) {
-
-  int    ret = 0;
+  int ret = 0;
   mstate ms = (mstate)msp;
   if (!PREACTION(ms)) {
-
-    if (!use_mmap(ms)) { ret = 1; }
+    if (!use_mmap(ms)) {
+      ret = 1;
+    }
     if (!enable) {
-
       enable_mmap(ms);
-
     } else {
-
       disable_mmap(ms);
-
     }
-
     POSTACTION(ms);
-
   }
-
   return ret;
-
 }
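
The double negation in mspace_track_large_chunks is easy to misread: "tracking" a large chunk means keeping it inside the mspace's own segments rather than handing it to mmap, so enabling tracking disables mmap use, and the return value reports whether tracking was already on. An illustrative call, under the same MSPACES assumption:

  mspace ms = create_mspace(0, 0);
  int was_on = mspace_track_large_chunks(ms, 1); /* large chunks now stay in-segment */
  /* was_on is 1 only if mmap use was already disabled for this mspace */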
 
 size_t destroy_mspace(mspace msp) {
-
   size_t freed = 0;
   mstate ms = (mstate)msp;
   if (ok_magic(ms)) {
-
     msegmentptr sp = &ms->seg;
-    (void)DESTROY_LOCK(&ms->mutex);              /* destroy before unmapped */
+    (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */
     while (sp != 0) {
-
-      char * base = sp->base;
+      char* base = sp->base;
       size_t size = sp->size;
       flag_t flag = sp->sflags;
-      (void)base;             /* placate people compiling -Wunused-variable */
+      (void)base; /* placate people compiling -Wunused-variable */
       sp = sp->next;
       if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
           CALL_MUNMAP(base, size) == 0)
         freed += size;
-
     }
-
-  } else {
-
-    USAGE_ERROR_ACTION(ms, ms);
-
   }
-
+  else {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
   return freed;
-
 }
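
Taken together these routines give each mspace a self-contained lifecycle: destroy_mspace walks the segment list, unmaps every mmapped non-extern segment, and returns the number of bytes it released. A minimal sketch (MSPACES assumed):

  mspace ms = create_mspace(0 /* default capacity */, 1 /* locked */);
  void *p = mspace_malloc(ms, 256);
  if (p != 0) mspace_free(ms, p);       /* optional: destruction frees everything */
  size_t released = destroy_mspace(ms); /* bytes handed back to the OS */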
 
 /*
@@ -6339,31 +5548,25 @@ size_t destroy_mspace(mspace msp) {
   versions. This is not so nice but better than the alternatives.
 */
 
-void *mspace_malloc(mspace msp, size_t bytes) {
-
+void* mspace_malloc(mspace msp, size_t bytes) {
   mstate ms = (mstate)msp;
   if (!ok_magic(ms)) {
-
-    USAGE_ERROR_ACTION(ms, ms);
+    USAGE_ERROR_ACTION(ms,ms);
     return 0;
-
   }
-
   if (!PREACTION(ms)) {
-
-    void * mem;
+    void* mem;
     size_t nb;
     if (bytes <= MAX_SMALL_REQUEST) {
-
       bindex_t idx;
       binmap_t smallbits;
-      nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
+      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
       idx = small_index(nb);
       smallbits = ms->smallmap >> idx;
 
-      if ((smallbits & 0x3U) != 0) {    /* Remainderless fit to a smallbin. */
+      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
         mchunkptr b, p;
-        idx += ~smallbits & 1;                /* Uses next bin if idx empty */
+        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
         b = smallbin_at(ms, idx);
         p = b->fd;
         assert(chunksize(p) == small_index2size(idx));
@@ -6372,17 +5575,15 @@ void *mspace_malloc(mspace msp, size_t bytes) {
         mem = chunk2mem(p);
         check_malloced_chunk(ms, mem, nb);
         goto postaction;
-
       }
 
       else if (nb > ms->dvsize) {
-
-        if (smallbits != 0) {        /* Use chunk in next nonempty smallbin */
+        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
           mchunkptr b, p, r;
-          size_t    rsize;
-          bindex_t  i;
-          binmap_t  leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
-          binmap_t  leastbit = least_bit(leftbits);
+          size_t rsize;
+          bindex_t i;
+          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
+          binmap_t leastbit = least_bit(leftbits);
           compute_bit2idx(leastbit, i);
           b = smallbin_at(ms, i);
           p = b->fd;
@@ -6393,71 +5594,54 @@ void *mspace_malloc(mspace msp, size_t bytes) {
           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
             set_inuse_and_pinuse(ms, p, small_index2size(i));
           else {
-
             set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
             r = chunk_plus_offset(p, nb);
             set_size_and_pinuse_of_free_chunk(r, rsize);
             replace_dv(ms, r, rsize);
-
           }
-
           mem = chunk2mem(p);
           check_malloced_chunk(ms, mem, nb);
           goto postaction;
-
         }
 
         else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
-
           check_malloced_chunk(ms, mem, nb);
           goto postaction;
-
         }
-
       }
-
-    } else if (bytes >= MAX_REQUEST)
-
+    }
+    else if (bytes >= MAX_REQUEST)
       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
     else {
-
       nb = pad_request(bytes);
       if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
-
         check_malloced_chunk(ms, mem, nb);
         goto postaction;
-
       }
-
     }
 
     if (nb <= ms->dvsize) {
-
-      size_t    rsize = ms->dvsize - nb;
+      size_t rsize = ms->dvsize - nb;
       mchunkptr p = ms->dv;
-      if (rsize >= MIN_CHUNK_SIZE) {                            /* split dv */
+      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
         mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
         ms->dvsize = rsize;
         set_size_and_pinuse_of_free_chunk(r, rsize);
         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
-
-      } else {                                                /* exhaust dv */
-
+      }
+      else { /* exhaust dv */
         size_t dvs = ms->dvsize;
         ms->dvsize = 0;
         ms->dv = 0;
         set_inuse_and_pinuse(ms, p, dvs);
-
       }
-
       mem = chunk2mem(p);
       check_malloced_chunk(ms, mem, nb);
       goto postaction;
-
     }
 
-    else if (nb < ms->topsize) {                               /* Split top */
-      size_t    rsize = ms->topsize -= nb;
+    else if (nb < ms->topsize) { /* Split top */
+      size_t rsize = ms->topsize -= nb;
       mchunkptr p = ms->top;
       mchunkptr r = ms->top = chunk_plus_offset(p, nb);
       r->head = rsize | PINUSE_BIT;
@@ -6466,7 +5650,6 @@ void *mspace_malloc(mspace msp, size_t bytes) {
       check_top_chunk(ms, ms->top);
       check_malloced_chunk(ms, mem, nb);
       goto postaction;
-
     }
 
     mem = sys_alloc(ms, nb);
@@ -6474,519 +5657,372 @@ void *mspace_malloc(mspace msp, size_t bytes) {
   postaction:
     POSTACTION(ms);
     return mem;
-
   }
 
   return 0;
-
 }
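
The small-request fast path above leans on a bitmap: bit i of ms->smallmap is set exactly when smallbin i is non-empty, so after shifting by the request's bin index, bits 0 and 1 cover an exact fit and the next size up. A worked sketch of that test with hypothetical values:

  unsigned idx = 5;                     /* small-bin index of the padded request */
  unsigned smallmap = 1u << 6;          /* suppose only bin 6 is non-empty */
  unsigned smallbits = smallmap >> idx; /* == 0x2 */
  if ((smallbits & 0x3u) != 0)          /* a remainderless fit exists */
    idx += ~smallbits & 1u;             /* bit 0 clear: advance to bin 6 */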
 
-void mspace_free(mspace msp, void *mem) {
-
+void mspace_free(mspace msp, void* mem) {
   if (mem != 0) {
-
-    mchunkptr p = mem2chunk(mem);
-  #if FOOTERS
+    mchunkptr p  = mem2chunk(mem);
+#if FOOTERS
     mstate fm = get_mstate_for(p);
-    (void)msp;                         /* placate people compiling -Wunused */
-  #else                                                          /* FOOTERS */
+    (void)msp; /* placate people compiling -Wunused */
+#else /* FOOTERS */
     mstate fm = (mstate)msp;
-  #endif                                                         /* FOOTERS */
+#endif /* FOOTERS */
     if (!ok_magic(fm)) {
-
       USAGE_ERROR_ACTION(fm, p);
       return;
-
     }
-
     if (!PREACTION(fm)) {
-
       check_inuse_chunk(fm, p);
       if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
-
-        size_t    psize = chunksize(p);
+        size_t psize = chunksize(p);
         mchunkptr next = chunk_plus_offset(p, psize);
         if (!pinuse(p)) {
-
           size_t prevsize = p->prev_foot;
           if (is_mmapped(p)) {
-
             psize += prevsize + MMAP_FOOT_PAD;
-            if (CALL_MUNMAP((char *)p - prevsize, psize) == 0)
+            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
               fm->footprint -= psize;
             goto postaction;
-
-          } else {
-
+          }
+          else {
             mchunkptr prev = chunk_minus_offset(p, prevsize);
             psize += prevsize;
             p = prev;
-            if (RTCHECK(ok_address(fm, prev))) {    /* consolidate backward */
+            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
               if (p != fm->dv) {
-
                 unlink_chunk(fm, p, prevsize);
-
-              } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
-
+              }
+              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                 fm->dvsize = psize;
                 set_free_with_pinuse(p, psize, next);
                 goto postaction;
-
               }
-
-            } else
-
+            }
+            else
               goto erroraction;
-
           }
-
         }
 
         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
-
-          if (!cinuse(next)) {                       /* consolidate forward */
+          if (!cinuse(next)) {  /* consolidate forward */
             if (next == fm->top) {
-
               size_t tsize = fm->topsize += psize;
               fm->top = p;
               p->head = tsize | PINUSE_BIT;
               if (p == fm->dv) {
-
                 fm->dv = 0;
                 fm->dvsize = 0;
-
               }
-
-              if (should_trim(fm, tsize)) sys_trim(fm, 0);
+              if (should_trim(fm, tsize))
+                sys_trim(fm, 0);
               goto postaction;
-
-            } else if (next == fm->dv) {
-
+            }
+            else if (next == fm->dv) {
               size_t dsize = fm->dvsize += psize;
               fm->dv = p;
               set_size_and_pinuse_of_free_chunk(p, dsize);
               goto postaction;
-
-            } else {
-
+            }
+            else {
               size_t nsize = chunksize(next);
               psize += nsize;
               unlink_chunk(fm, next, nsize);
               set_size_and_pinuse_of_free_chunk(p, psize);
               if (p == fm->dv) {
-
                 fm->dvsize = psize;
                 goto postaction;
-
               }
-
             }
-
-          } else
-
+          }
+          else
             set_free_with_pinuse(p, psize, next);
 
           if (is_small(psize)) {
-
             insert_small_chunk(fm, p, psize);
             check_free_chunk(fm, p);
-
-          } else {
-
+          }
+          else {
             tchunkptr tp = (tchunkptr)p;
             insert_large_chunk(fm, tp, psize);
             check_free_chunk(fm, p);
-            if (--fm->release_checks == 0) release_unused_segments(fm);
-
+            if (--fm->release_checks == 0)
+              release_unused_segments(fm);
           }
-
           goto postaction;
-
         }
-
       }
-
     erroraction:
       USAGE_ERROR_ACTION(fm, p);
     postaction:
       POSTACTION(fm);
-
     }
-
   }
-
 }
 
-void *mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
-
-  void * mem;
+void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
+  void* mem;
   size_t req = 0;
   mstate ms = (mstate)msp;
   if (!ok_magic(ms)) {
-
-    USAGE_ERROR_ACTION(ms, ms);
+    USAGE_ERROR_ACTION(ms,ms);
     return 0;
-
   }
-
   if (n_elements != 0) {
-
     req = n_elements * elem_size;
     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
         (req / n_elements != elem_size))
-      req = MAX_SIZE_T;             /* force downstream failure on overflow */
-
+      req = MAX_SIZE_T; /* force downstream failure on overflow */
   }
-
   mem = internal_malloc(ms, req);
   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
     __builtin_memset(mem, 0, req);
   return mem;
-
 }
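
The overflow guard in mspace_calloc is a two-stage check: when both factors fit in 16 bits their product cannot overflow a 32-bit-or-wider size_t, so the costlier division is consulted only when some high bit is set. The same guard in isolation (illustrative; the n != 0 test stands in for the caller's surrounding check):

  #include <stddef.h>

  static size_t checked_mul(size_t n, size_t size) {
    size_t req = n * size;
    if (((n | size) & ~(size_t)0xffff) && /* either factor >= 2^16? */
        n != 0 && req / n != size)        /* then confirm via division */
      req = (size_t)-1;                   /* force downstream allocation failure */
    return req;
  }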
 
-void *mspace_realloc(mspace msp, void *oldmem, size_t bytes) {
-
-  void *mem = 0;
+void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
+  void* mem = 0;
   if (oldmem == 0) {
-
     mem = mspace_malloc(msp, bytes);
-
-  } else if (bytes >= MAX_REQUEST) {
-
+  }
+  else if (bytes >= MAX_REQUEST) {
     MALLOC_FAILURE_ACTION;
-
   }
-
-  #ifdef REALLOC_ZERO_BYTES_FREES
+#ifdef REALLOC_ZERO_BYTES_FREES
   else if (bytes == 0) {
-
     mspace_free(msp, oldmem);
-
   }
-
-  #endif                                        /* REALLOC_ZERO_BYTES_FREES */
+#endif /* REALLOC_ZERO_BYTES_FREES */
   else {
-
-    size_t    nb = request2size(bytes);
+    size_t nb = request2size(bytes);
     mchunkptr oldp = mem2chunk(oldmem);
-  #if !FOOTERS
+#if ! FOOTERS
     mstate m = (mstate)msp;
-  #else                                                          /* FOOTERS */
+#else /* FOOTERS */
     mstate m = get_mstate_for(oldp);
     if (!ok_magic(m)) {
-
       USAGE_ERROR_ACTION(m, oldmem);
       return 0;
-
     }
-
-  #endif                                                         /* FOOTERS */
+#endif /* FOOTERS */
     if (!PREACTION(m)) {
-
       mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
       POSTACTION(m);
       if (newp != 0) {
-
         check_inuse_chunk(m, newp);
         mem = chunk2mem(newp);
-
-      } else {
-
+      }
+      else {
         mem = mspace_malloc(m, bytes);
         if (mem != 0) {
-
           size_t oc = chunksize(oldp) - overhead_for(oldp);
-          __builtin_memcpy(mem, oldmem, (oc < bytes) ? oc : bytes);
+          __builtin_memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
           mspace_free(m, oldmem);
-
         }
-
       }
-
     }
-
   }
-
   return mem;
-
 }
 
-void *mspace_realloc_in_place(mspace msp, void *oldmem, size_t bytes) {
-
-  void *mem = 0;
+void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
+  void* mem = 0;
   if (oldmem != 0) {
-
     if (bytes >= MAX_REQUEST) {
-
       MALLOC_FAILURE_ACTION;
-
-    } else {
-
-      size_t    nb = request2size(bytes);
+    }
+    else {
+      size_t nb = request2size(bytes);
       mchunkptr oldp = mem2chunk(oldmem);
-  #if !FOOTERS
+#if ! FOOTERS
       mstate m = (mstate)msp;
-  #else                                                          /* FOOTERS */
+#else /* FOOTERS */
       mstate m = get_mstate_for(oldp);
-      (void)msp;                       /* placate people compiling -Wunused */
+      (void)msp; /* placate people compiling -Wunused */
       if (!ok_magic(m)) {
-
         USAGE_ERROR_ACTION(m, oldmem);
         return 0;
-
       }
-
-  #endif                                                         /* FOOTERS */
+#endif /* FOOTERS */
       if (!PREACTION(m)) {
-
         mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
         POSTACTION(m);
         if (newp == oldp) {
-
           check_inuse_chunk(m, newp);
           mem = oldmem;
-
         }
-
       }
-
     }
-
   }
-
   return mem;
-
 }
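
mspace_realloc_in_place never moves a block: it returns oldmem only when try_realloc_chunk could resize the chunk where it sits, and 0 otherwise, leaving the original allocation untouched. The resulting calling pattern, sketched:

  void *q = mspace_realloc_in_place(ms, p, new_len);
  if (q == 0) {
    /* could not resize in place; p is still valid at its old size */
  } else {
    /* q == p, resized in place */
  }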
 
-void *mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
-
+void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
   mstate ms = (mstate)msp;
   if (!ok_magic(ms)) {
-
-    USAGE_ERROR_ACTION(ms, ms);
+    USAGE_ERROR_ACTION(ms,ms);
     return 0;
-
   }
-
-  if (alignment <= MALLOC_ALIGNMENT) return mspace_malloc(msp, bytes);
+  if (alignment <= MALLOC_ALIGNMENT)
+    return mspace_malloc(msp, bytes);
   return internal_memalign(ms, alignment, bytes);
-
 }
 
-void **mspace_independent_calloc(mspace msp, size_t n_elements,
-                                 size_t elem_size, void *chunks[]) {
-
-  size_t sz = elem_size;                       /* serves as 1-element array */
+void** mspace_independent_calloc(mspace msp, size_t n_elements,
+                                 size_t elem_size, void* chunks[]) {
+  size_t sz = elem_size; /* serves as 1-element array */
   mstate ms = (mstate)msp;
   if (!ok_magic(ms)) {
-
-    USAGE_ERROR_ACTION(ms, ms);
+    USAGE_ERROR_ACTION(ms,ms);
     return 0;
-
   }
-
   return ialloc(ms, n_elements, &sz, 3, chunks);
-
 }
 
-void **mspace_independent_comalloc(mspace msp, size_t n_elements,
-                                   size_t sizes[], void *chunks[]) {
-
+void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+                                   size_t sizes[], void* chunks[]) {
   mstate ms = (mstate)msp;
   if (!ok_magic(ms)) {
-
-    USAGE_ERROR_ACTION(ms, ms);
+    USAGE_ERROR_ACTION(ms,ms);
     return 0;
-
   }
-
   return ialloc(ms, n_elements, sizes, 0, chunks);
-
 }
 
-size_t mspace_bulk_free(mspace msp, void *array[], size_t nelem) {
-
+size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
   return internal_bulk_free((mstate)msp, array, nelem);
-
 }
 
-  #if MALLOC_INSPECT_ALL
+#if MALLOC_INSPECT_ALL
 void mspace_inspect_all(mspace msp,
-                        void (*handler)(void *start, void *end,
-                                        size_t used_bytes, void *callback_arg),
-                        void *arg) {
-
+                        void(*handler)(void *start,
+                                       void *end,
+                                       size_t used_bytes,
+                                       void* callback_arg),
+                        void* arg) {
   mstate ms = (mstate)msp;
   if (ok_magic(ms)) {
-
     if (!PREACTION(ms)) {
-
       internal_inspect_all(ms, handler, arg);
       POSTACTION(ms);
-
     }
-
-  } else {
-
-    USAGE_ERROR_ACTION(ms, ms);
-
   }
-
+  else {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
 }
-
-  #endif                                              /* MALLOC_INSPECT_ALL */
+#endif /* MALLOC_INSPECT_ALL */
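
mspace_inspect_all invokes the handler once per region while holding the mspace lock; in dlmalloc's traversal protocol, used_bytes is non-zero for in-use chunks and 0 for free space. An illustrative walker, assuming a build with MALLOC_INSPECT_ALL defined:

  static void tally_used(void *start, void *end, size_t used_bytes, void *arg) {
    (void)start; (void)end;             /* free regions arrive with used_bytes == 0 */
    if (used_bytes != 0)
      *(size_t *)arg += used_bytes;     /* sum live payload bytes */
  }

  /* size_t total = 0; mspace_inspect_all(ms, tally_used, &total); */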
 
 int mspace_trim(mspace msp, size_t pad) {
-
-  int    result = 0;
+  int result = 0;
   mstate ms = (mstate)msp;
   if (ok_magic(ms)) {
-
     if (!PREACTION(ms)) {
-
       result = sys_trim(ms, pad);
       POSTACTION(ms);
-
     }
-
-  } else {
-
-    USAGE_ERROR_ACTION(ms, ms);
-
   }
-
+  else {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
   return result;
-
 }
 
-  #if !NO_MALLOC_STATS
+#if !NO_MALLOC_STATS
 void mspace_malloc_stats(mspace msp) {
-
   mstate ms = (mstate)msp;
   if (ok_magic(ms)) {
-
     internal_malloc_stats(ms);
-
-  } else {
-
-    USAGE_ERROR_ACTION(ms, ms);
-
   }
-
+  else {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
 }
-
-  #endif                                                 /* NO_MALLOC_STATS */
+#endif /* NO_MALLOC_STATS */
 
 size_t mspace_footprint(mspace msp) {
-
   size_t result = 0;
   mstate ms = (mstate)msp;
   if (ok_magic(ms)) {
-
     result = ms->footprint;
-
-  } else {
-
-    USAGE_ERROR_ACTION(ms, ms);
-
   }
-
+  else {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
   return result;
-
 }
 
 size_t mspace_max_footprint(mspace msp) {
-
   size_t result = 0;
   mstate ms = (mstate)msp;
   if (ok_magic(ms)) {
-
     result = ms->max_footprint;
-
-  } else {
-
-    USAGE_ERROR_ACTION(ms, ms);
-
   }
-
+  else {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
   return result;
-
 }
 
 size_t mspace_footprint_limit(mspace msp) {
-
   size_t result = 0;
   mstate ms = (mstate)msp;
   if (ok_magic(ms)) {
-
     size_t maf = ms->footprint_limit;
     result = (maf == 0) ? MAX_SIZE_T : maf;
-
-  } else {
-
-    USAGE_ERROR_ACTION(ms, ms);
-
   }
-
+  else {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
   return result;
-
 }
 
 size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
-
   size_t result = 0;
   mstate ms = (mstate)msp;
   if (ok_magic(ms)) {
-
-    if (bytes == 0) result = granularity_align(1);      /* Use minimal size */
+    if (bytes == 0)
+      result = granularity_align(1); /* Use minimal size */
-    if (bytes == MAX_SIZE_T)
+    else if (bytes == MAX_SIZE_T)  /* chained: preserve the bytes == 0 result */
-      result = 0;                                                /* disable */
+      result = 0;                    /* disable */
     else
       result = granularity_align(bytes);
     ms->footprint_limit = result;
-
-  } else {
-
-    USAGE_ERROR_ACTION(ms, ms);
-
   }
-
+  else {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
   return result;
-
 }
 
-  #if !NO_MALLINFO
+#if !NO_MALLINFO
 struct mallinfo mspace_mallinfo(mspace msp) {
-
   mstate ms = (mstate)msp;
-  if (!ok_magic(ms)) { USAGE_ERROR_ACTION(ms, ms); }
+  if (!ok_magic(ms)) {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
   return internal_mallinfo(ms);
-
 }
+#endif /* NO_MALLINFO */
 
-  #endif                                                     /* NO_MALLINFO */
-
-size_t mspace_usable_size(const void *mem) {
-
+size_t mspace_usable_size(const void* mem) {
   if (mem != 0) {
-
     mchunkptr p = mem2chunk(mem);
-    if (is_inuse(p)) return chunksize(p) - overhead_for(p);
-
+    if (is_inuse(p))
+      return chunksize(p) - overhead_for(p);
   }
-
   return 0;
-
 }
 
 int mspace_mallopt(int param_number, int value) {
-
   return change_mparam(param_number, value);
-
 }
 
-#endif                                                           /* MSPACES */
+#endif /* MSPACES */
+
 
 /* -------------------- Alternative MORECORE functions ------------------- */
 
@@ -7031,48 +6067,35 @@ int mspace_mallopt(int param_number, int value) {
 
   void *osMoreCore(int size)
   {
-
     void *ptr = 0;
     static void *sbrk_top = 0;
 
     if (size > 0)
     {
-
       if (size < MINIMUM_MORECORE_SIZE)
          size = MINIMUM_MORECORE_SIZE;
       if (CurrentExecutionLevel() == kTaskLevel)
          ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
       if (ptr == 0)
       {
-
         return (void *) MFAIL;
-
       }
-
       // save ptrs so they can be freed during cleanup
       our_os_pools[next_os_pool] = ptr;
       next_os_pool++;
       ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
       sbrk_top = (char *) ptr + size;
       return ptr;
-
     }
-
     else if (size < 0)
     {
-
       // we don't currently support shrink behavior
       return (void *) MFAIL;
-
     }
-
     else
     {
-
       return sbrk_top;
-
     }
-
   }
 
   // cleanup any allocated memory pools
@@ -7080,22 +6103,19 @@ int mspace_mallopt(int param_number, int value) {
 
   void osCleanupMem(void)
   {
-
     void **ptr;
 
     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
       if (*ptr)
       {
-
          PoolDeallocate(*ptr);
          *ptr = 0;
-
       }
-
   }
 
 */
 
+
 /* -----------------------------------------------------------------------
 History:
     v2.8.6 Wed Aug 29 06:57:58 2012  Doug Lea
@@ -7315,3 +6335,4 @@ History:
 
 */
 
+#endif // __GLIBC__