path: root/libdislocator/libdislocator.so.c
author     Andrea Fioraldi <andreafioraldi@gmail.com>  2019-11-11 14:36:06 +0100
committer  GitHub <noreply@github.com>                 2019-11-11 14:36:06 +0100
commit     659db7e421b47da4b04110a141d9c20307f74ecc (patch)
tree       18f9c38cc5270adcf445a62b974712cead4a01c4 /libdislocator/libdislocator.so.c
parent     cd84339bccc104a51a5da614a9f82cc4ae615cce (diff)
parent     01d55372441960c435af8f3bd6b61d1302042728 (diff)
download   afl++-659db7e421b47da4b04110a141d9c20307f74ecc.tar.gz

Merge branch 'master' into radamsa

Diffstat (limited to 'libdislocator/libdislocator.so.c')
 -rw-r--r--  libdislocator/libdislocator.so.c  110
 1 file changed, 103 insertions, 7 deletions
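In substance, this merge brings master's libdislocator improvements into the radamsa branch: optional huge-page-backed allocations (MAP_HUGETLB on Linux, MAP_ALIGNED_SUPER on FreeBSD, VM_FLAGS_SUPERPAGE_SIZE_2MB on macOS) with a plain-mmap() fallback, a __thread workaround for OpenBSD/macOS, MAP_FAILED checks, and aligned-allocation wrappers (posix_memalign(), memalign(), aligned_alloc()). As a reading aid only (not part of the commit), here is a minimal, self-contained sketch of the hugepage-then-fallback mmap() pattern the diff implements below; the helper name map_with_optional_hugepage() is hypothetical.

#include <stddef.h>
#include <sys/mman.h>

#ifdef __APPLE__
#include <mach/vm_statistics.h>          /* VM_FLAGS_SUPERPAGE_SIZE_2MB */
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

/* Hypothetical helper: try a superpage-backed anonymous mapping first,
   then retry with a regular mapping if the kernel refuses. */
static void *map_with_optional_hugepage(size_t tlen, int want_superpage) {

  int   flags = MAP_PRIVATE | MAP_ANONYMOUS;
  int   fd = -1;
  void *ret;

#if defined(__linux__)
  if (want_superpage) flags |= MAP_HUGETLB;
#elif defined(__FreeBSD__)
  if (want_superpage) flags |= MAP_ALIGNED_SUPER;
#elif defined(__APPLE__)
  if (want_superpage) fd = VM_FLAGS_SUPERPAGE_SIZE_2MB;
#endif

  ret = mmap(NULL, tlen, PROT_READ | PROT_WRITE, flags, fd, 0);

  /* Huge pages may be unavailable (e.g. no reserved pool), so retry
     with the plain flags before giving up. */
  if (ret == MAP_FAILED && want_superpage) {

#if defined(__linux__)
    flags &= ~MAP_HUGETLB;
#elif defined(__FreeBSD__)
    flags &= ~MAP_ALIGNED_SUPER;
#elif defined(__APPLE__)
    fd = -1;
#endif
    ret = mmap(NULL, tlen, PROT_READ | PROT_WRITE, flags, fd, 0);

  }

  return ret;

}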
diff --git a/libdislocator/libdislocator.so.c b/libdislocator/libdislocator.so.c
index 8834a1fc..0268cc52 100644
--- a/libdislocator/libdislocator.so.c
+++ b/libdislocator/libdislocator.so.c
@@ -3,7 +3,7 @@
    american fuzzy lop - dislocator, an abusive allocator
    -----------------------------------------------------
 
-   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+   Written by Michal Zalewski
 
    Copyright 2016 Google Inc. All rights reserved.
 
@@ -14,7 +14,7 @@
      http://www.apache.org/licenses/LICENSE-2.0
 
    This is a companion library that can be used as a drop-in replacement
-   for the libc allocator in the fuzzed binaries. See README.dislocator for
+   for the libc allocator in the fuzzed binaries. See README.dislocator.md for
    more info.
 
  */
@@ -23,8 +23,13 @@
 #include <stdlib.h>
 #include <string.h>
 #include <limits.h>
+#include <errno.h>
 #include <sys/mman.h>
 
+#ifdef __APPLE__
+#include <mach/vm_statistics.h>
+#endif
+
 #include "config.h"
 #include "types.h"
 
@@ -36,6 +41,8 @@
 #define MAP_ANONYMOUS MAP_ANON
 #endif                                                    /* !MAP_ANONYMOUS */
 
+#define SUPER_PAGE_SIZE (1 << 21)         /* 2 MB superpage */
+
 /* Error / message handling: */
 
 #define DEBUGF(_x...)                 \
@@ -88,6 +95,10 @@ static u8  alloc_verbose,               /* Additional debug messages        */
     hard_fail,                          /* abort() when max_mem exceeded?   */
     no_calloc_over;                     /* abort() on calloc() overflows?   */
 
+#if defined __OpenBSD__ || defined __APPLE__
+#define __thread
+#warning no thread support available
+#endif
 static __thread size_t total_mem;       /* Currently allocated mem          */
 
 static __thread u32 call_depth;         /* To avoid recursion via fprintf() */
@@ -100,6 +111,8 @@ static __thread u32 call_depth;         /* To avoid recursion via fprintf() */
 static void* __dislocator_alloc(size_t len) {
 
   void* ret;
+  size_t tlen;
+  int flags, fd, sp;
 
   if (total_mem + len > max_mem || total_mem + len < total_mem) {
 
@@ -111,13 +124,44 @@ static void* __dislocator_alloc(size_t len) {
 
   }
 
+  tlen = (1 + PG_COUNT(len + 8)) * PAGE_SIZE;
+  flags = MAP_PRIVATE | MAP_ANONYMOUS;
+  fd = -1;
+#if defined(USEHUGEPAGE)
+  sp = (len >= SUPER_PAGE_SIZE && !(len % SUPER_PAGE_SIZE));
+
+#if defined(__APPLE__)
+  if (sp) fd = VM_FLAGS_SUPERPAGE_SIZE_2MB;
+#elif defined(__linux__)
+  if (sp) flags |= MAP_HUGETLB;
+#elif defined(__FreeBSD__)
+  if (sp) flags |= MAP_ALIGNED_SUPER;
+#endif
+#else
+  (void)sp;
+#endif
+
   /* We will also store buffer length and a canary below the actual buffer, so
      let's add 8 bytes for that. */
 
-  ret = mmap(NULL, (1 + PG_COUNT(len + 8)) * PAGE_SIZE, PROT_READ | PROT_WRITE,
-             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  ret = mmap(NULL, tlen, PROT_READ | PROT_WRITE,
+             flags, fd, 0);
+#if defined(USEHUGEPAGE)
+  /* We try one more time with a regular call */
+  if (ret == MAP_FAILED) {
+
+#if defined(__APPLE__)
+    fd = -1;
+#elif defined(__linux__)
+    flags &= ~MAP_HUGETLB;
+#elif defined(__FreeBSD__)
+    flags &= ~MAP_ALIGNED_SUPER;
+#endif
+
+    ret = mmap(NULL, tlen, PROT_READ | PROT_WRITE, flags, fd, 0);
+
+  }
+#endif
 
-  if (ret == (void*)-1) {
+  if (ret == MAP_FAILED) {
 
     if (hard_fail) FATAL("mmap() failed on alloc (OOM?)");
 
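As a side note (not part of this commit), the "8 bytes" added to len in the hunk above hold the allocation length and a canary word stored immediately below the pointer handed back to the application, with the buffer right-aligned against the guard page. A rough, self-contained sketch of that layout follows; ALLOC_CANARY and place_user_buffer() are hypothetical stand-ins, not the library's own names.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define ALLOC_CANARY 0xAACCAACC          /* hypothetical stand-in value */

/* Right-align a user buffer of 'len' bytes against the end of an
   'rw_len'-byte read/write mapping (the next page being PROT_NONE), and
   stash the length and a canary in the 8 bytes just below it. */
static void *place_user_buffer(void *map, size_t rw_len, size_t len) {

  uint8_t *p = (uint8_t *)map + rw_len - len;

  uint32_t meta[2] = { (uint32_t)len, ALLOC_CANARY };
  memcpy(p - sizeof(meta), meta, sizeof(meta));   /* 8 bytes of metadata */

  return p;

}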
@@ -184,6 +228,10 @@ void* calloc(size_t elem_len, size_t elem_cnt) {
 
 }
 
+/* TODO: add a wrapper for posix_memalign; otherwise, apps that use it
+   will fail when freeing the memory. */
+
 /* The wrapper for malloc(). Roughly the same, also clobbers the returned
    memory (unlike calloc(), malloc() is not guaranteed to return zeroed
    memory). */
@@ -256,13 +304,61 @@ void* realloc(void* ptr, size_t len) {
 
 }
 
+/* For posix_memalign() we mainly validate the alignment argument;
+   if the requested size fits within the alignment we fall through
+   to a normal malloc() request. */
+
+int posix_memalign(void** ptr, size_t align, size_t len) {
+
+   if (ptr == NULL) return EINVAL;
+
+   /* The alignment must be a power of two and a multiple of sizeof(void *). */
+   if (!align || (align & (align - 1)) || (align % sizeof(void *)))
+     return EINVAL;
+
+   if (len == 0) {
+     *ptr = NULL;
+     return 0;
+   }
+
+   if (align >= 4 * sizeof(size_t)) len += align - 1;
+
+   *ptr = malloc(len);
+   if (*ptr == NULL) return ENOMEM;
+
+   DEBUGF("posix_memalign(%p, %zu, %zu)", *ptr, align, len);
+
+   return 0;
+
+}
+
+/* The same request, in the non-POSIX fashion. */
+
+void *memalign(size_t align, size_t len) {
+   void* ret = NULL;
+
+   if (posix_memalign(&ret, align, len)) {
+     DEBUGF("memalign(%zu, %zu) failed", align, len);
+   }
+
+   return ret;
+}
+
+/* The C11 counterpart of memalign(), stricter about the size/alignment relation. */
+
+void *aligned_alloc(size_t align, size_t len) {
+   void *ret = NULL;
+
+   if (!align || (len % align)) return NULL;
+
+   if (posix_memalign(&ret, align, len)) {
+     DEBUGF("aligned_alloc(%zu, %zu) failed", align, len);
+   }
+
+   return ret;
+}
+
 __attribute__((constructor)) void __dislocator_init(void) {
 
-  u8* tmp = getenv("AFL_LD_LIMIT_MB");
+  u8* tmp = (u8 *)getenv("AFL_LD_LIMIT_MB");
 
   if (tmp) {
 
-    max_mem = atoi(tmp) * 1024 * 1024;
+    max_mem = atoi((char *)tmp) * 1024 * 1024;
     if (!max_mem) FATAL("Bad value for AFL_LD_LIMIT_MB");
 
   }
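For completeness, a small usage sketch (not part of the commit): once the library is preloaded, for example via LD_PRELOAD or AFL_PRELOAD, the new wrappers behave like their libc counterparts, so a quick check can look like the following. memalign() is left out only because it lacks a portable header declaration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {

  void *p = NULL;

  /* posix_memalign() returns 0 on success and EINVAL for a bad alignment. */
  int rc = posix_memalign(&p, sizeof(void *), 100);
  if (rc) { printf("posix_memalign: %s\n", strerror(rc)); return 1; }
  memset(p, 0xAA, 100);
  free(p);

  /* The aligned_alloc() wrapper above returns NULL unless len is a
     multiple of align. */
  void *q = aligned_alloc(16, 48);
  if (q) { memset(q, 0x55, 48); free(q); }

  return 0;

}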