about summary refs log tree commit diff
path: root/libdislocator/libdislocator.so.c
diff options
context:
space:
mode:
Diffstat (limited to 'libdislocator/libdislocator.so.c')
-rw-r--r-- libdislocator/libdislocator.so.c | 47
1 file changed, 39 insertions(+), 8 deletions(-)
diff --git a/libdislocator/libdislocator.so.c b/libdislocator/libdislocator.so.c
index 221a629b..480d2fc6 100644
--- a/libdislocator/libdislocator.so.c
+++ b/libdislocator/libdislocator.so.c
@@ -62,6 +62,8 @@
 #include "config.h"
 #include "types.h"
 
+#define ALLOC_ALIGN_SIZE (sizeof(void*))
+
 #ifndef PAGE_SIZE
 #define PAGE_SIZE 4096
 #endif                                                        /* !PAGE_SIZE */
@@ -114,6 +116,8 @@
 #define ALLOC_CANARY 0xAACCAACC
 #define ALLOC_CLOBBER 0xCC
 
+#define TAIL_ALLOC_CANARY 0xAC
+
 #define PTR_C(_p) (((u32*)(_p))[-1])
 #define PTR_L(_p) (((u32*)(_p))[-2])
 
@@ -122,7 +126,8 @@
 static u32 max_mem = MAX_ALLOC;         /* Max heap usage to permit         */
 static u8  alloc_verbose,               /* Additional debug messages        */
     hard_fail,                          /* abort() when max_mem exceeded?   */
-    no_calloc_over;                     /* abort() on calloc() overflows?   */
+    no_calloc_over,                     /* abort() on calloc() overflows?   */
+    align_allocations;                  /* Force alignment to sizeof(void*) */
 
 #if defined __OpenBSD__ || defined __APPLE__
 #define __thread
@@ -140,7 +145,7 @@ static u32          alloc_canary;
 
 static void* __dislocator_alloc(size_t len) {
 
-  void*  ret;
+  u8*    ret;
   size_t tlen;
   int    flags, fd, sp;
 
@@ -154,11 +159,17 @@ static void* __dislocator_alloc(size_t len) {
 
   }
 
-  tlen = (1 + PG_COUNT(len + 8)) * PAGE_SIZE;
+  size_t rlen;
+  if (align_allocations && (len & (ALLOC_ALIGN_SIZE - 1)))
+    rlen = (len & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE;
+  else
+    rlen = len;
+
+  tlen = (1 + PG_COUNT(rlen + 8)) * PAGE_SIZE;
   flags = MAP_PRIVATE | MAP_ANONYMOUS;
   fd = -1;
 #if defined(USEHUGEPAGE)
-  sp = (len >= SUPER_PAGE_SIZE && !(len % SUPER_PAGE_SIZE));
+  sp = (rlen >= SUPER_PAGE_SIZE && !(rlen % SUPER_PAGE_SIZE));
 
 #if defined(__APPLE__)
   if (sp) fd = VM_FLAGS_SUPERPAGE_SIZE_2MB;
@@ -174,7 +185,7 @@ static void* __dislocator_alloc(size_t len) {
   /* We will also store buffer length and a canary below the actual buffer, so
      let's add 8 bytes for that. */
 
-  ret = mmap(NULL, tlen, PROT_READ | PROT_WRITE, flags, fd, 0);
+  ret = (u8*)mmap(NULL, tlen, PROT_READ | PROT_WRITE, flags, fd, 0);
 #if defined(USEHUGEPAGE)
   /* We try one more time with regular call */
   if (ret == MAP_FAILED) {
@@ -186,7 +197,7 @@ static void* __dislocator_alloc(size_t len) {
 #elif defined(__FreeBSD__)
     flags &= -MAP_ALIGNED_SUPER;
 #endif
-    ret = mmap(NULL, tlen, PROT_READ | PROT_WRITE, flags, fd, 0);
+    ret = (u8*)mmap(NULL, tlen, PROT_READ | PROT_WRITE, flags, fd, 0);
 
   }
 
@@ -204,13 +215,13 @@ static void* __dislocator_alloc(size_t len) {
 
   /* Set PROT_NONE on the last page. */
 
-  if (mprotect(ret + PG_COUNT(len + 8) * PAGE_SIZE, PAGE_SIZE, PROT_NONE))
+  if (mprotect(ret + PG_COUNT(rlen + 8) * PAGE_SIZE, PAGE_SIZE, PROT_NONE))
     FATAL("mprotect() failed when allocating memory");
 
   /* Offset the return pointer so that it's right-aligned to the page
      boundary. */
 
-  ret += PAGE_SIZE * PG_COUNT(len + 8) - len - 8;
+  ret += PAGE_SIZE * PG_COUNT(rlen + 8) - rlen - 8;
 
   /* Store allocation metadata. */
 
@@ -221,6 +232,14 @@ static void* __dislocator_alloc(size_t len) {
 
   total_mem += len;
 
+  if (rlen != len) {
+
+    size_t i;
+    for (i = len; i < rlen; ++i)
+      ret[i] = TAIL_ALLOC_CANARY;
+
+  }
+
   return ret;
 
 }
@@ -299,6 +318,16 @@ void free(void* ptr) {
 
   total_mem -= len;
 
+  if (align_allocations && (len & (ALLOC_ALIGN_SIZE - 1))) {
+
+    u8*    ptr_ = ptr;
+    size_t rlen = (len & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE;
+    for (; len < rlen; ++len)
+      if (ptr_[len] != TAIL_ALLOC_CANARY)
+        FATAL("bad tail allocator canary on free()");
+
+  }
+
   /* Protect everything. Note that the extra page at the end is already
      set as PROT_NONE, so we don't need to touch that. */
 
@@ -323,6 +352,7 @@ void* realloc(void* ptr, size_t len) {
   if (ret && ptr) {
 
     if (PTR_C(ptr) != alloc_canary) FATAL("bad allocator canary on realloc()");
+    // Here the tail canary check is delayed to free()
 
     memcpy(ret, ptr, MIN(len, PTR_L(ptr)));
     free(ptr);
@@ -441,6 +471,7 @@ __attribute__((constructor)) void __dislocator_init(void) {
   alloc_verbose = !!getenv("AFL_LD_VERBOSE");
   hard_fail = !!getenv("AFL_LD_HARD_FAIL");
   no_calloc_over = !!getenv("AFL_LD_NO_CALLOC_OVER");
+  align_allocations = !!getenv("AFL_ALIGNED_ALLOC");
 
 }