From a8e568f248628c39e0bc34173470988034723627 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 9 Dec 2020 11:30:04 +0100 Subject: move libdislocator, libtokencap and qbdi_mode to utils/ --- utils/libdislocator/libdislocator.so.c | 544 +++++++++++++++++++++++++++++++++ 1 file changed, 544 insertions(+) create mode 100644 utils/libdislocator/libdislocator.so.c (limited to 'utils/libdislocator/libdislocator.so.c') diff --git a/utils/libdislocator/libdislocator.so.c b/utils/libdislocator/libdislocator.so.c new file mode 100644 index 00000000..2324e390 --- /dev/null +++ b/utils/libdislocator/libdislocator.so.c @@ -0,0 +1,544 @@ +/* + + american fuzzy lop++ - dislocator, an abusive allocator + ----------------------------------------------------- + + Originally written by Michal Zalewski + + Copyright 2016 Google Inc. All rights reserved. + Copyright 2019-2020 AFLplusplus Project. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This is a companion library that can be used as a drop-in replacement + for the libc allocator in the fuzzed binaries. See README.dislocator.md for + more info. + + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include + +#ifdef __APPLE__ + #include +#endif + +#ifdef __FreeBSD__ + #include +#endif + +#if (defined(__linux__) && !defined(__ANDROID__)) || defined(__HAIKU__) + #include + #ifdef __linux__ + #include + #include + #endif + #ifdef __NR_getrandom + #define arc4random_buf(p, l) \ + do { \ + \ + ssize_t rd = syscall(__NR_getrandom, p, l, 0); \ + if (rd != l) DEBUGF("getrandom failed"); \ + \ + } while (0) + + #else + #include + #define arc4random_buf(p, l) \ + do { \ + \ + srand(time(NULL)); \ + u32 i; \ + u8 *ptr = (u8 *)p; \ + for (i = 0; i < l; i++) \ + ptr[i] = rand() % INT_MAX; \ + \ + } while (0) + + #endif +#endif + +#include "config.h" +#include "types.h" + +#if __STDC_VERSION__ < 201112L || \ + (defined(__FreeBSD__) && __FreeBSD_version < 1200000) +// use this hack if not C11 +typedef struct { + + long long __ll; + long double __ld; + +} max_align_t; + +#endif + +#define ALLOC_ALIGN_SIZE (_Alignof(max_align_t)) + +#ifndef PAGE_SIZE + #define PAGE_SIZE 4096 +#endif /* !PAGE_SIZE */ + +#ifndef MAP_ANONYMOUS + #define MAP_ANONYMOUS MAP_ANON +#endif /* !MAP_ANONYMOUS */ + +#define SUPER_PAGE_SIZE 1 << 21 + +/* Error / message handling: */ + +#define DEBUGF(_x...) \ + do { \ + \ + if (alloc_verbose) { \ + \ + if (++call_depth == 1) { \ + \ + fprintf(stderr, "[AFL] " _x); \ + fprintf(stderr, "\n"); \ + \ + } \ + call_depth--; \ + \ + } \ + \ + } while (0) + +#define FATAL(_x...) \ + do { \ + \ + if (++call_depth == 1) { \ + \ + fprintf(stderr, "*** [AFL] " _x); \ + fprintf(stderr, " ***\n"); \ + abort(); \ + \ + } \ + call_depth--; \ + \ + } while (0) + +/* Macro to count the number of pages needed to store a buffer: */ + +#define PG_COUNT(_l) (((_l) + (PAGE_SIZE - 1)) / PAGE_SIZE) + +/* Canary & clobber bytes: */ + +#define ALLOC_CANARY 0xAACCAACC +#define ALLOC_CLOBBER 0xCC + +#define TAIL_ALLOC_CANARY 0xAC + +#define PTR_C(_p) (((u32 *)(_p))[-1]) +#define PTR_L(_p) (((u32 *)(_p))[-2]) + +/* Configurable stuff (use AFL_LD_* to set): */ + +static u32 max_mem = MAX_ALLOC; /* Max heap usage to permit */ +static u8 alloc_verbose, /* Additional debug messages */ + hard_fail, /* abort() when max_mem exceeded? 
*/ + no_calloc_over, /* abort() on calloc() overflows? */ + align_allocations; /* Force alignment to sizeof(void*) */ + +#if defined __OpenBSD__ || defined __APPLE__ + #define __thread + #warning no thread support available +#endif +static __thread size_t total_mem; /* Currently allocated mem */ + +static __thread u32 call_depth; /* To avoid recursion via fprintf() */ +static u32 alloc_canary; + +/* This is the main alloc function. It allocates one page more than necessary, + sets that tailing page to PROT_NONE, and then increments the return address + so that it is right-aligned to that boundary. Since it always uses mmap(), + the returned memory will be zeroed. */ + +static void *__dislocator_alloc(size_t len) { + + u8 * ret, *base; + size_t tlen; + int flags, fd, sp; + + if (total_mem + len > max_mem || total_mem + len < total_mem) { + + if (hard_fail) FATAL("total allocs exceed %u MB", max_mem / 1024 / 1024); + + DEBUGF("total allocs exceed %u MB, returning NULL", max_mem / 1024 / 1024); + + return NULL; + + } + + size_t rlen; + if (align_allocations && (len & (ALLOC_ALIGN_SIZE - 1))) + rlen = (len & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE; + else + rlen = len; + + /* We will also store buffer length and a canary below the actual buffer, so + let's add 8 bytes for that. */ + + base = NULL; + tlen = (1 + PG_COUNT(rlen + 8)) * PAGE_SIZE; + flags = MAP_PRIVATE | MAP_ANONYMOUS; + fd = -1; +#if defined(USEHUGEPAGE) + sp = (rlen >= SUPER_PAGE_SIZE && !(rlen % SUPER_PAGE_SIZE)); + + #if defined(__APPLE__) + if (sp) fd = VM_FLAGS_SUPERPAGE_SIZE_2MB; + #elif defined(__linux__) + if (sp) flags |= MAP_HUGETLB; + #elif defined(__FreeBSD__) + if (sp) flags |= MAP_ALIGNED_SUPER; + #elif defined(__sun) + if (sp) { + + base = (void *)(caddr_t)(1 << 21); + flags |= MAP_ALIGN; + + } + + #endif +#else + (void)sp; +#endif + + ret = (u8 *)mmap(base, tlen, PROT_READ | PROT_WRITE, flags, fd, 0); +#if defined(USEHUGEPAGE) + /* We try one more time with regular call */ + if (ret == MAP_FAILED) { + + #if defined(__APPLE__) + fd = -1; + #elif defined(__linux__) + flags &= -MAP_HUGETLB; + #elif defined(__FreeBSD__) + flags &= -MAP_ALIGNED_SUPER; + #elif defined(__sun) + flags &= -MAP_ALIGN; + #endif + ret = (u8 *)mmap(NULL, tlen, PROT_READ | PROT_WRITE, flags, fd, 0); + + } + +#endif + + if (ret == MAP_FAILED) { + + if (hard_fail) FATAL("mmap() failed on alloc (OOM?)"); + + DEBUGF("mmap() failed on alloc (OOM?)"); + + return NULL; + + } + + /* Set PROT_NONE on the last page. */ + + if (mprotect(ret + PG_COUNT(rlen + 8) * PAGE_SIZE, PAGE_SIZE, PROT_NONE)) + FATAL("mprotect() failed when allocating memory"); + + /* Offset the return pointer so that it's right-aligned to the page + boundary. */ + + ret += PAGE_SIZE * PG_COUNT(rlen + 8) - rlen - 8; + + /* Store allocation metadata. */ + + ret += 8; + + PTR_L(ret) = len; + PTR_C(ret) = alloc_canary; + + total_mem += len; + + if (rlen != len) { + + size_t i; + for (i = len; i < rlen; ++i) + ret[i] = TAIL_ALLOC_CANARY; + + } + + return ret; + +} + +/* The "user-facing" wrapper for calloc(). This just checks for overflows and + displays debug messages if requested. */ + +void *calloc(size_t elem_len, size_t elem_cnt) { + + void *ret; + + size_t len = elem_len * elem_cnt; + + /* Perform some sanity checks to detect obvious issues... 
*/ + + if (elem_cnt && len / elem_cnt != elem_len) { + + if (no_calloc_over) { + + DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len, + elem_cnt); + return NULL; + + } + + FATAL("calloc(%zu, %zu) would overflow", elem_len, elem_cnt); + + } + + ret = __dislocator_alloc(len); + + DEBUGF("calloc(%zu, %zu) = %p [%zu total]", elem_len, elem_cnt, ret, + total_mem); + + return ret; + +} + +/* The wrapper for malloc(). Roughly the same, also clobbers the returned + memory (unlike calloc(), malloc() is not guaranteed to return zeroed + memory). */ + +void *malloc(size_t len) { + + void *ret; + + ret = __dislocator_alloc(len); + + DEBUGF("malloc(%zu) = %p [%zu total]", len, ret, total_mem); + + if (ret && len) memset(ret, ALLOC_CLOBBER, len); + + return ret; + +} + +/* The wrapper for free(). This simply marks the entire region as PROT_NONE. + If the region is already freed, the code will segfault during the attempt to + read the canary. Not very graceful, but works, right? */ + +void free(void *ptr) { + + u32 len; + + DEBUGF("free(%p)", ptr); + + if (!ptr) return; + + if (PTR_C(ptr) != alloc_canary) FATAL("bad allocator canary on free()"); + + len = PTR_L(ptr); + + total_mem -= len; + + if (align_allocations && (len & (ALLOC_ALIGN_SIZE - 1))) { + + u8 * ptr_ = ptr; + size_t rlen = (len & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE; + for (; len < rlen; ++len) + if (ptr_[len] != TAIL_ALLOC_CANARY) + FATAL("bad tail allocator canary on free()"); + + } + + /* Protect everything. Note that the extra page at the end is already + set as PROT_NONE, so we don't need to touch that. */ + + ptr -= PAGE_SIZE * PG_COUNT(len + 8) - len - 8; + + if (mprotect(ptr - 8, PG_COUNT(len + 8) * PAGE_SIZE, PROT_NONE)) + FATAL("mprotect() failed when freeing memory"); + + /* Keep the mapping; this is wasteful, but prevents ptr reuse. */ + +} + +/* Realloc is pretty straightforward, too. We forcibly reallocate the buffer, + move data, and then free (aka mprotect()) the original one. */ + +void *realloc(void *ptr, size_t len) { + + void *ret; + + ret = malloc(len); + + if (ret && ptr) { + + if (PTR_C(ptr) != alloc_canary) FATAL("bad allocator canary on realloc()"); + // Here the tail canary check is delayed to free() + + memcpy(ret, ptr, MIN(len, PTR_L(ptr))); + free(ptr); + + } + + DEBUGF("realloc(%p, %zu) = %p [%zu total]", ptr, len, ret, total_mem); + + return ret; + +} + +/* posix_memalign we mainly check the proper alignment argument + if the requested size fits within the alignment we do + a normal request */ + +int posix_memalign(void **ptr, size_t align, size_t len) { + + // if (*ptr == NULL) return EINVAL; // (andrea) Why? 
I comment it out for now + if ((align % 2) || (align % sizeof(void *))) return EINVAL; + if (len == 0) { + + *ptr = NULL; + return 0; + + } + + size_t rem = len % align; + if (rem) len += align - rem; + + *ptr = __dislocator_alloc(len); + + if (*ptr && len) memset(*ptr, ALLOC_CLOBBER, len); + + DEBUGF("posix_memalign(%p %zu, %zu) [*ptr = %p]", ptr, align, len, *ptr); + + return 0; + +} + +/* just the non-posix fashion */ + +void *memalign(size_t align, size_t len) { + + void *ret = NULL; + + if (posix_memalign(&ret, align, len)) { + + DEBUGF("memalign(%zu, %zu) failed", align, len); + + } + + return ret; + +} + +/* sort of C11 alias of memalign only more severe, alignment-wise */ + +void *aligned_alloc(size_t align, size_t len) { + + void *ret = NULL; + + if ((len % align)) return NULL; + + if (posix_memalign(&ret, align, len)) { + + DEBUGF("aligned_alloc(%zu, %zu) failed", align, len); + + } + + return ret; + +} + +/* specific BSD api mainly checking possible overflow for the size */ + +void *reallocarray(void *ptr, size_t elem_len, size_t elem_cnt) { + + const size_t elem_lim = 1UL << (sizeof(size_t) * 4); + const size_t elem_tot = elem_len * elem_cnt; + void * ret = NULL; + + if ((elem_len >= elem_lim || elem_cnt >= elem_lim) && elem_len > 0 && + elem_cnt > (SIZE_MAX / elem_len)) { + + DEBUGF("reallocarray size overflow (%zu)", elem_tot); + + } else { + + ret = realloc(ptr, elem_tot); + + } + + return ret; + +} + +#if !defined(__ANDROID__) +size_t malloc_usable_size(void *ptr) { + +#else +size_t malloc_usable_size(const void *ptr) { + +#endif + + return ptr ? PTR_L(ptr) : 0; + +} + +__attribute__((constructor)) void __dislocator_init(void) { + + u8 *tmp = (u8 *)getenv("AFL_LD_LIMIT_MB"); + + if (tmp) { + + u8 *tok; + s32 mmem = (s32)strtol((char *)tmp, (char **)&tok, 10); + if (*tok != '\0' || errno == ERANGE) FATAL("Bad value for AFL_LD_LIMIT_MB"); + max_mem = mmem * 1024 * 1024; + + } + + alloc_canary = ALLOC_CANARY; + tmp = (u8 *)getenv("AFL_RANDOM_ALLOC_CANARY"); + + if (tmp) arc4random_buf(&alloc_canary, sizeof(alloc_canary)); + + alloc_verbose = !!getenv("AFL_LD_VERBOSE"); + hard_fail = !!getenv("AFL_LD_HARD_FAIL"); + no_calloc_over = !!getenv("AFL_LD_NO_CALLOC_OVER"); + align_allocations = !!getenv("AFL_ALIGNED_ALLOC"); + +} + +/* NetBSD fault handler specific api subset */ + +void (*esetfunc(void (*fn)(int, const char *, ...)))(int, const char *, ...) { + + /* Might not be meaningful to implement; upper calls already report errors */ + return NULL; + +} + +void *emalloc(size_t len) { + + return malloc(len); + +} + +void *ecalloc(size_t elem_len, size_t elem_cnt) { + + return calloc(elem_len, elem_cnt); + +} + +void *erealloc(void *ptr, size_t len) { + + return realloc(ptr, len); + +} + -- cgit 1.4.1 From 935724557f1c9ee59141c7fc4b27c3831ad52435 Mon Sep 17 00:00:00 2001 From: David CARLIER Date: Thu, 31 Dec 2020 11:35:32 +0000 Subject: libdislocator, ptr algo warning fix on clang/Xcode (m1). 
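Annotation, not part of the original commit message: free() previously did its address adjustments directly on the void *ptr argument. Arithmetic on void * is a GNU extension (treated as byte arithmetic), and clang under Xcode on the M1 warns about it, which is what the hunk below addresses by reusing a u8 *ptr_ byte pointer for the adjustments. A minimal sketch of the pattern, as a hypothetical standalone file rather than code from the AFL++ tree:

#include <stddef.h>

/* the same kind of adjustment free() performs, written both ways */
void adjust(void *p, size_t off) {

  /* p -= off;   arithmetic on a pointer to void is a GNU extension,
                 and the source of the clang warning */

  unsigned char *p_ = (unsigned char *)p;  /* byte pointer, like u8 *ptr_ */
  p_ -= off;                               /* well-defined byte-wise arithmetic */
  (void)p_;

}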
--- utils/libdislocator/libdislocator.so.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'utils/libdislocator/libdislocator.so.c') diff --git a/utils/libdislocator/libdislocator.so.c b/utils/libdislocator/libdislocator.so.c index 2324e390..c2b200cb 100644 --- a/utils/libdislocator/libdislocator.so.c +++ b/utils/libdislocator/libdislocator.so.c @@ -345,10 +345,10 @@ void free(void *ptr) { len = PTR_L(ptr); total_mem -= len; + u8 * ptr_ = ptr; if (align_allocations && (len & (ALLOC_ALIGN_SIZE - 1))) { - u8 * ptr_ = ptr; size_t rlen = (len & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE; for (; len < rlen; ++len) if (ptr_[len] != TAIL_ALLOC_CANARY) @@ -359,11 +359,13 @@ void free(void *ptr) { /* Protect everything. Note that the extra page at the end is already set as PROT_NONE, so we don't need to touch that. */ - ptr -= PAGE_SIZE * PG_COUNT(len + 8) - len - 8; + ptr_ -= PAGE_SIZE * PG_COUNT(len + 8) - len - 8; - if (mprotect(ptr - 8, PG_COUNT(len + 8) * PAGE_SIZE, PROT_NONE)) + if (mprotect(ptr_ - 8, PG_COUNT(len + 8) * PAGE_SIZE, PROT_NONE)) FATAL("mprotect() failed when freeing memory"); + ptr = ptr_; + /* Keep the mapping; this is wasteful, but prevents ptr reuse. */ } -- cgit 1.4.1 From 880513651d271db538eeb3f04d046cf9ce6021f8 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Sun, 3 Jan 2021 10:13:55 +0100 Subject: cmplog cleanup --- include/afl-fuzz.h | 4 ++-- src/afl-fuzz-redqueen.c | 40 +++------------------------------- utils/libdislocator/libdislocator.so.c | 2 +- 3 files changed, 6 insertions(+), 40 deletions(-) (limited to 'utils/libdislocator/libdislocator.so.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index a99e4991..d6a322cc 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -590,9 +590,9 @@ typedef struct afl_state { u32 rand_cnt; /* Random number counter */ -/* unsigned long rand_seed[3]; would also work */ + /* unsigned long rand_seed[3]; would also work */ AFL_RAND_RETURN rand_seed[3]; - s64 init_seed; + s64 init_seed; u64 total_cal_us, /* Total calibration time (us) */ total_cal_cycles; /* Total calibration cycles */ diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index df215e9c..bbd47ab2 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -99,12 +99,12 @@ static u8 get_exec_checksum(afl_state_t *afl, u8 *buf, u32 len, u64 *cksum) { } -static void rand_replace(afl_state_t *afl, u8 *buf, u32 len) { +static void xor_replace(u8 *buf, u32 len) { u32 i; for (i = 0; i < len; ++i) { - buf[i] = rand_below(afl, 256); + buf[i] ^= 0xff; } @@ -115,8 +115,6 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum) { struct range *ranges = add_range(NULL, 0, len); u8 * backup = ck_alloc_nozero(len); - u8 needs_write = 0; - u64 orig_hit_cnt, new_hit_cnt; orig_hit_cnt = afl->queued_paths + afl->unique_crashes; @@ -136,7 +134,7 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum) { /* Range not empty */ memcpy(backup, buf + rng->start, s); - rand_replace(afl, buf + rng->start, s); + xor_replace(buf + rng->start, s); u64 cksum; u64 start_us = get_cur_time_us(); @@ -158,10 +156,6 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum) { ranges = add_range(ranges, rng->start + s / 2 + 1, rng->end); memcpy(buf + rng->start, backup, s); - } else { - - needs_write = 1; - } } @@ -191,32 +185,6 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum) { } - // save the input with the high entropy - - if (needs_write) { - - s32 fd; - - 
if (afl->no_unlink) { - - fd = open(afl->queue_cur->fname, O_WRONLY | O_CREAT | O_TRUNC, 0600); - - } else { - - unlink(afl->queue_cur->fname); /* ignore errors */ - fd = open(afl->queue_cur->fname, O_WRONLY | O_CREAT | O_EXCL, 0600); - - } - - if (fd < 0) { PFATAL("Unable to create '%s'", afl->queue_cur->fname); } - - ck_write(fd, buf, len, afl->queue_cur->fname); - afl->queue_cur->len = len; // no-op, just to be 100% safe - - close(fd); - - } - return 0; checksum_fail: @@ -232,8 +200,6 @@ checksum_fail: } - // TODO: clang notices a _potential_ leak of mem pointed to by rng - return 1; } diff --git a/utils/libdislocator/libdislocator.so.c b/utils/libdislocator/libdislocator.so.c index c2b200cb..c041fec6 100644 --- a/utils/libdislocator/libdislocator.so.c +++ b/utils/libdislocator/libdislocator.so.c @@ -345,7 +345,7 @@ void free(void *ptr) { len = PTR_L(ptr); total_mem -= len; - u8 * ptr_ = ptr; + u8 *ptr_ = ptr; if (align_allocations && (len & (ALLOC_ALIGN_SIZE - 1))) { -- cgit 1.4.1 From ffd8fae22ae51b6eda9bad6c4ee7173daba427b8 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 10 Mar 2021 22:44:43 +0100 Subject: PFATAL for libdislocator --- utils/libdislocator/libdislocator.so.c | 41 ++++------------------------------ 1 file changed, 4 insertions(+), 37 deletions(-) (limited to 'utils/libdislocator/libdislocator.so.c') diff --git a/utils/libdislocator/libdislocator.so.c b/utils/libdislocator/libdislocator.so.c index c041fec6..b5f07c04 100644 --- a/utils/libdislocator/libdislocator.so.c +++ b/utils/libdislocator/libdislocator.so.c @@ -28,6 +28,7 @@ #include #include #include +#include "debug.h" #ifdef __APPLE__ #include @@ -95,39 +96,6 @@ typedef struct { #define SUPER_PAGE_SIZE 1 << 21 -/* Error / message handling: */ - -#define DEBUGF(_x...) \ - do { \ - \ - if (alloc_verbose) { \ - \ - if (++call_depth == 1) { \ - \ - fprintf(stderr, "[AFL] " _x); \ - fprintf(stderr, "\n"); \ - \ - } \ - call_depth--; \ - \ - } \ - \ - } while (0) - -#define FATAL(_x...) \ - do { \ - \ - if (++call_depth == 1) { \ - \ - fprintf(stderr, "*** [AFL] " _x); \ - fprintf(stderr, " ***\n"); \ - abort(); \ - \ - } \ - call_depth--; \ - \ - } while (0) - /* Macro to count the number of pages needed to store a buffer: */ #define PG_COUNT(_l) (((_l) + (PAGE_SIZE - 1)) / PAGE_SIZE) @@ -156,7 +124,6 @@ static u8 alloc_verbose, /* Additional debug messages */ #endif static __thread size_t total_mem; /* Currently allocated mem */ -static __thread u32 call_depth; /* To avoid recursion via fprintf() */ static u32 alloc_canary; /* This is the main alloc function. It allocates one page more than necessary, @@ -237,7 +204,7 @@ static void *__dislocator_alloc(size_t len) { if (ret == MAP_FAILED) { - if (hard_fail) FATAL("mmap() failed on alloc (OOM?)"); + if (hard_fail) PFATAL("mmap() failed on alloc (OOM?)"); DEBUGF("mmap() failed on alloc (OOM?)"); @@ -248,7 +215,7 @@ static void *__dislocator_alloc(size_t len) { /* Set PROT_NONE on the last page. */ if (mprotect(ret + PG_COUNT(rlen + 8) * PAGE_SIZE, PAGE_SIZE, PROT_NONE)) - FATAL("mprotect() failed when allocating memory"); + PFATAL("mprotect() failed when allocating memory"); /* Offset the return pointer so that it's right-aligned to the page boundary. 
*/ @@ -362,7 +329,7 @@ void free(void *ptr) { ptr_ -= PAGE_SIZE * PG_COUNT(len + 8) - len - 8; if (mprotect(ptr_ - 8, PG_COUNT(len + 8) * PAGE_SIZE, PROT_NONE)) - FATAL("mprotect() failed when freeing memory"); + PFATAL("mprotect() failed when freeing memory"); ptr = ptr_; -- cgit 1.4.1 From 966eba50a6892a0c218cdc462ef4dec9dcd01863 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 10 Mar 2021 22:51:05 +0100 Subject: Revert "PFATAL for libdislocator" This reverts commit ffd8fae22ae51b6eda9bad6c4ee7173daba427b8. --- utils/libdislocator/libdislocator.so.c | 41 ++++++++++++++++++++++++++++++---- 1 file changed, 37 insertions(+), 4 deletions(-) (limited to 'utils/libdislocator/libdislocator.so.c') diff --git a/utils/libdislocator/libdislocator.so.c b/utils/libdislocator/libdislocator.so.c index b5f07c04..c041fec6 100644 --- a/utils/libdislocator/libdislocator.so.c +++ b/utils/libdislocator/libdislocator.so.c @@ -28,7 +28,6 @@ #include #include #include -#include "debug.h" #ifdef __APPLE__ #include @@ -96,6 +95,39 @@ typedef struct { #define SUPER_PAGE_SIZE 1 << 21 +/* Error / message handling: */ + +#define DEBUGF(_x...) \ + do { \ + \ + if (alloc_verbose) { \ + \ + if (++call_depth == 1) { \ + \ + fprintf(stderr, "[AFL] " _x); \ + fprintf(stderr, "\n"); \ + \ + } \ + call_depth--; \ + \ + } \ + \ + } while (0) + +#define FATAL(_x...) \ + do { \ + \ + if (++call_depth == 1) { \ + \ + fprintf(stderr, "*** [AFL] " _x); \ + fprintf(stderr, " ***\n"); \ + abort(); \ + \ + } \ + call_depth--; \ + \ + } while (0) + /* Macro to count the number of pages needed to store a buffer: */ #define PG_COUNT(_l) (((_l) + (PAGE_SIZE - 1)) / PAGE_SIZE) @@ -124,6 +156,7 @@ static u8 alloc_verbose, /* Additional debug messages */ #endif static __thread size_t total_mem; /* Currently allocated mem */ +static __thread u32 call_depth; /* To avoid recursion via fprintf() */ static u32 alloc_canary; /* This is the main alloc function. It allocates one page more than necessary, @@ -204,7 +237,7 @@ static void *__dislocator_alloc(size_t len) { if (ret == MAP_FAILED) { - if (hard_fail) PFATAL("mmap() failed on alloc (OOM?)"); + if (hard_fail) FATAL("mmap() failed on alloc (OOM?)"); DEBUGF("mmap() failed on alloc (OOM?)"); @@ -215,7 +248,7 @@ static void *__dislocator_alloc(size_t len) { /* Set PROT_NONE on the last page. */ if (mprotect(ret + PG_COUNT(rlen + 8) * PAGE_SIZE, PAGE_SIZE, PROT_NONE)) - PFATAL("mprotect() failed when allocating memory"); + FATAL("mprotect() failed when allocating memory"); /* Offset the return pointer so that it's right-aligned to the page boundary. */ @@ -329,7 +362,7 @@ void free(void *ptr) { ptr_ -= PAGE_SIZE * PG_COUNT(len + 8) - len - 8; if (mprotect(ptr_ - 8, PG_COUNT(len + 8) * PAGE_SIZE, PROT_NONE)) - PFATAL("mprotect() failed when freeing memory"); + FATAL("mprotect() failed when freeing memory"); ptr = ptr_; -- cgit 1.4.1
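A closing usage sketch, illustrative only and not part of the patch series above: the dislocator is meant to be preloaded into the target so that every heap allocation ends flush against a PROT_NONE guard page, turning even a one-byte out-of-bounds read into an immediate fault. Assuming the library has been built as libdislocator.so and AFL_ALIGNED_ALLOC is unset (so no tail-canary padding sits between the buffer and the guard page), a minimal test might look like this:

/* guard_test.c -- hypothetical test program, not from the AFL++ tree.
   Build:  cc -o guard_test guard_test.c
   Run:    LD_PRELOAD=./libdislocator.so ./guard_test
           (or AFL_PRELOAD=./libdislocator.so under afl-fuzz)
   With the dislocator preloaded, the out-of-bounds read below lands on
   the guard page and should crash; with the plain libc allocator it
   will usually pass silently. */

#include <stdio.h>
#include <stdlib.h>

int main(void) {

  char *buf = malloc(16);
  if (!buf) return 1;

  buf[15] = 'A';                     /* last in-bounds byte: fine */
  printf("in bounds: %c\n", buf[15]);

  volatile char c = buf[16];         /* one byte past the end: expected SIGSEGV */
  printf("not reached: %d\n", (int)c);

  free(buf);
  return 0;

}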