path: root/libdislocator/libdislocator.so.c
author    Andrea Fioraldi <andreafioraldi@gmail.com>  2019-09-03 11:12:49 +0200
committer GitHub <noreply@github.com>                 2019-09-03 11:12:49 +0200
commit    f3617bd83bcf4de3b10866faca4b83f566ee0e8f (patch)
tree      6308bf840cdf24af50fdef4c216d6c9433cd021b /libdislocator/libdislocator.so.c
parent    3bfd88aabbf3fdf70cb053aa25944f32d2113d8f (diff)
parent    d47ef88fcd842bd13923b1b519544fa2c8d6d0eb (diff)
download  afl++-f3617bd83bcf4de3b10866faca4b83f566ee0e8f.tar.gz
Merge pull request #53 from vanhauser-thc/code-cleanup
Code cleanup
Diffstat (limited to 'libdislocator/libdislocator.so.c')
-rw-r--r--  libdislocator/libdislocator.so.c  67
1 file changed, 37 insertions, 30 deletions
diff --git a/libdislocator/libdislocator.so.c b/libdislocator/libdislocator.so.c
index 043480a6..5104fed4 100644
--- a/libdislocator/libdislocator.so.c
+++ b/libdislocator/libdislocator.so.c
@@ -25,8 +25,8 @@
#include <limits.h>
#include <sys/mman.h>
-#include "../config.h"
-#include "../types.h"
+#include "config.h"
+#include "types.h"
#ifndef PAGE_SIZE
# define PAGE_SIZE 4096
@@ -38,23 +38,35 @@
/* Error / message handling: */
-#define DEBUGF(_x...) do { \
- if (alloc_verbose) { \
- if (++call_depth == 1) { \
+#define DEBUGF(_x...) \
+ do { \
+ \
+ if (alloc_verbose) { \
+ \
+ if (++call_depth == 1) { \
+ \
fprintf(stderr, "[AFL] " _x); \
- fprintf(stderr, "\n"); \
- } \
- call_depth--; \
- } \
+ fprintf(stderr, "\n"); \
+ \
+ } \
+ call_depth--; \
+ \
+ } \
+ \
} while (0)
-#define FATAL(_x...) do { \
- if (++call_depth == 1) { \
+#define FATAL(_x...) \
+ do { \
+ \
+ if (++call_depth == 1) { \
+ \
fprintf(stderr, "*** [AFL] " _x); \
- fprintf(stderr, " ***\n"); \
- abort(); \
- } \
- call_depth--; \
+ fprintf(stderr, " ***\n"); \
+ abort(); \
+ \
+ } \
+ call_depth--; \
+ \
} while (0)
/* Macro to count the number of pages needed to store a buffer: */
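The PG_COUNT macro described by the comment above lies outside this hunk; a minimal sketch of the usual round-up-to-page-size idiom (an assumption, not necessarily the exact definition in this file) is:

#include <stdio.h>

#ifndef PAGE_SIZE
#  define PAGE_SIZE 4096
#endif

/* Hypothetical sketch: number of whole pages needed to hold _l bytes. */
#define PG_COUNT(_l) (((_l) + (PAGE_SIZE - 1)) / PAGE_SIZE)

int main(void) {

  printf("%d %d %d\n", (int)PG_COUNT(1), (int)PG_COUNT(4096), (int)PG_COUNT(4097));
  /* prints: 1 1 2 */
  return 0;

}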
@@ -63,7 +75,7 @@
/* Canary & clobber bytes: */
-#define ALLOC_CANARY 0xAACCAACC
+#define ALLOC_CANARY 0xAACCAACC
#define ALLOC_CLOBBER 0xCC
#define PTR_C(_p) (((u32*)(_p))[-1])
@@ -73,14 +85,13 @@
static u32 max_mem = MAX_ALLOC; /* Max heap usage to permit */
static u8 alloc_verbose, /* Additional debug messages */
- hard_fail, /* abort() when max_mem exceeded? */
- no_calloc_over; /* abort() on calloc() overflows? */
+ hard_fail, /* abort() when max_mem exceeded? */
+ no_calloc_over; /* abort() on calloc() overflows? */
static __thread size_t total_mem; /* Currently allocated mem */
static __thread u32 call_depth; /* To avoid recursion via fprintf() */
-
/* This is the main alloc function. It allocates one page more than necessary,
sets that tailing page to PROT_NONE, and then increments the return address
so that it is right-aligned to that boundary. Since it always uses mmap(),
@@ -90,14 +101,11 @@ static void* __dislocator_alloc(size_t len) {
void* ret;
-
if (total_mem + len > max_mem || total_mem + len < total_mem) {
- if (hard_fail)
- FATAL("total allocs exceed %u MB", max_mem / 1024 / 1024);
+ if (hard_fail) FATAL("total allocs exceed %u MB", max_mem / 1024 / 1024);
- DEBUGF("total allocs exceed %u MB, returning NULL",
- max_mem / 1024 / 1024);
+ DEBUGF("total allocs exceed %u MB, returning NULL", max_mem / 1024 / 1024);
return NULL;
@@ -142,7 +150,6 @@ static void* __dislocator_alloc(size_t len) {
}
-
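The comment above this hunk describes the core technique: allocate one extra page, turn that trailing page into a PROT_NONE guard, and right-align the returned pointer against it so any overflow faults immediately. The following self-contained sketch illustrates that idea under assumptions (the 8 bytes of bookkeeping and the length slot are guesses based on the PTR_C macro above); it is not the exact code from this file.

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

#ifndef PAGE_SIZE
#  define PAGE_SIZE 4096
#endif

#define ALLOC_CANARY 0xAACCAACC
#define PG_COUNT(_l) (((_l) + (PAGE_SIZE - 1)) / PAGE_SIZE)

/* Guard-page allocation sketch: map one page more than needed, make the
   trailing page PROT_NONE, and hand out a pointer whose buffer ends exactly
   at that guard page, so an out-of-bounds access faults right away. */
void* guard_alloc(size_t len) {

  size_t   tlen = (1 + PG_COUNT(len + 8)) * PAGE_SIZE;  /* +8: canary + length */
  uint8_t* ret  = mmap(NULL, tlen, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (ret == MAP_FAILED) return NULL;

  /* The last page becomes the guard page. */
  if (mprotect(ret + tlen - PAGE_SIZE, PAGE_SIZE, PROT_NONE)) return NULL;

  /* Right-align the user buffer against the guard page. */
  ret += tlen - PAGE_SIZE - len;

  ((uint32_t*)ret)[-1] = ALLOC_CANARY;  /* canary just below the user pointer  */
  ((uint32_t*)ret)[-2] = (uint32_t)len; /* assumed length slot, used by the
                                           free/realloc sketches further down */

  return ret;

}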
/* The "user-facing" wrapper for calloc(). This just checks for overflows and
displays debug messages if requested. */
@@ -157,8 +164,11 @@ void* calloc(size_t elem_len, size_t elem_cnt) {
if (elem_cnt && len / elem_cnt != elem_len) {
if (no_calloc_over) {
- DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len, elem_cnt);
+
+ DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len,
+ elem_cnt);
return NULL;
+
}
FATAL("calloc(%zu, %zu) would overflow", elem_len, elem_cnt);
@@ -174,7 +184,6 @@ void* calloc(size_t elem_len, size_t elem_cnt) {
}
-
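The check visible in the calloc() hunk above is the classic divide-back overflow test: if elem_len * elem_cnt wrapped around, dividing the product by elem_cnt no longer yields elem_len. A small standalone illustration (hypothetical helper name):

#include <stddef.h>
#include <stdio.h>

/* Returns 1 if elem_len * elem_cnt would overflow size_t, using the same
   divide-back test as the calloc() wrapper above. */
int calloc_would_overflow(size_t elem_len, size_t elem_cnt) {

  size_t len = elem_len * elem_cnt;
  return elem_cnt && len / elem_cnt != elem_len;

}

int main(void) {

  printf("%d\n", calloc_would_overflow(8, 10));          /* 0: fits       */
  printf("%d\n", calloc_would_overflow((size_t)-1, 2));  /* 1: would wrap */
  return 0;

}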
/* The wrapper for malloc(). Roughly the same, also clobbers the returned
memory (unlike calloc(), malloc() is not guaranteed to return zeroed
memory). */
@@ -193,7 +202,6 @@ void* malloc(size_t len) {
}
-
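As the malloc() comment above notes, the wrapper clobbers fresh memory so that reads of uninitialized heap data become visible in practice. A hedged sketch of that idea on top of plain malloc() (illustration only, hypothetical helper name):

#include <stdlib.h>
#include <string.h>

#define ALLOC_CLOBBER 0xCC

/* Fill new allocations with a fixed junk pattern; code that wrongly relies
   on malloc() returning zeroed memory will then misbehave predictably. */
void* clobbered_malloc(size_t len) {

  void* ret = malloc(len);

  if (ret && len) memset(ret, ALLOC_CLOBBER, len);
  return ret;

}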
/* The wrapper for free(). This simply marks the entire region as PROT_NONE.
If the region is already freed, the code will segfault during the attempt to
read the canary. Not very graceful, but works, right? */
@@ -224,7 +232,6 @@ void free(void* ptr) {
}
-
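The free() comment above explains the trick: instead of unmapping, the whole region is turned PROT_NONE, so any use-after-free (including a double free, which faults while reading the canary) crashes on the spot. Below is a sketch that pairs with the earlier guard_alloc() sketch; the PTR_L length slot is an assumption, only PTR_C appears in this diff.

#include <stdint.h>
#include <stdlib.h>
#include <sys/mman.h>

#ifndef PAGE_SIZE
#  define PAGE_SIZE 4096
#endif

#define ALLOC_CANARY 0xAACCAACC
#define PG_COUNT(_l) (((_l) + (PAGE_SIZE - 1)) / PAGE_SIZE)
#define PTR_C(_p)    (((uint32_t*)(_p))[-1])
#define PTR_L(_p)    (((uint32_t*)(_p))[-2])  /* assumed length slot */

/* Check the canary, then revoke access to the whole allocation so later
   reads or writes through the stale pointer segfault immediately. */
void guard_free(void* ptr) {

  if (!ptr) return;

  if (PTR_C(ptr) != ALLOC_CANARY) abort();     /* underflow or bogus pointer */

  size_t   len  = PTR_L(ptr);
  size_t   tlen = (1 + PG_COUNT(len + 8)) * PAGE_SIZE;       /* same math as guard_alloc() */
  uint8_t* base = (uint8_t*)ptr - (tlen - PAGE_SIZE - len);  /* original mapping start     */

  /* The trailing guard page is already PROT_NONE; protect the rest. */
  mprotect(base, tlen - PAGE_SIZE, PROT_NONE);

}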
/* Realloc is pretty straightforward, too. We forcibly reallocate the buffer,
move data, and then free (aka mprotect()) the original one. */
@@ -249,7 +256,6 @@ void* realloc(void* ptr, size_t len) {
}
-
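Per the realloc() comment above, the strategy is simply allocate-copy-free. A hedged sketch built on the hypothetical guard_alloc()/guard_free() helpers from the previous sketches (the stored length is again an assumption):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define PTR_L(_p) (((uint32_t*)(_p))[-2])  /* assumed length slot */

void* guard_alloc(size_t len);  /* from the alloc sketch above */
void  guard_free(void* ptr);    /* from the free sketch above  */

/* Forcibly reallocate: grab a fresh guarded buffer, copy the smaller of the
   old and new sizes, then "free" (mprotect away) the original region. */
void* guard_realloc(void* ptr, size_t len) {

  void* ret = guard_alloc(len);

  if (ret && ptr) {

    size_t old = PTR_L(ptr);
    memcpy(ret, ptr, old < len ? old : len);
    guard_free(ptr);

  }

  return ret;

}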
__attribute__((constructor)) void __dislocator_init(void) {
u8* tmp = getenv("AFL_LD_LIMIT_MB");
@@ -266,3 +272,4 @@ __attribute__((constructor)) void __dislocator_init(void) {
no_calloc_over = !!getenv("AFL_LD_NO_CALLOC_OVER");
}
+