author | Maik Betka <9078425+voidptr127@users.noreply.github.com> | 2023-04-21 11:31:22 +0200
---|---|---
committer | Maik Betka <9078425+voidptr127@users.noreply.github.com> | 2023-04-21 11:31:22 +0200
commit | 7101ffa1ae79e15d70905b09decbe69cdf53367b (patch) |
tree | fd34b5686a4522dd6d29c9a40cee3d9826b2c7c6 /instrumentation/afl-compiler-rt.o.c |
parent | 9ab902402cd33156257fc0355c0105e7e03f5ba3 (diff) |
parent | 4e5f42cab6b8c501eeaf76ec7ca920089f6e0f3a (diff) |
download | afl++-7101ffa1ae79e15d70905b09decbe69cdf53367b.tar.gz |
Merge remote-tracking branch 'origin/dev' into atnwalk
# Conflicts:
#	include/afl-fuzz.h
#	src/afl-fuzz-run.c
Diffstat (limited to 'instrumentation/afl-compiler-rt.o.c')
-rw-r--r-- | instrumentation/afl-compiler-rt.o.c | 256 |
1 file changed, 144 insertions, 112 deletions
diff --git a/instrumentation/afl-compiler-rt.o.c b/instrumentation/afl-compiler-rt.o.c
index 1759898e..e0e40983 100644
--- a/instrumentation/afl-compiler-rt.o.c
+++ b/instrumentation/afl-compiler-rt.o.c
@@ -3,7 +3,7 @@
    ------------------------------------------------
 
    Copyright 2015, 2016 Google Inc. All rights reserved.
-   Copyright 2019-2022 AFLplusplus Project. All rights reserved.
+   Copyright 2019-2023 AFLplusplus Project. All rights reserved.
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
@@ -97,18 +97,23 @@ u8 *__afl_dictionary;
 u8 *__afl_fuzz_ptr;
 static u32 __afl_fuzz_len_dummy;
 u32 *__afl_fuzz_len = &__afl_fuzz_len_dummy;
+int __afl_sharedmem_fuzzing __attribute__((weak));
 
 u32 __afl_final_loc;
 u32 __afl_map_size = MAP_SIZE;
 u32 __afl_dictionary_len;
 u64 __afl_map_addr;
+u32 __afl_first_final_loc;
+
+/* 1 if we are running in afl, and the forkserver was started, else 0 */
+u32 __afl_connected = 0;
 
 // for the __AFL_COVERAGE_ON/__AFL_COVERAGE_OFF features to work:
 int __afl_selective_coverage __attribute__((weak));
 int __afl_selective_coverage_start_off __attribute__((weak));
 static int __afl_selective_coverage_temp = 1;
 
-#if defined(__ANDROID__) || defined(__HAIKU__)
+#if defined(__ANDROID__) || defined(__HAIKU__) || defined(NO_TLS)
 PREV_LOC_T __afl_prev_loc[NGRAM_SIZE_MAX];
 PREV_LOC_T __afl_prev_caller[CTX_MAX_K];
 u32        __afl_prev_ctx;
@@ -118,8 +123,6 @@ __thread PREV_LOC_T __afl_prev_caller[CTX_MAX_K];
 __thread u32        __afl_prev_ctx;
 #endif
 
-int __afl_sharedmem_fuzzing __attribute__((weak));
-
 struct cmp_map *__afl_cmp_map;
 struct cmp_map *__afl_cmp_map_backup;
 
@@ -146,6 +149,7 @@ u32 __afl_already_initialized_shm;
 u32 __afl_already_initialized_forkserver;
 u32 __afl_already_initialized_first;
 u32 __afl_already_initialized_second;
+u32 __afl_already_initialized_early;
 u32 __afl_already_initialized_init;
 
 /* Dummy pipe for area_is_valid() */
@@ -159,6 +163,7 @@ static void at_exit(int signal) {
   if (unlikely(child_pid > 0)) {
 
     kill(child_pid, SIGKILL);
+    waitpid(child_pid, NULL, 0);
     child_pid = -1;
 
   }
@@ -319,13 +324,16 @@ static void __afl_map_shm(void) {
 
     } else {
 
-      if (!getenv("AFL_QUIET"))
+      if (__afl_final_loc > MAP_INITIAL_SIZE && !getenv("AFL_QUIET")) {
+
         fprintf(stderr,
                 "Warning: AFL++ tools might need to set AFL_MAP_SIZE to %u "
                 "to be able to run this instrumented program if this "
                 "crashes!\n",
                 __afl_final_loc);
 
+      }
+
     }
 
   }
@@ -343,29 +351,51 @@ static void __afl_map_shm(void) {
 
   }
 
-  if (!id_str && __afl_area_ptr_dummy == __afl_area_initial) {
+  if (__afl_sharedmem_fuzzing && (!id_str || !getenv(SHM_FUZZ_ENV_VAR) ||
+                                  fcntl(FORKSRV_FD, F_GETFD) == -1 ||
+                                  fcntl(FORKSRV_FD + 1, F_GETFD) == -1)) {
+
+    if (__afl_debug) {
+
+      fprintf(stderr,
+              "DEBUG: running not inside afl-fuzz, disabling shared memory "
+              "testcases\n");
+
+    }
+
+    __afl_sharedmem_fuzzing = 0;
+
+  }
+
+  if (!id_str) {
 
     u32 val = 0;
     u8 *ptr;
 
-    if ((ptr = getenv("AFL_MAP_SIZE")) != NULL) val = atoi(ptr);
+    if ((ptr = getenv("AFL_MAP_SIZE")) != NULL) { val = atoi(ptr); }
 
     if (val > MAP_INITIAL_SIZE) {
 
       __afl_map_size = val;
-      __afl_area_ptr_dummy = malloc(__afl_map_size);
-      if (!__afl_area_ptr_dummy) {
 
-        fprintf(stderr,
-                "Error: AFL++ could not aquire %u bytes of memory, exiting!\n",
-                __afl_map_size);
-        exit(-1);
+    } else {
+
+      if (__afl_first_final_loc > MAP_INITIAL_SIZE) {
+
+        // done in second stage constructor
+        __afl_map_size = __afl_first_final_loc;
+
+      } else {
+
+        __afl_map_size = MAP_INITIAL_SIZE;
 
       }
 
-    } else {
+    }
 
-      __afl_map_size = MAP_INITIAL_SIZE;
+    if (__afl_map_size > MAP_INITIAL_SIZE && __afl_final_loc < __afl_map_size) {
+
+      __afl_final_loc = __afl_map_size;
 
     }
 
@@ -516,7 +546,9 @@ static void __afl_map_shm(void) {
 
     }
 
-  } else if (__afl_final_loc > __afl_map_size) {
+  } else if (__afl_final_loc > MAP_INITIAL_SIZE &&
+
+             __afl_final_loc > __afl_first_final_loc) {
 
     if (__afl_area_initial != __afl_area_ptr_dummy) {
 
@@ -531,13 +563,13 @@ static void __afl_map_shm(void) {
     if (!__afl_area_ptr_dummy) {
 
       fprintf(stderr,
-              "Error: AFL++ could not aquire %u bytes of memory, exiting!\n",
+              "Error: AFL++ could not acquire %u bytes of memory, exiting!\n",
              __afl_final_loc);
       exit(-1);
 
     }
 
-  }
+  }  // else: nothing to be done
 
   __afl_area_ptr_backup = __afl_area_ptr;
 
@@ -745,10 +777,10 @@ static void __afl_start_snapshots(void) {
      assume we're not running in forkserver mode and just execute program. */
 
   status |= (FS_OPT_ENABLED | FS_OPT_SNAPSHOT | FS_OPT_NEWCMPLOG);
-  if (__afl_sharedmem_fuzzing != 0) status |= FS_OPT_SHDMEM_FUZZ;
+  if (__afl_sharedmem_fuzzing) { status |= FS_OPT_SHDMEM_FUZZ; }
   if (__afl_map_size <= FS_OPT_MAX_MAPSIZE)
     status |= (FS_OPT_SET_MAPSIZE(__afl_map_size) | FS_OPT_MAPSIZE);
-  if (__afl_dictionary_len && __afl_dictionary) status |= FS_OPT_AUTODICT;
+  if (__afl_dictionary_len && __afl_dictionary) { status |= FS_OPT_AUTODICT; }
   memcpy(tmp, &status, 4);
   if (write(FORKSRV_FD + 1, tmp, 4) != 4) { return; }
 
@@ -1009,7 +1041,7 @@ static void __afl_start_forkserver(void) {
 
   }
 
-  if (__afl_sharedmem_fuzzing != 0) { status_for_fsrv |= FS_OPT_SHDMEM_FUZZ; }
+  if (__afl_sharedmem_fuzzing) { status_for_fsrv |= FS_OPT_SHDMEM_FUZZ; }
   if (status_for_fsrv) {
 
     status_for_fsrv |= (FS_OPT_ENABLED | FS_OPT_NEWCMPLOG);
@@ -1023,6 +1055,8 @@ static void __afl_start_forkserver(void) {
 
   if (write(FORKSRV_FD + 1, tmp, 4) != 4) { return; }
 
+  __afl_connected = 1;
+
   if (__afl_sharedmem_fuzzing || (__afl_dictionary_len && __afl_dictionary)) {
 
     if (read(FORKSRV_FD, &was_killed, 4) != 4) _exit(1);
@@ -1233,13 +1267,9 @@ int __afl_persistent_loop(unsigned int max_cnt) {
        iteration, it's our job to erase any trace of whatever happened
        before the loop. */
 
-    if (is_persistent) {
-
-      memset(__afl_area_ptr, 0, __afl_map_size);
-      __afl_area_ptr[0] = 1;
-      memset(__afl_prev_loc, 0, NGRAM_SIZE_MAX * sizeof(PREV_LOC_T));
-
-    }
+    memset(__afl_area_ptr, 0, __afl_map_size);
+    __afl_area_ptr[0] = 1;
+    memset(__afl_prev_loc, 0, NGRAM_SIZE_MAX * sizeof(PREV_LOC_T));
 
     cycle_cnt = max_cnt;
     first_pass = 0;
@@ -1247,34 +1277,28 @@ int __afl_persistent_loop(unsigned int max_cnt) {
 
     return 1;
 
-  }
-
-  if (is_persistent) {
+  } else if (--cycle_cnt) {
 
-    if (--cycle_cnt) {
+    raise(SIGSTOP);
 
-      raise(SIGSTOP);
-
-      __afl_area_ptr[0] = 1;
-      memset(__afl_prev_loc, 0, NGRAM_SIZE_MAX * sizeof(PREV_LOC_T));
-      __afl_selective_coverage_temp = 1;
+    __afl_area_ptr[0] = 1;
+    memset(__afl_prev_loc, 0, NGRAM_SIZE_MAX * sizeof(PREV_LOC_T));
+    __afl_selective_coverage_temp = 1;
 
-      return 1;
+    return 1;
 
-    } else {
+  } else {
 
-      /* When exiting __AFL_LOOP(), make sure that the subsequent code that
-         follows the loop is not traced. We do that by pivoting back to the
-         dummy output region. */
+    /* When exiting __AFL_LOOP(), make sure that the subsequent code that
+       follows the loop is not traced. We do that by pivoting back to the
+       dummy output region. */
 
-      __afl_area_ptr = __afl_area_ptr_dummy;
+    __afl_area_ptr = __afl_area_ptr_dummy;
 
-    }
+    return 0;
 
   }
 
-  return 0;
-
 }
 
 /* This one can be called from user code when deferred forkserver mode
@@ -1350,6 +1374,9 @@ __attribute__((constructor(EARLY_FS_PRIO))) void __early_forkserver(void) {
 
 __attribute__((constructor(CTOR_PRIO))) void __afl_auto_early(void) {
 
+  if (__afl_already_initialized_early) return;
+  __afl_already_initialized_early = 1;
+
   is_persistent = !!getenv(PERSIST_ENV_VAR);
 
   if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) return;
@@ -1375,21 +1402,24 @@ __attribute__((constructor(1))) void __afl_auto_second(void) {
   if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) return;
   u8 *ptr;
 
-  if (__afl_final_loc) {
+  if (__afl_final_loc > MAP_INITIAL_SIZE) {
+
+    __afl_first_final_loc = __afl_final_loc + 1;
 
     if (__afl_area_ptr && __afl_area_ptr != __afl_area_initial)
      free(__afl_area_ptr);
 
     if (__afl_map_addr)
-      ptr = (u8 *)mmap((void *)__afl_map_addr, __afl_final_loc,
+      ptr = (u8 *)mmap((void *)__afl_map_addr, __afl_first_final_loc,
                        PROT_READ | PROT_WRITE,
                        MAP_FIXED_NOREPLACE | MAP_SHARED | MAP_ANONYMOUS, -1, 0);
     else
-      ptr = (u8 *)malloc(__afl_final_loc);
+      ptr = (u8 *)malloc(__afl_first_final_loc);
 
     if (ptr && (ssize_t)ptr != -1) {
 
       __afl_area_ptr = ptr;
+      __afl_area_ptr_dummy = __afl_area_ptr;
       __afl_area_ptr_backup = __afl_area_ptr;
 
     }
@@ -1407,14 +1437,18 @@ __attribute__((constructor(0))) void __afl_auto_first(void) {
   __afl_already_initialized_first = 1;
 
   if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) return;
-  u8 *ptr = (u8 *)malloc(MAP_INITIAL_SIZE);
 
-  if (ptr && (ssize_t)ptr != -1) {
+  /*
+  u8 *ptr = (u8 *)malloc(MAP_INITIAL_SIZE);
+
+  if (ptr && (ssize_t)ptr != -1) {
 
-    __afl_area_ptr = ptr;
-    __afl_area_ptr_backup = __afl_area_ptr;
+    __afl_area_ptr = ptr;
+    __afl_area_ptr_backup = __afl_area_ptr;
 
-  }
+  }
+
+  */
 
 }
 
 // ptr memleak report is a false positive
@@ -1484,6 +1518,14 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) {
 
   _is_sancov = 1;
 
+  if (!getenv("AFL_DUMP_MAP_SIZE")) {
+
+    __afl_auto_first();
+    __afl_auto_second();
+    __afl_auto_early();
+
+  }
+
   if (__afl_debug) {
 
     fprintf(stderr,
@@ -1494,7 +1536,21 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) {
 
   }
 
-  if (start == stop || *start) return;
+  if (start == stop || *start) { return; }
+
+  x = getenv("AFL_INST_RATIO");
+  if (x) {
+
+    inst_ratio = (u32)atoi(x);
+
+    if (!inst_ratio || inst_ratio > 100) {
+
+      fprintf(stderr, "[-] ERROR: Invalid AFL_INST_RATIO (must be 1-100).\n");
+      abort();
+
+    }
+
+  }
 
   // If a dlopen of an instrumented library happens after the forkserver then
   // we have a problem as we cannot increase the coverage map anymore.
@@ -1507,85 +1563,55 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) {
           "[-] FATAL: forkserver is already up, but an instrumented dlopen() "
           "library loaded afterwards. You must AFL_PRELOAD such libraries to "
           "be able to fuzz them or LD_PRELOAD to run outside of afl-fuzz.\n"
-          "To ignore this set AFL_IGNORE_PROBLEMS=1.\n");
+          "To ignore this set AFL_IGNORE_PROBLEMS=1 but this will be bad for "
+          "coverage.\n");
      abort();
 
     } else {
 
-      static u32 offset = 4;
+      static u32 offset = 5;
 
       while (start < stop) {
 
-        *(start++) = offset;
-        if (unlikely(++offset >= __afl_final_loc)) { offset = 4; }
-
-      }
-
-    }
-
-  }
-
-  x = getenv("AFL_INST_RATIO");
-  if (x) { inst_ratio = (u32)atoi(x); }
-
-  if (!inst_ratio || inst_ratio > 100) {
-
-    fprintf(stderr, "[-] ERROR: Invalid AFL_INST_RATIO (must be 1-100).\n");
-    abort();
-
-  }
-
-  /* instrumented code is loaded *after* our forkserver is up. this is a
-     problem. We cannot prevent collisions then :( */
-  /*
-  if (__afl_already_initialized_forkserver &&
-      __afl_final_loc + 1 + stop - start > __afl_map_size) {
-
-    if (__afl_debug) {
-
-      fprintf(stderr, "Warning: new instrumented code after the forkserver!\n");
-
-    }
-
-    __afl_final_loc = 2;
+        if (likely(inst_ratio == 100) || R(100) < inst_ratio) {
 
-    if (1 + stop - start > __afl_map_size) {
+          *(start++) = offset;
 
-      *(start++) = ++__afl_final_loc;
+        } else {
 
-      while (start < stop) {
+          *(start++) = 0;  // write to map[0]
 
-        if (R(100) < inst_ratio)
-          *start = ++__afl_final_loc % __afl_map_size;
-        else
-          *start = 4;
+        }
 
-        start++;
+        if (unlikely(++offset >= __afl_final_loc)) { offset = 5; }
 
       }
 
-      return;
-
     }
 
-  }
+    return;  // we are done for this special case
 
-  */
+  }
 
   /* Make sure that the first element in the range is always set - we use that
      to avoid duplicate calls (which can happen as an artifact of the underlying
      implementation in LLVM). */
 
+  if (__afl_final_loc < 5) __afl_final_loc = 5;  // we skip the first 5 entries
+
   *(start++) = ++__afl_final_loc;
 
   while (start < stop) {
 
-    if (R(100) < inst_ratio)
-      *start = ++__afl_final_loc;
-    else
-      *start = 4;
+    if (likely(inst_ratio == 100) || R(100) < inst_ratio) {
+
+      *(start++) = ++__afl_final_loc;
+
+    } else {
 
-    start++;
+      *(start++) = 0;  // write to map[0]
+
+    }
 
   }
 
@@ -1597,17 +1623,23 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) {
 
   }
 
-  if (__afl_already_initialized_shm && __afl_final_loc > __afl_map_size) {
+  if (__afl_already_initialized_shm) {
 
-    if (__afl_debug) {
+    if (__afl_final_loc > __afl_map_size) {
+
+      if (__afl_debug) {
+
+        fprintf(stderr, "Reinit shm necessary (+%u)\n",
+                __afl_final_loc - __afl_map_size);
+
+      }
 
-      fprintf(stderr, "Reinit shm necessary (+%u)\n",
-              __afl_final_loc - __afl_map_size);
+      __afl_unmap_shm();
+      __afl_map_shm();
 
     }
 
-    __afl_unmap_shm();
-    __afl_map_shm();
+    __afl_map_size = __afl_final_loc + 1;
 
   }
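
For readers unfamiliar with the runtime entry points touched above (`__afl_persistent_loop()` and the shared-memory testcase handling), the sketch below shows the documented AFL++ persistent-mode harness pattern that exercises them. It is illustrative only and not part of this commit: `process_input()` is a hypothetical stand-in for the code under test, and the `__AFL_*` macros are only defined when building with afl-clang-fast or afl-clang-lto.

```c
/* Hypothetical harness (not from this commit). Build with, e.g.:
     afl-clang-fast -o harness harness.c
   The __AFL_* macros are provided by the AFL++ compiler wrappers. */
#include <stdint.h>
#include <stdlib.h>

__AFL_FUZZ_INIT();

/* Stand-in for the code under test. */
static void process_input(const uint8_t *data, size_t len) {

  if (len >= 4 && data[0] == 'F' && data[1] == 'U' && data[2] == 'Z') { /* ... */ }

}

int main(void) {

#ifdef __AFL_HAVE_MANUAL_CONTROL
  __AFL_INIT();                            // start the (deferred) forkserver
#endif

  uint8_t *buf = __AFL_FUZZ_TESTCASE_BUF;  // shared-memory testcase buffer;
                                           // fetch after __AFL_INIT()

  while (__AFL_LOOP(10000)) {              // drives __afl_persistent_loop()

    size_t len = __AFL_FUZZ_TESTCASE_LEN;  // length of the current testcase
    process_input(buf, len);

  }

  return 0;

}
```

With the changes in this diff, such a harness clears the coverage map on every loop iteration regardless of `is_persistent`, and when the binary is started outside afl-fuzz the runtime detects the missing forkserver file descriptors and disables shared-memory testcases instead of relying on them.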