From c8354d751606e0f7a0364685958036bb7031e35a Mon Sep 17 00:00:00 2001 From: van Hauser Date: Tue, 4 Aug 2020 23:22:42 +0200 Subject: new rand mode for data offsets that prefer low offset values --- src/afl-fuzz-one.c | 73 +++++++++++++++++++++++++++++------------------------- 1 file changed, 39 insertions(+), 34 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 1f0bf30e..77bce7d0 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1921,14 +1921,14 @@ havoc_stage: /* Flip a single bit somewhere. Spooky! */ - FLIP_BIT(out_buf, rand_below(afl, temp_len << 3)); + FLIP_BIT(out_buf, rand_below_datalen(afl, temp_len << 3)); break; case 1: /* Set byte to interesting value. */ - out_buf[rand_below(afl, temp_len)] = + out_buf[rand_below_datalen(afl, temp_len)] = interesting_8[rand_below(afl, sizeof(interesting_8))]; break; @@ -1940,12 +1940,12 @@ havoc_stage: if (rand_below(afl, 2)) { - *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) = + *(u16 *)(out_buf + rand_below_datalen(afl, temp_len - 1)) = interesting_16[rand_below(afl, sizeof(interesting_16) >> 1)]; } else { - *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) = SWAP16( + *(u16 *)(out_buf + rand_below_datalen(afl, temp_len - 1)) = SWAP16( interesting_16[rand_below(afl, sizeof(interesting_16) >> 1)]); } @@ -1960,12 +1960,12 @@ havoc_stage: if (rand_below(afl, 2)) { - *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) = + *(u32 *)(out_buf + rand_below_datalen(afl, temp_len - 3)) = interesting_32[rand_below(afl, sizeof(interesting_32) >> 2)]; } else { - *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) = SWAP32( + *(u32 *)(out_buf + rand_below_datalen(afl, temp_len - 3)) = SWAP32( interesting_32[rand_below(afl, sizeof(interesting_32) >> 2)]); } @@ -1976,14 +1976,16 @@ havoc_stage: /* Randomly subtract from byte. */ - out_buf[rand_below(afl, temp_len)] -= 1 + rand_below(afl, ARITH_MAX); + out_buf[rand_below_datalen(afl, temp_len)] -= + 1 + rand_below(afl, ARITH_MAX); break; case 5: /* Randomly add to byte. 
*/ - out_buf[rand_below(afl, temp_len)] += 1 + rand_below(afl, ARITH_MAX); + out_buf[rand_below_datalen(afl, temp_len)] += + 1 + rand_below(afl, ARITH_MAX); break; case 6: @@ -1994,13 +1996,13 @@ havoc_stage: if (rand_below(afl, 2)) { - u32 pos = rand_below(afl, temp_len - 1); + u32 pos = rand_below_datalen(afl, temp_len - 1); *(u16 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); } else { - u32 pos = rand_below(afl, temp_len - 1); + u32 pos = rand_below_datalen(afl, temp_len - 1); u16 num = 1 + rand_below(afl, ARITH_MAX); *(u16 *)(out_buf + pos) = @@ -2018,13 +2020,13 @@ havoc_stage: if (rand_below(afl, 2)) { - u32 pos = rand_below(afl, temp_len - 1); + u32 pos = rand_below_datalen(afl, temp_len - 1); *(u16 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); } else { - u32 pos = rand_below(afl, temp_len - 1); + u32 pos = rand_below_datalen(afl, temp_len - 1); u16 num = 1 + rand_below(afl, ARITH_MAX); *(u16 *)(out_buf + pos) = @@ -2042,13 +2044,13 @@ havoc_stage: if (rand_below(afl, 2)) { - u32 pos = rand_below(afl, temp_len - 3); + u32 pos = rand_below_datalen(afl, temp_len - 3); *(u32 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); } else { - u32 pos = rand_below(afl, temp_len - 3); + u32 pos = rand_below_datalen(afl, temp_len - 3); u32 num = 1 + rand_below(afl, ARITH_MAX); *(u32 *)(out_buf + pos) = @@ -2066,13 +2068,13 @@ havoc_stage: if (rand_below(afl, 2)) { - u32 pos = rand_below(afl, temp_len - 3); + u32 pos = rand_below_datalen(afl, temp_len - 3); *(u32 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); } else { - u32 pos = rand_below(afl, temp_len - 3); + u32 pos = rand_below_datalen(afl, temp_len - 3); u32 num = 1 + rand_below(afl, ARITH_MAX); *(u32 *)(out_buf + pos) = @@ -2088,7 +2090,8 @@ havoc_stage: why not. We use XOR with 1-255 to eliminate the possibility of a no-op. */ - out_buf[rand_below(afl, temp_len)] ^= 1 + rand_below(afl, 255); + out_buf[rand_below_datalen(afl, temp_len)] ^= + 1 + rand_below(afl, 255); break; case 11 ... 12: { @@ -2105,7 +2108,7 @@ havoc_stage: del_len = choose_block_len(afl, temp_len - 1); - del_from = rand_below(afl, temp_len - del_len + 1); + del_from = rand_below_datalen(afl, temp_len - del_len + 1); memmove(out_buf + del_from, out_buf + del_from + del_len, temp_len - del_from - del_len); @@ -2129,7 +2132,7 @@ havoc_stage: if (actually_clone) { clone_len = choose_block_len(afl, temp_len); - clone_from = rand_below(afl, temp_len - clone_len + 1); + clone_from = rand_below_datalen(afl, temp_len - clone_len + 1); } else { @@ -2138,7 +2141,7 @@ havoc_stage: } - clone_to = rand_below(afl, temp_len); + clone_to = rand_below_datalen(afl, temp_len); new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len); @@ -2156,8 +2159,9 @@ havoc_stage: } else { memset(new_buf + clone_to, - rand_below(afl, 2) ? rand_below(afl, 256) - : out_buf[rand_below(afl, temp_len)], + rand_below(afl, 2) + ? rand_below(afl, 256) + : out_buf[rand_below_datalen(afl, temp_len)], clone_len); } @@ -2186,8 +2190,8 @@ havoc_stage: copy_len = choose_block_len(afl, temp_len - 1); - copy_from = rand_below(afl, temp_len - copy_len + 1); - copy_to = rand_below(afl, temp_len - copy_len + 1); + copy_from = rand_below_datalen(afl, temp_len - copy_len + 1); + copy_to = rand_below_datalen(afl, temp_len - copy_len + 1); if (rand_below(afl, 4)) { @@ -2200,8 +2204,9 @@ havoc_stage: } else { memset(out_buf + copy_to, - rand_below(afl, 2) ? rand_below(afl, 256) - : out_buf[rand_below(afl, temp_len)], + rand_below(afl, 2) + ? 
rand_below(afl, 256) + : out_buf[rand_below_datalen(afl, temp_len)], copy_len); } @@ -2233,7 +2238,7 @@ havoc_stage: if (extra_len > temp_len) { break; } - insert_at = rand_below(afl, temp_len - extra_len + 1); + insert_at = rand_below_datalen(afl, temp_len - extra_len + 1); memcpy(out_buf + insert_at, afl->a_extras[use_extra].data, extra_len); @@ -2247,7 +2252,7 @@ havoc_stage: if (extra_len > temp_len) { break; } - insert_at = rand_below(afl, temp_len - extra_len + 1); + insert_at = rand_below_datalen(afl, temp_len - extra_len + 1); memcpy(out_buf + insert_at, afl->extras[use_extra].data, extra_len); @@ -2258,7 +2263,7 @@ havoc_stage: } else { // case 16 u32 use_extra, extra_len, - insert_at = rand_below(afl, temp_len + 1); + insert_at = rand_below_datalen(afl, temp_len + 1); u8 *ptr; /* Insert an extra. Do the same dice-rolling stuff as for the @@ -2362,8 +2367,8 @@ havoc_stage: copy_len = choose_block_len(afl, new_len - 1); if (copy_len > temp_len) copy_len = temp_len; - copy_from = rand_below(afl, new_len - copy_len + 1); - copy_to = rand_below(afl, temp_len - copy_len + 1); + copy_from = rand_below_datalen(afl, new_len - copy_len + 1); + copy_to = rand_below_datalen(afl, temp_len - copy_len + 1); memmove(out_buf + copy_to, new_buf + copy_from, copy_len); @@ -2372,9 +2377,9 @@ havoc_stage: u32 clone_from, clone_to, clone_len; clone_len = choose_block_len(afl, new_len); - clone_from = rand_below(afl, new_len - clone_len + 1); + clone_from = rand_below_datalen(afl, new_len - clone_len + 1); - clone_to = rand_below(afl, temp_len); + clone_to = rand_below_datalen(afl, temp_len); u8 *temp_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len); @@ -2523,7 +2528,7 @@ retry_splicing: /* Split somewhere between the first and last differing byte. */ - split_at = f_diff + rand_below(afl, l_diff - f_diff); + split_at = f_diff + rand_below_datalen(afl, l_diff - f_diff); /* Do the thing. */ -- cgit 1.4.1 From e2434cf8c6db86e1e7b67cb3b73e417c2a7fd3bd Mon Sep 17 00:00:00 2001 From: van Hauser Date: Thu, 6 Aug 2020 23:27:50 +0200 Subject: remove datalen in havoc --- examples/aflpp_driver/aflpp_driver.c | 35 ++++++++++------- llvm_mode/afl-llvm-rt.o.c | 2 +- src/afl-fuzz-one.c | 73 +++++++++++++++++------------------- 3 files changed, 56 insertions(+), 54 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/examples/aflpp_driver/aflpp_driver.c b/examples/aflpp_driver/aflpp_driver.c index 6ec37cda..90f9cf99 100644 --- a/examples/aflpp_driver/aflpp_driver.c +++ b/examples/aflpp_driver/aflpp_driver.c @@ -66,7 +66,7 @@ If 1, close stdout at startup. If 2 close stderr; if 3 close both. #endif #ifndef MAP_FIXED_NOREPLACE -#define MAP_FIXED_NOREPLACE 0x100000 + #define MAP_FIXED_NOREPLACE 0x100000 #endif #define MAX_DUMMY_SIZE 256000 @@ -106,10 +106,10 @@ If 1, close stdout at startup. If 2 close stderr; if 3 close both. #error "Support for your platform has not been implemented" #endif -int __afl_sharedmem_fuzzing = 1; -extern unsigned int * __afl_fuzz_len; -extern unsigned char *__afl_fuzz_ptr; -extern unsigned char *__afl_area_ptr; +int __afl_sharedmem_fuzzing = 1; +extern unsigned int * __afl_fuzz_len; +extern unsigned char * __afl_fuzz_ptr; +extern unsigned char * __afl_area_ptr; extern struct cmp_map *__afl_cmp_map; // libFuzzer interface is thin, so we don't include any libFuzzer headers. 
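
The rand_below_datalen() helper that the first patch above switches the havoc offsets to is only referenced from src/afl-fuzz-one.c; its definition lives outside this file and is not part of this listing. As a minimal sketch of the stated idea ("prefer low offset values"), assuming only the existing rand_below(afl, limit) API and not claiming to match the real implementation, one way to bias draws toward small offsets is to keep the smaller of two uniform draws:

static inline u32 rand_below_datalen_sketch(afl_state_t *afl, u32 limit) {

  /* Hypothetical helper: the minimum of two uniform draws still covers the
     whole [0, limit) range but is linearly skewed toward low values
     (density 2 * (1 - x / limit) / limit), so low data offsets get mutated
     more often than high ones. */
  u32 a = rand_below(afl, limit);
  u32 b = rand_below(afl, limit);
  return a < b ? a : b;

}

Note that the second patch ("remove datalen in havoc") reverts all of these havoc call sites back to plain rand_below(), so the biased variant does not survive in this file.
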
@@ -249,17 +249,21 @@ static int ExecuteFilesOnyByOne(int argc, char **argv) { } __attribute__((constructor(10))) void __afl_protect(void) { - __afl_area_ptr = (unsigned char*) mmap((void *)0x10000, MAX_DUMMY_SIZE, PROT_READ | PROT_WRITE, - MAP_FIXED_NOREPLACE | MAP_SHARED | MAP_ANONYMOUS, -1, 0); + + __afl_area_ptr = (unsigned char *)mmap( + (void *)0x10000, MAX_DUMMY_SIZE, PROT_READ | PROT_WRITE, + MAP_FIXED_NOREPLACE | MAP_SHARED | MAP_ANONYMOUS, -1, 0); if ((uint64_t)__afl_area_ptr == -1) - __afl_area_ptr = (unsigned char*) mmap((void *)0x10000, MAX_DUMMY_SIZE, PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, -1, 0); + __afl_area_ptr = (unsigned char *)mmap((void *)0x10000, MAX_DUMMY_SIZE, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); if ((uint64_t)__afl_area_ptr == -1) - __afl_area_ptr = (unsigned char*) mmap(NULL, MAX_DUMMY_SIZE, PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, -1, 0); - __afl_cmp_map = (struct cmp_map *) __afl_area_ptr; -} + __afl_area_ptr = + (unsigned char *)mmap(NULL, MAX_DUMMY_SIZE, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + __afl_cmp_map = (struct cmp_map *)__afl_area_ptr; +} int main(int argc, char **argv) { @@ -272,7 +276,8 @@ int main(int argc, char **argv) { " %s INPUT_FILE1 [INPUT_FILE2 ... ]\n" "To fuzz with afl-fuzz execute this:\n" " afl-fuzz [afl-flags] -- %s [-N]\n" - "afl-fuzz will run N iterations before re-spawning the process (default: 1000)\n" + "afl-fuzz will run N iterations before re-spawning the process (default: " + "1000)\n" "======================================================\n", argv[0], argv[0]); @@ -280,9 +285,11 @@ int main(int argc, char **argv) { maybe_duplicate_stderr(); maybe_close_fd_mask(); if (LLVMFuzzerInitialize) { + fprintf(stderr, "Running LLVMFuzzerInitialize ...\n"); LLVMFuzzerInitialize(&argc, &argv); fprintf(stderr, "continue...\n"); + } // Do any other expensive one-time initialization here. diff --git a/llvm_mode/afl-llvm-rt.o.c b/llvm_mode/afl-llvm-rt.o.c index d67862f8..0d498de7 100644 --- a/llvm_mode/afl-llvm-rt.o.c +++ b/llvm_mode/afl-llvm-rt.o.c @@ -101,7 +101,7 @@ __thread u32 __afl_cmp_counter; int __afl_sharedmem_fuzzing __attribute__((weak)); -struct cmp_map *__afl_cmp_map = (struct cmp_map *) __afl_area_initial; +struct cmp_map *__afl_cmp_map = (struct cmp_map *)__afl_area_initial; /* Running in persistent mode? */ diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 77bce7d0..1f0bf30e 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1921,14 +1921,14 @@ havoc_stage: /* Flip a single bit somewhere. Spooky! */ - FLIP_BIT(out_buf, rand_below_datalen(afl, temp_len << 3)); + FLIP_BIT(out_buf, rand_below(afl, temp_len << 3)); break; case 1: /* Set byte to interesting value. 
*/ - out_buf[rand_below_datalen(afl, temp_len)] = + out_buf[rand_below(afl, temp_len)] = interesting_8[rand_below(afl, sizeof(interesting_8))]; break; @@ -1940,12 +1940,12 @@ havoc_stage: if (rand_below(afl, 2)) { - *(u16 *)(out_buf + rand_below_datalen(afl, temp_len - 1)) = + *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) = interesting_16[rand_below(afl, sizeof(interesting_16) >> 1)]; } else { - *(u16 *)(out_buf + rand_below_datalen(afl, temp_len - 1)) = SWAP16( + *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) = SWAP16( interesting_16[rand_below(afl, sizeof(interesting_16) >> 1)]); } @@ -1960,12 +1960,12 @@ havoc_stage: if (rand_below(afl, 2)) { - *(u32 *)(out_buf + rand_below_datalen(afl, temp_len - 3)) = + *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) = interesting_32[rand_below(afl, sizeof(interesting_32) >> 2)]; } else { - *(u32 *)(out_buf + rand_below_datalen(afl, temp_len - 3)) = SWAP32( + *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) = SWAP32( interesting_32[rand_below(afl, sizeof(interesting_32) >> 2)]); } @@ -1976,16 +1976,14 @@ havoc_stage: /* Randomly subtract from byte. */ - out_buf[rand_below_datalen(afl, temp_len)] -= - 1 + rand_below(afl, ARITH_MAX); + out_buf[rand_below(afl, temp_len)] -= 1 + rand_below(afl, ARITH_MAX); break; case 5: /* Randomly add to byte. */ - out_buf[rand_below_datalen(afl, temp_len)] += - 1 + rand_below(afl, ARITH_MAX); + out_buf[rand_below(afl, temp_len)] += 1 + rand_below(afl, ARITH_MAX); break; case 6: @@ -1996,13 +1994,13 @@ havoc_stage: if (rand_below(afl, 2)) { - u32 pos = rand_below_datalen(afl, temp_len - 1); + u32 pos = rand_below(afl, temp_len - 1); *(u16 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); } else { - u32 pos = rand_below_datalen(afl, temp_len - 1); + u32 pos = rand_below(afl, temp_len - 1); u16 num = 1 + rand_below(afl, ARITH_MAX); *(u16 *)(out_buf + pos) = @@ -2020,13 +2018,13 @@ havoc_stage: if (rand_below(afl, 2)) { - u32 pos = rand_below_datalen(afl, temp_len - 1); + u32 pos = rand_below(afl, temp_len - 1); *(u16 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); } else { - u32 pos = rand_below_datalen(afl, temp_len - 1); + u32 pos = rand_below(afl, temp_len - 1); u16 num = 1 + rand_below(afl, ARITH_MAX); *(u16 *)(out_buf + pos) = @@ -2044,13 +2042,13 @@ havoc_stage: if (rand_below(afl, 2)) { - u32 pos = rand_below_datalen(afl, temp_len - 3); + u32 pos = rand_below(afl, temp_len - 3); *(u32 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); } else { - u32 pos = rand_below_datalen(afl, temp_len - 3); + u32 pos = rand_below(afl, temp_len - 3); u32 num = 1 + rand_below(afl, ARITH_MAX); *(u32 *)(out_buf + pos) = @@ -2068,13 +2066,13 @@ havoc_stage: if (rand_below(afl, 2)) { - u32 pos = rand_below_datalen(afl, temp_len - 3); + u32 pos = rand_below(afl, temp_len - 3); *(u32 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); } else { - u32 pos = rand_below_datalen(afl, temp_len - 3); + u32 pos = rand_below(afl, temp_len - 3); u32 num = 1 + rand_below(afl, ARITH_MAX); *(u32 *)(out_buf + pos) = @@ -2090,8 +2088,7 @@ havoc_stage: why not. We use XOR with 1-255 to eliminate the possibility of a no-op. */ - out_buf[rand_below_datalen(afl, temp_len)] ^= - 1 + rand_below(afl, 255); + out_buf[rand_below(afl, temp_len)] ^= 1 + rand_below(afl, 255); break; case 11 ... 
12: { @@ -2108,7 +2105,7 @@ havoc_stage: del_len = choose_block_len(afl, temp_len - 1); - del_from = rand_below_datalen(afl, temp_len - del_len + 1); + del_from = rand_below(afl, temp_len - del_len + 1); memmove(out_buf + del_from, out_buf + del_from + del_len, temp_len - del_from - del_len); @@ -2132,7 +2129,7 @@ havoc_stage: if (actually_clone) { clone_len = choose_block_len(afl, temp_len); - clone_from = rand_below_datalen(afl, temp_len - clone_len + 1); + clone_from = rand_below(afl, temp_len - clone_len + 1); } else { @@ -2141,7 +2138,7 @@ havoc_stage: } - clone_to = rand_below_datalen(afl, temp_len); + clone_to = rand_below(afl, temp_len); new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len); @@ -2159,9 +2156,8 @@ havoc_stage: } else { memset(new_buf + clone_to, - rand_below(afl, 2) - ? rand_below(afl, 256) - : out_buf[rand_below_datalen(afl, temp_len)], + rand_below(afl, 2) ? rand_below(afl, 256) + : out_buf[rand_below(afl, temp_len)], clone_len); } @@ -2190,8 +2186,8 @@ havoc_stage: copy_len = choose_block_len(afl, temp_len - 1); - copy_from = rand_below_datalen(afl, temp_len - copy_len + 1); - copy_to = rand_below_datalen(afl, temp_len - copy_len + 1); + copy_from = rand_below(afl, temp_len - copy_len + 1); + copy_to = rand_below(afl, temp_len - copy_len + 1); if (rand_below(afl, 4)) { @@ -2204,9 +2200,8 @@ havoc_stage: } else { memset(out_buf + copy_to, - rand_below(afl, 2) - ? rand_below(afl, 256) - : out_buf[rand_below_datalen(afl, temp_len)], + rand_below(afl, 2) ? rand_below(afl, 256) + : out_buf[rand_below(afl, temp_len)], copy_len); } @@ -2238,7 +2233,7 @@ havoc_stage: if (extra_len > temp_len) { break; } - insert_at = rand_below_datalen(afl, temp_len - extra_len + 1); + insert_at = rand_below(afl, temp_len - extra_len + 1); memcpy(out_buf + insert_at, afl->a_extras[use_extra].data, extra_len); @@ -2252,7 +2247,7 @@ havoc_stage: if (extra_len > temp_len) { break; } - insert_at = rand_below_datalen(afl, temp_len - extra_len + 1); + insert_at = rand_below(afl, temp_len - extra_len + 1); memcpy(out_buf + insert_at, afl->extras[use_extra].data, extra_len); @@ -2263,7 +2258,7 @@ havoc_stage: } else { // case 16 u32 use_extra, extra_len, - insert_at = rand_below_datalen(afl, temp_len + 1); + insert_at = rand_below(afl, temp_len + 1); u8 *ptr; /* Insert an extra. Do the same dice-rolling stuff as for the @@ -2367,8 +2362,8 @@ havoc_stage: copy_len = choose_block_len(afl, new_len - 1); if (copy_len > temp_len) copy_len = temp_len; - copy_from = rand_below_datalen(afl, new_len - copy_len + 1); - copy_to = rand_below_datalen(afl, temp_len - copy_len + 1); + copy_from = rand_below(afl, new_len - copy_len + 1); + copy_to = rand_below(afl, temp_len - copy_len + 1); memmove(out_buf + copy_to, new_buf + copy_from, copy_len); @@ -2377,9 +2372,9 @@ havoc_stage: u32 clone_from, clone_to, clone_len; clone_len = choose_block_len(afl, new_len); - clone_from = rand_below_datalen(afl, new_len - clone_len + 1); + clone_from = rand_below(afl, new_len - clone_len + 1); - clone_to = rand_below_datalen(afl, temp_len); + clone_to = rand_below(afl, temp_len); u8 *temp_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len); @@ -2528,7 +2523,7 @@ retry_splicing: /* Split somewhere between the first and last differing byte. */ - split_at = f_diff + rand_below_datalen(afl, l_diff - f_diff); + split_at = f_diff + rand_below(afl, l_diff - f_diff); /* Do the thing. 
*/ -- cgit 1.4.1 From 22d3a5e90abd58c6a4bb68bf1b3f7ece8283f5bb Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Fri, 7 Aug 2020 16:55:58 +0200 Subject: enabled Wextra, fixed bugs --- GNUmakefile | 2 +- include/debug.h | 4 +- src/afl-analyze.c | 5 ++- src/afl-as.c | 4 +- src/afl-forkserver.c | 6 ++- src/afl-fuzz-extras.c | 8 ++-- src/afl-fuzz-init.c | 11 ++++-- src/afl-fuzz-one.c | 99 ++++++++++++++++++++++++++----------------------- src/afl-fuzz-python.c | 7 ++++ src/afl-fuzz-redqueen.c | 14 +++---- src/afl-fuzz-run.c | 4 +- src/afl-fuzz-stats.c | 6 +-- src/afl-fuzz.c | 6 +-- src/afl-showmap.c | 6 +-- src/afl-tmin.c | 17 +++++---- 15 files changed, 110 insertions(+), 89 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/GNUmakefile b/GNUmakefile index 679ccc82..4a0fcdb6 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -98,7 +98,7 @@ ifneq "$(shell uname -m)" "x86_64" endif CFLAGS ?= -O3 -funroll-loops $(CFLAGS_OPT) -override CFLAGS += -Wall -g -Wno-pointer-sign \ +override CFLAGS += -Wall -Wextra -Werror -g -Wno-pointer-sign \ -I include/ -DAFL_PATH=\"$(HELPER_PATH)\" \ -DBIN_PATH=\"$(BIN_PATH)\" -DDOC_PATH=\"$(DOC_PATH)\" diff --git a/include/debug.h b/include/debug.h index d1bd971b..79b05c5f 100644 --- a/include/debug.h +++ b/include/debug.h @@ -281,7 +281,7 @@ #define ck_write(fd, buf, len, fn) \ do { \ \ - u32 _len = (len); \ + s32 _len = (s32)(len); \ s32 _res = write(fd, buf, _len); \ if (_res != _len) RPFATAL(_res, "Short write to %s", fn); \ \ @@ -290,7 +290,7 @@ #define ck_read(fd, buf, len, fn) \ do { \ \ - u32 _len = (len); \ + s32 _len = (s32)(len); \ s32 _res = read(fd, buf, _len); \ if (_res != _len) RPFATAL(_res, "Short read from %s", fn); \ \ diff --git a/src/afl-analyze.c b/src/afl-analyze.c index e6dd0fca..7c1c269a 100644 --- a/src/afl-analyze.c +++ b/src/afl-analyze.c @@ -384,7 +384,7 @@ static void show_legend(void) { /* Interpret and report a pattern in the input file. */ -static void dump_hex(u8 *buf, u32 len, u8 *b_data) { +static void dump_hex(u32 len, u8 *b_data) { u32 i; @@ -678,7 +678,7 @@ static void analyze(char **argv) { } - dump_hex(in_data, in_len, b_data); + dump_hex(in_len, b_data); SAYF("\n"); @@ -700,6 +700,7 @@ static void analyze(char **argv) { static void handle_stop_sig(int sig) { + (void)sig; stop_soon = 1; if (child_pid > 0) { kill(child_pid, SIGKILL); } diff --git a/src/afl-as.c b/src/afl-as.c index f16d6060..4ba4cc71 100644 --- a/src/afl-as.c +++ b/src/afl-as.c @@ -136,7 +136,7 @@ static void edit_params(int argc, char **argv) { as_params[argc] = 0; - for (i = 1; i < argc - 1; i++) { + for (i = 1; (s32)i < argc - 1; i++) { if (!strcmp(argv[i], "--64")) { @@ -591,7 +591,7 @@ int main(int argc, char **argv) { rand_seed = tv.tv_sec ^ tv.tv_usec ^ getpid(); // in fast systems where pids can repeat in the same seconds we need this - for (i = 1; i < argc; i++) + for (i = 1; (s32)i < argc; i++) for (j = 0; j < strlen(argv[i]); j++) rand_seed += argv[i][j]; diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c index 47493eba..15935ab0 100644 --- a/src/afl-forkserver.c +++ b/src/afl-forkserver.c @@ -145,6 +145,10 @@ restart_select: if (likely(sret > 0)) { restart_read: + if (*stop_soon_p) { + // Early return - the user wants to quit. 
+ return 0; + } len_read = read(fd, (u8 *)buf, 4); if (likely(len_read == 4)) { // for speed we put this first @@ -691,7 +695,7 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv, } offset = 0; - while (offset < status && (u8)dict[offset] + offset < status) { + while (offset < (u32)status && (u8)dict[offset] + offset < (u32)status) { fsrv->function_ptr(fsrv->function_opt, dict + offset + 1, (u8)dict[offset]); diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c index 12771cd7..097871c8 100644 --- a/src/afl-fuzz-extras.c +++ b/src/afl-fuzz-extras.c @@ -115,7 +115,7 @@ void load_extras_file(afl_state_t *afl, u8 *fname, u32 *min_len, u32 *max_len, if (*lptr == '@') { ++lptr; - if (atoi(lptr) > dict_level) { continue; } + if (atoi(lptr) > (s32)dict_level) { continue; } while (isdigit(*lptr)) { ++lptr; @@ -402,7 +402,7 @@ void maybe_add_auto(void *afl_tmp, u8 *mem, u32 len) { while (i--) { - if (*((u32 *)mem) == interesting_32[i] || + if (*((u32 *)mem) == (u32)interesting_32[i] || *((u32 *)mem) == SWAP32(interesting_32[i])) { return; @@ -480,7 +480,7 @@ sort_a_extras: /* Then, sort the top USE_AUTO_EXTRAS entries by size. */ - qsort(afl->a_extras, MIN(USE_AUTO_EXTRAS, afl->a_extras_cnt), + qsort(afl->a_extras, MIN((u32)USE_AUTO_EXTRAS, afl->a_extras_cnt), sizeof(struct extra_data), compare_extras_len); } @@ -494,7 +494,7 @@ void save_auto(afl_state_t *afl) { if (!afl->auto_changed) { return; } afl->auto_changed = 0; - for (i = 0; i < MIN(USE_AUTO_EXTRAS, afl->a_extras_cnt); ++i) { + for (i = 0; i < MIN((u32)USE_AUTO_EXTRAS, afl->a_extras_cnt); ++i) { u8 *fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", afl->out_dir, i); diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 2c17ffbb..1abdcede 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -110,7 +110,7 @@ void bind_to_free_cpu(afl_state_t *afl) { u8 cpu_used[4096] = {0}; u8 lockfile[PATH_MAX] = ""; - u32 i; + s32 i; if (afl->afl_env.afl_no_affinity) { @@ -509,7 +509,7 @@ void read_foreign_testcases(afl_state_t *afl, int first) { afl->stage_cur = 0; afl->stage_max = 0; - for (i = 0; i < nl_cnt; ++i) { + for (i = 0; i < (u32)nl_cnt; ++i) { struct stat st; @@ -667,7 +667,7 @@ void read_testcases(afl_state_t *afl) { } - for (i = 0; i < nl_cnt; ++i) { + for (i = 0; i < (u32)nl_cnt; ++i) { struct stat st; @@ -2147,7 +2147,7 @@ void get_core_count(afl_state_t *afl) { WARNF("System under apparent load, performance may be spotty."); - } else if (cur_runnable + 1 <= afl->cpu_core_count) { + } else if ((s64)cur_runnable + 1 <= (s64)afl->cpu_core_count) { OKF("Try parallel jobs - see %s/parallel_fuzzing.md.", doc_path); @@ -2201,6 +2201,7 @@ void fix_up_sync(afl_state_t *afl) { static void handle_resize(int sig) { + (void)sig; afl_states_clear_screen(); } @@ -2252,6 +2253,7 @@ void check_asan_opts(void) { static void handle_stop_sig(int sig) { + (void)sig; afl_states_stop(); } @@ -2260,6 +2262,7 @@ static void handle_stop_sig(int sig) { static void handle_skipreq(int sig) { + (void)sig; afl_states_request_skip(); } diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 1f0bf30e..9d09f6af 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -77,7 +77,7 @@ static int select_algorithm(afl_state_t *afl) { static u32 choose_block_len(afl_state_t *afl, u32 limit) { u32 min_value, max_value; - u32 rlim = MIN(afl->queue_cycle, 3); + u32 rlim = MIN(afl->queue_cycle, (u32)3); if (unlikely(!afl->run_over10m)) { rlim = 1; } @@ -292,7 +292,7 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 
check_le) { /* See if two-byte insertions over old_val could give us new_val. */ - for (i = 0; i < blen - 1; ++i) { + for (i = 0; (s32)i < blen - 1; ++i) { for (j = 0; j < sizeof(interesting_16) / 2; ++j) { @@ -372,7 +372,9 @@ static void locate_diffs(u8 *ptr1, u8 *ptr2, u32 len, s32 *first, s32 *last) { u8 fuzz_one_original(afl_state_t *afl) { - s32 len, fd, temp_len, i, j; + s32 len, fd, temp_len; + u32 j; + u32 i; u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt = 0, prev_cksum; u32 splice_cycle = 0, perf_score = 100, orig_perf, eff_cnt = 1; @@ -862,7 +864,7 @@ u8 fuzz_one_original(afl_state_t *afl) { whole thing as worth fuzzing, since we wouldn't be saving much time anyway. */ - if (eff_cnt != EFF_ALEN(len) && + if (eff_cnt != (u32)EFF_ALEN(len) && eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { memset(eff_map, 1, EFF_ALEN(len)); @@ -893,7 +895,7 @@ u8 fuzz_one_original(afl_state_t *afl) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 1; ++i) { + for (i = 0; (s32)i < len - 1; ++i) { /* Let's consult the effector map... */ @@ -931,7 +933,7 @@ u8 fuzz_one_original(afl_state_t *afl) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 3; ++i) { + for (i = 0; (s32)i < len - 3; ++i) { /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && @@ -977,7 +979,7 @@ skip_bitflip: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len; ++i) { + for (i = 0; i < (u32)len; ++i) { u8 orig = out_buf[i]; @@ -1051,7 +1053,7 @@ skip_bitflip: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 1; ++i) { + for (i = 0; i < (u32)len - 1; ++i) { u16 orig = *(u16 *)(out_buf + i); @@ -1161,7 +1163,7 @@ skip_bitflip: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 3; ++i) { + for (i = 0; i < (u32)len - 3; ++i) { u32 orig = *(u32 *)(out_buf + i); @@ -1202,7 +1204,7 @@ skip_bitflip: } - if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { + if ((orig & 0xffff) < (u32)j && !could_be_bitflip(r2)) { afl->stage_cur_val = -j; *(u32 *)(out_buf + i) = orig - j; @@ -1234,7 +1236,7 @@ skip_bitflip: } - if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { + if ((SWAP32(orig) & 0xffff) < (u32)j && !could_be_bitflip(r4)) { afl->stage_cur_val = -j; *(u32 *)(out_buf + i) = SWAP32(SWAP32(orig) - j); @@ -1276,7 +1278,7 @@ skip_arith: /* Setting 8-bit integers. */ - for (i = 0; i < len; ++i) { + for (i = 0; i < (u32)len; ++i) { u8 orig = out_buf[i]; @@ -1291,7 +1293,7 @@ skip_arith: afl->stage_cur_byte = i; - for (j = 0; j < sizeof(interesting_8); ++j) { + for (j = 0; j < (u32)sizeof(interesting_8); ++j) { /* Skip if the value could be a product of bitflips or arithmetics. 
*/ @@ -1331,7 +1333,7 @@ skip_arith: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 1; ++i) { + for (i = 0; (s32)i < len - 1; ++i) { u16 orig = *(u16 *)(out_buf + i); @@ -1409,7 +1411,7 @@ skip_arith: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 3; i++) { + for (i = 0; (s32)i < len - 3; i++) { u32 orig = *(u32 *)(out_buf + i); @@ -1496,7 +1498,7 @@ skip_interest: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len; ++i) { + for (i = 0; i < (u32)len; ++i) { u32 last_len = 0; @@ -1556,7 +1558,7 @@ skip_interest: ex_tmp = ck_maybe_grow(BUF_PARAMS(ex), len + MAX_DICT_FILE); - for (i = 0; i <= len; ++i) { + for (i = 0; i <= (u32)len; ++i) { afl->stage_cur_byte = i; @@ -1602,19 +1604,20 @@ skip_user_extras: afl->stage_name = "auto extras (over)"; afl->stage_short = "ext_AO"; afl->stage_cur = 0; - afl->stage_max = MIN(afl->a_extras_cnt, USE_AUTO_EXTRAS) * len; + afl->stage_max = MIN(afl->a_extras_cnt, (u32)USE_AUTO_EXTRAS) * len; afl->stage_val_type = STAGE_VAL_NONE; orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len; ++i) { + for (i = 0; i < (u32)len; ++i) { u32 last_len = 0; afl->stage_cur_byte = i; - for (j = 0; j < MIN(afl->a_extras_cnt, USE_AUTO_EXTRAS); ++j) { + u32 min_extra_len = MIN(afl->a_extras_cnt, (u32)USE_AUTO_EXTRAS); + for (j = 0; j < min_extra_len; ++j) { /* See the comment in the earlier code; extras are sorted by size. */ @@ -2231,7 +2234,7 @@ havoc_stage: u32 extra_len = afl->a_extras[use_extra].len; u32 insert_at; - if (extra_len > temp_len) { break; } + if ((s32)extra_len > temp_len) { break; } insert_at = rand_below(afl, temp_len - extra_len + 1); memcpy(out_buf + insert_at, afl->a_extras[use_extra].data, @@ -2245,7 +2248,7 @@ havoc_stage: u32 extra_len = afl->extras[use_extra].len; u32 insert_at; - if (extra_len > temp_len) { break; } + if ((s32)extra_len > temp_len) { break; } insert_at = rand_below(afl, temp_len - extra_len + 1); memcpy(out_buf + insert_at, afl->extras[use_extra].data, @@ -2360,7 +2363,7 @@ havoc_stage: u32 copy_from, copy_to, copy_len; copy_len = choose_block_len(afl, new_len - 1); - if (copy_len > temp_len) copy_len = temp_len; + if ((s32)copy_len > temp_len) copy_len = temp_len; copy_from = rand_below(afl, new_len - copy_len + 1); copy_to = rand_below(afl, temp_len - copy_len + 1); @@ -2517,7 +2520,7 @@ retry_splicing: the last differing byte. Bail out if the difference is just a single byte or so. 
*/ - locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); + locate_diffs(in_buf, new_buf, MIN(len, (s64)target->len), &f_diff, &l_diff); if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { goto retry_splicing; } @@ -2587,7 +2590,9 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { } - s32 len, fd, temp_len, i, j; + s32 len, fd, temp_len; + u32 i; + u32 j; u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt = 0, cur_ms_lv, prev_cksum; u32 splice_cycle = 0, perf_score = 100, orig_perf, eff_cnt = 1; @@ -2761,9 +2766,9 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { cur_ms_lv = get_cur_time(); if (!(afl->key_puppet == 0 && - ((cur_ms_lv - afl->last_path_time < afl->limit_time_puppet) || + ((cur_ms_lv - afl->last_path_time < (u32)afl->limit_time_puppet) || (afl->last_crash_time != 0 && - cur_ms_lv - afl->last_crash_time < afl->limit_time_puppet) || + cur_ms_lv - afl->last_crash_time < (u32)afl->limit_time_puppet) || afl->last_path_time == 0))) { afl->key_puppet = 1; @@ -3058,7 +3063,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { whole thing as worth fuzzing, since we wouldn't be saving much time anyway. */ - if (eff_cnt != EFF_ALEN(len) && + if (eff_cnt != (u32)EFF_ALEN(len) && eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { memset(eff_map, 1, EFF_ALEN(len)); @@ -3089,7 +3094,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 1; ++i) { + for (i = 0; (s32)i < len - 1; ++i) { /* Let's consult the effector map... */ @@ -3127,7 +3132,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 3; ++i) { + for (i = 0; (s32)i < len - 3; ++i) { /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && @@ -3173,7 +3178,7 @@ skip_bitflip: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len; ++i) { + for (i = 0; i < (u32)len; ++i) { u8 orig = out_buf[i]; @@ -3247,7 +3252,7 @@ skip_bitflip: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 1; ++i) { + for (i = 0; (s32)i < len - 1; ++i) { u16 orig = *(u16 *)(out_buf + i); @@ -3357,7 +3362,7 @@ skip_bitflip: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 3; ++i) { + for (i = 0; (s32)i < len - 3; ++i) { u32 orig = *(u32 *)(out_buf + i); @@ -3472,7 +3477,7 @@ skip_arith: /* Setting 8-bit integers. 
*/ - for (i = 0; i < len; ++i) { + for (i = 0; i < (u32)len; ++i) { u8 orig = out_buf[i]; @@ -3527,7 +3532,7 @@ skip_arith: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 1; ++i) { + for (i = 0; (s32)i < len - 1; ++i) { u16 orig = *(u16 *)(out_buf + i); @@ -3605,7 +3610,7 @@ skip_arith: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 3; ++i) { + for (i = 0; (s32)i < len - 3; ++i) { u32 orig = *(u32 *)(out_buf + i); @@ -3692,7 +3697,7 @@ skip_interest: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len; ++i) { + for (i = 0; i < (u32)len; ++i) { u32 last_len = 0; @@ -3752,7 +3757,7 @@ skip_interest: ex_tmp = ck_maybe_grow(BUF_PARAMS(ex), len + MAX_DICT_FILE); - for (i = 0; i <= len; ++i) { + for (i = 0; i <= (u32)len; ++i) { afl->stage_cur_byte = i; @@ -3798,23 +3803,23 @@ skip_user_extras: afl->stage_name = "auto extras (over)"; afl->stage_short = "ext_AO"; afl->stage_cur = 0; - afl->stage_max = MIN(afl->a_extras_cnt, USE_AUTO_EXTRAS) * len; + afl->stage_max = MIN(afl->a_extras_cnt, (u32)USE_AUTO_EXTRAS) * len; afl->stage_val_type = STAGE_VAL_NONE; orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len; ++i) { + for (i = 0; i < (u32)len; ++i) { u32 last_len = 0; afl->stage_cur_byte = i; - for (j = 0; j < MIN(afl->a_extras_cnt, USE_AUTO_EXTRAS); ++j) { + for (j = 0; j < MIN(afl->a_extras_cnt, (u32)USE_AUTO_EXTRAS); ++j) { /* See the comment in the earlier code; extras are sorted by size. */ - if (afl->a_extras[j].len > len - i || + if ((s32)(afl->a_extras[j].len) > (s32)(len - i) || !memcmp(afl->a_extras[j].data, out_buf + i, afl->a_extras[j].len) || !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, afl->a_extras[j].len))) { @@ -4276,7 +4281,7 @@ pacemaker_fuzzing: u32 use_extra = rand_below(afl, afl->a_extras_cnt); u32 extra_len = afl->a_extras[use_extra].len; - if (extra_len > temp_len) break; + if (extra_len > (u32)temp_len) break; u32 insert_at = rand_below(afl, temp_len - extra_len + 1); memcpy(out_buf + insert_at, afl->a_extras[use_extra].data, @@ -4289,7 +4294,7 @@ pacemaker_fuzzing: u32 use_extra = rand_below(afl, afl->extras_cnt); u32 extra_len = afl->extras[use_extra].len; - if (extra_len > temp_len) break; + if (extra_len > (u32)temp_len) break; u32 insert_at = rand_below(afl, temp_len - extra_len + 1); memcpy(out_buf + insert_at, afl->extras[use_extra].data, @@ -4449,7 +4454,7 @@ pacemaker_fuzzing: retry_splicing_puppet: - if (afl->use_splicing && splice_cycle++ < afl->SPLICE_CYCLES_puppet && + if (afl->use_splicing && splice_cycle++ < (u32)afl->SPLICE_CYCLES_puppet && afl->queued_paths > 1 && afl->queue_cur->len > 1) { struct queue_entry *target; @@ -4519,7 +4524,7 @@ pacemaker_fuzzing: the last differing byte. Bail out if the difference is just a single byte or so. 
*/ - locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); + locate_diffs(in_buf, new_buf, MIN(len, (s32)target->len), &f_diff, &l_diff); if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { @@ -4551,7 +4556,7 @@ pacemaker_fuzzing: abandon_entry: abandon_entry_puppet: - if (splice_cycle >= afl->SPLICE_CYCLES_puppet) { + if ((s64)splice_cycle >= afl->SPLICE_CYCLES_puppet) { afl->SPLICE_CYCLES_puppet = (rand_below( diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c index 2044c97d..a077469e 100644 --- a/src/afl-fuzz-python.c +++ b/src/afl-fuzz-python.c @@ -30,6 +30,9 @@ static void *unsupported(afl_state_t *afl, unsigned int seed) { + (void)afl; + (void)seed; + FATAL("Python Mutator cannot be called twice yet"); return NULL; @@ -111,6 +114,8 @@ static size_t fuzz_py(void *py_mutator, u8 *buf, size_t buf_size, u8 **out_buf, static py_mutator_t *init_py_module(afl_state_t *afl, u8 *module_name) { + (void)afl; + if (!module_name) { return NULL; } py_mutator_t *py = calloc(1, sizeof(py_mutator_t)); @@ -247,6 +252,8 @@ void finalize_py_module(void *py_mutator) { static void init_py(afl_state_t *afl, py_mutator_t *py_mutator, unsigned int seed) { + (void)afl; + PyObject *py_args, *py_value; /* Provide the init function a seed for the Python RNG */ diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index cb4c78df..1cfe8802 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -352,7 +352,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, } - if (use_num && num == pattern) { + if (use_num && (u64) num == pattern) { size_t old_len = endptr - buf_8; size_t num_len = snprintf(NULL, 0, "%lld", num); @@ -659,12 +659,12 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) { } -static u8 rtn_extend_encoding(afl_state_t *afl, struct cmp_header *h, +static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl, u8 *o_pattern, u32 idx, u8 *orig_buf, u8 *buf, u32 len, u8 *status) { u32 i; - u32 its_len = MIN(32, len - idx); + u32 its_len = MIN((u32)32, len - idx); u8 save[32]; memcpy(save, &buf[idx], its_len); @@ -728,7 +728,7 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) { for (idx = 0; idx < len && fails < 8; ++idx) { - if (unlikely(rtn_extend_encoding(afl, h, o->v0, o->v1, orig_o->v0, idx, + if (unlikely(rtn_extend_encoding(afl, o->v0, o->v1, orig_o->v0, idx, orig_buf, buf, len, &status))) { return 1; @@ -745,7 +745,7 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) { } - if (unlikely(rtn_extend_encoding(afl, h, o->v1, o->v0, orig_o->v1, idx, + if (unlikely(rtn_extend_encoding(afl, o->v1, o->v0, orig_o->v1, idx, orig_buf, buf, len, &status))) { return 1; @@ -853,12 +853,12 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len, if (afl->shm.cmp_map->headers[k].type == CMP_TYPE_INS) { - afl->stage_max += MIN((u32)afl->shm.cmp_map->headers[k].hits, CMP_MAP_H); + afl->stage_max += MIN((u32)(afl->shm.cmp_map->headers[k].hits), (u32)CMP_MAP_H); } else { afl->stage_max += - MIN((u32)afl->shm.cmp_map->headers[k].hits, CMP_MAP_RTN_H); + MIN((u32)(afl->shm.cmp_map->headers[k].hits), (u32)CMP_MAP_RTN_H); } diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index ed4a1081..8d652155 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -733,12 +733,12 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) { len_p2 = next_pow2(q->len); - remove_len = MAX(len_p2 / TRIM_START_STEPS, TRIM_MIN_BYTES); + 
remove_len = MAX(len_p2 / TRIM_START_STEPS, (u32)TRIM_MIN_BYTES); /* Continue until the number of steps gets too high or the stepover gets too small. */ - while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, TRIM_MIN_BYTES)) { + while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, (u32)TRIM_MIN_BYTES)) { u32 remove_pos = remove_len; diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index 7b30b5ea..aeb290bd 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -890,12 +890,12 @@ void show_stats(afl_state_t *afl) { if (afl->cpu_aff >= 0) { SAYF("%s" cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, spacing, - MIN(afl->cpu_aff, 999), cpu_color, MIN(cur_utilization, 999)); + MIN(afl->cpu_aff, 999), cpu_color, MIN(cur_utilization, (u32)999)); } else { SAYF("%s" cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, spacing, cpu_color, - MIN(cur_utilization, 999)); + MIN(cur_utilization, (u32)999)); } @@ -1081,7 +1081,7 @@ void show_init_stats(afl_state_t *afl) { if (afl->non_instrumented_mode && !(afl->afl_env.afl_hang_tmout)) { - afl->hang_tmout = MIN(EXEC_TIMEOUT, afl->fsrv.exec_tmout * 2 + 100); + afl->hang_tmout = MIN((u32)EXEC_TIMEOUT, afl->fsrv.exec_tmout * 2 + 100); } diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index c73a76c6..031c4049 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -79,7 +79,7 @@ static void at_exit() { /* Display usage hints. */ -static void usage(afl_state_t *afl, u8 *argv0, int more_help) { +static void usage(u8 *argv0, int more_help) { SAYF( "\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n" @@ -677,7 +677,7 @@ int main(int argc, char **argv_orig, char **envp) { u64 limit_time_puppet2 = afl->limit_time_puppet * 60 * 1000; - if (limit_time_puppet2 < afl->limit_time_puppet) { + if ((s32)limit_time_puppet2 < afl->limit_time_puppet) { FATAL("limit_time overflow"); @@ -811,7 +811,7 @@ int main(int argc, char **argv_orig, char **envp) { if (optind == argc || !afl->in_dir || !afl->out_dir || show_help) { - usage(afl, argv[0], show_help); + usage(argv[0], show_help); } diff --git a/src/afl-showmap.c b/src/afl-showmap.c index 71e975a1..4962006e 100644 --- a/src/afl-showmap.c +++ b/src/afl-showmap.c @@ -256,8 +256,7 @@ static u32 write_results_to_file(afl_forkserver_t *fsrv, u8 *outfile) { /* Execute target application. */ -static void showmap_run_target_forkserver(afl_forkserver_t *fsrv, char **argv, - u8 *mem, u32 len) { +static void showmap_run_target_forkserver(afl_forkserver_t *fsrv, u8 *mem, u32 len) { afl_fsrv_write_to_testcase(fsrv, mem, len); @@ -444,6 +443,7 @@ static void showmap_run_target(afl_forkserver_t *fsrv, char **argv) { static void handle_stop_sig(int sig) { + (void)sig; stop_soon = 1; afl_fsrv_killall(); @@ -1016,7 +1016,7 @@ int main(int argc, char **argv_orig, char **envp) { } - showmap_run_target_forkserver(fsrv, use_argv, in_data, in_len); + showmap_run_target_forkserver(fsrv, in_data, in_len); ck_free(in_data); tcnt = write_results_to_file(fsrv, outfile); diff --git a/src/afl-tmin.c b/src/afl-tmin.c index 68fcdd14..b50d8597 100644 --- a/src/afl-tmin.c +++ b/src/afl-tmin.c @@ -250,7 +250,7 @@ static s32 write_to_file(u8 *path, u8 *mem, u32 len) { /* Execute target application. Returns 0 if the changes are a dud, or 1 if they should be kept. 
*/ -static u8 tmin_run_target(afl_forkserver_t *fsrv, char **argv, u8 *mem, u32 len, +static u8 tmin_run_target(afl_forkserver_t *fsrv, u8 *mem, u32 len, u8 first_run) { afl_fsrv_write_to_testcase(fsrv, mem, len); @@ -342,7 +342,7 @@ static u8 tmin_run_target(afl_forkserver_t *fsrv, char **argv, u8 *mem, u32 len, /* Actually minimize! */ -static void minimize(afl_forkserver_t *fsrv, char **argv) { +static void minimize(afl_forkserver_t *fsrv) { static u32 alpha_map[256]; @@ -380,7 +380,7 @@ static void minimize(afl_forkserver_t *fsrv, char **argv) { memset(tmp_buf + set_pos, '0', use_len); u8 res; - res = tmin_run_target(fsrv, argv, tmp_buf, in_len, 0); + res = tmin_run_target(fsrv, tmp_buf, in_len, 0); if (res) { @@ -453,7 +453,7 @@ next_del_blksize: /* Tail */ memcpy(tmp_buf + del_pos, in_data + del_pos + del_len, tail_len); - res = tmin_run_target(fsrv, argv, tmp_buf, del_pos + tail_len, 0); + res = tmin_run_target(fsrv, tmp_buf, del_pos + tail_len, 0); if (res) { @@ -524,7 +524,7 @@ next_del_blksize: } - res = tmin_run_target(fsrv, argv, tmp_buf, in_len, 0); + res = tmin_run_target(fsrv, tmp_buf, in_len, 0); if (res) { @@ -560,7 +560,7 @@ next_del_blksize: if (orig == '0') { continue; } tmp_buf[i] = '0'; - res = tmin_run_target(fsrv, argv, tmp_buf, in_len, 0); + res = tmin_run_target(fsrv, tmp_buf, in_len, 0); if (res) { @@ -623,6 +623,7 @@ finalize_all: static void handle_stop_sig(int sig) { + (void)sig; stop_soon = 1; afl_fsrv_killall(); @@ -1131,7 +1132,7 @@ int main(int argc, char **argv_orig, char **envp) { ACTF("Performing dry run (mem limit = %llu MB, timeout = %u ms%s)...", fsrv->mem_limit, fsrv->exec_tmout, edges_only ? ", edges only" : ""); - tmin_run_target(fsrv, use_argv, in_data, in_len, 1); + tmin_run_target(fsrv, in_data, in_len, 1); if (hang_mode && !fsrv->last_run_timed_out) { @@ -1169,7 +1170,7 @@ int main(int argc, char **argv_orig, char **envp) { } - minimize(fsrv, use_argv); + minimize(fsrv); ACTF("Writing output to '%s'...", output_file); -- cgit 1.4.1 From 699ebaa8e210e0d72ad7e3ac6f4a580cfbe37eae Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Fri, 7 Aug 2020 17:32:41 +0200 Subject: code format --- include/debug.h | 4 ++-- src/afl-forkserver.c | 6 +++++- src/afl-fuzz-one.c | 6 ++++-- src/afl-fuzz-redqueen.c | 11 ++++++----- src/afl-showmap.c | 3 ++- 5 files changed, 19 insertions(+), 11 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/debug.h b/include/debug.h index 79b05c5f..ae2946f0 100644 --- a/include/debug.h +++ b/include/debug.h @@ -281,7 +281,7 @@ #define ck_write(fd, buf, len, fn) \ do { \ \ - s32 _len = (s32)(len); \ + s32 _len = (s32)(len); \ s32 _res = write(fd, buf, _len); \ if (_res != _len) RPFATAL(_res, "Short write to %s", fn); \ \ @@ -290,7 +290,7 @@ #define ck_read(fd, buf, len, fn) \ do { \ \ - s32 _len = (s32)(len); \ + s32 _len = (s32)(len); \ s32 _res = read(fd, buf, _len); \ if (_res != _len) RPFATAL(_res, "Short read from %s", fn); \ \ diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c index 15935ab0..752641d7 100644 --- a/src/afl-forkserver.c +++ b/src/afl-forkserver.c @@ -146,9 +146,12 @@ restart_select: restart_read: if (*stop_soon_p) { + // Early return - the user wants to quit. 
return 0; + } + len_read = read(fd, (u8 *)buf, 4); if (likely(len_read == 4)) { // for speed we put this first @@ -695,7 +698,8 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv, } offset = 0; - while (offset < (u32)status && (u8)dict[offset] + offset < (u32)status) { + while (offset < (u32)status && + (u8)dict[offset] + offset < (u32)status) { fsrv->function_ptr(fsrv->function_opt, dict + offset + 1, (u8)dict[offset]); diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 9d09f6af..74e09ee3 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -4454,7 +4454,8 @@ pacemaker_fuzzing: retry_splicing_puppet: - if (afl->use_splicing && splice_cycle++ < (u32)afl->SPLICE_CYCLES_puppet && + if (afl->use_splicing && + splice_cycle++ < (u32)afl->SPLICE_CYCLES_puppet && afl->queued_paths > 1 && afl->queue_cur->len > 1) { struct queue_entry *target; @@ -4524,7 +4525,8 @@ pacemaker_fuzzing: the last differing byte. Bail out if the difference is just a single byte or so. */ - locate_diffs(in_buf, new_buf, MIN(len, (s32)target->len), &f_diff, &l_diff); + locate_diffs(in_buf, new_buf, MIN(len, (s32)target->len), &f_diff, + &l_diff); if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index 1cfe8802..9716be95 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -352,7 +352,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, } - if (use_num && (u64) num == pattern) { + if (use_num && (u64)num == pattern) { size_t old_len = endptr - buf_8; size_t num_len = snprintf(NULL, 0, "%lld", num); @@ -659,9 +659,9 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) { } -static u8 rtn_extend_encoding(afl_state_t *afl, - u8 *pattern, u8 *repl, u8 *o_pattern, u32 idx, - u8 *orig_buf, u8 *buf, u32 len, u8 *status) { +static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl, + u8 *o_pattern, u32 idx, u8 *orig_buf, u8 *buf, + u32 len, u8 *status) { u32 i; u32 its_len = MIN((u32)32, len - idx); @@ -853,7 +853,8 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len, if (afl->shm.cmp_map->headers[k].type == CMP_TYPE_INS) { - afl->stage_max += MIN((u32)(afl->shm.cmp_map->headers[k].hits), (u32)CMP_MAP_H); + afl->stage_max += + MIN((u32)(afl->shm.cmp_map->headers[k].hits), (u32)CMP_MAP_H); } else { diff --git a/src/afl-showmap.c b/src/afl-showmap.c index 4962006e..0aa116e5 100644 --- a/src/afl-showmap.c +++ b/src/afl-showmap.c @@ -256,7 +256,8 @@ static u32 write_results_to_file(afl_forkserver_t *fsrv, u8 *outfile) { /* Execute target application. 
*/ -static void showmap_run_target_forkserver(afl_forkserver_t *fsrv, u8 *mem, u32 len) { +static void showmap_run_target_forkserver(afl_forkserver_t *fsrv, u8 *mem, + u32 len) { afl_fsrv_write_to_testcase(fsrv, mem, len); -- cgit 1.4.1 From 9a1d526ed408cbd7d681be15c5512032f7632887 Mon Sep 17 00:00:00 2001 From: murx- Date: Sat, 8 Aug 2020 18:34:54 +0200 Subject: Add support for specific custom mutator name --- include/afl-fuzz.h | 1 + src/afl-fuzz-mutators.c | 1 + src/afl-fuzz-one.c | 2 ++ 3 files changed, 4 insertions(+) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index bb1bb314..51ab0e85 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -656,6 +656,7 @@ typedef struct afl_state { struct custom_mutator { const char *name; + char * name_short; void * dh; u8 * post_process_buf; size_t post_process_size; diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c index b30106a0..0fa646f9 100644 --- a/src/afl-fuzz-mutators.c +++ b/src/afl-fuzz-mutators.c @@ -142,6 +142,7 @@ struct custom_mutator *load_custom_mutator(afl_state_t *afl, const char *fn) { struct custom_mutator *mutator = ck_alloc(sizeof(struct custom_mutator)); mutator->name = fn; + mutator->name_short = strrchr(fn, '/') + 1; ACTF("Loading custom mutator library from '%s'...", fn); dh = dlopen(fn, RTLD_NOW); diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 74e09ee3..452c5298 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1683,6 +1683,8 @@ custom_mutator_stage: has_custom_fuzz = true; + afl->stage_short = el->name_short; + for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) { -- cgit 1.4.1 From e99d7e973001adea65c68113b08792144d6aa5c8 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Sun, 9 Aug 2020 20:24:56 +0200 Subject: integration in fuzz_one --- include/afl-fuzz.h | 10 +++-- src/afl-fuzz-one.c | 111 ++++++++++++++++++++++++++++++++++++++++++++++------- src/afl-fuzz-run.c | 25 ++++++++++++ src/afl-fuzz.c | 12 ++++++ 4 files changed, 142 insertions(+), 16 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 19807880..88392867 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -430,7 +430,9 @@ typedef struct afl_state { *in_bitmap, /* Input bitmap */ *file_extension, /* File extension */ *orig_cmdline, /* Original command line */ - *infoexec; /* Command to execute on a new crash */ + *infoexec, /* Command to execute on a new crash */ + *taint_input_file, /* fuzz_input_one input file */ + *taint_src, *taint_map; u32 hang_tmout; /* Timeout used for hang det (ms) */ @@ -441,7 +443,8 @@ typedef struct afl_state { custom_only, /* Custom mutator only mode */ python_only, /* Python-only mode */ is_main_node, /* if this is the main node */ - is_secondary_node; /* if this is a secondary instance */ + is_secondary_node, /* if this is a secondary instance */ + taint_needs_splode; /* explode fuzz input */ u32 stats_update_freq; /* Stats update frequency (execs) */ @@ -502,7 +505,8 @@ typedef struct afl_state { useless_at_start, /* Number of useless starting paths */ var_byte_count, /* Bitmap bytes with var behavior */ current_entry, /* Current queue entry ID */ - havoc_div; /* Cycle count divisor for havoc */ + havoc_div, /* Cycle count divisor for havoc */ + taint_len; u64 total_crashes, /* Total number of crashes */ unique_crashes, /* Crashes with unique signatures */ diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 74e09ee3..ec7c4772 100644 --- a/src/afl-fuzz-one.c 
+++ b/src/afl-fuzz-one.c @@ -458,27 +458,102 @@ u8 fuzz_one_original(afl_state_t *afl) { } - /* Map the test case into memory. */ + if (unlikely(afl->fsrv.taint_mode && (afl->queue_cycle % 3))) { - fd = open(afl->queue_cur->fname, O_RDONLY); + if (unlikely(afl->queue_cur->cal_failed)) goto abandon_entry; - if (unlikely(fd < 0)) { + u32 dst = 0, i; - PFATAL("Unable to open '%s'", afl->queue_cur->fname); + fd = open(afl->queue_cur->fname, O_RDONLY); + afl->taint_src = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + if (fd < 0 || (size_t)afl->taint_src == -1) + FATAL("unable to open '%s'", afl->queue_cur->fname); + close(fd); - } + switch (afl->queue_cycle % 3) { - len = afl->queue_cur->len; + case 0: // do nothing, but cannot happen -> else + break; - orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + case 1: // fuzz only tainted bytes + if (!afl->queue_cur->taint_bytes_all) goto abandon_entry; + afl->taint_needs_splode = 1; - if (unlikely(orig_in == MAP_FAILED)) { + fd = open(afl->taint_input_file, O_RDONLY); + len = afl->taint_len = afl->queue_cur->taint_bytes_all; + orig_in = in_buf = + mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + if (fd < 0 || (size_t)in_buf == -1) + FATAL("unable to open '%s'", afl->taint_input_file); + close(fd); - PFATAL("Unable to mmap '%s' with len %d", afl->queue_cur->fname, len); + fd = open(afl->queue_cur->fname_taint, O_RDWR); + afl->taint_map = mmap(0, afl->queue_cur->len, PROT_READ | PROT_WRITE, + MAP_PRIVATE, fd, 0); + if (fd < 0 || (size_t)in_buf == -1) + FATAL("unable to open '%s'", afl->queue_cur->fname_taint); + close(fd); - } + for (i = 0; i < afl->queue_cur->len && dst < len; i++) + if (afl->taint_map[i]) in_buf[dst++] = afl->taint_src[i]; - close(fd); + break; + + case 2: // fuzz only newly tainted bytes + if (!afl->queue_cur->taint_bytes_new) goto abandon_entry; + afl->taint_needs_splode = 1; + + fd = open(afl->taint_input_file, O_RDONLY); + len = afl->taint_len = afl->queue_cur->taint_bytes_new; + orig_in = in_buf = + mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + if (fd < 0 || (size_t)in_buf == -1) + FATAL("unable to open '%s'", afl->taint_input_file); + close(fd); + + u8 *fn = alloc_printf("%s.new", afl->queue_cur->fname_taint); + fd = open(fn, O_RDWR); + afl->taint_map = mmap(0, afl->queue_cur->len, PROT_READ | PROT_WRITE, + MAP_PRIVATE, fd, 0); + if (fd < 0 || (size_t)in_buf == -1) FATAL("unable to open '%s'", fn); + close(fd); + ck_free(fn); + + for (i = 0; i < afl->queue_cur->len && dst < len; i++) + if (afl->taint_map[i]) in_buf[dst++] = afl->taint_src[i]; + + break; + + } + + goto havoc_stage; // we let the normal cycles do deterministic mode - if + + } else { + + /* Map the test case into memory. 
*/ + afl->taint_needs_splode = 0; + + fd = open(afl->queue_cur->fname, O_RDONLY); + + if (unlikely(fd < 0)) { + + PFATAL("Unable to open '%s'", afl->queue_cur->fname); + + } + + len = afl->queue_cur->len; + + orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + + if (unlikely(orig_in == MAP_FAILED)) { + + PFATAL("Unable to mmap '%s' with len %d", afl->queue_cur->fname, len); + + } + + close(fd); + + } /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every single byte anyway, so it wouldn't give us any performance or memory usage @@ -527,7 +602,7 @@ u8 fuzz_one_original(afl_state_t *afl) { ************/ if (!afl->non_instrumented_mode && !afl->queue_cur->trim_done && - !afl->disable_trim) { + !afl->disable_trim && !afl->taint_needs_splode) { u8 res = trim_case(afl, afl->queue_cur, in_buf); @@ -2568,7 +2643,17 @@ abandon_entry: ++afl->queue_cur->fuzz_level; - munmap(orig_in, afl->queue_cur->len); + if (afl->taint_needs_splode) { + + munmap(afl->taint_src, afl->queue_cur->len); + munmap(orig_in, afl->taint_len); + munmap(afl->taint_map, afl->queue_cur->len); + + } else { + + munmap(orig_in, afl->queue_cur->len); + + } return ret_val; diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index ddbd5524..31db4d7c 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -863,6 +863,8 @@ abort_trimming: } +#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size + /* Write a modified test case, run program, process results. Handle error conditions, returning 1 if it's time to bail out. This is a helper function for fuzz_one(). */ @@ -871,6 +873,27 @@ u8 common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u32 len) { u8 fault; + if (unlikely(afl->taint_needs_splode)) { + + s32 new_len = afl->queue_cur->len + len - afl->taint_len; + if (new_len < 4) new_len = 4; + u8 *new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), new_len); + + u32 i, taint = 0; + for (i = 0; i < new_len; i++) { + + if (afl->taint_map[i] || i > afl->queue_cur->len) + new_buf[i] = out_buf[taint++]; + else + new_buf[i] = afl->taint_src[i]; + + } + + out_buf = new_buf; + len = new_len; + + } + write_to_testcase(afl, out_buf, len); fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout); @@ -918,3 +941,5 @@ u8 common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u32 len) { } +#undef BUF_PARAMS + diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 6f143db7..70a99dec 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -1260,12 +1260,15 @@ int main(int argc, char **argv_orig, char **envp) { if (afl->fsrv.taint_mode) { ACTF("Spawning qemu_taint forkserver"); + u8 *disable = getenv("AFL_DISABLE_LLVM_INSTRUMENTATION"); setenv("AFL_DISABLE_LLVM_INSTRUMENTATION", "1", 0); + afl_fsrv_init_dup(&afl->taint_fsrv, &afl->fsrv); afl->taint_fsrv.qemu_mode = 2; afl->taint_fsrv.taint_mode = 1; afl->taint_fsrv.trace_bits = afl->fsrv.trace_bits; + ck_free(afl->taint_fsrv.target_path); afl->argv_taint = ck_alloc(sizeof(char *) * (argc + 4 - optind)); afl->taint_fsrv.target_path = @@ -1290,7 +1293,16 @@ int main(int argc, char **argv_orig, char **envp) { setenv("AFL_TAINT_INPUT", afl->fsrv.out_file, 1); afl_fsrv_start(&afl->taint_fsrv, afl->argv_taint, &afl->stop_soon, afl->afl_env.afl_debug_child_output); + + afl->taint_input_file = alloc_printf("%s/taint/.input", afl->out_dir); + int fd = open(afl->taint_input_file, O_CREAT | O_TRUNC | O_RDWR, 0644); + if (fd < 0) + FATAL("Cannot create taint inpu file '%s'", afl->taint_input_file); + lseek(fd, MAX_FILE, SEEK_SET); + ck_write(fd, "\0", 1, 
afl->taint_input_file); + if (!disable) unsetenv("AFL_DISABLE_LLVM_INSTRUMENTATION"); + OKF("Taint forkserver successfully started"); } -- cgit 1.4.1 From ff40359a608f3c14c1025908a2810ca71fd502af Mon Sep 17 00:00:00 2001 From: van Hauser Date: Sun, 9 Aug 2020 21:09:07 +0200 Subject: fixes --- src/afl-fuzz-init.c | 8 +++++++- src/afl-fuzz-one.c | 28 ++++++++++++++++------------ src/afl-fuzz-queue.c | 10 +++++----- 3 files changed, 28 insertions(+), 18 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 350a3b4c..432e0649 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -961,7 +961,8 @@ void perform_dry_run(afl_state_t *afl) { } /* perform taint gathering on the input seed */ - perform_taint_run(afl, q, q->fname, use_mem, q->len); + if (afl->fsrv.taint_mode) + perform_taint_run(afl, q, q->fname, use_mem, q->len); q = q->next; @@ -1505,6 +1506,11 @@ static void handle_existing_out_dir(afl_state_t *afl) { fn = alloc_printf("%s/taint", afl->out_dir); mkdir(fn, 0755); // ignore errors + + u8 *fn2 = alloc_printf("%s/taint/.input", afl->out_dir); + unlink(fn2); // ignore errors + ck_free(fn2); + if (delete_files(fn, CASE_PREFIX)) { goto dir_cleanup_failed; } ck_free(fn); diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index ec7c4772..e75c2cec 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -458,26 +458,31 @@ u8 fuzz_one_original(afl_state_t *afl) { } - if (unlikely(afl->fsrv.taint_mode && (afl->queue_cycle % 3))) { + u32 tmp_val; + + if (unlikely(afl->fsrv.taint_mode && + (tmp_val = (afl->queue_cycle % 3)) != 1)) { if (unlikely(afl->queue_cur->cal_failed)) goto abandon_entry; + if (tmp_val == 1 && !afl->queue_cur->taint_bytes_all) goto abandon_entry; + if (tmp_val == 2 && !afl->queue_cur->taint_bytes_new) goto abandon_entry; u32 dst = 0, i; + temp_len = len = afl->queue_cur->len; fd = open(afl->queue_cur->fname, O_RDONLY); afl->taint_src = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); if (fd < 0 || (size_t)afl->taint_src == -1) FATAL("unable to open '%s'", afl->queue_cur->fname); close(fd); + afl->taint_needs_splode = 1; - switch (afl->queue_cycle % 3) { + switch (tmp_val) { - case 0: // do nothing, but cannot happen -> else + case 1: // do nothing, but cannot happen -> else break; - case 1: // fuzz only tainted bytes - if (!afl->queue_cur->taint_bytes_all) goto abandon_entry; - afl->taint_needs_splode = 1; + case 2: // fuzz only tainted bytes fd = open(afl->taint_input_file, O_RDONLY); len = afl->taint_len = afl->queue_cur->taint_bytes_all; @@ -499,9 +504,7 @@ u8 fuzz_one_original(afl_state_t *afl) { break; - case 2: // fuzz only newly tainted bytes - if (!afl->queue_cur->taint_bytes_new) goto abandon_entry; - afl->taint_needs_splode = 1; + case 0: // fuzz only newly tainted bytes fd = open(afl->taint_input_file, O_RDONLY); len = afl->taint_len = afl->queue_cur->taint_bytes_new; @@ -515,7 +518,8 @@ u8 fuzz_one_original(afl_state_t *afl) { fd = open(fn, O_RDWR); afl->taint_map = mmap(0, afl->queue_cur->len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - if (fd < 0 || (size_t)in_buf == -1) FATAL("unable to open '%s'", fn); + if (fd < 0 || (size_t)in_buf == -1) + FATAL("unable to open '%s' for %u bytes", fn, len); close(fd); ck_free(fn); @@ -526,8 +530,6 @@ u8 fuzz_one_original(afl_state_t *afl) { } - goto havoc_stage; // we let the normal cycles do deterministic mode - if - } else { /* Map the test case into memory. 
*/ @@ -653,6 +655,8 @@ u8 fuzz_one_original(afl_state_t *afl) { if it has gone through deterministic testing in earlier, resumed runs (passed_det). */ + if (afl->taint_needs_splode) goto havoc_stage; + if (likely(afl->queue_cur->passed_det) || likely(afl->skip_deterministic) || likely(perf_score < (afl->queue_cur->depth * 30 <= afl->havoc_max_mult * 100 diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 36ec0896..3ada9d98 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -118,6 +118,9 @@ void perform_taint_run(afl_state_t *afl, struct queue_entry *q, u8 *fname, if (q->fname_taint) { + u8 *save = ck_maybe_grow(BUF_PARAMS(out_scratch), afl->fsrv.map_size); + memcpy(save, afl->taint_fsrv.trace_bits, afl->fsrv.map_size); + afl->taint_fsrv.map_size = plen; // speed :) write_to_testcase(afl, mem, len); if (afl_fsrv_run_target(&afl->taint_fsrv, afl->fsrv.exec_tmout, @@ -214,6 +217,8 @@ void perform_taint_run(afl_state_t *afl, struct queue_entry *q, u8 *fname, } + memcpy(afl->taint_fsrv.trace_bits, save, afl->fsrv.map_size); + } if (!bytes) { @@ -227,11 +232,6 @@ void perform_taint_run(afl_state_t *afl, struct queue_entry *q, u8 *fname, } - } else { - - if (q->taint_bytes_all && !q->taint_bytes_new) - q->taint_bytes_new = q->taint_bytes_all; - } } -- cgit 1.4.1 From 4fc16b542e7839698f91dca46db52044fdaa9dd2 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Sun, 9 Aug 2020 21:32:15 +0200 Subject: havoc copy --- src/afl-fuzz-one.c | 762 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 761 insertions(+), 1 deletion(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index e75c2cec..5dc336dc 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -463,10 +463,14 @@ u8 fuzz_one_original(afl_state_t *afl) { if (unlikely(afl->fsrv.taint_mode && (tmp_val = (afl->queue_cycle % 3)) != 1)) { + ret_val = 0; + if (unlikely(afl->queue_cur->cal_failed)) goto abandon_entry; if (tmp_val == 1 && !afl->queue_cur->taint_bytes_all) goto abandon_entry; if (tmp_val == 2 && !afl->queue_cur->taint_bytes_new) goto abandon_entry; + ret_val = 1; + u32 dst = 0, i; temp_len = len = afl->queue_cur->len; @@ -655,7 +659,7 @@ u8 fuzz_one_original(afl_state_t *afl) { if it has gone through deterministic testing in earlier, resumed runs (passed_det). */ - if (afl->taint_needs_splode) goto havoc_stage; + if (afl->taint_needs_splode) goto taint_havoc_stage; if (likely(afl->queue_cur->passed_det) || likely(afl->skip_deterministic) || likely(perf_score < @@ -2628,6 +2632,762 @@ retry_splicing: ret_val = 0; + goto abandon_entry; + + /******************************* + * same RANDOM HAVOC for taint * + *******************************/ + +taint_havoc_stage: + + afl->stage_cur_byte = -1; + + /* The havoc stage mutation code is also invoked when splicing files; if the + splice_cycle variable is set, generate different descriptions and such. */ + + if (!splice_cycle) { + + afl->stage_name = "havoc"; + afl->stage_short = "havoc"; + afl->stage_max = (doing_det ? 
HAVOC_CYCLES_INIT : HAVOC_CYCLES) * + perf_score / afl->havoc_div / 100; + + } else { + + perf_score = orig_perf; + + snprintf(afl->stage_name_buf, STAGE_BUF_SIZE, "splice %u", splice_cycle); + afl->stage_name = afl->stage_name_buf; + afl->stage_short = "splice"; + afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100; + + } + + if (afl->stage_max < HAVOC_MIN) { afl->stage_max = HAVOC_MIN; } + + temp_len = len; + + orig_hit_cnt = afl->queued_paths + afl->unique_crashes; + + havoc_queued = afl->queued_paths; + + if (afl->custom_mutators_count) { + + LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, { + + if (el->stacked_custom && el->afl_custom_havoc_mutation_probability) { + + el->stacked_custom_prob = + el->afl_custom_havoc_mutation_probability(el->data); + if (el->stacked_custom_prob > 100) { + + FATAL( + "The probability returned by " + "afl_custom_havoc_mutation_propability " + "has to be in the range 0-100."); + + } + + } + + }); + + } + + /* We essentially just do several thousand runs (depending on perf_score) + where we take the input file and make random stacked tweaks. */ + + if (unlikely(afl->expand_havoc)) { + + /* add expensive havoc cases here, they are activated after a full + cycle without finds happened */ + + r_max = 16 + ((afl->extras_cnt + afl->a_extras_cnt) ? 2 : 0); + + } else { + + r_max = 15 + ((afl->extras_cnt + afl->a_extras_cnt) ? 2 : 0); + + } + + for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) { + + u32 use_stacking = 1 << (1 + rand_below(afl, HAVOC_STACK_POW2)); + + afl->stage_cur_val = use_stacking; + + for (i = 0; i < use_stacking; ++i) { + + if (afl->custom_mutators_count) { + + LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, { + + if (el->stacked_custom && + rand_below(afl, 100) < el->stacked_custom_prob) { + + u8 * custom_havoc_buf = NULL; + size_t new_len = el->afl_custom_havoc_mutation( + el->data, out_buf, temp_len, &custom_havoc_buf, MAX_FILE); + if (unlikely(!custom_havoc_buf)) { + + FATAL("Error in custom_havoc (return %zd)", new_len); + + } + + if (likely(new_len > 0 && custom_havoc_buf)) { + + temp_len = new_len; + if (out_buf != custom_havoc_buf) { + + ck_maybe_grow(BUF_PARAMS(out), temp_len); + memcpy(out_buf, custom_havoc_buf, temp_len); + + } + + } + + } + + }); + + } + + switch ((r = rand_below(afl, r_max))) { + + case 0: + + /* Flip a single bit somewhere. Spooky! */ + + FLIP_BIT(out_buf, rand_below(afl, temp_len << 3)); + break; + + case 1: + + /* Set byte to interesting value. */ + + out_buf[rand_below(afl, temp_len)] = + interesting_8[rand_below(afl, sizeof(interesting_8))]; + break; + + case 2: + + /* Set word to interesting value, randomly choosing endian. */ + + if (temp_len < 2) { break; } + + if (rand_below(afl, 2)) { + + *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) = + interesting_16[rand_below(afl, sizeof(interesting_16) >> 1)]; + + } else { + + *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) = SWAP16( + interesting_16[rand_below(afl, sizeof(interesting_16) >> 1)]); + + } + + break; + + case 3: + + /* Set dword to interesting value, randomly choosing endian. */ + + if (temp_len < 4) { break; } + + if (rand_below(afl, 2)) { + + *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) = + interesting_32[rand_below(afl, sizeof(interesting_32) >> 2)]; + + } else { + + *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) = SWAP32( + interesting_32[rand_below(afl, sizeof(interesting_32) >> 2)]); + + } + + break; + + case 4: + + /* Randomly subtract from byte. 
*/ + + out_buf[rand_below(afl, temp_len)] -= 1 + rand_below(afl, ARITH_MAX); + break; + + case 5: + + /* Randomly add to byte. */ + + out_buf[rand_below(afl, temp_len)] += 1 + rand_below(afl, ARITH_MAX); + break; + + case 6: + + /* Randomly subtract from word, random endian. */ + + if (temp_len < 2) { break; } + + if (rand_below(afl, 2)) { + + u32 pos = rand_below(afl, temp_len - 1); + + *(u16 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); + + } else { + + u32 pos = rand_below(afl, temp_len - 1); + u16 num = 1 + rand_below(afl, ARITH_MAX); + + *(u16 *)(out_buf + pos) = + SWAP16(SWAP16(*(u16 *)(out_buf + pos)) - num); + + } + + break; + + case 7: + + /* Randomly add to word, random endian. */ + + if (temp_len < 2) { break; } + + if (rand_below(afl, 2)) { + + u32 pos = rand_below(afl, temp_len - 1); + + *(u16 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); + + } else { + + u32 pos = rand_below(afl, temp_len - 1); + u16 num = 1 + rand_below(afl, ARITH_MAX); + + *(u16 *)(out_buf + pos) = + SWAP16(SWAP16(*(u16 *)(out_buf + pos)) + num); + + } + + break; + + case 8: + + /* Randomly subtract from dword, random endian. */ + + if (temp_len < 4) { break; } + + if (rand_below(afl, 2)) { + + u32 pos = rand_below(afl, temp_len - 3); + + *(u32 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); + + } else { + + u32 pos = rand_below(afl, temp_len - 3); + u32 num = 1 + rand_below(afl, ARITH_MAX); + + *(u32 *)(out_buf + pos) = + SWAP32(SWAP32(*(u32 *)(out_buf + pos)) - num); + + } + + break; + + case 9: + + /* Randomly add to dword, random endian. */ + + if (temp_len < 4) { break; } + + if (rand_below(afl, 2)) { + + u32 pos = rand_below(afl, temp_len - 3); + + *(u32 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); + + } else { + + u32 pos = rand_below(afl, temp_len - 3); + u32 num = 1 + rand_below(afl, ARITH_MAX); + + *(u32 *)(out_buf + pos) = + SWAP32(SWAP32(*(u32 *)(out_buf + pos)) + num); + + } + + break; + + case 10: + + /* Just set a random byte to a random value. Because, + why not. We use XOR with 1-255 to eliminate the + possibility of a no-op. */ + + out_buf[rand_below(afl, temp_len)] ^= 1 + rand_below(afl, 255); + break; + + case 11 ... 12: { + + /* Delete bytes. We're making this a bit more likely + than insertion (the next option) in hopes of keeping + files reasonably small. */ + + u32 del_from, del_len; + + if (temp_len < 2) { break; } + + /* Don't delete too much. */ + + del_len = choose_block_len(afl, temp_len - 1); + + del_from = rand_below(afl, temp_len - del_len + 1); + + memmove(out_buf + del_from, out_buf + del_from + del_len, + temp_len - del_from - del_len); + + temp_len -= del_len; + + break; + + } + + case 13: + + if (temp_len + HAVOC_BLK_XL < MAX_FILE) { + + /* Clone bytes (75%) or insert a block of constant bytes (25%). */ + + u8 actually_clone = rand_below(afl, 4); + u32 clone_from, clone_to, clone_len; + u8 *new_buf; + + if (actually_clone) { + + clone_len = choose_block_len(afl, temp_len); + clone_from = rand_below(afl, temp_len - clone_len + 1); + + } else { + + clone_len = choose_block_len(afl, HAVOC_BLK_XL); + clone_from = 0; + + } + + clone_to = rand_below(afl, temp_len); + + new_buf = + ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len); + + /* Head */ + + memcpy(new_buf, out_buf, clone_to); + + /* Inserted part */ + + if (actually_clone) { + + memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); + + } else { + + memset(new_buf + clone_to, + rand_below(afl, 2) ? 
rand_below(afl, 256) + : out_buf[rand_below(afl, temp_len)], + clone_len); + + } + + /* Tail */ + memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, + temp_len - clone_to); + + swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch)); + out_buf = new_buf; + new_buf = NULL; + temp_len += clone_len; + + } + + break; + + case 14: { + + /* Overwrite bytes with a randomly selected chunk (75%) or fixed + bytes (25%). */ + + u32 copy_from, copy_to, copy_len; + + if (temp_len < 2) { break; } + + copy_len = choose_block_len(afl, temp_len - 1); + + copy_from = rand_below(afl, temp_len - copy_len + 1); + copy_to = rand_below(afl, temp_len - copy_len + 1); + + if (rand_below(afl, 4)) { + + if (copy_from != copy_to) { + + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + + } + + } else { + + memset(out_buf + copy_to, + rand_below(afl, 2) ? rand_below(afl, 256) + : out_buf[rand_below(afl, temp_len)], + copy_len); + + } + + break; + + } + + default: + + if (likely(r <= 16 && (afl->extras_cnt || afl->a_extras_cnt))) { + + /* Values 15 and 16 can be selected only if there are any extras + present in the dictionaries. */ + + if (r == 15) { + + /* Overwrite bytes with an extra. */ + + if (!afl->extras_cnt || + (afl->a_extras_cnt && rand_below(afl, 2))) { + + /* No user-specified extras or odds in our favor. Let's use an + auto-detected one. */ + + u32 use_extra = rand_below(afl, afl->a_extras_cnt); + u32 extra_len = afl->a_extras[use_extra].len; + u32 insert_at; + + if ((s32)extra_len > temp_len) { break; } + + insert_at = rand_below(afl, temp_len - extra_len + 1); + memcpy(out_buf + insert_at, afl->a_extras[use_extra].data, + extra_len); + + } else { + + /* No auto extras or odds in our favor. Use the dictionary. */ + + u32 use_extra = rand_below(afl, afl->extras_cnt); + u32 extra_len = afl->extras[use_extra].len; + u32 insert_at; + + if ((s32)extra_len > temp_len) { break; } + + insert_at = rand_below(afl, temp_len - extra_len + 1); + memcpy(out_buf + insert_at, afl->extras[use_extra].data, + extra_len); + + } + + break; + + } else { // case 16 + + u32 use_extra, extra_len, + insert_at = rand_below(afl, temp_len + 1); + u8 *ptr; + + /* Insert an extra. Do the same dice-rolling stuff as for the + previous case. */ + + if (!afl->extras_cnt || + (afl->a_extras_cnt && rand_below(afl, 2))) { + + use_extra = rand_below(afl, afl->a_extras_cnt); + extra_len = afl->a_extras[use_extra].len; + ptr = afl->a_extras[use_extra].data; + + } else { + + use_extra = rand_below(afl, afl->extras_cnt); + extra_len = afl->extras[use_extra].len; + ptr = afl->extras[use_extra].data; + + } + + if (temp_len + extra_len >= MAX_FILE) { break; } + + out_buf = ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len); + + /* Tail */ + memmove(out_buf + insert_at + extra_len, out_buf + insert_at, + temp_len - insert_at); + + /* Inserted part */ + memcpy(out_buf + insert_at, ptr, extra_len); + + temp_len += extra_len; + + break; + + } + + } else { + + /* + switch (r) { + + case 15: // fall through + case 16: + case 17: {*/ + + /* Overwrite bytes with a randomly selected chunk from another + testcase or insert that chunk. */ + + if (afl->queued_paths < 4) break; + + /* Pick a random queue entry and seek to it. */ + + u32 tid; + do + tid = rand_below(afl, afl->queued_paths); + while (tid == afl->current_entry); + + struct queue_entry *target = afl->queue_buf[tid]; + + /* Make sure that the target has a reasonable length. 
*/ + + while (target && (target->len < 2 || target == afl->queue_cur)) + target = target->next; + + if (!target) break; + + /* Read the testcase into a new buffer. */ + + fd = open(target->fname, O_RDONLY); + + if (unlikely(fd < 0)) { + + PFATAL("Unable to open '%s'", target->fname); + + } + + u32 new_len = target->len; + u8 *new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), new_len); + + ck_read(fd, new_buf, new_len, target->fname); + + close(fd); + + u8 overwrite = 0; + if (temp_len >= 2 && rand_below(afl, 2)) + overwrite = 1; + else if (temp_len + HAVOC_BLK_XL >= MAX_FILE) { + + if (temp_len >= 2) + overwrite = 1; + else + break; + + } + + if (overwrite) { + + u32 copy_from, copy_to, copy_len; + + copy_len = choose_block_len(afl, new_len - 1); + if ((s32)copy_len > temp_len) copy_len = temp_len; + + copy_from = rand_below(afl, new_len - copy_len + 1); + copy_to = rand_below(afl, temp_len - copy_len + 1); + + memmove(out_buf + copy_to, new_buf + copy_from, copy_len); + + } else { + + u32 clone_from, clone_to, clone_len; + + clone_len = choose_block_len(afl, new_len); + clone_from = rand_below(afl, new_len - clone_len + 1); + + clone_to = rand_below(afl, temp_len); + + u8 *temp_buf = + ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len); + + /* Head */ + + memcpy(temp_buf, out_buf, clone_to); + + /* Inserted part */ + + memcpy(temp_buf + clone_to, new_buf + clone_from, clone_len); + + /* Tail */ + memcpy(temp_buf + clone_to + clone_len, out_buf + clone_to, + temp_len - clone_to); + + swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch)); + out_buf = temp_buf; + temp_len += clone_len; + + } + + break; + + } + + // end of default: + + } + + } + + if (common_fuzz_stuff(afl, out_buf, temp_len)) { goto abandon_entry; } + + /* out_buf might have been mangled a bit, so let's restore it to its + original size and shape. */ + + out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + temp_len = len; + memcpy(out_buf, in_buf, len); + + /* If we're finding new stuff, let's run for a bit longer, limits + permitting. */ + + if (afl->queued_paths != havoc_queued) { + + if (perf_score <= afl->havoc_max_mult * 100) { + + afl->stage_max *= 2; + perf_score *= 2; + + } + + havoc_queued = afl->queued_paths; + + } + + } + + new_hit_cnt = afl->queued_paths + afl->unique_crashes; + + if (!splice_cycle) { + + afl->stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; + afl->stage_cycles[STAGE_HAVOC] += afl->stage_max; + + } else { + + afl->stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; + afl->stage_cycles[STAGE_SPLICE] += afl->stage_max; + + } + +#ifndef IGNORE_FINDS + + /************ + * SPLICING * + ************/ + + /* This is a last-resort strategy triggered by a full round with no findings. + It takes the current input file, randomly selects another input, and + splices them together at some offset, then relies on the havoc + code to mutate that blob. */ + +taint_retry_splicing: + + if (afl->use_splicing && splice_cycle++ < SPLICE_CYCLES && + afl->queued_paths > 1 && afl->queue_cur->len > 1) { + + struct queue_entry *target; + u32 tid, split_at; + u8 * new_buf; + s32 f_diff, l_diff; + + /* First of all, if we've modified in_buf for havoc, let's clean that + up... */ + + if (in_buf != orig_in) { + + in_buf = orig_in; + len = afl->queue_cur->len; + + } + + /* Pick a random queue entry and seek to it. Don't splice with yourself. 
*/ + + do { + + tid = rand_below(afl, afl->queued_paths); + + } while (tid == afl->current_entry); + + afl->splicing_with = tid; + target = afl->queue_buf[tid]; + + /* Make sure that the target has a reasonable length. */ + + while (target && (target->len < 2 || target == afl->queue_cur)) { + + target = target->next; + ++afl->splicing_with; + + } + + if (!target) { goto taint_retry_splicing; } + + /* Read the testcase into a new buffer. */ + + fd = open(target->fname, O_RDONLY); + + if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); } + + new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), target->len); + + ck_read(fd, new_buf, target->len, target->fname); + + close(fd); + + /* Find a suitable splicing location, somewhere between the first and + the last differing byte. Bail out if the difference is just a single + byte or so. */ + + locate_diffs(in_buf, new_buf, MIN(len, (s64)target->len), &f_diff, &l_diff); + + if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { goto taint_retry_splicing; } + + /* Split somewhere between the first and last differing byte. */ + + split_at = f_diff + rand_below(afl, l_diff - f_diff); + + /* Do the thing. */ + + len = target->len; + memcpy(new_buf, in_buf, split_at); + swap_bufs(BUF_PARAMS(in), BUF_PARAMS(in_scratch)); + in_buf = new_buf; + + out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + memcpy(out_buf, in_buf, len); + + goto custom_mutator_stage; + /* ???: While integrating Python module, the author decided to jump to + python stage, but the reason behind this is not clear.*/ + // goto havoc_stage; + + } + +#endif /* !IGNORE_FINDS */ + + + + ret_val = 0; + + goto abandon_entry; + + /* we are through with this queue entry - for this iteration */ abandon_entry: -- cgit 1.4.1 From 558a82891abb7617eb51d49991a128fccdc9be6e Mon Sep 17 00:00:00 2001 From: van Hauser Date: Sun, 9 Aug 2020 22:02:44 +0200 Subject: finalize havoc --- src/afl-fuzz-one.c | 807 +++-------------------------------------------------- 1 file changed, 44 insertions(+), 763 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 5dc336dc..75381db8 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -489,7 +489,7 @@ u8 fuzz_one_original(afl_state_t *afl) { case 2: // fuzz only tainted bytes fd = open(afl->taint_input_file, O_RDONLY); - len = afl->taint_len = afl->queue_cur->taint_bytes_all; + temp_len = len = afl->taint_len = afl->queue_cur->taint_bytes_all; orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); if (fd < 0 || (size_t)in_buf == -1) @@ -511,7 +511,7 @@ u8 fuzz_one_original(afl_state_t *afl) { case 0: // fuzz only newly tainted bytes fd = open(afl->taint_input_file, O_RDONLY); - len = afl->taint_len = afl->queue_cur->taint_bytes_new; + temp_len = len = afl->taint_len = afl->queue_cur->taint_bytes_new; orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); if (fd < 0 || (size_t)in_buf == -1) @@ -659,7 +659,8 @@ u8 fuzz_one_original(afl_state_t *afl) { if it has gone through deterministic testing in earlier, resumed runs (passed_det). */ - if (afl->taint_needs_splode) goto taint_havoc_stage; + // custom mutators would not work, deterministic is done -> so havoc! 
+ if (afl->taint_needs_splode) goto havoc_stage; if (likely(afl->queue_cur->passed_det) || likely(afl->skip_deterministic) || likely(perf_score < @@ -2214,760 +2215,19 @@ havoc_stage: if (actually_clone) { - clone_len = choose_block_len(afl, temp_len); - clone_from = rand_below(afl, temp_len - clone_len + 1); + if (unlikely(afl->taint_needs_splode)) { - } else { - - clone_len = choose_block_len(afl, HAVOC_BLK_XL); - clone_from = 0; - - } - - clone_to = rand_below(afl, temp_len); - - new_buf = - ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len); - - /* Head */ - - memcpy(new_buf, out_buf, clone_to); - - /* Inserted part */ - - if (actually_clone) { - - memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); - - } else { - - memset(new_buf + clone_to, - rand_below(afl, 2) ? rand_below(afl, 256) - : out_buf[rand_below(afl, temp_len)], - clone_len); - - } - - /* Tail */ - memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, - temp_len - clone_to); - - swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch)); - out_buf = new_buf; - new_buf = NULL; - temp_len += clone_len; - - } - - break; - - case 14: { - - /* Overwrite bytes with a randomly selected chunk (75%) or fixed - bytes (25%). */ - - u32 copy_from, copy_to, copy_len; - - if (temp_len < 2) { break; } - - copy_len = choose_block_len(afl, temp_len - 1); - - copy_from = rand_below(afl, temp_len - copy_len + 1); - copy_to = rand_below(afl, temp_len - copy_len + 1); - - if (rand_below(afl, 4)) { - - if (copy_from != copy_to) { - - memmove(out_buf + copy_to, out_buf + copy_from, copy_len); - - } - - } else { - - memset(out_buf + copy_to, - rand_below(afl, 2) ? rand_below(afl, 256) - : out_buf[rand_below(afl, temp_len)], - copy_len); - - } - - break; - - } - - default: - - if (likely(r <= 16 && (afl->extras_cnt || afl->a_extras_cnt))) { - - /* Values 15 and 16 can be selected only if there are any extras - present in the dictionaries. */ - - if (r == 15) { - - /* Overwrite bytes with an extra. */ - - if (!afl->extras_cnt || - (afl->a_extras_cnt && rand_below(afl, 2))) { - - /* No user-specified extras or odds in our favor. Let's use an - auto-detected one. */ - - u32 use_extra = rand_below(afl, afl->a_extras_cnt); - u32 extra_len = afl->a_extras[use_extra].len; - u32 insert_at; - - if ((s32)extra_len > temp_len) { break; } - - insert_at = rand_below(afl, temp_len - extra_len + 1); - memcpy(out_buf + insert_at, afl->a_extras[use_extra].data, - extra_len); - - } else { - - /* No auto extras or odds in our favor. Use the dictionary. */ - - u32 use_extra = rand_below(afl, afl->extras_cnt); - u32 extra_len = afl->extras[use_extra].len; - u32 insert_at; - - if ((s32)extra_len > temp_len) { break; } - - insert_at = rand_below(afl, temp_len - extra_len + 1); - memcpy(out_buf + insert_at, afl->extras[use_extra].data, - extra_len); - - } - - break; - - } else { // case 16 - - u32 use_extra, extra_len, - insert_at = rand_below(afl, temp_len + 1); - u8 *ptr; - - /* Insert an extra. Do the same dice-rolling stuff as for the - previous case. 
*/ - - if (!afl->extras_cnt || - (afl->a_extras_cnt && rand_below(afl, 2))) { - - use_extra = rand_below(afl, afl->a_extras_cnt); - extra_len = afl->a_extras[use_extra].len; - ptr = afl->a_extras[use_extra].data; - - } else { - - use_extra = rand_below(afl, afl->extras_cnt); - extra_len = afl->extras[use_extra].len; - ptr = afl->extras[use_extra].data; - - } - - if (temp_len + extra_len >= MAX_FILE) { break; } - - out_buf = ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len); - - /* Tail */ - memmove(out_buf + insert_at + extra_len, out_buf + insert_at, - temp_len - insert_at); - - /* Inserted part */ - memcpy(out_buf + insert_at, ptr, extra_len); - - temp_len += extra_len; - - break; - - } - - } else { - - /* - switch (r) { - - case 15: // fall through - case 16: - case 17: {*/ - - /* Overwrite bytes with a randomly selected chunk from another - testcase or insert that chunk. */ - - if (afl->queued_paths < 4) break; - - /* Pick a random queue entry and seek to it. */ - - u32 tid; - do - tid = rand_below(afl, afl->queued_paths); - while (tid == afl->current_entry); - - struct queue_entry *target = afl->queue_buf[tid]; - - /* Make sure that the target has a reasonable length. */ - - while (target && (target->len < 2 || target == afl->queue_cur)) - target = target->next; - - if (!target) break; - - /* Read the testcase into a new buffer. */ - - fd = open(target->fname, O_RDONLY); - - if (unlikely(fd < 0)) { - - PFATAL("Unable to open '%s'", target->fname); - - } - - u32 new_len = target->len; - u8 *new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), new_len); - - ck_read(fd, new_buf, new_len, target->fname); - - close(fd); - - u8 overwrite = 0; - if (temp_len >= 2 && rand_below(afl, 2)) - overwrite = 1; - else if (temp_len + HAVOC_BLK_XL >= MAX_FILE) { - - if (temp_len >= 2) - overwrite = 1; - else - break; - - } - - if (overwrite) { - - u32 copy_from, copy_to, copy_len; - - copy_len = choose_block_len(afl, new_len - 1); - if ((s32)copy_len > temp_len) copy_len = temp_len; - - copy_from = rand_below(afl, new_len - copy_len + 1); - copy_to = rand_below(afl, temp_len - copy_len + 1); - - memmove(out_buf + copy_to, new_buf + copy_from, copy_len); - - } else { - - u32 clone_from, clone_to, clone_len; - - clone_len = choose_block_len(afl, new_len); - clone_from = rand_below(afl, new_len - clone_len + 1); - - clone_to = rand_below(afl, temp_len); - - u8 *temp_buf = - ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len); - - /* Head */ - - memcpy(temp_buf, out_buf, clone_to); - - /* Inserted part */ - - memcpy(temp_buf + clone_to, new_buf + clone_from, clone_len); - - /* Tail */ - memcpy(temp_buf + clone_to + clone_len, out_buf + clone_to, - temp_len - clone_to); - - swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch)); - out_buf = temp_buf; - temp_len += clone_len; - - } - - break; - - } - - // end of default: - - } - - } - - if (common_fuzz_stuff(afl, out_buf, temp_len)) { goto abandon_entry; } - - /* out_buf might have been mangled a bit, so let's restore it to its - original size and shape. */ - - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); - temp_len = len; - memcpy(out_buf, in_buf, len); - - /* If we're finding new stuff, let's run for a bit longer, limits - permitting. 
*/ - - if (afl->queued_paths != havoc_queued) { - - if (perf_score <= afl->havoc_max_mult * 100) { - - afl->stage_max *= 2; - perf_score *= 2; - - } - - havoc_queued = afl->queued_paths; - - } - - } - - new_hit_cnt = afl->queued_paths + afl->unique_crashes; - - if (!splice_cycle) { - - afl->stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; - afl->stage_cycles[STAGE_HAVOC] += afl->stage_max; - - } else { - - afl->stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; - afl->stage_cycles[STAGE_SPLICE] += afl->stage_max; - - } - -#ifndef IGNORE_FINDS - - /************ - * SPLICING * - ************/ - - /* This is a last-resort strategy triggered by a full round with no findings. - It takes the current input file, randomly selects another input, and - splices them together at some offset, then relies on the havoc - code to mutate that blob. */ - -retry_splicing: - - if (afl->use_splicing && splice_cycle++ < SPLICE_CYCLES && - afl->queued_paths > 1 && afl->queue_cur->len > 1) { - - struct queue_entry *target; - u32 tid, split_at; - u8 * new_buf; - s32 f_diff, l_diff; - - /* First of all, if we've modified in_buf for havoc, let's clean that - up... */ - - if (in_buf != orig_in) { - - in_buf = orig_in; - len = afl->queue_cur->len; - - } - - /* Pick a random queue entry and seek to it. Don't splice with yourself. */ - - do { - - tid = rand_below(afl, afl->queued_paths); - - } while (tid == afl->current_entry); - - afl->splicing_with = tid; - target = afl->queue_buf[tid]; - - /* Make sure that the target has a reasonable length. */ - - while (target && (target->len < 2 || target == afl->queue_cur)) { - - target = target->next; - ++afl->splicing_with; - - } - - if (!target) { goto retry_splicing; } - - /* Read the testcase into a new buffer. */ - - fd = open(target->fname, O_RDONLY); - - if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); } - - new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), target->len); - - ck_read(fd, new_buf, target->len, target->fname); - - close(fd); - - /* Find a suitable splicing location, somewhere between the first and - the last differing byte. Bail out if the difference is just a single - byte or so. */ - - locate_diffs(in_buf, new_buf, MIN(len, (s64)target->len), &f_diff, &l_diff); - - if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { goto retry_splicing; } - - /* Split somewhere between the first and last differing byte. */ - - split_at = f_diff + rand_below(afl, l_diff - f_diff); - - /* Do the thing. */ - - len = target->len; - memcpy(new_buf, in_buf, split_at); - swap_bufs(BUF_PARAMS(in), BUF_PARAMS(in_scratch)); - in_buf = new_buf; - - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); - memcpy(out_buf, in_buf, len); - - goto custom_mutator_stage; - /* ???: While integrating Python module, the author decided to jump to - python stage, but the reason behind this is not clear.*/ - // goto havoc_stage; - - } - -#endif /* !IGNORE_FINDS */ - - ret_val = 0; - - goto abandon_entry; - - /******************************* - * same RANDOM HAVOC for taint * - *******************************/ - -taint_havoc_stage: - - afl->stage_cur_byte = -1; - - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. */ - - if (!splice_cycle) { - - afl->stage_name = "havoc"; - afl->stage_short = "havoc"; - afl->stage_max = (doing_det ? 
HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / afl->havoc_div / 100; - - } else { - - perf_score = orig_perf; - - snprintf(afl->stage_name_buf, STAGE_BUF_SIZE, "splice %u", splice_cycle); - afl->stage_name = afl->stage_name_buf; - afl->stage_short = "splice"; - afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100; - - } - - if (afl->stage_max < HAVOC_MIN) { afl->stage_max = HAVOC_MIN; } - - temp_len = len; - - orig_hit_cnt = afl->queued_paths + afl->unique_crashes; - - havoc_queued = afl->queued_paths; - - if (afl->custom_mutators_count) { - - LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, { - - if (el->stacked_custom && el->afl_custom_havoc_mutation_probability) { - - el->stacked_custom_prob = - el->afl_custom_havoc_mutation_probability(el->data); - if (el->stacked_custom_prob > 100) { - - FATAL( - "The probability returned by " - "afl_custom_havoc_mutation_propability " - "has to be in the range 0-100."); - - } - - } - - }); - - } - - /* We essentially just do several thousand runs (depending on perf_score) - where we take the input file and make random stacked tweaks. */ - - if (unlikely(afl->expand_havoc)) { - - /* add expensive havoc cases here, they are activated after a full - cycle without finds happened */ - - r_max = 16 + ((afl->extras_cnt + afl->a_extras_cnt) ? 2 : 0); - - } else { - - r_max = 15 + ((afl->extras_cnt + afl->a_extras_cnt) ? 2 : 0); - - } - - for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) { - - u32 use_stacking = 1 << (1 + rand_below(afl, HAVOC_STACK_POW2)); - - afl->stage_cur_val = use_stacking; - - for (i = 0; i < use_stacking; ++i) { - - if (afl->custom_mutators_count) { - - LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, { - - if (el->stacked_custom && - rand_below(afl, 100) < el->stacked_custom_prob) { - - u8 * custom_havoc_buf = NULL; - size_t new_len = el->afl_custom_havoc_mutation( - el->data, out_buf, temp_len, &custom_havoc_buf, MAX_FILE); - if (unlikely(!custom_havoc_buf)) { - - FATAL("Error in custom_havoc (return %zd)", new_len); - - } - - if (likely(new_len > 0 && custom_havoc_buf)) { + clone_len = choose_block_len(afl, afl->queue_cur->len); + clone_from = + rand_below(afl, afl->queue_cur->len - clone_len + 1); - temp_len = new_len; - if (out_buf != custom_havoc_buf) { + } else { - ck_maybe_grow(BUF_PARAMS(out), temp_len); - memcpy(out_buf, custom_havoc_buf, temp_len); + clone_len = choose_block_len(afl, temp_len); + clone_from = rand_below(afl, temp_len - clone_len + 1); } - } - - } - - }); - - } - - switch ((r = rand_below(afl, r_max))) { - - case 0: - - /* Flip a single bit somewhere. Spooky! */ - - FLIP_BIT(out_buf, rand_below(afl, temp_len << 3)); - break; - - case 1: - - /* Set byte to interesting value. */ - - out_buf[rand_below(afl, temp_len)] = - interesting_8[rand_below(afl, sizeof(interesting_8))]; - break; - - case 2: - - /* Set word to interesting value, randomly choosing endian. */ - - if (temp_len < 2) { break; } - - if (rand_below(afl, 2)) { - - *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) = - interesting_16[rand_below(afl, sizeof(interesting_16) >> 1)]; - - } else { - - *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) = SWAP16( - interesting_16[rand_below(afl, sizeof(interesting_16) >> 1)]); - - } - - break; - - case 3: - - /* Set dword to interesting value, randomly choosing endian. 
*/ - - if (temp_len < 4) { break; } - - if (rand_below(afl, 2)) { - - *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) = - interesting_32[rand_below(afl, sizeof(interesting_32) >> 2)]; - - } else { - - *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) = SWAP32( - interesting_32[rand_below(afl, sizeof(interesting_32) >> 2)]); - - } - - break; - - case 4: - - /* Randomly subtract from byte. */ - - out_buf[rand_below(afl, temp_len)] -= 1 + rand_below(afl, ARITH_MAX); - break; - - case 5: - - /* Randomly add to byte. */ - - out_buf[rand_below(afl, temp_len)] += 1 + rand_below(afl, ARITH_MAX); - break; - - case 6: - - /* Randomly subtract from word, random endian. */ - - if (temp_len < 2) { break; } - - if (rand_below(afl, 2)) { - - u32 pos = rand_below(afl, temp_len - 1); - - *(u16 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); - - } else { - - u32 pos = rand_below(afl, temp_len - 1); - u16 num = 1 + rand_below(afl, ARITH_MAX); - - *(u16 *)(out_buf + pos) = - SWAP16(SWAP16(*(u16 *)(out_buf + pos)) - num); - - } - - break; - - case 7: - - /* Randomly add to word, random endian. */ - - if (temp_len < 2) { break; } - - if (rand_below(afl, 2)) { - - u32 pos = rand_below(afl, temp_len - 1); - - *(u16 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); - - } else { - - u32 pos = rand_below(afl, temp_len - 1); - u16 num = 1 + rand_below(afl, ARITH_MAX); - - *(u16 *)(out_buf + pos) = - SWAP16(SWAP16(*(u16 *)(out_buf + pos)) + num); - - } - - break; - - case 8: - - /* Randomly subtract from dword, random endian. */ - - if (temp_len < 4) { break; } - - if (rand_below(afl, 2)) { - - u32 pos = rand_below(afl, temp_len - 3); - - *(u32 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); - - } else { - - u32 pos = rand_below(afl, temp_len - 3); - u32 num = 1 + rand_below(afl, ARITH_MAX); - - *(u32 *)(out_buf + pos) = - SWAP32(SWAP32(*(u32 *)(out_buf + pos)) - num); - - } - - break; - - case 9: - - /* Randomly add to dword, random endian. */ - - if (temp_len < 4) { break; } - - if (rand_below(afl, 2)) { - - u32 pos = rand_below(afl, temp_len - 3); - - *(u32 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); - - } else { - - u32 pos = rand_below(afl, temp_len - 3); - u32 num = 1 + rand_below(afl, ARITH_MAX); - - *(u32 *)(out_buf + pos) = - SWAP32(SWAP32(*(u32 *)(out_buf + pos)) + num); - - } - - break; - - case 10: - - /* Just set a random byte to a random value. Because, - why not. We use XOR with 1-255 to eliminate the - possibility of a no-op. */ - - out_buf[rand_below(afl, temp_len)] ^= 1 + rand_below(afl, 255); - break; - - case 11 ... 12: { - - /* Delete bytes. We're making this a bit more likely - than insertion (the next option) in hopes of keeping - files reasonably small. */ - - u32 del_from, del_len; - - if (temp_len < 2) { break; } - - /* Don't delete too much. */ - - del_len = choose_block_len(afl, temp_len - 1); - - del_from = rand_below(afl, temp_len - del_len + 1); - - memmove(out_buf + del_from, out_buf + del_from + del_len, - temp_len - del_from - del_len); - - temp_len -= del_len; - - break; - - } - - case 13: - - if (temp_len + HAVOC_BLK_XL < MAX_FILE) { - - /* Clone bytes (75%) or insert a block of constant bytes (25%). 
*/ - - u8 actually_clone = rand_below(afl, 4); - u32 clone_from, clone_to, clone_len; - u8 *new_buf; - - if (actually_clone) { - - clone_len = choose_block_len(afl, temp_len); - clone_from = rand_below(afl, temp_len - clone_len + 1); - } else { clone_len = choose_block_len(afl, HAVOC_BLK_XL); @@ -2988,7 +2248,11 @@ taint_havoc_stage: if (actually_clone) { - memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); + if (unlikely(afl->taint_needs_splode)) + memcpy(new_buf + clone_to, afl->taint_src + clone_from, + clone_len); + else + memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); } else { @@ -3021,16 +2285,29 @@ taint_havoc_stage: if (temp_len < 2) { break; } - copy_len = choose_block_len(afl, temp_len - 1); + if (unlikely(afl->taint_needs_splode)) { + + copy_len = choose_block_len(afl, afl->queue_cur->len - 1); + copy_from = rand_below(afl, afl->queue_cur->len - copy_len + 1); + + } else { + + copy_len = choose_block_len(afl, temp_len - 1); + copy_from = rand_below(afl, temp_len - copy_len + 1); + + } - copy_from = rand_below(afl, temp_len - copy_len + 1); copy_to = rand_below(afl, temp_len - copy_len + 1); if (rand_below(afl, 4)) { if (copy_from != copy_to) { - memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + if (unlikely(afl->taint_needs_splode)) + memmove(out_buf + copy_to, afl->taint_src + copy_from, + copy_len); + else + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); } @@ -3296,10 +2573,17 @@ taint_havoc_stage: splices them together at some offset, then relies on the havoc code to mutate that blob. */ -taint_retry_splicing: + u32 saved_len; + + if (unlikely(afl->taint_needs_splode)) + saved_len = afl->taint_len; + else + saved_len = afl->queue_cur->len; + +retry_splicing: if (afl->use_splicing && splice_cycle++ < SPLICE_CYCLES && - afl->queued_paths > 1 && afl->queue_cur->len > 1) { + afl->queued_paths > 1 && saved_len > 1) { struct queue_entry *target; u32 tid, split_at; @@ -3312,7 +2596,7 @@ taint_retry_splicing: if (in_buf != orig_in) { in_buf = orig_in; - len = afl->queue_cur->len; + len = saved_len; } @@ -3336,7 +2620,7 @@ taint_retry_splicing: } - if (!target) { goto taint_retry_splicing; } + if (!target) { goto retry_splicing; } /* Read the testcase into a new buffer. */ @@ -3356,7 +2640,7 @@ taint_retry_splicing: locate_diffs(in_buf, new_buf, MIN(len, (s64)target->len), &f_diff, &l_diff); - if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { goto taint_retry_splicing; } + if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { goto retry_splicing; } /* Split somewhere between the first and last differing byte. 
*/ @@ -3381,13 +2665,10 @@ taint_retry_splicing: #endif /* !IGNORE_FINDS */ - - ret_val = 0; goto abandon_entry; - /* we are through with this queue entry - for this iteration */ abandon_entry: -- cgit 1.4.1 From 9ec223c844a9fd9d8b66e309517fd7c64e5d4c5e Mon Sep 17 00:00:00 2001 From: van Hauser Date: Sun, 9 Aug 2020 23:47:51 +0200 Subject: final touches for first testing --- src/afl-fuzz-one.c | 58 ++++++++++++++++++++++++++++++------------------------ src/afl-fuzz-run.c | 4 +++- 2 files changed, 35 insertions(+), 27 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 75381db8..beb73246 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -458,16 +458,17 @@ u8 fuzz_one_original(afl_state_t *afl) { } - u32 tmp_val; + u32 tmp_val = 0; - if (unlikely(afl->fsrv.taint_mode && - (tmp_val = (afl->queue_cycle % 3)) != 1)) { + if (unlikely(afl->fsrv.taint_mode)) { + tmp_val = afl->queue_cycle % 2; ret_val = 0; if (unlikely(afl->queue_cur->cal_failed)) goto abandon_entry; + if (unlikely(!afl->queue_cur->passed_det) && !tmp_val) goto abandon_entry; if (tmp_val == 1 && !afl->queue_cur->taint_bytes_all) goto abandon_entry; - if (tmp_val == 2 && !afl->queue_cur->taint_bytes_new) goto abandon_entry; + if (tmp_val == 0 && !afl->queue_cur->taint_bytes_new) goto abandon_entry; ret_val = 1; @@ -483,10 +484,7 @@ u8 fuzz_one_original(afl_state_t *afl) { switch (tmp_val) { - case 1: // do nothing, but cannot happen -> else - break; - - case 2: // fuzz only tainted bytes + case 1: // fuzz only tainted bytes fd = open(afl->taint_input_file, O_RDONLY); temp_len = len = afl->taint_len = afl->queue_cur->taint_bytes_all; @@ -536,9 +534,10 @@ u8 fuzz_one_original(afl_state_t *afl) { } else { - /* Map the test case into memory. */ afl->taint_needs_splode = 0; + /* Map the test case into memory. */ + fd = open(afl->queue_cur->fname, O_RDONLY); if (unlikely(fd < 0)) { @@ -565,7 +564,7 @@ u8 fuzz_one_original(afl_state_t *afl) { single byte anyway, so it wouldn't give us any performance or memory usage benefits. */ - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + out_buf = ck_maybe_grow(BUF_PARAMS(out), len + 4096); afl->subseq_tmouts = 0; @@ -575,7 +574,8 @@ u8 fuzz_one_original(afl_state_t *afl) { * CALIBRATION (only if failed earlier on) * *******************************************/ - if (unlikely(afl->queue_cur->cal_failed)) { + if (unlikely(afl->queue_cur->cal_failed && + (!afl->taint_needs_splode || tmp_val == 1))) { u8 res = FSRV_RUN_TMOUT; @@ -659,9 +659,6 @@ u8 fuzz_one_original(afl_state_t *afl) { if it has gone through deterministic testing in earlier, resumed runs (passed_det). */ - // custom mutators would not work, deterministic is done -> so havoc! 
- if (afl->taint_needs_splode) goto havoc_stage; - if (likely(afl->queue_cur->passed_det) || likely(afl->skip_deterministic) || likely(perf_score < (afl->queue_cur->depth * 30 <= afl->havoc_max_mult * 100 @@ -1640,7 +1637,7 @@ skip_interest: orig_hit_cnt = new_hit_cnt; - ex_tmp = ck_maybe_grow(BUF_PARAMS(ex), len + MAX_DICT_FILE); + ex_tmp = ck_maybe_grow(BUF_PARAMS(ex), len + MAX_DICT_FILE + 4096); for (i = 0; i <= (u32)len; ++i) { @@ -1814,7 +1811,7 @@ custom_mutator_stage: fd = open(target->fname, O_RDONLY); if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); } - new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), target->len); + new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), target->len + 4096); ck_read(fd, new_buf, target->len, target->fname); close(fd); @@ -1989,7 +1986,7 @@ havoc_stage: temp_len = new_len; if (out_buf != custom_havoc_buf) { - ck_maybe_grow(BUF_PARAMS(out), temp_len); + ck_maybe_grow(BUF_PARAMS(out), temp_len + 4096); memcpy(out_buf, custom_havoc_buf, temp_len); } @@ -2237,8 +2234,8 @@ havoc_stage: clone_to = rand_below(afl, temp_len); - new_buf = - ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len); + new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), + temp_len + clone_len + 4096); /* Head */ @@ -2303,10 +2300,18 @@ havoc_stage: if (copy_from != copy_to) { - if (unlikely(afl->taint_needs_splode)) + if (unlikely(afl->taint_needs_splode)) { + + if (copy_to > temp_len) copy_to = rand_below(afl, temp_len); + + // fprintf(stderr, "\nout_buf %p + copy_to %u, src %p + %u, + // copy_len %u -- len %u\n", out_buf , copy_to, afl->taint_src , + // copy_from, copy_len, afl->taint_len, afl->queue_cur->len); memmove(out_buf + copy_to, afl->taint_src + copy_from, copy_len); - else + + } else + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); } @@ -2395,7 +2400,8 @@ havoc_stage: if (temp_len + extra_len >= MAX_FILE) { break; } - out_buf = ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len); + out_buf = + ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len + 4096); /* Tail */ memmove(out_buf + insert_at + extra_len, out_buf + insert_at, @@ -2490,8 +2496,8 @@ havoc_stage: clone_to = rand_below(afl, temp_len); - u8 *temp_buf = - ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len); + u8 *temp_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), + temp_len + clone_len + 4096); /* Head */ @@ -2526,7 +2532,7 @@ havoc_stage: /* out_buf might have been mangled a bit, so let's restore it to its original size and shape. 
*/ - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + out_buf = ck_maybe_grow(BUF_PARAMS(out), len + 4096); temp_len = len; memcpy(out_buf, in_buf, len); @@ -2653,7 +2659,7 @@ retry_splicing: swap_bufs(BUF_PARAMS(in), BUF_PARAMS(in_scratch)); in_buf = new_buf; - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + out_buf = ck_maybe_grow(BUF_PARAMS(out), len + 4096); memcpy(out_buf, in_buf, len); goto custom_mutator_stage; diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index 31db4d7c..41de143c 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -349,7 +349,9 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem, } - if (q->exec_cksum) { + if (unlikely(afl->fsrv.taint_mode)) + q->exec_cksum = 0; + else if (q->exec_cksum) { memcpy(afl->first_trace, afl->fsrv.trace_bits, afl->fsrv.map_size); hnb = has_new_bits(afl, afl->virgin_bits); -- cgit 1.4.1 From 8f8555dfdfeee643795ba04cd4240db40a88711e Mon Sep 17 00:00:00 2001 From: van Hauser Date: Mon, 10 Aug 2020 12:05:30 +0200 Subject: fix segfault --- src/afl-fuzz-one.c | 1 + 1 file changed, 1 insertion(+) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index beb73246..c664f281 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -2295,6 +2295,7 @@ havoc_stage: } copy_to = rand_below(afl, temp_len - copy_len + 1); + if (unlikely(copy_to > temp_len)) copy_to = rand_below(afl, temp_len); if (rand_below(afl, 4)) { -- cgit 1.4.1 From 9c953ab51ff22b2fc3e1b73e6563211e7676b62e Mon Sep 17 00:00:00 2001 From: van Hauser Date: Mon, 10 Aug 2020 13:03:59 +0200 Subject: memory grab at startup to prevent crashes --- src/afl-fuzz-one.c | 18 +++++++++--------- src/afl-fuzz.c | 11 +++++++++++ 2 files changed, 20 insertions(+), 9 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index c664f281..75687703 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -564,7 +564,7 @@ u8 fuzz_one_original(afl_state_t *afl) { single byte anyway, so it wouldn't give us any performance or memory usage benefits. 
*/ - out_buf = ck_maybe_grow(BUF_PARAMS(out), len + 4096); + out_buf = ck_maybe_grow(BUF_PARAMS(out), len); afl->subseq_tmouts = 0; @@ -1637,7 +1637,7 @@ skip_interest: orig_hit_cnt = new_hit_cnt; - ex_tmp = ck_maybe_grow(BUF_PARAMS(ex), len + MAX_DICT_FILE + 4096); + ex_tmp = ck_maybe_grow(BUF_PARAMS(ex), len + MAX_DICT_FILE); for (i = 0; i <= (u32)len; ++i) { @@ -1811,7 +1811,7 @@ custom_mutator_stage: fd = open(target->fname, O_RDONLY); if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); } - new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), target->len + 4096); + new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), target->len); ck_read(fd, new_buf, target->len, target->fname); close(fd); @@ -1986,7 +1986,7 @@ havoc_stage: temp_len = new_len; if (out_buf != custom_havoc_buf) { - ck_maybe_grow(BUF_PARAMS(out), temp_len + 4096); + ck_maybe_grow(BUF_PARAMS(out), temp_len); memcpy(out_buf, custom_havoc_buf, temp_len); } @@ -2235,7 +2235,7 @@ havoc_stage: clone_to = rand_below(afl, temp_len); new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), - temp_len + clone_len + 4096); + temp_len + clone_len); /* Head */ @@ -2402,7 +2402,7 @@ havoc_stage: if (temp_len + extra_len >= MAX_FILE) { break; } out_buf = - ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len + 4096); + ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len); /* Tail */ memmove(out_buf + insert_at + extra_len, out_buf + insert_at, @@ -2498,7 +2498,7 @@ havoc_stage: clone_to = rand_below(afl, temp_len); u8 *temp_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), - temp_len + clone_len + 4096); + temp_len + clone_len); /* Head */ @@ -2533,7 +2533,7 @@ havoc_stage: /* out_buf might have been mangled a bit, so let's restore it to its original size and shape. */ - out_buf = ck_maybe_grow(BUF_PARAMS(out), len + 4096); + out_buf = ck_maybe_grow(BUF_PARAMS(out), len); temp_len = len; memcpy(out_buf, in_buf, len); @@ -2660,7 +2660,7 @@ retry_splicing: swap_bufs(BUF_PARAMS(in), BUF_PARAMS(in_scratch)); in_buf = new_buf; - out_buf = ck_maybe_grow(BUF_PARAMS(out), len + 4096); + out_buf = ck_maybe_grow(BUF_PARAMS(out), len); memcpy(out_buf, in_buf, len); goto custom_mutator_stage; diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 2b9af94c..5cdd0292 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -1305,6 +1305,17 @@ int main(int argc, char **argv_orig, char **envp) { OKF("Taint forkserver successfully started"); +#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size + u8 *tmp1 = ck_maybe_grow(BUF_PARAMS(eff), MAX_FILE + 4096); + u8 *tmp2 = ck_maybe_grow(BUF_PARAMS(ex), MAX_FILE + 4096); + u8 *tmp3 = ck_maybe_grow(BUF_PARAMS(in_scratch), MAX_FILE + 4096); + u8 *tmp4 = ck_maybe_grow(BUF_PARAMS(out), MAX_FILE + 4096); + u8 *tmp5 = ck_maybe_grow(BUF_PARAMS(out_scratch), MAX_FILE + 4096); +#undef BUF_PARAMS + + if (!tmp1 || !tmp2 || !tmp3 || !tmp4 || !tmp5) + FATAL("memory issues. 
me hungry, feed me!"); + } perform_dry_run(afl); -- cgit 1.4.1 From 0ba09ee85a65878e70d1a224f9d41fcbac3ff1e5 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Tue, 11 Aug 2020 10:24:45 +0200 Subject: enhancements --- examples/aflpp_driver/aflpp_driver.c | 14 ++++++++------ include/common.h | 2 +- llvm_mode/afl-llvm-rt.o.c | 7 +++---- src/afl-common.c | 30 +++++++++++++++++------------- src/afl-fuzz-one.c | 11 +++++------ src/afl-fuzz.c | 11 +++++------ 6 files changed, 39 insertions(+), 36 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/examples/aflpp_driver/aflpp_driver.c b/examples/aflpp_driver/aflpp_driver.c index 2b7be45f..81782c67 100644 --- a/examples/aflpp_driver/aflpp_driver.c +++ b/examples/aflpp_driver/aflpp_driver.c @@ -306,10 +306,13 @@ int main(int argc, char **argv) { else if (argc > 1) { if (!getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) { + munmap(__afl_area_ptr, MAX_DUMMY_SIZE); // we need to free 0x10000 __afl_area_ptr = NULL; __afl_manual_init(); + } + return ExecuteFilesOnyByOne(argc, argv); } @@ -317,11 +320,14 @@ int main(int argc, char **argv) { assert(N > 0); if (!getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) { + munmap(__afl_area_ptr, MAX_DUMMY_SIZE); __afl_area_ptr = NULL; fprintf(stderr, "performing manual init\n"); - __afl_manual_init(); + __afl_manual_init(); + } + fprintf(stderr, "map is now at %p\n", __afl_area_ptr); // Call LLVMFuzzerTestOneInput here so that coverage caused by initialization @@ -333,11 +339,7 @@ int main(int argc, char **argv) { ssize_t r = read(0, buf, sizeof(buf)); - if (r > 0) { - - LLVMFuzzerTestOneInput(buf, r); - - } + if (r > 0) { LLVMFuzzerTestOneInput(buf, r); } } diff --git a/include/common.h b/include/common.h index 42c79c62..c7d57e07 100644 --- a/include/common.h +++ b/include/common.h @@ -55,7 +55,7 @@ extern u8 *doc_path; /* path to documentation dir */ @returns the path, allocating the string */ u8 *find_binary(u8 *fname); -u8 *find_binary_own_loc(u8 *fname, u8 *own_loc); +u8 *find_afl_binary(u8 *fname, u8 *own_loc); /* Read a bitmap from file fname to memory This is for the -B option again. 
*/ diff --git a/llvm_mode/afl-llvm-rt.o.c b/llvm_mode/afl-llvm-rt.o.c index c69d8bb7..20151aea 100644 --- a/llvm_mode/afl-llvm-rt.o.c +++ b/llvm_mode/afl-llvm-rt.o.c @@ -847,9 +847,8 @@ void __afl_manual_init(void) { init_done = 1; is_persistent = 0; __afl_sharedmem_fuzzing = 0; - if (__afl_area_ptr == NULL) - __afl_area_ptr = __afl_area_initial; - + if (__afl_area_ptr == NULL) __afl_area_ptr = __afl_area_initial; + if (getenv("AFL_DEBUG")) fprintf(stderr, "DEBUG: disabled instrumenation because of " @@ -886,7 +885,7 @@ __attribute__((constructor(0))) void __afl_auto_early(void) { is_persistent = !!getenv(PERSIST_ENV_VAR); - __afl_map_shm(); + __afl_map_shm(); } diff --git a/src/afl-common.c b/src/afl-common.c index dabeeedd..c1302080 100644 --- a/src/afl-common.c +++ b/src/afl-common.c @@ -138,7 +138,7 @@ void argv_cpy_free(char **argv) { } -u8 *find_binary_own_loc(u8 *fname, u8 *own_loc) { +u8 *find_afl_binary(u8 *fname, u8 *own_loc) { u8 *tmp, *rsl, *own_copy, *cp; @@ -154,21 +154,25 @@ u8 *find_binary_own_loc(u8 *fname, u8 *own_loc) { } - own_copy = ck_strdup(own_loc); - rsl = strrchr(own_copy, '/'); + if (own_loc) { - if (rsl) { + own_copy = ck_strdup(own_loc); + rsl = strrchr(own_copy, '/'); - *rsl = 0; + if (rsl) { - cp = alloc_printf("%s/%s", own_copy, fname); - ck_free(own_copy); + *rsl = 0; - if (!access(cp, X_OK)) { return cp; } + cp = alloc_printf("%s/%s", own_copy, fname); + ck_free(own_copy); - } else { + if (!access(cp, X_OK)) { return cp; } + + } else { - ck_free(own_copy); + ck_free(own_copy); + + } } @@ -196,7 +200,7 @@ char **get_qemu_argv(u8 *own_loc, u8 **target_path_p, int argc, char **argv) { /* Now we need to actually find the QEMU binary to put in argv[0]. */ - cp = find_binary_own_loc("afl-qemu-trace", own_loc); + cp = find_afl_binary("afl-qemu-trace", own_loc); if (cp) { @@ -241,12 +245,12 @@ char **get_wine_argv(u8 *own_loc, u8 **target_path_p, int argc, char **argv) { /* Now we need to actually find the QEMU binary to put in argv[0]. 
*/ - cp = find_binary_own_loc("afl-qemu-trace", own_loc); + cp = find_afl_binary("afl-qemu-trace", own_loc); if (cp) { ck_free(cp); - cp = find_binary_own_loc("afl-wine-trace", own_loc); + cp = find_afl_binary("afl-wine-trace", own_loc); if (cp) { diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 9f38b8f8..2f724569 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -2236,8 +2236,8 @@ havoc_stage: clone_to = rand_below(afl, temp_len); - new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), - temp_len + clone_len); + new_buf = + ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len); /* Head */ @@ -2403,8 +2403,7 @@ havoc_stage: if (temp_len + extra_len >= MAX_FILE) { break; } - out_buf = - ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len); + out_buf = ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len); /* Tail */ memmove(out_buf + insert_at + extra_len, out_buf + insert_at, @@ -2499,8 +2498,8 @@ havoc_stage: clone_to = rand_below(afl, temp_len); - u8 *temp_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), - temp_len + clone_len); + u8 *temp_buf = + ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len); /* Head */ diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 11db004d..d2b2c2d9 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -1274,9 +1274,8 @@ int main(int argc, char **argv_orig, char **envp) { ck_free(afl->taint_fsrv.target_path); afl->argv_taint = ck_alloc(sizeof(char *) * (argc + 4 - optind)); - afl->taint_fsrv.target_path = - find_binary_own_loc("afl-qemu-taint", argv[0]); - afl->argv_taint[0] = find_binary_own_loc("afl-qemu-taint", argv[0]); + afl->taint_fsrv.target_path = find_afl_binary("afl-qemu-taint", argv[0]); + afl->argv_taint[0] = find_afl_binary("afl-qemu-taint", argv[0]); if (!afl->argv_taint[0]) FATAL( "Cannot find 'afl-qemu-taint', read qemu_taint/README.md on how to " @@ -1308,19 +1307,19 @@ int main(int argc, char **argv_orig, char **envp) { OKF("Taint forkserver successfully started"); - const rlim_t kStackSize = 256L * 1024L * 1024L; // min stack size = 256 Mb + const rlim_t kStackSize = 256L * 1024L * 1024L; // min stack size = 256 Mb struct rlimit rl; rl.rlim_cur = kStackSize; if (getrlimit(RLIMIT_STACK, &rl) != 0) WARNF("Setting a higher stack size failed!"); -#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size + #define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size u8 *tmp1 = ck_maybe_grow(BUF_PARAMS(eff), MAX_FILE + 4096); u8 *tmp2 = ck_maybe_grow(BUF_PARAMS(ex), MAX_FILE + 4096); u8 *tmp3 = ck_maybe_grow(BUF_PARAMS(in_scratch), MAX_FILE + 4096); u8 *tmp4 = ck_maybe_grow(BUF_PARAMS(out), MAX_FILE + 4096); u8 *tmp5 = ck_maybe_grow(BUF_PARAMS(out_scratch), MAX_FILE + 4096); -#undef BUF_PARAMS + #undef BUF_PARAMS if (!tmp1 || !tmp2 || !tmp3 || !tmp4 || !tmp5) FATAL("memory issues. 
me hungry, feed me!"); -- cgit 1.4.1 From 3ec1b2374336d0b98aa4fc586cd5bc601b711821 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Tue, 11 Aug 2020 10:36:34 +0200 Subject: cleanup minor issues --- src/afl-fuzz-bitmap.c | 4 ++++ src/afl-fuzz-one.c | 22 ++++++++++++---------- src/afl-fuzz-queue.c | 3 +-- src/afl-fuzz-run.c | 2 +- src/afl-fuzz.c | 2 +- 5 files changed, 19 insertions(+), 14 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c index d4ee36e1..9f58d604 100644 --- a/src/afl-fuzz-bitmap.c +++ b/src/afl-fuzz-bitmap.c @@ -183,6 +183,8 @@ u32 count_bits_len(afl_state_t *afl, u8 *mem, u32 len) { u32 i = (len >> 2); u32 ret = 0; + (void)(afl); + if (len % 4) i++; while (i--) { @@ -241,6 +243,8 @@ u32 count_bytes_len(afl_state_t *afl, u8 *mem, u32 len) { u32 i = (len >> 2); u32 ret = 0; + (void)(afl); + while (i--) { u32 v = *(ptr++); diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 2f724569..4b2fd90a 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -472,12 +472,12 @@ u8 fuzz_one_original(afl_state_t *afl) { ret_val = 1; - u32 dst = 0, i; + s32 dst = 0, i; temp_len = len = afl->queue_cur->len; fd = open(afl->queue_cur->fname, O_RDONLY); afl->taint_src = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - if (fd < 0 || (size_t)afl->taint_src == -1) + if (fd < 0 || (ssize_t)afl->taint_src == -1) FATAL("unable to open '%s'", afl->queue_cur->fname); close(fd); afl->taint_needs_splode = 1; @@ -490,18 +490,18 @@ u8 fuzz_one_original(afl_state_t *afl) { temp_len = len = afl->taint_len = afl->queue_cur->taint_bytes_all; orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - if (fd < 0 || (size_t)in_buf == -1) + if (fd < 0 || (ssize_t)in_buf == -1) FATAL("unable to open '%s'", afl->taint_input_file); close(fd); fd = open(afl->queue_cur->fname_taint, O_RDWR); afl->taint_map = mmap(0, afl->queue_cur->len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - if (fd < 0 || (size_t)in_buf == -1) + if (fd < 0 || (ssize_t)in_buf == -1) FATAL("unable to open '%s'", afl->queue_cur->fname_taint); close(fd); - for (i = 0; i < afl->queue_cur->len && dst < len; i++) + for (i = 0; i < (s32)afl->queue_cur->len && dst < len; i++) if (afl->taint_map[i]) in_buf[dst++] = afl->taint_src[i]; break; @@ -512,7 +512,7 @@ u8 fuzz_one_original(afl_state_t *afl) { temp_len = len = afl->taint_len = afl->queue_cur->taint_bytes_new; orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - if (fd < 0 || (size_t)in_buf == -1) + if (fd < 0 || (ssize_t)in_buf == -1) FATAL("unable to open '%s'", afl->taint_input_file); close(fd); @@ -520,12 +520,12 @@ u8 fuzz_one_original(afl_state_t *afl) { fd = open(fn, O_RDWR); afl->taint_map = mmap(0, afl->queue_cur->len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - if (fd < 0 || (size_t)in_buf == -1) + if (fd < 0 || (ssize_t)in_buf == -1) FATAL("unable to open '%s' for %u bytes", fn, len); close(fd); ck_free(fn); - for (i = 0; i < afl->queue_cur->len && dst < len; i++) + for (i = 0; i < (s32)afl->queue_cur->len && dst < len; i++) if (afl->taint_map[i]) in_buf[dst++] = afl->taint_src[i]; break; @@ -2297,7 +2297,8 @@ havoc_stage: } copy_to = rand_below(afl, temp_len - copy_len + 1); - if (unlikely(copy_to > temp_len)) copy_to = rand_below(afl, temp_len); + if (unlikely(copy_to > (u32)temp_len)) + copy_to = rand_below(afl, temp_len); if (rand_below(afl, 4)) { @@ -2305,7 +2306,8 @@ havoc_stage: if (unlikely(afl->taint_needs_splode)) { - if (copy_to > temp_len) copy_to = 
rand_below(afl, temp_len); + if (copy_to > (u32)temp_len) + copy_to = rand_below(afl, temp_len); // fprintf(stderr, "\nout_buf %p + copy_to %u, src %p + %u, // copy_len %u -- len %u\n", out_buf , copy_to, afl->taint_src , diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 28af17f0..f4b58a9d 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -108,7 +108,6 @@ void perform_taint_run(afl_state_t *afl, struct queue_entry *q, u8 *fname, u8 * ptr, *fn = fname; u32 bytes = 0, plen = len; - s32 fd = -1; struct queue_entry *prev = q->prev; if (plen % 4) plen = plen + 4 - (len % 4); @@ -170,7 +169,7 @@ void perform_taint_run(afl_state_t *afl, struct queue_entry *q, u8 *fname, u8 *bufr = mmap(0, prev->len, PROT_READ, MAP_PRIVATE, r, 0); - if ((size_t)bufr != -1) { + if ((ssize_t)bufr != -1) { u32 i; u8 *tmp = ck_maybe_grow(BUF_PARAMS(in_scratch), plen); diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index 9db23134..058f8c2d 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -883,7 +883,7 @@ u8 common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u32 len) { u8 *new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), new_len); u32 i, taint = 0; - for (i = 0; i < new_len; i++) { + for (i = 0; i < (u32)new_len; i++) { if (i >= afl->taint_len || i >= afl->queue_cur->len || afl->taint_map[i]) new_buf[i] = out_buf[taint++]; diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index d2b2c2d9..e6238366 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -1095,7 +1095,7 @@ int main(int argc, char **argv_orig, char **envp) { if (map_size != real_map_size) { afl->fsrv.map_size = real_map_size; - if (afl->cmplog_binary) afl->cmplog_fsrv.map_size; + if (afl->cmplog_binary) afl->cmplog_fsrv.map_size = real_map_size; } -- cgit 1.4.1 From 457f627101c08b885e9edfd8b491b5be198b6f14 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Tue, 11 Aug 2020 15:10:18 +0200 Subject: move taint_mode var --- src/afl-forkserver.c | 2 +- src/afl-fuzz-init.c | 6 +++--- src/afl-fuzz-one.c | 2 +- src/afl-fuzz-queue.c | 3 ++- src/afl-fuzz-run.c | 2 +- src/afl-fuzz.c | 30 ++++++++++++++++-------------- 6 files changed, 24 insertions(+), 21 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c index adb75a2d..56475320 100644 --- a/src/afl-forkserver.c +++ b/src/afl-forkserver.c @@ -497,7 +497,7 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv, char pid_buf[16]; sprintf(pid_buf, "%d", fsrv->fsrv_pid); - if (fsrv->qemu_mode == 2) { + if (fsrv->taint_mode) { setenv("__AFL_TARGET_PID3", pid_buf, 1); diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 432e0649..669bd65a 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -961,7 +961,7 @@ void perform_dry_run(afl_state_t *afl) { } /* perform taint gathering on the input seed */ - if (afl->fsrv.taint_mode) + if (afl->taint_mode) perform_taint_run(afl, q, q->fname, use_mem, q->len); q = q->next; @@ -1502,7 +1502,7 @@ static void handle_existing_out_dir(afl_state_t *afl) { if (delete_files(fn, CASE_PREFIX)) { goto dir_cleanup_failed; } ck_free(fn); - if (afl->fsrv.taint_mode) { + if (afl->taint_mode) { fn = alloc_printf("%s/taint", afl->out_dir); mkdir(fn, 0755); // ignore errors @@ -1745,7 +1745,7 @@ void setup_dirs_fds(afl_state_t *afl) { /* Taint directory if taint_mode. 
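/* Several hunks above test an mmap() result with "(ssize_t)buf == -1"; the
   portable spelling of that check is a comparison against MAP_FAILED, which
   is what this stand-alone sketch uses.  Function name and error handling
   are illustrative only, not code from this tree. */

#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

/* Map a file read-only; returns the mapping (and its length) or NULL. */
static unsigned char *map_file_ro(const char *fname, size_t *len_out) {

  struct stat st;
  int         fd = open(fname, O_RDONLY);

  if (fd < 0) { perror("open"); return NULL; }

  if (fstat(fd, &st) < 0 || st.st_size == 0) {

    close(fd);
    return NULL;

  }

  void *buf = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
  close(fd);                      /* the mapping stays valid after close() */

  if (buf == MAP_FAILED) { perror("mmap"); return NULL; }

  *len_out = (size_t)st.st_size;
  return (unsigned char *)buf;

}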
*/ - if (afl->fsrv.taint_mode) { + if (afl->taint_mode) { tmp = alloc_printf("%s/taint", afl->out_dir); if (mkdir(tmp, 0700)) { PFATAL("Unable to create '%s'", tmp); } diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 4b2fd90a..69f885ca 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -460,7 +460,7 @@ u8 fuzz_one_original(afl_state_t *afl) { u32 tmp_val = 0; - if (unlikely(afl->fsrv.taint_mode)) { + if (unlikely(afl->taint_mode)) { tmp_val = afl->queue_cycle % 2; ret_val = 0; diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index b56e10f8..bb44e465 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -220,6 +220,7 @@ void perform_taint_run(afl_state_t *afl, struct queue_entry *q, u8 *fname, } else { + FATAL("count not create '%s'", fnw); q->taint_bytes_new = 0; } @@ -419,7 +420,7 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u8 *mem, u32 len, afl->last_path_time = get_cur_time(); /* trigger the tain gathering if this is not a dry run */ - if (afl->fsrv.taint_mode && mem) { + if (afl->taint_mode && mem) { perform_taint_run(afl, q, fname, mem, len); diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index 058f8c2d..5f928333 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -349,7 +349,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem, } - if (unlikely(afl->fsrv.taint_mode)) + if (unlikely(afl->taint_mode)) q->exec_cksum = 0; else if (q->exec_cksum) { diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index e6238366..bead2ed9 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -288,7 +288,7 @@ int main(int argc, char **argv_orig, char **envp) { switch (opt) { case 'A': - afl->fsrv.taint_mode = 1; + afl->taint_mode = 1; if (!mem_limit_given) { afl->fsrv.mem_limit = MEM_LIMIT_QEMU; } break; @@ -829,10 +829,10 @@ int main(int argc, char **argv_orig, char **envp) { } - if (afl->fsrv.taint_mode && afl->fsrv.map_size < MAX_FILE) { + if (afl->taint_mode && afl->fsrv.map_size < MAX_FILE) { real_map_size = map_size; - map_size = afl->fsrv.map_size = afl->shm.map_size = MAX_FILE; + map_size = MAX_FILE; } @@ -891,9 +891,12 @@ int main(int argc, char **argv_orig, char **envp) { if (afl->crash_mode) { FATAL("-C and -n are mutually exclusive"); } if (afl->fsrv.qemu_mode) { FATAL("-Q and -n are mutually exclusive"); } if (afl->unicorn_mode) { FATAL("-U and -n are mutually exclusive"); } - if (afl->fsrv.taint_mode) { FATAL("-A and -n are mutually exclusive"); } + if (afl->taint_mode) { FATAL("-A and -n are mutually exclusive"); } } + + if (afl->limit_time_sig != 0 && afl->taint_mode) { FATAL("-A and -L are mutually exclusive"); } + if (afl->unicorn_mode != 0 && afl->taint_mode) { FATAL("-A and -U are mutually exclusive"); } if (get_afl_env("AFL_DISABLE_TRIM")) { afl->disable_trim = 1; } @@ -992,7 +995,7 @@ int main(int argc, char **argv_orig, char **envp) { if (afl->afl_env.afl_preload) { - if (afl->fsrv.qemu_mode || afl->fsrv.taint_mode) { + if (afl->fsrv.qemu_mode || afl->taint_mode) { u8 *qemu_preload = getenv("QEMU_SET_ENV"); u8 *afl_preload = getenv("AFL_PRELOAD"); @@ -1088,17 +1091,17 @@ int main(int argc, char **argv_orig, char **envp) { afl->fsrv.trace_bits = afl_shm_init(&afl->shm, afl->fsrv.map_size, afl->non_instrumented_mode); - if (!afl->in_bitmap) { memset(afl->virgin_bits, 255, afl->fsrv.map_size); } - memset(afl->virgin_tmout, 255, afl->fsrv.map_size); - memset(afl->virgin_crash, 255, afl->fsrv.map_size); - - if (map_size != real_map_size) { + if (real_map_size && map_size != real_map_size) { afl->fsrv.map_size = 
real_map_size; if (afl->cmplog_binary) afl->cmplog_fsrv.map_size = real_map_size; } + if (!afl->in_bitmap) { memset(afl->virgin_bits, 255, afl->fsrv.map_size); } + memset(afl->virgin_tmout, 255, afl->fsrv.map_size); + memset(afl->virgin_crash, 255, afl->fsrv.map_size); + init_count_class16(); if (afl->is_main_node && check_main_node_exists(afl) == 1) { @@ -1260,7 +1263,7 @@ int main(int argc, char **argv_orig, char **envp) { } - if (afl->fsrv.taint_mode) { + if (afl->taint_mode) { ACTF("Spawning qemu_taint forkserver"); @@ -1268,7 +1271,6 @@ int main(int argc, char **argv_orig, char **envp) { setenv("AFL_DISABLE_LLVM_INSTRUMENTATION", "1", 0); afl_fsrv_init_dup(&afl->taint_fsrv, &afl->fsrv); - afl->taint_fsrv.qemu_mode = 2; afl->taint_fsrv.taint_mode = 1; afl->taint_fsrv.trace_bits = afl->fsrv.trace_bits; @@ -1399,7 +1401,7 @@ int main(int argc, char **argv_orig, char **envp) { break; case 1: if (afl->limit_time_sig == 0 && !afl->custom_only && - !afl->python_only && !afl->fsrv.taint_mode) { + !afl->python_only && !afl->taint_mode) { afl->limit_time_sig = -1; afl->limit_time_puppet = 0; @@ -1588,7 +1590,7 @@ stop_fuzzing: } if (afl->cmplog_binary) afl_fsrv_deinit(&afl->cmplog_fsrv); - if (afl->fsrv.taint_mode) afl_fsrv_deinit(&afl->taint_fsrv); + if (afl->taint_mode) afl_fsrv_deinit(&afl->taint_fsrv); afl_fsrv_deinit(&afl->fsrv); if (afl->orig_cmdline) { ck_free(afl->orig_cmdline); } if (afl->argv_taint) { ck_free(afl->argv_taint); } -- cgit 1.4.1 From 220dc4a43d197f5ff451627a9923b874805c02aa Mon Sep 17 00:00:00 2001 From: van Hauser Date: Tue, 11 Aug 2020 16:25:35 +0200 Subject: review done, pray --- include/afl-fuzz.h | 3 +- src/afl-fuzz-init.c | 3 +- src/afl-fuzz-one.c | 152 ++++++++++++++++++++++++++++++++++++++++----------- src/afl-fuzz-queue.c | 6 +- src/afl-fuzz-run.c | 10 +++- src/afl-fuzz.c | 17 ++++-- 6 files changed, 144 insertions(+), 47 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index c578c583..e251183c 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -444,7 +444,8 @@ typedef struct afl_state { python_only, /* Python-only mode */ is_main_node, /* if this is the main node */ is_secondary_node, /* if this is a secondary instance */ - taint_needs_splode; /* explode fuzz input */ + taint_needs_splode, /* explode fuzz input */ + taint_mode; u32 stats_update_freq; /* Stats update frequency (execs) */ diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 669bd65a..0150e18a 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -961,8 +961,7 @@ void perform_dry_run(afl_state_t *afl) { } /* perform taint gathering on the input seed */ - if (afl->taint_mode) - perform_taint_run(afl, q, q->fname, use_mem, q->len); + if (afl->taint_mode) perform_taint_run(afl, q, q->fname, use_mem, q->len); q = q->next; diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 69f885ca..7718256f 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -462,18 +462,23 @@ u8 fuzz_one_original(afl_state_t *afl) { if (unlikely(afl->taint_mode)) { - tmp_val = afl->queue_cycle % 2; + tmp_val = afl->queue_cycle % 2; // starts with 1 ret_val = 0; - if (unlikely(afl->queue_cur->cal_failed)) goto abandon_entry; - if (unlikely(!afl->queue_cur->passed_det) && !tmp_val) goto abandon_entry; - if (tmp_val == 1 && !afl->queue_cur->taint_bytes_all) goto abandon_entry; - if (tmp_val == 0 && !afl->queue_cur->taint_bytes_new) goto abandon_entry; + if (unlikely(afl->queue_cur->cal_failed && !tmp_val)) goto abandon_entry; + if 
(unlikely(!afl->skip_deterministic && !afl->queue_cur->passed_det && + !tmp_val)) + goto abandon_entry; + if ((!afl->queue_cur->taint_bytes_new || + afl->queue_cur->taint_bytes_new == afl->queue_cur->len) && + !tmp_val) + goto abandon_entry; ret_val = 1; s32 dst = 0, i; temp_len = len = afl->queue_cur->len; + s32 j = 0; // tmp fd = open(afl->queue_cur->fname, O_RDONLY); afl->taint_src = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); @@ -486,15 +491,27 @@ u8 fuzz_one_original(afl_state_t *afl) { case 1: // fuzz only tainted bytes + // special case: all or nothing tainted. in this case we act like + // nothing is special. this is not the taint you are looking for ... + if (!afl->queue_cur->taint_bytes_all || + afl->queue_cur->taint_bytes_all == (u32)len) { + + orig_in = in_buf = afl->taint_src; + afl->taint_needs_splode = 0; + break; + + } + fd = open(afl->taint_input_file, O_RDONLY); temp_len = len = afl->taint_len = afl->queue_cur->taint_bytes_all; orig_in = in_buf = - mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + mmap(0, len >= MAX_FILE - 65536 ? MAX_FILE : len + 65536, + PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); if (fd < 0 || (ssize_t)in_buf == -1) FATAL("unable to open '%s'", afl->taint_input_file); close(fd); - fd = open(afl->queue_cur->fname_taint, O_RDWR); + fd = open(afl->queue_cur->fname_taint, O_RDONLY); afl->taint_map = mmap(0, afl->queue_cur->len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); if (fd < 0 || (ssize_t)in_buf == -1) @@ -504,6 +521,29 @@ u8 fuzz_one_original(afl_state_t *afl) { for (i = 0; i < (s32)afl->queue_cur->len && dst < len; i++) if (afl->taint_map[i]) in_buf[dst++] = afl->taint_src[i]; + // FIXME DEBUG TODO XXX + for (i = 0; i < (s32)afl->queue_cur->len; i++) { + + switch (afl->taint_map[i]) { + + case 0x0: + break; + case '!': + j++; + break; + default: + FATAL( + "invalid taint map entry byte 0x%02x at position %d " + "(passed_det:%d)\n", + afl->taint_map[i], i, afl->queue_cur->passed_det); + + } + + } + + if (j != len) + FATAL("different taint values in map vs in queue (%d != %d)", j, len); + break; case 0: // fuzz only newly tainted bytes @@ -511,12 +551,14 @@ u8 fuzz_one_original(afl_state_t *afl) { fd = open(afl->taint_input_file, O_RDONLY); temp_len = len = afl->taint_len = afl->queue_cur->taint_bytes_new; orig_in = in_buf = - mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + mmap(0, len >= MAX_FILE - 65536 ? MAX_FILE : len + 65536, + PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); if (fd < 0 || (ssize_t)in_buf == -1) FATAL("unable to open '%s'", afl->taint_input_file); close(fd); u8 *fn = alloc_printf("%s.new", afl->queue_cur->fname_taint); + if (!fn) FATAL("OOM"); fd = open(fn, O_RDWR); afl->taint_map = mmap(0, afl->queue_cur->len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); @@ -528,14 +570,35 @@ u8 fuzz_one_original(afl_state_t *afl) { for (i = 0; i < (s32)afl->queue_cur->len && dst < len; i++) if (afl->taint_map[i]) in_buf[dst++] = afl->taint_src[i]; + // FIXME DEBUG TODO XXX + for (i = 0; i < (s32)afl->queue_cur->len; i++) { + + switch (afl->taint_map[i]) { + + case 0x0: + break; + case '!': + j++; + break; + default: + FATAL( + "invalid taint map entry byte 0x%02x at position %d " + "(passed_det:%d)\n", + afl->taint_map[i], i, afl->queue_cur->passed_det); + + } + + } + + if (j != len) + FATAL("different taint values in map vs in queue (%d != %d)", j, len); + break; } } else { - afl->taint_needs_splode = 0; - /* Map the test case into memory. 
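/* A compact illustration of the taint-map convention used in the hunk
   above: the map holds one byte per input byte, 0x00 for "not tainted" and
   '!' for "tainted", and fuzz_one_original() condenses the original input
   down to just its tainted bytes while sanity-checking the count.  This is
   a simplified sketch with made-up names, not the in-tree code. */

typedef unsigned char u8;
typedef unsigned int  u32;

/* Copy the tainted bytes of src[0..src_len) into dst (which must hold at
   least expected_len bytes).  Returns the number of bytes emitted, or
   (u32)-1 if the map holds an unexpected byte or the count mismatches. */
static u32 condense_tainted(const u8 *src, const u8 *map, u32 src_len,
                            u8 *dst, u32 expected_len) {

  u32 i, out = 0;

  for (i = 0; i < src_len; i++) {

    if (map[i] == '!') {

      if (out < expected_len) dst[out] = src[i];
      out++;

    } else if (map[i] != 0x00) {

      return (u32)-1;                               /* corrupted taint map */

    }

  }

  return out == expected_len ? out : (u32)-1;

}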
*/ fd = open(afl->queue_cur->fname, O_RDONLY); @@ -574,8 +637,7 @@ u8 fuzz_one_original(afl_state_t *afl) { * CALIBRATION (only if failed earlier on) * *******************************************/ - if (unlikely(afl->queue_cur->cal_failed && - (!afl->taint_needs_splode || tmp_val == 1))) { + if (unlikely(afl->queue_cur->cal_failed)) { u8 res = FSRV_RUN_TMOUT; @@ -583,8 +645,12 @@ u8 fuzz_one_original(afl_state_t *afl) { afl->queue_cur->exec_cksum = 0; - res = - calibrate_case(afl, afl->queue_cur, in_buf, afl->queue_cycle - 1, 0); + if (unlikely(afl->taint_needs_splode)) + res = calibrate_case(afl, afl->queue_cur, afl->taint_src, + afl->queue_cycle - 1, 0); + else + res = calibrate_case(afl, afl->queue_cur, in_buf, afl->queue_cycle - 1, + 0); if (unlikely(res == FSRV_RUN_ERROR)) { @@ -607,8 +673,8 @@ u8 fuzz_one_original(afl_state_t *afl) { * TRIMMING * ************/ - if (!afl->non_instrumented_mode && !afl->queue_cur->trim_done && - !afl->disable_trim && !afl->taint_needs_splode) { + if (unlikely(!afl->non_instrumented_mode && !afl->queue_cur->trim_done && + !afl->disable_trim && !afl->taint_needs_splode)) { u8 res = trim_case(afl, afl->queue_cur, in_buf); @@ -645,13 +711,26 @@ u8 fuzz_one_original(afl_state_t *afl) { if (afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized) { - if (input_to_state_stage(afl, in_buf, out_buf, len, - afl->queue_cur->exec_cksum)) { + int res; + if (unlikely(afl->taint_needs_splode)) { - goto abandon_entry; + len = afl->queue_cur->len; + memcpy(out_buf, afl->taint_src, len); + res = input_to_state_stage(afl, afl->taint_src, out_buf, len, + afl->queue_cur->exec_cksum); + // just abandon as success + ret_val = 0; + res = 1; + + } else { + + res = input_to_state_stage(afl, in_buf, out_buf, len, + afl->queue_cur->exec_cksum); } + if (unlikely(res)) { goto abandon_entry; } + } /* Skip right away if -d is given, if it has not been chosen sufficiently @@ -2288,37 +2367,46 @@ havoc_stage: copy_len = choose_block_len(afl, afl->queue_cur->len - 1); copy_from = rand_below(afl, afl->queue_cur->len - copy_len + 1); + copy_to = rand_below(afl, temp_len + 1); } else { copy_len = choose_block_len(afl, temp_len - 1); copy_from = rand_below(afl, temp_len - copy_len + 1); + copy_to = rand_below(afl, temp_len - copy_len + 1); } - copy_to = rand_below(afl, temp_len - copy_len + 1); - if (unlikely(copy_to > (u32)temp_len)) - copy_to = rand_below(afl, temp_len); - if (rand_below(afl, 4)) { if (copy_from != copy_to) { if (unlikely(afl->taint_needs_splode)) { - if (copy_to > (u32)temp_len) - copy_to = rand_below(afl, temp_len); + if (temp_len >= (s32)(copy_to + copy_len)) { - // fprintf(stderr, "\nout_buf %p + copy_to %u, src %p + %u, - // copy_len %u -- len %u\n", out_buf , copy_to, afl->taint_src , - // copy_from, copy_len, afl->taint_len, afl->queue_cur->len); - memmove(out_buf + copy_to, afl->taint_src + copy_from, - copy_len); + memcpy(out_buf + copy_to, afl->taint_src + copy_from, + copy_len); - } else + } else { + + u8 *new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), + copy_to + copy_len); + memcpy(new_buf, in_buf, copy_to); + memcpy(new_buf + copy_to, afl->taint_src + copy_from, + copy_len); + swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch)); + out_buf = new_buf; + temp_len = copy_to + copy_len; + + } + + } else { memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + } + } } else { @@ -2698,7 +2786,7 @@ abandon_entry: ++afl->queue_cur->fuzz_level; - if (afl->taint_needs_splode) { + if (unlikely(afl->taint_needs_splode)) { munmap(afl->taint_src, 
afl->queue_cur->len); munmap(orig_in, afl->taint_len); diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index bb44e465..a1fe146b 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -420,11 +420,7 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u8 *mem, u32 len, afl->last_path_time = get_cur_time(); /* trigger the tain gathering if this is not a dry run */ - if (afl->taint_mode && mem) { - - perform_taint_run(afl, q, fname, mem, len); - - } + if (afl->taint_mode && mem) { perform_taint_run(afl, q, fname, mem, len); } /* only redqueen currently uses is_ascii */ if (afl->shm.cmplog_mode) q->is_ascii = check_if_text(q); diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index 5f928333..94cfc383 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -878,9 +878,11 @@ u8 common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u32 len) { if (unlikely(afl->taint_needs_splode)) { s32 new_len = afl->queue_cur->len + len - afl->taint_len; - if (new_len < 4) new_len = 4; - if (new_len > MAX_FILE) new_len = MAX_FILE; - u8 *new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), new_len); + if (new_len < 4) + new_len = 4; + else if (new_len > MAX_FILE) + new_len = MAX_FILE; + u8 *new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), new_len); u32 i, taint = 0; for (i = 0; i < (u32)new_len; i++) { @@ -892,6 +894,8 @@ u8 common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u32 len) { } + swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch)); + out_buf = new_buf; len = new_len; diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index bead2ed9..106aa550 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -894,9 +894,18 @@ int main(int argc, char **argv_orig, char **envp) { if (afl->taint_mode) { FATAL("-A and -n are mutually exclusive"); } } - - if (afl->limit_time_sig != 0 && afl->taint_mode) { FATAL("-A and -L are mutually exclusive"); } - if (afl->unicorn_mode != 0 && afl->taint_mode) { FATAL("-A and -U are mutually exclusive"); } + + if (afl->limit_time_sig != 0 && afl->taint_mode) { + + FATAL("-A and -L are mutually exclusive"); + + } + + if (afl->unicorn_mode != 0 && afl->taint_mode) { + + FATAL("-A and -U are mutually exclusive"); + + } if (get_afl_env("AFL_DISABLE_TRIM")) { afl->disable_trim = 1; } @@ -1309,7 +1318,7 @@ int main(int argc, char **argv_orig, char **envp) { OKF("Taint forkserver successfully started"); - const rlim_t kStackSize = 256L * 1024L * 1024L; // min stack size = 256 Mb + const rlim_t kStackSize = 128L * 1024L * 1024L; // min stack size = 128 Mb struct rlimit rl; rl.rlim_cur = kStackSize; if (getrlimit(RLIMIT_STACK, &rl) != 0) -- cgit 1.4.1 From b604f5eafcebb816026e198df0ea66ebcbf18421 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Tue, 11 Aug 2020 18:06:18 +0200 Subject: finalize first beta! yay! --- README.md | 14 ++++++++------ examples/aflpp_driver/aflpp_driver.c | 28 ---------------------------- src/afl-fuzz-init.c | 9 +++++++-- src/afl-fuzz-one.c | 2 +- src/afl-fuzz-queue.c | 2 +- 5 files changed, 17 insertions(+), 38 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/README.md b/README.md index 2b9bc588..6e324cb0 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,7 @@ # qemu_taint variant. +UPDATE: **WORKS NOW** **PLEASE TEST** **:-)** + ## HOWTO cd qemu_taint && ./build_qemu_taint.sh @@ -8,13 +10,13 @@ afl-fuzz -A ... ## CAVEATS - * segfaults ~10-15 minutes in ... - - * shmem persistent mode does not work - * custom mutators? 
dunno if they work or not - * MOpt works but totally ignores the taint information + * llvm shmem persistent mode does not and can not not work + * MOpt works but totally ignores the taint information, so disabled here + * custom mutators? dunno if they work or not. depends on how they work. * not tested with qemu_mode - * if all seed entries are fully touched it might not work + * there are several debug checks to ensure the data is fine which slows down + fuzzing, if the beta experiment runs fine these will be improved and it + will result in quite a speed gain. ## THE TAINT diff --git a/examples/aflpp_driver/aflpp_driver.c b/examples/aflpp_driver/aflpp_driver.c index 81782c67..8e0b554a 100644 --- a/examples/aflpp_driver/aflpp_driver.c +++ b/examples/aflpp_driver/aflpp_driver.c @@ -107,8 +107,6 @@ If 1, close stdout at startup. If 2 close stderr; if 3 close both. #endif int __afl_sharedmem_fuzzing = 0; -extern unsigned char *__afl_area_ptr; -// extern struct cmp_map *__afl_cmp_map; // libFuzzer interface is thin, so we don't include any libFuzzer headers. int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); @@ -246,28 +244,8 @@ static int ExecuteFilesOnyByOne(int argc, char **argv) { } -__attribute__((constructor(1))) void __afl_protect(void) { - - setenv("__AFL_DEFER_FORKSRV", "1", 1); - __afl_area_ptr = (unsigned char *)mmap( - (void *)0x10000, MAX_DUMMY_SIZE, PROT_READ | PROT_WRITE, - MAP_FIXED_NOREPLACE | MAP_SHARED | MAP_ANONYMOUS, -1, 0); - if ((uint64_t)__afl_area_ptr == -1) - __afl_area_ptr = (unsigned char *)mmap((void *)0x10000, MAX_DUMMY_SIZE, - PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, -1, 0); - if ((uint64_t)__afl_area_ptr == -1) - __afl_area_ptr = - (unsigned char *)mmap(NULL, MAX_DUMMY_SIZE, PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, -1, 0); - // __afl_cmp_map = (struct cmp_map *)__afl_area_ptr; - -} - int main(int argc, char **argv) { - fprintf(stderr, "map is at %p\n", __afl_area_ptr); - printf( "======================= INFO =========================\n" "This binary is built for afl++.\n" @@ -307,8 +285,6 @@ int main(int argc, char **argv) { if (!getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) { - munmap(__afl_area_ptr, MAX_DUMMY_SIZE); // we need to free 0x10000 - __afl_area_ptr = NULL; __afl_manual_init(); } @@ -321,15 +297,11 @@ int main(int argc, char **argv) { if (!getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) { - munmap(__afl_area_ptr, MAX_DUMMY_SIZE); - __afl_area_ptr = NULL; fprintf(stderr, "performing manual init\n"); __afl_manual_init(); } - fprintf(stderr, "map is now at %p\n", __afl_area_ptr); - // Call LLVMFuzzerTestOneInput here so that coverage caused by initialization // on the first execution of LLVMFuzzerTestOneInput is ignored. 
LLVMFuzzerTestOneInput(dummy_input, 1); diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 0150e18a..359eef85 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -771,9 +771,13 @@ void perform_dry_run(afl_state_t *afl) { close(fd); res = calibrate_case(afl, q, use_mem, 0, 1); - ck_free(use_mem); - if (afl->stop_soon) { return; } + if (afl->stop_soon) { + + ck_free(use_mem); + return; + + } if (res == afl->crash_mode || res == FSRV_RUN_NOBITS) { @@ -962,6 +966,7 @@ void perform_dry_run(afl_state_t *afl) { /* perform taint gathering on the input seed */ if (afl->taint_mode) perform_taint_run(afl, q, q->fname, use_mem, q->len); + ck_free(use_mem); q = q->next; diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 7718256f..4db6febf 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -2342,7 +2342,7 @@ havoc_stage: } /* Tail */ - memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, + memmove(new_buf + clone_to + clone_len, out_buf + clone_to, temp_len - clone_to); swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch)); diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index a1fe146b..43794018 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -122,7 +122,7 @@ void perform_taint_run(afl_state_t *afl, struct queue_entry *q, u8 *fname, afl->taint_fsrv.map_size = plen; // speed :) write_to_testcase(afl, mem, len); - if (afl_fsrv_run_target(&afl->taint_fsrv, afl->fsrv.exec_tmout, + if (afl_fsrv_run_target(&afl->taint_fsrv, afl->fsrv.exec_tmout * 4, &afl->stop_soon) == 0) { bytes = q->taint_bytes_all = -- cgit 1.4.1 From 83df65a66b8df37d0759bf9b31a61f50234d6c40 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Fri, 14 Aug 2020 00:46:15 +0200 Subject: cleaned up maybe_add_auto calls --- include/afl-fuzz.h | 4 +--- include/forkserver.h | 4 ++-- src/afl-forkserver.c | 28 +++++++++++++++++++--------- src/afl-fuzz-extras.c | 8 ++------ src/afl-fuzz-one.c | 8 ++++---- src/afl-fuzz-redqueen.c | 12 ++++++------ src/afl-fuzz-state.c | 5 +++-- 7 files changed, 37 insertions(+), 32 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 51ab0e85..cd6f7173 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -608,8 +608,6 @@ typedef struct afl_state { u32 document_counter; #endif - void *maybe_add_auto; - /* statistics file */ double last_bitmap_cvg, last_stability, last_eps; @@ -911,7 +909,7 @@ u8 has_new_bits(afl_state_t *, u8 *); void load_extras_file(afl_state_t *, u8 *, u32 *, u32 *, u32); void load_extras(afl_state_t *, u8 *); -void maybe_add_auto(void *, u8 *, u32); +void maybe_add_auto(afl_state_t *, u8 *, u32); void save_auto(afl_state_t *); void load_auto(afl_state_t *); void destroy_extras(afl_state_t *); diff --git a/include/forkserver.h b/include/forkserver.h index 717493db..b413b4bf 100644 --- a/include/forkserver.h +++ b/include/forkserver.h @@ -89,9 +89,9 @@ typedef struct afl_forkserver { /* Function to kick off the forkserver child */ void (*init_child_func)(struct afl_forkserver *fsrv, char **argv); - u8 *function_opt; /* for autodictionary: afl ptr */ + u8 *afl_ptr; /* for autodictionary: afl ptr */ - void (*function_ptr)(void *afl_tmp, u8 *mem, u32 len); + void (*autodict_func)(void *afl_ptr, u8 *mem, u32 len); } afl_forkserver_t; diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c index 8684bcc0..01fc829a 100644 --- a/src/afl-forkserver.c +++ b/src/afl-forkserver.c @@ -324,8 +324,7 @@ static void report_error_and_exit(int error) { cloning a stopped child. 
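/* On the memcpy() -> memmove() change in the afl-fuzz-one.c tail-copy hunk
   above: memcpy() is undefined when source and destination overlap, while
   memmove() copies as if through a temporary buffer, so it is the
   conservative choice whenever overlap cannot be ruled out.  A tiny
   stand-alone demonstration: */

#include <stdio.h>
#include <string.h>

int main(void) {

  char buf[16] = "abcdefgh";

  /* Shift bytes 1..7 two places to the right inside the same buffer: the
     ranges overlap, so memcpy() would be undefined behaviour here, while
     memmove() reliably yields "abcbcdefgh". */
  memmove(buf + 3, buf + 1, 7);

  printf("%s\n", buf);
  return 0;

}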
So, we just execute once, and then send commands through a pipe. The other part of this logic is in afl-as.h / llvm_mode */ -void __attribute__((hot)) -afl_fsrv_start(afl_forkserver_t *fsrv, char **argv, volatile u8 *stop_soon_p, +void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv, volatile u8 *stop_soon_p, u8 debug_child_output) { int st_pipe[2], ctl_pipe[2]; @@ -631,13 +630,18 @@ afl_fsrv_start(afl_forkserver_t *fsrv, char **argv, volatile u8 *stop_soon_p, if ((status & FS_OPT_AUTODICT) == FS_OPT_AUTODICT) { - if (fsrv->function_ptr == NULL || fsrv->function_opt == NULL) { + if (fsrv->autodict_func == NULL || fsrv->afl_ptr == NULL) { // this is not afl-fuzz - we deny and return - if (fsrv->use_shmem_fuzz) + if (fsrv->use_shmem_fuzz) { + status = (FS_OPT_ENABLED | FS_OPT_SHDMEM_FUZZ); - else + + } else { + status = (FS_OPT_ENABLED); + + } if (write(fsrv->fsrv_ctl_fd, &status, 4) != 4) { FATAL("Writing to forkserver failed."); @@ -650,11 +654,16 @@ afl_fsrv_start(afl_forkserver_t *fsrv, char **argv, volatile u8 *stop_soon_p, if (!be_quiet) { ACTF("Using AUTODICT feature."); } - if (fsrv->use_shmem_fuzz) + if (fsrv->use_shmem_fuzz) { + status = (FS_OPT_ENABLED | FS_OPT_AUTODICT | FS_OPT_SHDMEM_FUZZ); - else + + } else { + status = (FS_OPT_ENABLED | FS_OPT_AUTODICT); + } + if (write(fsrv->fsrv_ctl_fd, &status, 4) != 4) { FATAL("Writing to forkserver failed."); @@ -673,7 +682,8 @@ afl_fsrv_start(afl_forkserver_t *fsrv, char **argv, volatile u8 *stop_soon_p, } - u32 len = status, offset = 0, count = 0; + u32 offset = 0, count = 0; + u32 len = status; u8 *dict = ck_alloc(len); if (dict == NULL) { @@ -704,7 +714,7 @@ afl_fsrv_start(afl_forkserver_t *fsrv, char **argv, volatile u8 *stop_soon_p, while (offset < (u32)status && (u8)dict[offset] + offset < (u32)status) { - fsrv->function_ptr(fsrv->function_opt, dict + offset + 1, + fsrv->autodict_func(fsrv->afl_ptr, dict + offset + 1, (u8)dict[offset]); offset += (1 + dict[offset]); count++; diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c index 097871c8..2f3a2d53 100644 --- a/src/afl-fuzz-extras.c +++ b/src/afl-fuzz-extras.c @@ -354,13 +354,9 @@ static inline u8 memcmp_nocase(u8 *m1, u8 *m2, u32 len) { } /* Maybe add automatic extra. */ -/* Ugly hack: afl state is transfered as u8* because we import data via - afl-forkserver.c - which is shared with other afl tools that do not - have the afl state struct */ -void maybe_add_auto(void *afl_tmp, u8 *mem, u32 len) { +void maybe_add_auto(afl_state_t *afl, u8 *mem, u32 len) { - afl_state_t *afl = (afl_state_t *)afl_tmp; u32 i; /* Allow users to specify that they don't want auto dictionaries. 
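/* The AUTODICT handshake in afl_fsrv_start() above receives the dictionary
   as one buffer of length-prefixed entries: a single length byte followed
   by that many token bytes, repeated until the end of the buffer.  A
   stand-alone parser sketch mirroring that loop (callback type and names
   are illustrative): */

typedef unsigned char u8;
typedef unsigned int  u32;

typedef void (*dict_cb)(void *ctx, const u8 *token, u32 token_len);

/* Walk a length-prefixed dictionary buffer, calling cb for every entry.
   Returns the number of entries parsed; stops at the first entry that
   would run past the end of the buffer. */
static u32 parse_autodict(const u8 *dict, u32 dict_len, dict_cb cb,
                          void *ctx) {

  u32 offset = 0, count = 0;

  while (offset < dict_len && offset + dict[offset] < dict_len) {

    if (cb) cb(ctx, dict + offset + 1, dict[offset]);
    offset += 1 + dict[offset];
    count++;

  }

  return count;

}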
*/ @@ -544,7 +540,7 @@ void load_auto(afl_state_t *afl) { if (len >= MIN_AUTO_EXTRA && len <= MAX_AUTO_EXTRA) { - maybe_add_auto((u8 *)afl, tmp, len); + maybe_add_auto(afl, tmp, len); } diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 452c5298..57b53c9f 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -681,7 +681,7 @@ u8 fuzz_one_original(afl_state_t *afl) { if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) { - maybe_add_auto((u8 *)afl, a_collect, a_len); + maybe_add_auto(afl, a_collect, a_len); } @@ -692,7 +692,7 @@ u8 fuzz_one_original(afl_state_t *afl) { if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) { - maybe_add_auto((u8 *)afl, a_collect, a_len); + maybe_add_auto(afl, a_collect, a_len); } @@ -2882,7 +2882,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) { - maybe_add_auto((u8 *)afl, a_collect, a_len); + maybe_add_auto(afl, a_collect, a_len); } @@ -2893,7 +2893,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) { - maybe_add_auto((u8 *)afl, a_collect, a_len); + maybe_add_auto(afl, a_collect, a_len); } diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index 4c0c9155..f21dd0b0 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -500,7 +500,7 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) { } - maybe_add_auto((u8 *)afl, (u8 *)&v, shape); + maybe_add_auto(afl, (u8 *)&v, shape); u64 rev; switch (shape) { @@ -509,15 +509,15 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) { break; case 2: rev = SWAP16((u16)v); - maybe_add_auto((u8 *)afl, (u8 *)&rev, shape); + maybe_add_auto(afl, (u8 *)&rev, shape); break; case 4: rev = SWAP32((u32)v); - maybe_add_auto((u8 *)afl, (u8 *)&rev, shape); + maybe_add_auto(afl, (u8 *)&rev, shape); break; case 8: rev = SWAP64(v); - maybe_add_auto((u8 *)afl, (u8 *)&rev, shape); + maybe_add_auto(afl, (u8 *)&rev, shape); break; } @@ -772,8 +772,8 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) { if (afl->pass_stats[key].total == 0) { - maybe_add_auto((u8 *)afl, o->v0, SHAPE_BYTES(h->shape)); - maybe_add_auto((u8 *)afl, o->v1, SHAPE_BYTES(h->shape)); + maybe_add_auto(afl, o->v0, SHAPE_BYTES(h->shape)); + maybe_add_auto(afl, o->v1, SHAPE_BYTES(h->shape)); } diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index e2d62bc6..97e4ee93 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -112,8 +112,9 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) { afl->fsrv.use_stdin = 1; afl->fsrv.map_size = map_size; - afl->fsrv.function_opt = (u8 *)afl; - afl->fsrv.function_ptr = &maybe_add_auto; + // afl_state_t is not available in forkserver.c + afl->fsrv.afl_ptr = (void *)afl; + afl->fsrv.autodict_func = (void (*)(void *, u8 *, u32))&maybe_add_auto; afl->cal_cycles = CAL_CYCLES; afl->cal_cycles_long = CAL_CYCLES_LONG; -- cgit 1.4.1 From af14acf2c148b1aef10414d1dd6c929c49abc11e Mon Sep 17 00:00:00 2001 From: root Date: Fri, 14 Aug 2020 14:35:05 +0200 Subject: Revert "Merge branch 'debug' into dev" This reverts commit a7537b5511ad767d2240cf2dc6d3e261daa676f9, reversing changes made to 15e799f7ae666418e75c6a79db833c5316b21f97. 
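/* The afl-fuzz-state.c hunk above wires maybe_add_auto() into the
   forkserver through an opaque pointer because afl_state_t is not visible
   from afl-forkserver.c.  A generic sketch of that callback-plus-context
   pattern, with the callback itself taking void * and casting inside
   (names and the printf payload are purely illustrative): */

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned int  u32;

struct consumer_state { u32 tokens_seen; };      /* stand-in for afl_state_t   */

struct producer {                                /* stand-in for the forkserver */

  void *ctx;                                     /* opaque "afl_ptr"            */
  void (*on_token)(void *ctx, const u8 *mem, u32 len);

};

static void add_token(void *ctx, const u8 *mem, u32 len) {

  struct consumer_state *st = (struct consumer_state *)ctx;
  st->tokens_seen++;
  printf("token of %u bytes: %.*s\n", len, (int)len, (const char *)mem);

}

int main(void) {

  struct consumer_state st = {0};
  struct producer       fsrv = {&st, add_token};

  /* The producer side only ever sees a void *. */
  fsrv.on_token(fsrv.ctx, (const u8 *)"PNG", 3);
  printf("tokens seen: %u\n", st.tokens_seen);
  return 0;

}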
--- GNUmakefile | 3 - README.md | 33 ---- afl_driver.cpp | 191 ----------------------- examples/aflpp_driver/aflpp_driver.c | 40 ++--- include/afl-fuzz.h | 36 +---- include/common.h | 1 - include/envs.h | 1 - include/forkserver.h | 2 - qemu_taint/README.md | 42 ------ qemu_taint/build_qemu_taint.sh | 7 - qemu_taint/clean.sh | 3 - src/afl-common.c | 155 ++++++++++++------- src/afl-forkserver.c | 17 +-- src/afl-fuzz-bitmap.c | 62 +------- src/afl-fuzz-init.c | 42 +----- src/afl-fuzz-one.c | 284 ++++------------------------------- src/afl-fuzz-queue.c | 177 +--------------------- src/afl-fuzz-run.c | 99 ++++-------- src/afl-fuzz-state.c | 30 ++-- src/afl-fuzz-stats.c | 5 +- src/afl-fuzz.c | 127 ++-------------- 21 files changed, 223 insertions(+), 1134 deletions(-) delete mode 100644 afl_driver.cpp delete mode 100644 qemu_taint/README.md delete mode 100755 qemu_taint/build_qemu_taint.sh delete mode 100755 qemu_taint/clean.sh (limited to 'src/afl-fuzz-one.c') diff --git a/GNUmakefile b/GNUmakefile index ae529ece..f9020a90 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -521,7 +521,6 @@ clean: $(MAKE) -C examples/argv_fuzzing clean $(MAKE) -C qemu_mode/unsigaction clean $(MAKE) -C qemu_mode/libcompcov clean - test -d qemu_taint/qemu && { cd qemu_taint ; ./clean.sh ; } rm -rf qemu_mode/qemu-3.1.1 ifeq "$(IN_REPO)" "1" test -d unicorn_mode/unicornafl && $(MAKE) -C unicorn_mode/unicornafl clean || true @@ -532,7 +531,6 @@ endif deepclean: clean rm -rf qemu_mode/qemu-3.1.1.tar.xz - rm -rf qemu_taint/qemu rm -rf unicorn_mode/unicornafl git reset --hard >/dev/null 2>&1 || true @@ -590,7 +588,6 @@ install: all $(MANPAGES) install -m 755 $(PROGS) $(SH_PROGS) $${DESTDIR}$(BIN_PATH) rm -f $${DESTDIR}$(BIN_PATH)/afl-as if [ -f afl-qemu-trace ]; then install -m 755 afl-qemu-trace $${DESTDIR}$(BIN_PATH); fi - if [ -f afl-qemu-taint ]; then install -m 755 afl-qemu-taint $${DESTDIR}$(BIN_PATH); fi if [ -f afl-gcc-fast ]; then set e; install -m 755 afl-gcc-fast $${DESTDIR}$(BIN_PATH); ln -sf afl-gcc-fast $${DESTDIR}$(BIN_PATH)/afl-g++-fast; install -m 755 afl-gcc-pass.so afl-gcc-rt.o $${DESTDIR}$(HELPER_PATH); fi if [ -f afl-clang-fast ]; then $(MAKE) -C llvm_mode install; fi if [ -f libdislocator.so ]; then set -e; install -m 755 libdislocator.so $${DESTDIR}$(HELPER_PATH); fi diff --git a/README.md b/README.md index b3dc5e45..97c0a0d7 100644 --- a/README.md +++ b/README.md @@ -1,36 +1,3 @@ -# qemu_taint variant. - -UPDATE: **WORKS NOW** **PLEASE TEST** **:-)** - -## HOWTO - -cd qemu_taint && ./build_qemu_taint.sh - -afl-fuzz -A ... - -## CAVEATS - - * llvm shmem persistent mode does not and can not not work - * MOpt works but totally ignores the taint information, so disabled here - * custom mutators? dunno if they work or not. depends on how they work. - * not tested with qemu_mode - * there are several debug checks to ensure the data is fine which slows down - fuzzing, if the beta experiment runs fine these will be improved and it - will result in quite a speed gain. - -## THE TAINT - -taint can be seen in out/taint/ - -the id:000 mirrors the out/queue entry, except the content it 0x00 for -untainted bytes and '!' for tainted bytes. -If a file has new tainted bytes compared to from which previous entry it -was created then there is a id:000[...].new file where the new bytes are -marked '!'. - -the mutation switches between fuzzing all tainted bytes in one cycle and -only new bytes in the other cycle. 
- # American Fuzzy Lop plus plus (afl++) AFL++ Logo diff --git a/afl_driver.cpp b/afl_driver.cpp deleted file mode 100644 index 7d6a6fd4..00000000 --- a/afl_driver.cpp +++ /dev/null @@ -1,191 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -// Platform detection. Copied from FuzzerInternal.h -#ifdef __linux__ -#define LIBFUZZER_LINUX 1 -#define LIBFUZZER_APPLE 0 -#define LIBFUZZER_NETBSD 0 -#define LIBFUZZER_FREEBSD 0 -#define LIBFUZZER_OPENBSD 0 -#elif __APPLE__ -#define LIBFUZZER_LINUX 0 -#define LIBFUZZER_APPLE 1 -#define LIBFUZZER_NETBSD 0 -#define LIBFUZZER_FREEBSD 0 -#define LIBFUZZER_OPENBSD 0 -#elif __NetBSD__ -#define LIBFUZZER_LINUX 0 -#define LIBFUZZER_APPLE 0 -#define LIBFUZZER_NETBSD 1 -#define LIBFUZZER_FREEBSD 0 -#define LIBFUZZER_OPENBSD 0 -#elif __FreeBSD__ -#define LIBFUZZER_LINUX 0 -#define LIBFUZZER_APPLE 0 -#define LIBFUZZER_NETBSD 0 -#define LIBFUZZER_FREEBSD 1 -#define LIBFUZZER_OPENBSD 0 -#elif __OpenBSD__ -#define LIBFUZZER_LINUX 0 -#define LIBFUZZER_APPLE 0 -#define LIBFUZZER_NETBSD 0 -#define LIBFUZZER_FREEBSD 0 -#define LIBFUZZER_OPENBSD 1 -#else -#error "Support for your platform has not been implemented" -#endif - -// libFuzzer interface is thin, so we don't include any libFuzzer headers. -int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); -__attribute__((weak)) int LLVMFuzzerInitialize(int *argc, char ***argv); - -// Notify AFL about persistent mode. -static volatile char AFL_PERSISTENT[] = "##SIG_AFL_PERSISTENT##"; -int __afl_persistent_loop(unsigned int); -static volatile char suppress_warning2 = AFL_PERSISTENT[0]; - -// Notify AFL about deferred forkserver. -static volatile char AFL_DEFER_FORKSVR[] = "##SIG_AFL_DEFER_FORKSRV##"; -void __afl_manual_init(); -static volatile char suppress_warning1 = AFL_DEFER_FORKSVR[0]; - -// Input buffer. -static const size_t kMaxAflInputSize = 1024000; -static uint8_t AflInputBuf[kMaxAflInputSize]; - -// Use this optionally defined function to output sanitizer messages even if -// user asks to close stderr. -__attribute__((weak)) void __sanitizer_set_report_fd(void *); - -// Keep track of where stderr content is being written to, so that -// dup_and_close_stderr can use the correct one. -static FILE *output_file = stderr; - -// Experimental feature to use afl_driver without AFL's deferred mode. -// Needs to run before __afl_auto_init. -__attribute__((constructor(0))) static void __decide_deferred_forkserver(void) { - if (getenv("AFL_DRIVER_DONT_DEFER")) { - if (unsetenv("__AFL_DEFER_FORKSRV")) { - perror("Failed to unset __AFL_DEFER_FORKSRV"); - abort(); - } - } -} - -// If the user asks us to duplicate stderr, then do it. -static void maybe_duplicate_stderr() { - char *stderr_duplicate_filename = - getenv("AFL_DRIVER_STDERR_DUPLICATE_FILENAME"); - - if (!stderr_duplicate_filename) - return; - - FILE *stderr_duplicate_stream = - freopen(stderr_duplicate_filename, "a+", stderr); - - if (!stderr_duplicate_stream) { - fprintf( - stderr, - "Failed to duplicate stderr to AFL_DRIVER_STDERR_DUPLICATE_FILENAME"); - abort(); - } - output_file = stderr_duplicate_stream; -} - -// Most of these I/O functions were inspired by/copied from libFuzzer's code. -static void discard_output(int fd) { - FILE *temp = fopen("/dev/null", "w"); - if (!temp) - abort(); - dup2(fileno(temp), fd); - fclose(temp); -} - -static void close_stdout() { discard_output(STDOUT_FILENO); } - -// Prevent the targeted code from writing to "stderr" but allow sanitizers and -// this driver to do so. 
-static void dup_and_close_stderr() { - int output_fileno = fileno(output_file); - int output_fd = dup(output_fileno); - if (output_fd <= 0) - abort(); - FILE *new_output_file = fdopen(output_fd, "w"); - if (!new_output_file) - abort(); - if (!__sanitizer_set_report_fd) - return; - __sanitizer_set_report_fd(reinterpret_cast(output_fd)); - discard_output(output_fileno); -} - -// Close stdout and/or stderr if user asks for it. -static void maybe_close_fd_mask() { - char *fd_mask_str = getenv("AFL_DRIVER_CLOSE_FD_MASK"); - if (!fd_mask_str) - return; - int fd_mask = atoi(fd_mask_str); - if (fd_mask & 2) - dup_and_close_stderr(); - if (fd_mask & 1) - close_stdout(); -} - -// Define LLVMFuzzerMutate to avoid link failures for targets that use it -// with libFuzzer's LLVMFuzzerCustomMutator. -size_t LLVMFuzzerMutate(uint8_t *Data, size_t Size, size_t MaxSize) { - assert(false && "LLVMFuzzerMutate should not be called from afl_driver"); - return 0; -} - -int main(int argc, char **argv) { - printf( - "======================= INFO =========================\n" - "This binary is built for AFL-fuzz.\n" - "To run the target function on individual input(s) execute this:\n" - " %s < INPUT_FILE\n" - "To fuzz with afl-fuzz execute this:\n" - " afl-fuzz [afl-flags] %s [-N]\n" - "afl-fuzz will run N iterations before " - "re-spawning the process (default: 1000)\n" - "======================================================\n", - argv[0], argv[0]); - - maybe_duplicate_stderr(); - maybe_close_fd_mask(); - if (LLVMFuzzerInitialize) - LLVMFuzzerInitialize(&argc, &argv); - // Do any other expensive one-time initialization here. - - int N = 100000; - if (argc == 2 && argv[1][0] == '-') - N = atoi(argv[1] + 1); - else if(argc == 2 && (N = atoi(argv[1])) > 0) - printf("WARNING: using the deprecated call style `%s %d`\n", argv[0], N); - - assert(N > 0); - - if (!getenv("AFL_DRIVER_DONT_DEFER")) - __afl_manual_init(); - - // Call LLVMFuzzerTestOneInput here so that coverage caused by initialization - // on the first execution of LLVMFuzzerTestOneInput is ignored. - uint8_t dummy_input[1] = {0}; - LLVMFuzzerTestOneInput(dummy_input, 1); - - while (__afl_persistent_loop(N)) { - ssize_t n_read = read(0, AflInputBuf, kMaxAflInputSize); - if (n_read > 0) { - LLVMFuzzerTestOneInput(AflInputBuf, n_read); - } - } - - printf("%s: successfully executed input(s)\n", argv[0]); -} diff --git a/examples/aflpp_driver/aflpp_driver.c b/examples/aflpp_driver/aflpp_driver.c index 8e0b554a..b764338e 100644 --- a/examples/aflpp_driver/aflpp_driver.c +++ b/examples/aflpp_driver/aflpp_driver.c @@ -106,7 +106,10 @@ If 1, close stdout at startup. If 2 close stderr; if 3 close both. #error "Support for your platform has not been implemented" #endif -int __afl_sharedmem_fuzzing = 0; +int __afl_sharedmem_fuzzing = 1; +extern unsigned int * __afl_fuzz_len; +extern unsigned char *__afl_fuzz_ptr; +// extern struct cmp_map *__afl_cmp_map; // libFuzzer interface is thin, so we don't include any libFuzzer headers. int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); @@ -272,7 +275,6 @@ int main(int argc, char **argv) { // Do any other expensive one-time initialization here. 
uint8_t dummy_input[64] = {0}; - uint8_t buf[1024000]; memcpy(dummy_input, (void *)AFL_PERSISTENT, sizeof(AFL_PERSISTENT)); memcpy(dummy_input + 32, (void *)AFL_DEFER_FORKSVR, sizeof(AFL_DEFER_FORKSVR)); @@ -283,24 +285,16 @@ int main(int argc, char **argv) { printf("WARNING: using the deprecated call style `%s %d`\n", argv[0], N); else if (argc > 1) { - if (!getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) { - - __afl_manual_init(); - - } - + __afl_sharedmem_fuzzing = 0; + __afl_manual_init(); return ExecuteFilesOnyByOne(argc, argv); } assert(N > 0); - if (!getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) { - - fprintf(stderr, "performing manual init\n"); - __afl_manual_init(); - - } + // if (!getenv("AFL_DRIVER_DONT_DEFER")) + __afl_manual_init(); // Call LLVMFuzzerTestOneInput here so that coverage caused by initialization // on the first execution of LLVMFuzzerTestOneInput is ignored. @@ -309,13 +303,25 @@ int main(int argc, char **argv) { int num_runs = 0; while (__afl_persistent_loop(N)) { - ssize_t r = read(0, buf, sizeof(buf)); +#ifdef _DEBUG + fprintf(stderr, "CLIENT crc: %016llx len: %u\n", + hash64(__afl_fuzz_ptr, *__afl_fuzz_len, 0xa5b35705), + *__afl_fuzz_len); + fprintf(stderr, "RECV:"); + for (int i = 0; i < *__afl_fuzz_len; i++) + fprintf(stderr, "%02x", __afl_fuzz_ptr[i]); + fprintf(stderr, "\n"); +#endif + if (*__afl_fuzz_len) { + + num_runs++; + LLVMFuzzerTestOneInput(__afl_fuzz_ptr, *__afl_fuzz_len); - if (r > 0) { LLVMFuzzerTestOneInput(buf, r); } + } } - printf("%s: successfully executed input(s)\n", argv[0]); + printf("%s: successfully executed %d input(s)\n", argv[0], num_runs); } diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index ad7b0cd6..ca7d10fe 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -133,10 +133,8 @@ extern s32 struct queue_entry { - u8 * fname; /* File name for the test case */ - u8 * fname_taint; /* File name for taint data */ - u32 len; /* Input length */ - struct queue_entry *prev; /* previous queue entry, if any */ + u8 *fname; /* File name for the test case */ + u32 len; /* Input length */ u8 cal_failed, /* Calibration failed? */ trim_done, /* Trimmed? */ @@ -150,10 +148,7 @@ struct queue_entry { is_ascii; /* Is the input just ascii text? */ u32 bitmap_size, /* Number of bits set in bitmap */ - fuzz_level, /* Number of fuzzing iterations */ - taint_bytes_all, /* Number of tainted bytes */ - taint_bytes_new, /* Number of new tainted bytes */ - taint_bytes_highest; /* highest offset in input */ + fuzz_level; /* Number of fuzzing iterations */ u64 exec_us, /* Execution time (us) */ handicap, /* Number of queue cycles behind */ @@ -385,8 +380,6 @@ typedef struct afl_state { char **argv; /* argv if needed */ - char **argv_taint; /* argv for taint mode */ - /* MOpt: Lots of globals, but mostly for the status UI and other things where it really makes no sense to haul them around as function parameters. 
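/* A stripped-down view of what the restored aflpp_driver.c above does once
   shared-memory fuzzing is switched back on: each testcase arrives through
   __afl_fuzz_ptr / __afl_fuzz_len rather than stdin.  Argument parsing,
   the file-replay fallback, the initial dummy invocation and debug output
   are all omitted; the __afl_* symbols are supplied by the afl++ compiler
   runtime, so this sketch only links when built with it. */

#include <stddef.h>
#include <stdint.h>

int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size);

extern unsigned int * __afl_fuzz_len;
extern unsigned char *__afl_fuzz_ptr;
int                   __afl_persistent_loop(unsigned int);
void                  __afl_manual_init(void);

int main(void) {

  __afl_manual_init();                      /* deferred forkserver start  */

  while (__afl_persistent_loop(100000)) {   /* N iterations per child     */

    if (*__afl_fuzz_len)                    /* length lives in shared mem */
      LLVMFuzzerTestOneInput(__afl_fuzz_ptr, *__afl_fuzz_len);

  }

  return 0;

}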
*/ @@ -438,9 +431,7 @@ typedef struct afl_state { *in_bitmap, /* Input bitmap */ *file_extension, /* File extension */ *orig_cmdline, /* Original command line */ - *infoexec, /* Command to execute on a new crash */ - *taint_input_file, /* fuzz_input_one input file */ - *taint_src, *taint_map; + *infoexec; /* Command to execute on a new crash */ u32 hang_tmout; /* Timeout used for hang det (ms) */ @@ -451,9 +442,7 @@ typedef struct afl_state { custom_only, /* Custom mutator only mode */ python_only, /* Python-only mode */ is_main_node, /* if this is the main node */ - is_secondary_node, /* if this is a secondary instance */ - taint_needs_splode, /* explode fuzz input */ - taint_mode; + is_secondary_node; /* if this is a secondary instance */ u32 stats_update_freq; /* Stats update frequency (execs) */ @@ -514,8 +503,7 @@ typedef struct afl_state { useless_at_start, /* Number of useless starting paths */ var_byte_count, /* Bitmap bytes with var behavior */ current_entry, /* Current queue entry ID */ - havoc_div, /* Cycle count divisor for havoc */ - taint_len, taint_count; + havoc_div; /* Cycle count divisor for havoc */ u64 total_crashes, /* Total number of crashes */ unique_crashes, /* Crashes with unique signatures */ @@ -602,9 +590,6 @@ typedef struct afl_state { char * cmplog_binary; afl_forkserver_t cmplog_fsrv; /* cmplog has its own little forkserver */ - /* Taint mode */ - afl_forkserver_t taint_fsrv; /* taint mode has its own little forkserver */ - /* Custom mutators */ struct custom_mutator *mutator; @@ -856,8 +841,7 @@ struct custom_mutator { }; -void afl_state_init_1(afl_state_t *, uint32_t map_size); -void afl_state_init_2(afl_state_t *, uint32_t map_size); +void afl_state_init(afl_state_t *, uint32_t map_size); void afl_state_deinit(afl_state_t *); /* Set stop_soon flag on all childs, kill all childs */ @@ -903,7 +887,7 @@ void deinit_py(void *); void mark_as_det_done(afl_state_t *, struct queue_entry *); void mark_as_variable(afl_state_t *, struct queue_entry *); void mark_as_redundant(afl_state_t *, struct queue_entry *, u8); -void add_to_queue(afl_state_t *, u8 *, u8 *, u32, struct queue_entry *, u8); +void add_to_queue(afl_state_t *, u8 *, u32, u8); void destroy_queue(afl_state_t *); void update_bitmap_score(afl_state_t *, struct queue_entry *); void cull_queue(afl_state_t *); @@ -913,9 +897,7 @@ u32 calculate_score(afl_state_t *, struct queue_entry *); void write_bitmap(afl_state_t *); u32 count_bits(afl_state_t *, u8 *); -u32 count_bits_len(afl_state_t *, u8 *, u32); u32 count_bytes(afl_state_t *, u8 *); -u32 count_bytes_len(afl_state_t *, u8 *, u32); u32 count_non_255_bytes(afl_state_t *, u8 *); #ifdef WORD_SIZE_64 void simplify_trace(afl_state_t *, u64 *); @@ -993,8 +975,6 @@ void check_if_tty(afl_state_t *); void setup_signal_handlers(void); void save_cmdline(afl_state_t *, u32, char **); void read_foreign_testcases(afl_state_t *, int); -void perform_taint_run(afl_state_t *afl, struct queue_entry *q, u8 *fname, - u8 *mem, u32 len); /* CmpLog */ diff --git a/include/common.h b/include/common.h index c7d57e07..87a7425b 100644 --- a/include/common.h +++ b/include/common.h @@ -55,7 +55,6 @@ extern u8 *doc_path; /* path to documentation dir */ @returns the path, allocating the string */ u8 *find_binary(u8 *fname); -u8 *find_afl_binary(u8 *fname, u8 *own_loc); /* Read a bitmap from file fname to memory This is for the -B option again. 
*/ diff --git a/include/envs.h b/include/envs.h index bd97b9cd..96ae91ba 100644 --- a/include/envs.h +++ b/include/envs.h @@ -123,7 +123,6 @@ static char *afl_environment_variables[] = { "AFL_SKIP_BIN_CHECK", "AFL_SKIP_CPUFREQ", "AFL_SKIP_CRASHES", - "AFL_TAINT_INPUT", "AFL_TMIN_EXACT", "AFL_TMPDIR", "AFL_TOKEN_FILE", diff --git a/include/forkserver.h b/include/forkserver.h index 59a9f150..0a7390ed 100644 --- a/include/forkserver.h +++ b/include/forkserver.h @@ -80,8 +80,6 @@ typedef struct afl_forkserver { u8 qemu_mode; /* if running in qemu mode or not */ - u8 taint_mode; /* if running taint analysis or not */ - u32 *shmem_fuzz_len; /* length of the fuzzing test case */ u8 *shmem_fuzz; /* allocated memory for fuzzing */ diff --git a/qemu_taint/README.md b/qemu_taint/README.md deleted file mode 100644 index 6a7d19af..00000000 --- a/qemu_taint/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# qemu_taint - -First level taint implementation with qemu for linux user mode - -**THIS IS NOT WORKING YET** **WIP** - -## What is this for - -On new queue entries (newly discovered paths into the target) this tainter -is run with the new input and the data gathered which bytes in the input -file are actually touched. - -Only touched bytes are then fuzzed by afl-fuzz - -## How to build - -./build_qemu_taint.sh - -## How to use - -Add the -A flag to afl-fuzz - -## Caveats - -For some targets this is amazing and improves fuzzing a lot, but if a target -copies all input bytes first (e.g. for creating a crc checksum or just to -safely work with the data), then this is not helping at all. - -## Future - -Two fuzz modes for a queue entry which will be switched back and forth: - - 1. fuzz all touched bytes - 2. fuzz only bytes that are newly touched (compared to the one this queue - entry is based on) - -## TODO - - * Direct trim: trim to highest touched byte, that is all we need to do - * add 5-25% dummy bytes to the queue entries? (maybe create a 2nd one?) - * Disable trim? - diff --git a/qemu_taint/build_qemu_taint.sh b/qemu_taint/build_qemu_taint.sh deleted file mode 100755 index b54c3e04..00000000 --- a/qemu_taint/build_qemu_taint.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -test -d qemu || git clone https://github.com/vanhauser-thc/qemu_taint qemu || exit 1 -cd qemu || exit 1 -test -d .git && { git stash ; git pull ; } -cp -fv ../../include/config.h ../../include/types.h . || exit 1 -./build.sh || exit 1 -cp -fv ./afl-qemu-taint ../.. diff --git a/qemu_taint/clean.sh b/qemu_taint/clean.sh deleted file mode 100755 index 10c44cac..00000000 --- a/qemu_taint/clean.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh -rm -f afl-qemu-taint qemu/afl-qemu-taint ../afl-qemu-taint -test -d qemu && { cd qemu ; ./clean.sh ; } diff --git a/src/afl-common.c b/src/afl-common.c index cefed8dc..367dec72 100644 --- a/src/afl-common.c +++ b/src/afl-common.c @@ -138,73 +138,62 @@ void argv_cpy_free(char **argv) { } -u8 *find_afl_binary(u8 *fname, u8 *own_loc) { - - u8 *tmp, *rsl, *own_copy, *cp; - - tmp = getenv("AFL_PATH"); - - if (tmp) { - - cp = alloc_printf("%s/%s", tmp, fname); - - if (access(cp, X_OK)) { FATAL("Unable to find '%s'", tmp); } - - return cp; +/* Rewrite argv for QEMU. 
*/ - } +char **get_qemu_argv(u8 *own_loc, u8 **target_path_p, int argc, char **argv) { - if (own_loc) { + char **new_argv = ck_alloc(sizeof(char *) * (argc + 4)); + u8 * tmp, *cp = NULL, *rsl, *own_copy; - own_copy = ck_strdup(own_loc); - rsl = strrchr(own_copy, '/'); + memcpy(&new_argv[3], &argv[1], (int)(sizeof(char *)) * (argc - 1)); + new_argv[argc - 1] = NULL; - if (rsl) { + new_argv[2] = *target_path_p; + new_argv[1] = "--"; - *rsl = 0; + /* Now we need to actually find the QEMU binary to put in argv[0]. */ - cp = alloc_printf("%s/%s", own_copy, fname); - ck_free(own_copy); + tmp = getenv("AFL_PATH"); - if (!access(cp, X_OK)) { return cp; } + if (tmp) { - } else { + cp = alloc_printf("%s/afl-qemu-trace", tmp); - ck_free(own_copy); + if (access(cp, X_OK)) { FATAL("Unable to find '%s'", tmp); } - } + *target_path_p = new_argv[0] = cp; + return new_argv; } - cp = alloc_printf("%s/%s", BIN_PATH, fname); - if (!access(cp, X_OK)) { return cp; } + own_copy = ck_strdup(own_loc); + rsl = strrchr(own_copy, '/'); - ck_free(cp); + if (rsl) { - return NULL; + *rsl = 0; -} + cp = alloc_printf("%s/afl-qemu-trace", own_copy); + ck_free(own_copy); -/* Rewrite argv for QEMU. */ + if (!access(cp, X_OK)) { -char **get_qemu_argv(u8 *own_loc, u8 **target_path_p, int argc, char **argv) { + *target_path_p = new_argv[0] = cp; + return new_argv; - char **new_argv = ck_alloc(sizeof(char *) * (argc + 4)); - u8 * cp = NULL; + } - memcpy(&new_argv[3], &argv[1], (int)(sizeof(char *)) * (argc - 1)); - new_argv[argc - 1] = NULL; + } else { - new_argv[2] = *target_path_p; - new_argv[1] = "--"; + ck_free(own_copy); - /* Now we need to actually find the QEMU binary to put in argv[0]. */ + } - cp = find_afl_binary("afl-qemu-trace", own_loc); + if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) { - if (cp) { + if (cp) { ck_free(cp); } + *target_path_p = new_argv[0] = ck_strdup(BIN_PATH "/afl-qemu-trace"); - *target_path_p = new_argv[0] = cp; return new_argv; } @@ -236,7 +225,7 @@ char **get_qemu_argv(u8 *own_loc, u8 **target_path_p, int argc, char **argv) { char **get_wine_argv(u8 *own_loc, u8 **target_path_p, int argc, char **argv) { char **new_argv = ck_alloc(sizeof(char *) * (argc + 3)); - u8 * cp = NULL; + u8 * tmp, *cp = NULL, *rsl, *own_copy; memcpy(&new_argv[2], &argv[1], (int)(sizeof(char *)) * (argc - 1)); new_argv[argc - 1] = NULL; @@ -245,16 +234,66 @@ char **get_wine_argv(u8 *own_loc, u8 **target_path_p, int argc, char **argv) { /* Now we need to actually find the QEMU binary to put in argv[0]. 
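/* What the restored get_qemu_argv() above ultimately builds is an argv of
   the form { "afl-qemu-trace", "--", <target>, <original target args...>,
   NULL }, so the real target becomes an argument of the QEMU wrapper.  A
   simplified reconstruction using calloc() instead of ck_alloc() and with
   the binary lookup left out: */

#include <stdlib.h>
#include <string.h>

/* Build a NULL-terminated argv that runs `qemu_path -- target args...`;
   argc/argv are the ones originally given for the target. */
static char **wrap_argv_for_qemu(char *qemu_path, char *target_path,
                                 int argc, char **argv) {

  char **new_argv = calloc(argc + 3, sizeof(char *));
  if (!new_argv) return NULL;

  new_argv[0] = qemu_path;
  new_argv[1] = "--";
  new_argv[2] = target_path;
  memcpy(&new_argv[3], &argv[1], sizeof(char *) * (argc - 1));

  /* calloc() already zeroed the final slot, which acts as the NULL
     terminator that execv() expects. */
  return new_argv;

}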
*/ - cp = find_afl_binary("afl-qemu-trace", own_loc); + tmp = getenv("AFL_PATH"); + + if (tmp) { + + cp = alloc_printf("%s/afl-qemu-trace", tmp); - if (cp) { + if (access(cp, X_OK)) { FATAL("Unable to find '%s'", tmp); } ck_free(cp); - cp = find_afl_binary("afl-wine-trace", own_loc); - if (cp) { + cp = alloc_printf("%s/afl-wine-trace", tmp); - *target_path_p = new_argv[0] = cp; + if (access(cp, X_OK)) { FATAL("Unable to find '%s'", tmp); } + + *target_path_p = new_argv[0] = cp; + return new_argv; + + } + + own_copy = ck_strdup(own_loc); + rsl = strrchr(own_copy, '/'); + + if (rsl) { + + *rsl = 0; + + cp = alloc_printf("%s/afl-qemu-trace", own_copy); + + if (cp && !access(cp, X_OK)) { + + ck_free(cp); + + cp = alloc_printf("%s/afl-wine-trace", own_copy); + + if (!access(cp, X_OK)) { + + *target_path_p = new_argv[0] = cp; + return new_argv; + + } + + } + + ck_free(own_copy); + + } else { + + ck_free(own_copy); + + } + + u8 *ncp = BIN_PATH "/afl-qemu-trace"; + + if (!access(ncp, X_OK)) { + + ncp = BIN_PATH "/afl-wine-trace"; + + if (!access(ncp, X_OK)) { + + *target_path_p = new_argv[0] = ck_strdup(ncp); return new_argv; } @@ -262,21 +301,25 @@ char **get_wine_argv(u8 *own_loc, u8 **target_path_p, int argc, char **argv) { } SAYF("\n" cLRD "[-] " cRST - "Oops, unable to find the afl-qemu-trace and afl-wine-trace binaries.\n" - "The afl-qemu-trace binary must be built separately by following the " - "instructions\n" - "in qemu_mode/README.md. If you already have the binary installed, you " - "may need\n" - "to specify the location via AFL_PATH in the environment.\n\n" + "Oops, unable to find the '%s' binary. The binary must be " + "built\n" + " separately by following the instructions in " + "qemu_mode/README.md. " + "If you\n" + " already have the binary installed, you may need to specify " + "AFL_PATH in the\n" + " environment.\n\n" + " Of course, even without QEMU, afl-fuzz can still work with " "binaries that are\n" " instrumented at compile time with afl-gcc. It is also possible to " "use it as a\n" " traditional non-instrumented fuzzer by specifying '-n' in the " "command " - "line.\n"); + "line.\n", + ncp); - FATAL("Failed to locate 'afl-qemu-trace' and 'afl-wine-trace'."); + FATAL("Failed to locate '%s'.", ncp); } diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c index 173cc70f..25983f26 100644 --- a/src/afl-forkserver.c +++ b/src/afl-forkserver.c @@ -498,21 +498,11 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv, char pid_buf[16]; sprintf(pid_buf, "%d", fsrv->fsrv_pid); - - if (fsrv->taint_mode) { - - setenv("__AFL_TARGET_PID3", pid_buf, 1); - - } else if (fsrv->cmplog_binary) { - + if (fsrv->cmplog_binary) setenv("__AFL_TARGET_PID2", pid_buf, 1); - - } else { - + else setenv("__AFL_TARGET_PID1", pid_buf, 1); - } - /* Close the unneeded endpoints. 
*/ close(ctl_pipe[0]); @@ -947,7 +937,7 @@ void afl_fsrv_write_to_testcase(afl_forkserver_t *fsrv, u8 *buf, size_t len) { } else { - s32 fd; + s32 fd = fsrv->out_fd; if (fsrv->out_file) { @@ -966,7 +956,6 @@ void afl_fsrv_write_to_testcase(afl_forkserver_t *fsrv, u8 *buf, size_t len) { } else { - fd = fsrv->out_fd; lseek(fd, 0, SEEK_SET); } diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c index db57061d..1b9df624 100644 --- a/src/afl-fuzz-bitmap.c +++ b/src/afl-fuzz-bitmap.c @@ -177,40 +177,6 @@ u32 count_bits(afl_state_t *afl, u8 *mem) { } -u32 count_bits_len(afl_state_t *afl, u8 *mem, u32 len) { - - u32 *ptr = (u32 *)mem; - u32 i = (len >> 2); - u32 ret = 0; - - (void)(afl); - - if (len % 4) i++; - - while (i--) { - - u32 v = *(ptr++); - - /* This gets called on the inverse, virgin bitmap; optimize for sparse - data. */ - - if (v == 0xffffffff) { - - ret += 32; - continue; - - } - - v -= ((v >> 1) & 0x55555555); - v = (v & 0x33333333) + ((v >> 2) & 0x33333333); - ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24; - - } - - return ret; - -} - /* Count the number of bytes set in the bitmap. Called fairly sporadically, mostly to update the status screen or calibrate and examine confirmed new paths. */ @@ -237,32 +203,6 @@ u32 count_bytes(afl_state_t *afl, u8 *mem) { } -u32 count_bytes_len(afl_state_t *afl, u8 *mem, u32 len) { - - u32 *ptr = (u32 *)mem; - u32 i = (len >> 2); - u32 ret = 0; - - (void)(afl); - - if (len % 4) i++; - - while (i--) { - - u32 v = *(ptr++); - - if (!v) { continue; } - if (v & 0x000000ff) { ++ret; } - if (v & 0x0000ff00) { ++ret; } - if (v & 0x00ff0000) { ++ret; } - if (v & 0xff000000) { ++ret; } - - } - - return ret; - -} - /* Count the number of non-255 bytes set in the bitmap. Used strictly for the status screen, several calls per second or so. 
*/ @@ -655,7 +595,7 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) { #endif /* ^!SIMPLE_FILES */ - add_to_queue(afl, queue_fn, mem, len, afl->queue_top, 0); + add_to_queue(afl, queue_fn, len, 0); if (hnb == 2) { diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 359eef85..350a8599 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -712,7 +712,7 @@ void read_testcases(afl_state_t *afl) { if (!access(dfn, F_OK)) { passed_det = 1; } - add_to_queue(afl, fn2, NULL, st.st_size, NULL, passed_det); + add_to_queue(afl, fn2, st.st_size, passed_det); } @@ -771,13 +771,9 @@ void perform_dry_run(afl_state_t *afl) { close(fd); res = calibrate_case(afl, q, use_mem, 0, 1); + ck_free(use_mem); - if (afl->stop_soon) { - - ck_free(use_mem); - return; - - } + if (afl->stop_soon) { return; } if (res == afl->crash_mode || res == FSRV_RUN_NOBITS) { @@ -964,10 +960,6 @@ void perform_dry_run(afl_state_t *afl) { } - /* perform taint gathering on the input seed */ - if (afl->taint_mode) perform_taint_run(afl, q, q->fname, use_mem, q->len); - ck_free(use_mem); - q = q->next; } @@ -1446,10 +1438,6 @@ static void handle_existing_out_dir(afl_state_t *afl) { u8 *orig_q = alloc_printf("%s/queue", afl->out_dir); - u8 *fnt = alloc_printf("%s/taint", afl->out_dir); - mkdir(fnt, 0755); // ignore errors - ck_free(fnt); - afl->in_dir = alloc_printf("%s/_resume", afl->out_dir); rename(orig_q, afl->in_dir); /* Ignore errors */ @@ -1506,20 +1494,6 @@ static void handle_existing_out_dir(afl_state_t *afl) { if (delete_files(fn, CASE_PREFIX)) { goto dir_cleanup_failed; } ck_free(fn); - if (afl->taint_mode) { - - fn = alloc_printf("%s/taint", afl->out_dir); - mkdir(fn, 0755); // ignore errors - - u8 *fn2 = alloc_printf("%s/taint/.input", afl->out_dir); - unlink(fn2); // ignore errors - ck_free(fn2); - - if (delete_files(fn, CASE_PREFIX)) { goto dir_cleanup_failed; } - ck_free(fn); - - } - /* All right, let's do out_dir>/crashes/id:* and * out_dir>/hangs/id:*. */ @@ -1747,16 +1721,6 @@ void setup_dirs_fds(afl_state_t *afl) { if (mkdir(tmp, 0700)) { PFATAL("Unable to create '%s'", tmp); } ck_free(tmp); - /* Taint directory if taint_mode. */ - - if (afl->taint_mode) { - - tmp = alloc_printf("%s/taint", afl->out_dir); - if (mkdir(tmp, 0700)) { PFATAL("Unable to create '%s'", tmp); } - ck_free(tmp); - - } - /* Top-level directory for queue metadata used for session resume and related tasks. 
*/ diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 6d52b2b4..57b53c9f 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -458,171 +458,28 @@ u8 fuzz_one_original(afl_state_t *afl) { } - u32 tmp_val = 0; - - if (unlikely(afl->taint_mode)) { - - tmp_val = afl->queue_cycle % 2; // starts with 1 - ret_val = 0; - - if (unlikely(afl->queue_cur->cal_failed && !tmp_val)) goto abandon_entry; - if (unlikely(!afl->skip_deterministic && !afl->queue_cur->passed_det && - !tmp_val)) - goto abandon_entry; - if ((!afl->queue_cur->taint_bytes_new || - afl->queue_cur->taint_bytes_new == afl->queue_cur->len) && - !tmp_val) - goto abandon_entry; - - ret_val = 1; - - s32 dst = 0, i; - temp_len = len = afl->queue_cur->len; - s32 j = 0; // tmp - - fd = open(afl->queue_cur->fname, O_RDONLY); - afl->taint_src = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - if (fd < 0 || (ssize_t)afl->taint_src == -1) - FATAL("unable to open '%s'", afl->queue_cur->fname); - close(fd); - afl->taint_needs_splode = 1; - - switch (tmp_val) { - - case 1: // fuzz only tainted bytes - - // special case: all or nothing tainted. in this case we act like - // nothing is special. this is not the taint you are looking for ... - if (!afl->queue_cur->taint_bytes_all || - afl->queue_cur->taint_bytes_all == (u32)len) { - - orig_in = in_buf = afl->taint_src; - afl->taint_needs_splode = 0; - break; - - } - - fd = open(afl->taint_input_file, O_RDONLY); - temp_len = len = afl->taint_len = afl->queue_cur->taint_bytes_all; - orig_in = in_buf = - mmap(0, len >= MAX_FILE - 65536 ? MAX_FILE : len + 65536, - PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - if (fd < 0 || (ssize_t)in_buf == -1) - FATAL("unable to open '%s'", afl->taint_input_file); - close(fd); - - fd = open(afl->queue_cur->fname_taint, O_RDONLY); - afl->taint_map = mmap(0, afl->queue_cur->len, PROT_READ | PROT_WRITE, - MAP_PRIVATE, fd, 0); - if (fd < 0 || (ssize_t)in_buf == -1) - FATAL("unable to open '%s'", afl->queue_cur->fname_taint); - close(fd); - - for (i = 0; i < (s32)afl->queue_cur->len && dst < len; i++) - if (afl->taint_map[i]) in_buf[dst++] = afl->taint_src[i]; - - // FIXME DEBUG TODO XXX - for (i = 0; i < (s32)afl->queue_cur->len; i++) { - - switch (afl->taint_map[i]) { - - case 0x0: - break; - case '!': - j++; - break; - default: - FATAL( - "invalid taint map entry byte 0x%02x at position %d " - "(passed_det:%d)\n", - afl->taint_map[i], i, afl->queue_cur->passed_det); - - } - - } - - if (j != len) - FATAL("different taint values in map vs in queue (%d != %d)", j, len); - - break; - - case 0: // fuzz only newly tainted bytes - - fd = open(afl->taint_input_file, O_RDONLY); - temp_len = len = afl->taint_len = afl->queue_cur->taint_bytes_new; - orig_in = in_buf = - mmap(0, len >= MAX_FILE - 65536 ? 
MAX_FILE : len + 65536, - PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - if (fd < 0 || (ssize_t)in_buf == -1) - FATAL("unable to open '%s'", afl->taint_input_file); - close(fd); - - u8 *fn = alloc_printf("%s.new", afl->queue_cur->fname_taint); - if (!fn) FATAL("OOM"); - fd = open(fn, O_RDWR); - afl->taint_map = mmap(0, afl->queue_cur->len, PROT_READ | PROT_WRITE, - MAP_PRIVATE, fd, 0); - if (fd < 0 || (ssize_t)in_buf == -1) - FATAL("unable to open '%s' for %u bytes", fn, len); - close(fd); - ck_free(fn); - - for (i = 0; i < (s32)afl->queue_cur->len && dst < len; i++) - if (afl->taint_map[i]) in_buf[dst++] = afl->taint_src[i]; - - // FIXME DEBUG TODO XXX - for (i = 0; i < (s32)afl->queue_cur->len; i++) { - - switch (afl->taint_map[i]) { - - case 0x0: - break; - case '!': - j++; - break; - default: - FATAL( - "invalid taint map entry byte 0x%02x at position %d " - "(passed_det:%d)\n", - afl->taint_map[i], i, afl->queue_cur->passed_det); - - } - - } - - if (j != len) - FATAL("different taint values in map vs in queue (%d != %d)", j, len); - - break; - - } - - } else { - - /* Map the test case into memory. */ - - fd = open(afl->queue_cur->fname, O_RDONLY); + /* Map the test case into memory. */ - if (unlikely(fd < 0)) { + fd = open(afl->queue_cur->fname, O_RDONLY); - PFATAL("Unable to open '%s'", afl->queue_cur->fname); + if (unlikely(fd < 0)) { - } + PFATAL("Unable to open '%s'", afl->queue_cur->fname); - len = afl->queue_cur->len; + } - orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + len = afl->queue_cur->len; - if (unlikely(orig_in == MAP_FAILED)) { + orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - PFATAL("Unable to mmap '%s' with len %d", afl->queue_cur->fname, len); + if (unlikely(orig_in == MAP_FAILED)) { - } - - close(fd); + PFATAL("Unable to mmap '%s' with len %d", afl->queue_cur->fname, len); } + close(fd); + /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every single byte anyway, so it wouldn't give us any performance or memory usage benefits. 
*/ @@ -645,12 +502,8 @@ u8 fuzz_one_original(afl_state_t *afl) { afl->queue_cur->exec_cksum = 0; - if (unlikely(afl->taint_needs_splode)) - res = calibrate_case(afl, afl->queue_cur, afl->taint_src, - afl->queue_cycle - 1, 0); - else - res = calibrate_case(afl, afl->queue_cur, in_buf, afl->queue_cycle - 1, - 0); + res = + calibrate_case(afl, afl->queue_cur, in_buf, afl->queue_cycle - 1, 0); if (unlikely(res == FSRV_RUN_ERROR)) { @@ -673,8 +526,8 @@ u8 fuzz_one_original(afl_state_t *afl) { * TRIMMING * ************/ - if (unlikely(!afl->non_instrumented_mode && !afl->queue_cur->trim_done && - !afl->disable_trim && !afl->taint_needs_splode)) { + if (!afl->non_instrumented_mode && !afl->queue_cur->trim_done && + !afl->disable_trim) { u8 res = trim_case(afl, afl->queue_cur, in_buf); @@ -711,26 +564,13 @@ u8 fuzz_one_original(afl_state_t *afl) { if (afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized) { - int res; - if (unlikely(afl->taint_needs_splode)) { - - len = afl->queue_cur->len; - memcpy(out_buf, afl->taint_src, len); - res = input_to_state_stage(afl, afl->taint_src, out_buf, len, - afl->queue_cur->exec_cksum); - // just abandon as success - ret_val = 0; - res = 1; - - } else { + if (input_to_state_stage(afl, in_buf, out_buf, len, + afl->queue_cur->exec_cksum)) { - res = input_to_state_stage(afl, in_buf, out_buf, len, - afl->queue_cur->exec_cksum); + goto abandon_entry; } - if (unlikely(res)) { goto abandon_entry; } - } /* Skip right away if -d is given, if it has not been chosen sufficiently @@ -2293,18 +2133,8 @@ havoc_stage: if (actually_clone) { - if (unlikely(afl->taint_needs_splode)) { - - clone_len = choose_block_len(afl, afl->queue_cur->len); - clone_from = - rand_below(afl, afl->queue_cur->len - clone_len + 1); - - } else { - - clone_len = choose_block_len(afl, temp_len); - clone_from = rand_below(afl, temp_len - clone_len + 1); - - } + clone_len = choose_block_len(afl, temp_len); + clone_from = rand_below(afl, temp_len - clone_len + 1); } else { @@ -2326,11 +2156,7 @@ havoc_stage: if (actually_clone) { - if (unlikely(afl->taint_needs_splode)) - memcpy(new_buf + clone_to, afl->taint_src + clone_from, - clone_len); - else - memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); + memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); } else { @@ -2342,7 +2168,7 @@ havoc_stage: } /* Tail */ - memmove(new_buf + clone_to + clone_len, out_buf + clone_to, + memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, temp_len - clone_to); swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch)); @@ -2363,49 +2189,16 @@ havoc_stage: if (temp_len < 2) { break; } - if (unlikely(afl->taint_needs_splode)) { - - copy_len = choose_block_len(afl, afl->queue_cur->len - 1); - copy_from = rand_below(afl, afl->queue_cur->len - copy_len + 1); - copy_to = rand_below(afl, temp_len + 1); - - } else { + copy_len = choose_block_len(afl, temp_len - 1); - copy_len = choose_block_len(afl, temp_len - 1); - copy_from = rand_below(afl, temp_len - copy_len + 1); - copy_to = rand_below(afl, temp_len - copy_len + 1); - - } + copy_from = rand_below(afl, temp_len - copy_len + 1); + copy_to = rand_below(afl, temp_len - copy_len + 1); if (rand_below(afl, 4)) { if (copy_from != copy_to) { - if (unlikely(afl->taint_needs_splode)) { - - if (temp_len >= (s32)(copy_to + copy_len)) { - - memcpy(out_buf + copy_to, afl->taint_src + copy_from, - copy_len); - - } else { - - u8 *new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), - copy_to + copy_len); - memcpy(new_buf, in_buf, copy_to); - memcpy(new_buf + copy_to, 
afl->taint_src + copy_from, - copy_len); - swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch)); - out_buf = new_buf; - temp_len = copy_to + copy_len; - - } - - } else { - - memmove(out_buf + copy_to, out_buf + copy_from, copy_len); - - } + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); } @@ -2671,17 +2464,10 @@ havoc_stage: splices them together at some offset, then relies on the havoc code to mutate that blob. */ - u32 saved_len; - - if (unlikely(afl->taint_needs_splode)) - saved_len = afl->taint_len; - else - saved_len = afl->queue_cur->len; - retry_splicing: if (afl->use_splicing && splice_cycle++ < SPLICE_CYCLES && - afl->queued_paths > 1 && saved_len > 1) { + afl->queued_paths > 1 && afl->queue_cur->len > 1) { struct queue_entry *target; u32 tid, split_at; @@ -2694,7 +2480,7 @@ retry_splicing: if (in_buf != orig_in) { in_buf = orig_in; - len = saved_len; + len = afl->queue_cur->len; } @@ -2765,8 +2551,6 @@ retry_splicing: ret_val = 0; - goto abandon_entry; - /* we are through with this queue entry - for this iteration */ abandon_entry: @@ -2786,17 +2570,7 @@ abandon_entry: ++afl->queue_cur->fuzz_level; - if (unlikely(afl->taint_needs_splode)) { - - munmap(afl->taint_src, afl->queue_cur->len); - munmap(orig_in, afl->taint_len); - munmap(afl->taint_map, afl->queue_cur->len); - - } else { - - munmap(orig_in, afl->queue_cur->len); - - } + munmap(orig_in, afl->queue_cur->len); return ret_val; diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 43794018..f35df914 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -103,169 +103,6 @@ void mark_as_redundant(afl_state_t *afl, struct queue_entry *q, u8 state) { } -void perform_taint_run(afl_state_t *afl, struct queue_entry *q, u8 *fname, - u8 *mem, u32 len) { - - u8 * ptr, *fn = fname; - u32 bytes = 0, plen = len; - struct queue_entry *prev = q->prev; - - if (plen % 4) plen = plen + 4 - (len % 4); - - if ((ptr = strrchr(fname, '/')) != NULL) fn = ptr + 1; - q->fname_taint = alloc_printf("%s/taint/%s", afl->out_dir, fn); - - if (q->fname_taint) { - - u8 *save = ck_maybe_grow(BUF_PARAMS(out_scratch), afl->fsrv.map_size); - memcpy(save, afl->taint_fsrv.trace_bits, afl->fsrv.map_size); - - afl->taint_fsrv.map_size = plen; // speed :) - write_to_testcase(afl, mem, len); - if (afl_fsrv_run_target(&afl->taint_fsrv, afl->fsrv.exec_tmout * 4, - &afl->stop_soon) == 0) { - - bytes = q->taint_bytes_all = - count_bytes_len(afl, afl->taint_fsrv.trace_bits, plen); - if (afl->debug) - fprintf(stderr, "Debug: tainted %u out of %u bytes\n", bytes, len); - - /* DEBUG FIXME TODO XXX */ - u32 i; - for (i = 0; i < len; i++) { - - if (afl->taint_fsrv.trace_bits[i] && - afl->taint_fsrv.trace_bits[i] != '!') - FATAL("invalid taint map value %02x at pos %d", - afl->taint_fsrv.trace_bits[i], i); - - } - - if (len < plen) - for (i = len; i < plen; i++) { - - if (afl->taint_fsrv.trace_bits[i]) - FATAL("invalid taint map value %02x in padding at pos %d", - afl->taint_fsrv.trace_bits[i], i); - - } - - } - - // if all is tainted we do not need to write taint data away - if (bytes && bytes < len) { - - // save the bytes away - int w = open(q->fname_taint, O_CREAT | O_WRONLY, 0644); - if (w >= 0) { - - ck_write(w, afl->taint_fsrv.trace_bits, len, q->fname_taint); - close(w); - - // find the highest tainted offset in the input (for trim opt) - s32 i = len; - while (i > 0 && !afl->taint_fsrv.trace_bits[i - 1]) - i--; - q->taint_bytes_highest = i; - - afl->taint_count++; - - } else { - - FATAL("could not create %s", q->fname_taint); - 
q->taint_bytes_all = bytes = 0; - - } - - // it is possible that there is no main taint file - if the whole file - // is tainted - but a .new taint file if it had new tainted bytes - - // check if there is a previous queue entry and if it had taint - if (bytes && prev && prev->taint_bytes_all && - prev->taint_bytes_all < prev->len) { - - // check if there are new bytes in the taint vs the previous - int r = open(prev->fname_taint, O_RDONLY); - - if (r >= 0) { - - u8 *bufr = mmap(0, prev->len, PROT_READ, MAP_PRIVATE, r, 0); - - if ((ssize_t)bufr != -1) { - - u32 i; - u8 *tmp = ck_maybe_grow(BUF_PARAMS(in_scratch), plen); - memset(tmp, 0, plen); - - for (i = 0; i < len; i++) - if (afl->taint_fsrv.trace_bits[i] && (i >= prev->len || !bufr[i])) - tmp[i] = '!'; - - q->taint_bytes_new = count_bytes_len(afl, tmp, plen); - - if (afl->debug) - fprintf(stderr, "Debug: %u new taint out of %u bytes\n", bytes, - len); - - if (q->taint_bytes_new) { - - u8 *fnw = alloc_printf("%s.new", q->fname_taint); - if (fnw) { - - int w = open(fnw, O_CREAT | O_WRONLY, 0644); - if (w >= 0) { - - ck_write(w, tmp, plen, fnw); - close(w); - - } else { - - FATAL("count not create '%s'", fnw); - q->taint_bytes_new = 0; - - } - - ck_free(fnw); - - } else { - - q->taint_bytes_new = 0; - - } - - } - - munmap(bufr, prev->len); - - } - - close(r); - - } - - } - - } - - memcpy(afl->taint_fsrv.trace_bits, save, afl->fsrv.map_size); - - } - - if (!bytes) { - - q->taint_bytes_highest = q->taint_bytes_all = q->taint_bytes_new = 0; - - if (q->fname_taint) { - - ck_free(q->fname_taint); - q->fname_taint = NULL; - - } - - } - -} - /* check if ascii or UTF-8 */ static u8 check_if_text(struct queue_entry *q) { @@ -375,12 +212,10 @@ static u8 check_if_text(struct queue_entry *q) { /* Append new test case to the queue. */ -void add_to_queue(afl_state_t *afl, u8 *fname, u8 *mem, u32 len, - struct queue_entry *prev_q, u8 passed_det) { +void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { struct queue_entry *q = ck_alloc(sizeof(struct queue_entry)); - q->prev = prev_q; q->fname = fname; q->len = len; q->depth = afl->cur_depth + 1; @@ -419,13 +254,6 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u8 *mem, u32 len, afl->last_path_time = get_cur_time(); - /* trigger the tain gathering if this is not a dry run */ - if (afl->taint_mode && mem) { perform_taint_run(afl, q, fname, mem, len); } - - /* only redqueen currently uses is_ascii */ - if (afl->shm.cmplog_mode) q->is_ascii = check_if_text(q); - - /* run custom mutators afl_custom_queue_new_entry() */ if (afl->custom_mutators_count) { LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, { @@ -445,6 +273,9 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u8 *mem, u32 len, } + /* only redqueen currently uses is_ascii */ + if (afl->shm.cmplog_mode) q->is_ascii = check_if_text(q); + } /* Destroy the entire queue. 
*/ diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index 5381723d..d3f823c9 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -350,9 +350,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem, } - if (unlikely(afl->taint_mode)) - q->exec_cksum = 0; - else if (q->exec_cksum) { + if (q->exec_cksum) { memcpy(afl->first_trace, afl->fsrv.trace_bits, afl->fsrv.map_size); hnb = has_new_bits(afl, afl->virgin_bits); @@ -755,65 +753,56 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) { while (remove_pos < q->len) { u32 trim_avail = MIN(remove_len, q->len - remove_pos); + u64 cksum; - if (likely((!q->taint_bytes_highest) || - (q->len - trim_avail > q->taint_bytes_highest))) { + write_with_gap(afl, in_buf, q->len, remove_pos, trim_avail); - u64 cksum; + fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout); + ++afl->trim_execs; - write_with_gap(afl, in_buf, q->len, remove_pos, trim_avail); + if (afl->stop_soon || fault == FSRV_RUN_ERROR) { goto abort_trimming; } - fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout); - ++afl->trim_execs; - - if (afl->stop_soon || fault == FSRV_RUN_ERROR) { goto abort_trimming; } - - /* Note that we don't keep track of crashes or hangs here; maybe TODO? - */ - - cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST); - - /* If the deletion had no impact on the trace, make it permanent. This - isn't perfect for variable-path inputs, but we're just making a - best-effort pass, so it's not a big deal if we end up with false - negatives every now and then. */ - - if (cksum == q->exec_cksum) { + /* Note that we don't keep track of crashes or hangs here; maybe TODO? + */ - u32 move_tail = q->len - remove_pos - trim_avail; + cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST); - q->len -= trim_avail; - len_p2 = next_pow2(q->len); + /* If the deletion had no impact on the trace, make it permanent. This + isn't perfect for variable-path inputs, but we're just making a + best-effort pass, so it's not a big deal if we end up with false + negatives every now and then. */ - memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail, - move_tail); + if (cksum == q->exec_cksum) { - /* Let's save a clean trace, which will be needed by - update_bitmap_score once we're done with the trimming stuff. */ + u32 move_tail = q->len - remove_pos - trim_avail; - if (!needs_write) { + q->len -= trim_avail; + len_p2 = next_pow2(q->len); - needs_write = 1; - memcpy(afl->clean_trace, afl->fsrv.trace_bits, afl->fsrv.map_size); + memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail, + move_tail); - } + /* Let's save a clean trace, which will be needed by + update_bitmap_score once we're done with the trimming stuff. */ - } else { + if (!needs_write) { - remove_pos += remove_len; + needs_write = 1; + memcpy(afl->clean_trace, afl->fsrv.trace_bits, afl->fsrv.map_size); } - /* Since this can be slow, update the screen every now and then. */ - if (!(trim_exec++ % afl->stats_update_freq)) { show_stats(afl); } - ++afl->stage_cur; - } else { remove_pos += remove_len; } + /* Since this can be slow, update the screen every now and then. */ + + if (!(trim_exec++ % afl->stats_update_freq)) { show_stats(afl); } + ++afl->stage_cur; + } remove_len >>= 1; @@ -866,8 +855,6 @@ abort_trimming: } -#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size - /* Write a modified test case, run program, process results. Handle error conditions, returning 1 if it's time to bail out. 
This is a helper function for fuzz_one(). */ @@ -877,32 +864,6 @@ common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u32 len) { u8 fault; - if (unlikely(afl->taint_needs_splode)) { - - s32 new_len = afl->queue_cur->len + len - afl->taint_len; - if (new_len < 4) - new_len = 4; - else if (new_len > MAX_FILE) - new_len = MAX_FILE; - u8 *new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), new_len); - - u32 i, taint = 0; - for (i = 0; i < (u32)new_len; i++) { - - if (i >= afl->taint_len || i >= afl->queue_cur->len || afl->taint_map[i]) - new_buf[i] = out_buf[taint++]; - else - new_buf[i] = afl->taint_src[i]; - - } - - swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch)); - - out_buf = new_buf; - len = new_len; - - } - write_to_testcase(afl, out_buf, len); fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout); @@ -950,5 +911,3 @@ common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u32 len) { } -#undef BUF_PARAMS - diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index a8416eb1..d4de91a4 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -75,7 +75,7 @@ static list_t afl_states = {.element_prealloc_count = 0}; /* Initializes an afl_state_t. */ -void afl_state_init_1(afl_state_t *afl, uint32_t map_size) { +void afl_state_init(afl_state_t *afl, uint32_t map_size) { /* thanks to this memset, growing vars like out_buf and out_size are NULL/0 by default. */ @@ -100,6 +100,16 @@ void afl_state_init_1(afl_state_t *afl, uint32_t map_size) { afl->cpu_aff = -1; /* Selected CPU core */ #endif /* HAVE_AFFINITY */ + afl->virgin_bits = ck_alloc(map_size); + afl->virgin_tmout = ck_alloc(map_size); + afl->virgin_crash = ck_alloc(map_size); + afl->var_bytes = ck_alloc(map_size); + afl->top_rated = ck_alloc(map_size * sizeof(void *)); + afl->clean_trace = ck_alloc(map_size); + afl->clean_trace_custom = ck_alloc(map_size); + afl->first_trace = ck_alloc(map_size); + afl->map_tmp_buf = ck_alloc(map_size); + afl->fsrv.use_stdin = 1; afl->fsrv.map_size = map_size; // afl_state_t is not available in forkserver.c @@ -151,24 +161,6 @@ void afl_state_init_1(afl_state_t *afl, uint32_t map_size) { } -void afl_state_init_2(afl_state_t *afl, uint32_t map_size) { - - afl->shm.map_size = map_size ? map_size : MAP_SIZE; - - afl->virgin_bits = ck_alloc(map_size); - afl->virgin_tmout = ck_alloc(map_size); - afl->virgin_crash = ck_alloc(map_size); - afl->var_bytes = ck_alloc(map_size); - afl->top_rated = ck_alloc(map_size * sizeof(void *)); - afl->clean_trace = ck_alloc(map_size); - afl->clean_trace_custom = ck_alloc(map_size); - afl->first_trace = ck_alloc(map_size); - afl->map_tmp_buf = ck_alloc(map_size); - - afl->fsrv.map_size = map_size; - -} - /*This sets up the environment variables for afl-fuzz into the afl_state * struct*/ diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index 0cc06e12..aeb290bd 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -116,7 +116,6 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability, "edges_found : %u\n" "var_byte_count : %u\n" "havoc_expansion : %u\n" - "tainted_inputs : %u\n" "afl_banner : %s\n" "afl_version : " VERSION "\n" @@ -150,8 +149,8 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability, #else -1, #endif - t_bytes, afl->var_byte_count, afl->expand_havoc, afl->taint_count, - afl->use_banner, afl->unicorn_mode ? "unicorn" : "", + t_bytes, afl->var_byte_count, afl->expand_havoc, afl->use_banner, + afl->unicorn_mode ? "unicorn" : "", afl->fsrv.qemu_mode ? "qemu " : "", afl->non_instrumented_mode ? 
" non_instrumented " : "", afl->no_forkserver ? "no_fsrv " : "", afl->crash_mode ? "crash " : "", diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 106aa550..5dd092f2 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -53,9 +53,6 @@ static void at_exit() { ptr = getenv("__AFL_TARGET_PID2"); if (ptr && *ptr && (i = atoi(ptr)) > 0) kill(i, SIGKILL); - ptr = getenv("__AFL_TARGET_PID3"); - if (ptr && *ptr && (i = atoi(ptr)) > 0) kill(i, SIGKILL); - i = 0; while (list[i] != NULL) { @@ -92,8 +89,6 @@ static void usage(u8 *argv0, int more_help) { " -o dir - output directory for fuzzer findings\n\n" "Execution control settings:\n" - " -A - use first level taint analysis (see " - "qemu_taint/README.md)\n" " -p schedule - power schedules compute a seed's performance score. " "debug = 1; } map_size = get_map_size(); - afl_state_init_1(afl, map_size); + afl_state_init(afl, map_size); afl->debug = debug; afl_fsrv_init(&afl->fsrv); @@ -283,15 +277,10 @@ int main(int argc, char **argv_orig, char **envp) { while ((opt = getopt( argc, argv, - "+b:c:i:I:o:f:F:m:t:T:dDnCB:S:M:x:QANUWe:p:s:V:E:L:hRP:")) > 0) { + "+b:c:i:I:o:f:F:m:t:T:dDnCB:S:M:x:QNUWe:p:s:V:E:L:hRP:")) > 0) { switch (opt) { - case 'A': - afl->taint_mode = 1; - if (!mem_limit_given) { afl->fsrv.mem_limit = MEM_LIMIT_QEMU; } - break; - case 'I': afl->infoexec = optarg; break; @@ -499,7 +488,7 @@ int main(int argc, char **argv_orig, char **envp) { if (!optarg) { FATAL("Wrong usage of -m"); } - if (!strcmp(optarg, "none") || !strcmp(optarg, "0")) { + if (!strcmp(optarg, "none")) { afl->fsrv.mem_limit = 0; break; @@ -829,15 +818,6 @@ int main(int argc, char **argv_orig, char **envp) { } - if (afl->taint_mode && afl->fsrv.map_size < MAX_FILE) { - - real_map_size = map_size; - map_size = MAX_FILE; - - } - - afl_state_init_2(afl, map_size); - if (!mem_limit_given && afl->shm.cmplog_mode) afl->fsrv.mem_limit += 260; OKF("afl++ is maintained by Marc \"van Hauser\" Heuse, Heiko \"hexcoder\" " @@ -845,7 +825,8 @@ int main(int argc, char **argv_orig, char **envp) { OKF("afl++ is open source, get it at " "https://github.com/AFLplusplus/AFLplusplus"); OKF("Power schedules from github.com/mboehme/aflfast"); - OKF("Python Mutator from github.com/choller/afl"); + OKF("Python Mutator and llvm_mode instrument file list from " + "github.com/choller/afl"); OKF("MOpt Mutator from github.com/puppet-meteor/MOpt-AFL"); if (afl->sync_id && afl->is_main_node && @@ -891,19 +872,6 @@ int main(int argc, char **argv_orig, char **envp) { if (afl->crash_mode) { FATAL("-C and -n are mutually exclusive"); } if (afl->fsrv.qemu_mode) { FATAL("-Q and -n are mutually exclusive"); } if (afl->unicorn_mode) { FATAL("-U and -n are mutually exclusive"); } - if (afl->taint_mode) { FATAL("-A and -n are mutually exclusive"); } - - } - - if (afl->limit_time_sig != 0 && afl->taint_mode) { - - FATAL("-A and -L are mutually exclusive"); - - } - - if (afl->unicorn_mode != 0 && afl->taint_mode) { - - FATAL("-A and -U are mutually exclusive"); } @@ -1004,7 +972,7 @@ int main(int argc, char **argv_orig, char **envp) { if (afl->afl_env.afl_preload) { - if (afl->fsrv.qemu_mode || afl->taint_mode) { + if (afl->fsrv.qemu_mode) { u8 *qemu_preload = getenv("QEMU_SET_ENV"); u8 *afl_preload = getenv("AFL_PRELOAD"); @@ -1100,13 +1068,6 @@ int main(int argc, char **argv_orig, char **envp) { afl->fsrv.trace_bits = afl_shm_init(&afl->shm, afl->fsrv.map_size, afl->non_instrumented_mode); - if (real_map_size && map_size != real_map_size) { - - afl->fsrv.map_size = real_map_size; - if (afl->cmplog_binary) 
afl->cmplog_fsrv.map_size = real_map_size; - - } - if (!afl->in_bitmap) { memset(afl->virgin_bits, 255, afl->fsrv.map_size); } memset(afl->virgin_tmout, 255, afl->fsrv.map_size); memset(afl->virgin_crash, 255, afl->fsrv.map_size); @@ -1262,6 +1223,7 @@ int main(int argc, char **argv_orig, char **envp) { ACTF("Spawning cmplog forkserver"); afl_fsrv_init_dup(&afl->cmplog_fsrv, &afl->fsrv); + // TODO: this is semi-nice afl->cmplog_fsrv.trace_bits = afl->fsrv.trace_bits; afl->cmplog_fsrv.qemu_mode = afl->fsrv.qemu_mode; afl->cmplog_fsrv.cmplog_binary = afl->cmplog_binary; @@ -1272,70 +1234,6 @@ int main(int argc, char **argv_orig, char **envp) { } - if (afl->taint_mode) { - - ACTF("Spawning qemu_taint forkserver"); - - u8 *disable = getenv("AFL_DISABLE_LLVM_INSTRUMENTATION"); - setenv("AFL_DISABLE_LLVM_INSTRUMENTATION", "1", 0); - - afl_fsrv_init_dup(&afl->taint_fsrv, &afl->fsrv); - afl->taint_fsrv.taint_mode = 1; - afl->taint_fsrv.trace_bits = afl->fsrv.trace_bits; - - ck_free(afl->taint_fsrv.target_path); - afl->argv_taint = ck_alloc(sizeof(char *) * (argc + 4 - optind)); - afl->taint_fsrv.target_path = find_afl_binary("afl-qemu-taint", argv[0]); - afl->argv_taint[0] = find_afl_binary("afl-qemu-taint", argv[0]); - if (!afl->argv_taint[0]) - FATAL( - "Cannot find 'afl-qemu-taint', read qemu_taint/README.md on how to " - "build it."); - u32 idx = optind - 1, offset = 0; - do { - - idx++; - offset++; - afl->argv_taint[offset] = argv[idx]; - - } while (argv[idx] != NULL); - - if (afl->fsrv.use_stdin) - unsetenv("AFL_TAINT_INPUT"); - else - setenv("AFL_TAINT_INPUT", afl->fsrv.out_file, 1); - afl_fsrv_start(&afl->taint_fsrv, afl->argv_taint, &afl->stop_soon, - afl->afl_env.afl_debug_child_output); - - afl->taint_input_file = alloc_printf("%s/taint/.input", afl->out_dir); - int fd = open(afl->taint_input_file, O_CREAT | O_TRUNC | O_RDWR, 0644); - if (fd < 0) - FATAL("Cannot create taint inpu file '%s'", afl->taint_input_file); - lseek(fd, MAX_FILE, SEEK_SET); - ck_write(fd, "\0", 1, afl->taint_input_file); - - if (!disable) unsetenv("AFL_DISABLE_LLVM_INSTRUMENTATION"); - - OKF("Taint forkserver successfully started"); - - const rlim_t kStackSize = 128L * 1024L * 1024L; // min stack size = 128 Mb - struct rlimit rl; - rl.rlim_cur = kStackSize; - if (getrlimit(RLIMIT_STACK, &rl) != 0) - WARNF("Setting a higher stack size failed!"); - - #define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size - u8 *tmp1 = ck_maybe_grow(BUF_PARAMS(eff), MAX_FILE + 4096); - u8 *tmp2 = ck_maybe_grow(BUF_PARAMS(ex), MAX_FILE + 4096); - u8 *tmp3 = ck_maybe_grow(BUF_PARAMS(in_scratch), MAX_FILE + 4096); - u8 *tmp4 = ck_maybe_grow(BUF_PARAMS(out), MAX_FILE + 4096); - u8 *tmp5 = ck_maybe_grow(BUF_PARAMS(out_scratch), MAX_FILE + 4096); - #undef BUF_PARAMS - if (!tmp1 || !tmp2 || !tmp3 || !tmp4 || !tmp5) - FATAL("memory issues. 
me hungry, feed me!"); - - } - perform_dry_run(afl); cull_queue(afl); @@ -1410,7 +1308,7 @@ int main(int argc, char **argv_orig, char **envp) { break; case 1: if (afl->limit_time_sig == 0 && !afl->custom_only && - !afl->python_only && !afl->taint_mode) { + !afl->python_only) { afl->limit_time_sig = -1; afl->limit_time_puppet = 0; @@ -1598,11 +1496,8 @@ stop_fuzzing: } - if (afl->cmplog_binary) afl_fsrv_deinit(&afl->cmplog_fsrv); - if (afl->taint_mode) afl_fsrv_deinit(&afl->taint_fsrv); afl_fsrv_deinit(&afl->fsrv); if (afl->orig_cmdline) { ck_free(afl->orig_cmdline); } - if (afl->argv_taint) { ck_free(afl->argv_taint); } ck_free(afl->fsrv.target_path); ck_free(afl->fsrv.out_file); ck_free(afl->sync_id); -- cgit 1.4.1 From 2f28ecd3a522faff6bcdf42b8cb0fd8cd3dc28ce Mon Sep 17 00:00:00 2001 From: van Hauser Date: Sat, 15 Aug 2020 20:51:57 +0200 Subject: more unlikely --- src/afl-fuzz-one.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 57b53c9f..5a4ac8ef 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -526,8 +526,8 @@ u8 fuzz_one_original(afl_state_t *afl) { * TRIMMING * ************/ - if (!afl->non_instrumented_mode && !afl->queue_cur->trim_done && - !afl->disable_trim) { + if (unlikely(!afl->non_instrumented_mode && !afl->queue_cur->trim_done && + !afl->disable_trim)) { u8 res = trim_case(afl, afl->queue_cur, in_buf); @@ -562,7 +562,7 @@ u8 fuzz_one_original(afl_state_t *afl) { if (unlikely(perf_score == 0)) { goto abandon_entry; } - if (afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized) { + if (unlikely(afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized)) { if (input_to_state_stage(afl, in_buf, out_buf, len, afl->queue_cur->exec_cksum)) { @@ -591,8 +591,9 @@ u8 fuzz_one_original(afl_state_t *afl) { /* Skip deterministic fuzzing if exec path checksum puts this out of scope for this main instance. 
*/ - if (afl->main_node_max && (afl->queue_cur->exec_cksum % afl->main_node_max) != - afl->main_node_id - 1) { + if (unlikely(afl->main_node_max && + (afl->queue_cur->exec_cksum % afl->main_node_max) != + afl->main_node_id - 1)) { goto custom_mutator_stage; @@ -2753,7 +2754,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { orig_perf = perf_score = calculate_score(afl, afl->queue_cur); - if (afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized) { + if (unlikely(afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized)) { if (input_to_state_stage(afl, in_buf, out_buf, len, afl->queue_cur->exec_cksum)) { -- cgit 1.4.1 From 43214d6b46643f70fc7979dd37a23bda76ed3171 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Sat, 15 Aug 2020 22:10:28 +0200 Subject: more likely --- src/afl-fuzz-one.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 5a4ac8ef..0a4be320 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -95,7 +95,7 @@ static u32 choose_block_len(afl_state_t *afl, u32 limit) { default: - if (rand_below(afl, 10)) { + if (likely(rand_below(afl, 10))) { min_value = HAVOC_BLK_MEDIUM; max_value = HAVOC_BLK_LARGE; @@ -421,7 +421,7 @@ u8 fuzz_one_original(afl_state_t *afl) { if (((afl->queue_cur->was_fuzzed > 0 || afl->queue_cur->fuzz_level > 0) || !afl->queue_cur->favored) && - rand_below(afl, 100) < SKIP_TO_NEW_PROB) { + likely(rand_below(afl, 100) < SKIP_TO_NEW_PROB)) { return 1; @@ -438,11 +438,11 @@ u8 fuzz_one_original(afl_state_t *afl) { if (afl->queue_cycle > 1 && (afl->queue_cur->fuzz_level == 0 || afl->queue_cur->was_fuzzed)) { - if (rand_below(afl, 100) < SKIP_NFAV_NEW_PROB) { return 1; } + if (likely(rand_below(afl, 100) < SKIP_NFAV_NEW_PROB)) { return 1; } } else { - if (rand_below(afl, 100) < SKIP_NFAV_OLD_PROB) { return 1; } + if (likely(rand_below(afl, 100) < SKIP_NFAV_OLD_PROB)) { return 1; } } @@ -2132,7 +2132,7 @@ havoc_stage: u32 clone_from, clone_to, clone_len; u8 *new_buf; - if (actually_clone) { + if (likely(actually_clone)) { clone_len = choose_block_len(afl, temp_len); clone_from = rand_below(afl, temp_len - clone_len + 1); @@ -2155,7 +2155,7 @@ havoc_stage: /* Inserted part */ - if (actually_clone) { + if (likely(actually_clone)) { memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); @@ -2195,7 +2195,7 @@ havoc_stage: copy_from = rand_below(afl, temp_len - copy_len + 1); copy_to = rand_below(afl, temp_len - copy_len + 1); - if (rand_below(afl, 4)) { + if (likely(rand_below(afl, 4))) { if (copy_from != copy_to) { -- cgit 1.4.1 From 7470b475a9b5e65afa78ca493867d8c980bd66db Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Tue, 18 Aug 2020 00:50:52 +0200 Subject: Reworked maybe_grow to take a single ptr, renamed to afl_realloc (#505) * maybe_grow takes a single ptr * fixed use_deflate * reworked maybe_grow_bufsize * helper to access underlying buf * remove redundant realloc_block * code format * fixes * added unit tests * renamed maybe_grow to afl_realloc * BUF_PARAMS -> AFL_BUF_PARAM --- custom_mutators/radamsa/custom_mutator_helpers.h | 2 +- examples/afl_network_proxy/afl-network-server.c | 34 +++-- examples/custom_mutators/custom_mutator_helpers.h | 4 +- include/afl-fuzz.h | 25 ++- include/alloc-inl.h | 177 ++++++++++------------ src/afl-fuzz-extras.c | 12 +- src/afl-fuzz-mutators.c | 3 +- src/afl-fuzz-one.c | 79 ++++++---- src/afl-fuzz-python.c | 20 ++- src/afl-fuzz-queue.c | 7 +- src/afl-fuzz-redqueen.c | 8 +- 
src/afl-fuzz-run.c | 7 +- src/afl-fuzz-state.c | 14 +- test/unittests/unit_maybe_alloc.c | 109 +++++++++---- 14 files changed, 272 insertions(+), 229 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/custom_mutators/radamsa/custom_mutator_helpers.h b/custom_mutators/radamsa/custom_mutator_helpers.h index 0848321f..e23c0b6a 100644 --- a/custom_mutators/radamsa/custom_mutator_helpers.h +++ b/custom_mutators/radamsa/custom_mutator_helpers.h @@ -324,7 +324,7 @@ static inline void *maybe_grow(void **buf, size_t *size, size_t size_needed) { } /* Swaps buf1 ptr and buf2 ptr, as well as their sizes */ -static inline void swap_bufs(void **buf1, size_t *size1, void **buf2, +static inline void afl_swap_bufs(void **buf1, size_t *size1, void **buf2, size_t *size2) { void * scratch_buf = *buf1; diff --git a/examples/afl_network_proxy/afl-network-server.c b/examples/afl_network_proxy/afl-network-server.c index ab7874fd..c70fd47d 100644 --- a/examples/afl_network_proxy/afl-network-server.c +++ b/examples/afl_network_proxy/afl-network-server.c @@ -73,9 +73,8 @@ static u8 *in_file, /* Minimizer input test case */ static u8 *in_data; /* Input data for trimming */ static u8 *buf2; -static s32 in_len; -static u32 map_size = MAP_SIZE; -static size_t buf2_len; +static s32 in_len; +static u32 map_size = MAP_SIZE; static volatile u8 stop_soon; /* Ctrl-C pressed? */ @@ -272,7 +271,7 @@ static void set_up_environment(afl_forkserver_t *fsrv) { setenv("QEMU_SET_ENV", buf, 1); - ck_free(buf); + afl_free(buf); } else { @@ -343,7 +342,7 @@ static void usage(u8 *argv0) { } -int recv_testcase(int s, void **buf, size_t *max_len) { +int recv_testcase(int s, void **buf) { u32 size; s32 ret; @@ -358,7 +357,8 @@ int recv_testcase(int s, void **buf, size_t *max_len) { if ((size & 0xff000000) != 0xff000000) { - *buf = ck_maybe_grow(buf, max_len, size); + *buf = afl_realloc((void **)&buf, size); + if (unlikely(!buf)) { PFATAL("Alloc"); } received = 0; // fprintf(stderr, "unCOMPRESS (%u)\n", size); while (received < size && @@ -370,7 +370,8 @@ int recv_testcase(int s, void **buf, size_t *max_len) { #ifdef USE_DEFLATE u32 clen; size -= 0xff000000; - *buf = ck_maybe_grow(buf, max_len, size); + *buf = afl_realloc((void **)&buf, size); + if (unlikely(!buf)) { PFATAL("Alloc"); } received = 0; while (received < 4 && (ret = recv(s, &clen + received, 4 - received, 0)) > 0) @@ -379,15 +380,15 @@ int recv_testcase(int s, void **buf, size_t *max_len) { // fprintf(stderr, "received clen information of %d\n", clen); if (clen < 1) FATAL("did not receive valid compressed len information: %u", clen); - buf2 = ck_maybe_grow((void **)&buf2, &buf2_len, clen); + buf2 = afl_realloc((void **)&buf2, clen); + if (unlikely(!buf2)) { PFATAL("Alloc"); } received = 0; while (received < clen && (ret = recv(s, buf2 + received, clen - received, 0)) > 0) received += ret; if (received != clen) FATAL("did not receive compressed information"); if (libdeflate_deflate_decompress(decompressor, buf2, clen, (char *)*buf, - *max_len, - &received) != LIBDEFLATE_SUCCESS) + size, &received) != LIBDEFLATE_SUCCESS) FATAL("decompression failed"); // fprintf(stderr, "DECOMPRESS (%u->%u):\n", clen, received); // for (u32 i = 0; i < clen; i++) fprintf(stderr, "%02x", buf2[i]); @@ -413,7 +414,6 @@ int recv_testcase(int s, void **buf, size_t *max_len) { int main(int argc, char **argv_orig, char **envp) { s32 opt, s, sock, on = 1, port = -1; - size_t max_len = 0; u8 mem_limit_given = 0, timeout_given = 0, unicorn_mode = 0, use_wine = 0; char **use_argv; struct sockaddr_in6 
serveraddr, clientaddr; @@ -568,7 +568,8 @@ int main(int argc, char **argv_orig, char **envp) { sharedmem_t shm = {0}; fsrv->trace_bits = afl_shm_init(&shm, map_size, 0); - in_data = ck_maybe_grow((void **)&in_data, &max_len, 65536); + in_data = afl_realloc((void **)&in_data, 65536); + if (unlikely(!in_data)) { PFATAL("Alloc"); } atexit(at_exit_handler); setup_signal_handlers(); @@ -639,7 +640,8 @@ int main(int argc, char **argv_orig, char **envp) { #ifdef USE_DEFLATE compressor = libdeflate_alloc_compressor(1); decompressor = libdeflate_alloc_decompressor(); - buf2 = ck_maybe_grow((void **)&buf2, &buf2_len, map_size + 16); + buf2 = afl_realloc((void **)&buf2, map_size + 16); + if (unlikely(!buf2)) { PFATAL("alloc"); } lenptr = (u32 *)(buf2 + 4); fprintf(stderr, "Compiled with compression support\n"); #endif @@ -664,7 +666,7 @@ int main(int argc, char **argv_orig, char **envp) { #endif - while ((in_len = recv_testcase(s, (void **)&in_data, &max_len)) > 0) { + while ((in_len = recv_testcase(s, (void **)&in_data)) > 0) { // fprintf(stderr, "received %u\n", in_len); (void)run_target(fsrv, use_argv, in_data, in_len, 1); @@ -697,9 +699,9 @@ int main(int argc, char **argv_orig, char **envp) { afl_shm_deinit(&shm); afl_fsrv_deinit(fsrv); if (fsrv->target_path) { ck_free(fsrv->target_path); } - if (in_data) { ck_free(in_data); } + afl_free(in_data); #if USE_DEFLATE - if (buf2) { ck_free(buf2); } + afl_free(buf2); libdeflate_free_compressor(compressor); libdeflate_free_decompressor(decompressor); #endif diff --git a/examples/custom_mutators/custom_mutator_helpers.h b/examples/custom_mutators/custom_mutator_helpers.h index 0848321f..ad5acb08 100644 --- a/examples/custom_mutators/custom_mutator_helpers.h +++ b/examples/custom_mutators/custom_mutator_helpers.h @@ -324,8 +324,8 @@ static inline void *maybe_grow(void **buf, size_t *size, size_t size_needed) { } /* Swaps buf1 ptr and buf2 ptr, as well as their sizes */ -static inline void swap_bufs(void **buf1, size_t *size1, void **buf2, - size_t *size2) { +static inline void afl_swap_bufs(void **buf1, size_t *size1, void **buf2, + size_t *size2) { void * scratch_buf = *buf1; size_t scratch_size = *size1; diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index ca7d10fe..dca395aa 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -126,6 +126,9 @@ #define STAGE_BUF_SIZE (64) /* usable size for stage name buf in afl_state */ +// Little helper to access the ptr to afl->##name_buf - for use in afl_realloc. 
+#define AFL_BUF_PARAM(name) ((void **)&afl->name##_buf) + extern s8 interesting_8[INTERESTING_8_LEN]; extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN]; extern s32 @@ -572,7 +575,6 @@ typedef struct afl_state { // growing buf struct queue_entry **queue_buf; - size_t queue_size; struct queue_entry **top_rated; /* Top entries for bitmap bytes */ @@ -633,24 +635,18 @@ typedef struct afl_state { /*needed for afl_fuzz_one */ // TODO: see which we can reuse - u8 * out_buf; - size_t out_size; + u8 *out_buf; - u8 * out_scratch_buf; - size_t out_scratch_size; + u8 *out_scratch_buf; - u8 * eff_buf; - size_t eff_size; + u8 *eff_buf; - u8 * in_buf; - size_t in_size; + u8 *in_buf; - u8 * in_scratch_buf; - size_t in_scratch_size; + u8 *in_scratch_buf; - u8 * ex_buf; - size_t ex_size; - u32 custom_mutators_count; + u8 *ex_buf; + u32 custom_mutators_count; list_t custom_mutator_list; @@ -666,7 +662,6 @@ struct custom_mutator { char * name_short; void * dh; u8 * post_process_buf; - size_t post_process_size; u8 stacked_custom_prob, stacked_custom; void *data; /* custom mutator data ptr */ diff --git a/include/alloc-inl.h b/include/alloc-inl.h index 306cc622..90701d18 100644 --- a/include/alloc-inl.h +++ b/include/alloc-inl.h @@ -30,12 +30,13 @@ #include #include #include +#include #include "config.h" #include "types.h" #include "debug.h" -/* Initial size used for ck_maybe_grow */ +/* Initial size used for afl_realloc */ #define INITIAL_GROWTH_SIZE (64) // Be careful! _WANT_ORIGINAL_AFL_ALLOC is not compatible with custom mutators @@ -76,10 +77,6 @@ \ } while (0) - /* Allocator increments for ck_realloc_block(). */ - - #define ALLOC_BLK_INC 256 - /* Allocate a buffer, explicitly not zeroing it. Returns NULL for zero-sized requests. */ @@ -149,15 +146,6 @@ static inline void *DFL_ck_realloc(void *orig, u32 size) { } -/* Re-allocate a buffer with ALLOC_BLK_INC increments (used to speed up - repeated small reallocs without complicating the user code). */ - -static inline void *DFL_ck_realloc_block(void *orig, u32 size) { - - return DFL_ck_realloc(orig, size); - -} - /* Create a buffer with a copy of a string. Returns NULL for NULL inputs. */ static inline u8 *DFL_ck_strdup(u8 *str) { @@ -183,7 +171,6 @@ static inline u8 *DFL_ck_strdup(u8 *str) { #define ck_alloc DFL_ck_alloc #define ck_alloc_nozero DFL_ck_alloc_nozero #define ck_realloc DFL_ck_realloc - #define ck_realloc_block DFL_ck_realloc_block #define ck_strdup DFL_ck_strdup #define ck_free DFL_ck_free @@ -239,10 +226,6 @@ static inline u8 *DFL_ck_strdup(u8 *str) { #define ALLOC_OFF_HEAD 8 #define ALLOC_OFF_TOTAL (ALLOC_OFF_HEAD + 1) - /* Allocator increments for ck_realloc_block(). */ - - #define ALLOC_BLK_INC 256 - /* Sanity-checking macros for pointers. */ #define CHECK_PTR(_p) \ @@ -402,29 +385,6 @@ static inline void *DFL_ck_realloc(void *orig, u32 size) { } -/* Re-allocate a buffer with ALLOC_BLK_INC increments (used to speed up - repeated small reallocs without complicating the user code). */ - -static inline void *DFL_ck_realloc_block(void *orig, u32 size) { - - #ifndef DEBUG_BUILD - - if (orig) { - - CHECK_PTR(orig); - - if (ALLOC_S(orig) >= size) return orig; - - size += ALLOC_BLK_INC; - - } - - #endif /* !DEBUG_BUILD */ - - return DFL_ck_realloc(orig, size); - -} - /* Create a buffer with a copy of a string. Returns NULL for NULL inputs. 
*/ static inline u8 *DFL_ck_strdup(u8 *str) { @@ -458,7 +418,6 @@ static inline u8 *DFL_ck_strdup(u8 *str) { #define ck_alloc DFL_ck_alloc #define ck_alloc_nozero DFL_ck_alloc_nozero #define ck_realloc DFL_ck_realloc - #define ck_realloc_block DFL_ck_realloc_block #define ck_strdup DFL_ck_strdup #define ck_free DFL_ck_free @@ -528,8 +487,8 @@ static inline void TRK_alloc_buf(void *ptr, const char *file, const char *func, /* No space available - allocate more. */ - TRK[bucket] = DFL_ck_realloc_block( - TRK[bucket], (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj)); + TRK[bucket] = DFL_ck_realloc(TRK[bucket], + (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj)); TRK[bucket][i].ptr = ptr; TRK[bucket][i].file = (char *)file; @@ -604,16 +563,6 @@ static inline void *TRK_ck_realloc(void *orig, u32 size, const char *file, } -static inline void *TRK_ck_realloc_block(void *orig, u32 size, const char *file, - const char *func, u32 line) { - - void *ret = DFL_ck_realloc_block(orig, size); - TRK_free_buf(orig, file, func, line); - TRK_alloc_buf(ret, file, func, line); - return ret; - -} - static inline void *TRK_ck_strdup(u8 *str, const char *file, const char *func, u32 line) { @@ -641,9 +590,6 @@ static inline void TRK_ck_free(void *ptr, const char *file, const char *func, #define ck_realloc(_p1, _p2) \ TRK_ck_realloc(_p1, _p2, __FILE__, __FUNCTION__, __LINE__) - #define ck_realloc_block(_p1, _p2) \ - TRK_ck_realloc_block(_p1, _p2, __FILE__, __FUNCTION__, __LINE__) - #define ck_strdup(_p1) TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__) #define ck_free(_p1) TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__) @@ -657,11 +603,14 @@ static inline void TRK_ck_free(void *ptr, const char *file, const char *func, */ static inline size_t next_pow2(size_t in) { - if (in == 0 || in > (size_t)-1) { - - return 0; /* avoid undefined behaviour under-/overflow */ + // Commented this out as this behavior doesn't change, according to unittests + // if (in == 0 || in > (size_t)-1) { - } + // + // return 0; /* avoid undefined behaviour under-/overflow + // */ + // + // } size_t out = in - 1; out |= out >> 1; @@ -673,32 +622,32 @@ static inline size_t next_pow2(size_t in) { } -/* This function makes sure *size is > size_needed after call. - It will realloc *buf otherwise. - *size will grow exponentially as per: - https://blog.mozilla.org/nnethercote/2014/11/04/please-grow-your-buffers-exponentially/ - Will return NULL and free *buf if size_needed is <1 or realloc failed. - @return For convenience, this function returns *buf. - */ -static inline void *maybe_grow(void **buf, size_t *size, size_t size_needed) { +/* AFL alloc buffer, the struct is here so we don't need to do fancy ptr + * arithmetics */ +struct afl_alloc_buf { - /* No need to realloc */ - if (likely(size_needed && *size >= size_needed)) { return *buf; } + /* The complete allocated size, including the header of len + * AFL_ALLOC_SIZE_OFFSET */ + size_t complete_size; + /* ptr to the first element of the actual buffer */ + u8 buf[0]; - /* No initial size was set */ - if (size_needed < INITIAL_GROWTH_SIZE) { size_needed = INITIAL_GROWTH_SIZE; } +}; - /* grow exponentially */ - size_t next_size = next_pow2(size_needed); +#define AFL_ALLOC_SIZE_OFFSET (offsetof(struct afl_alloc_buf, buf)) - /* handle overflow and zero size_needed */ - if (!next_size) { next_size = size_needed; } +/* Returs the container element to this ptr */ +static inline struct afl_alloc_buf *afl_alloc_bufptr(void *buf) { - /* alloc */ - *buf = realloc(*buf, next_size); - *size = *buf ? 
next_size : 0; + return (struct afl_alloc_buf *)((u8 *)buf - AFL_ALLOC_SIZE_OFFSET); - return *buf; +} + +/* Gets the maximum size of the buf contents (ptr->complete_size - + * AFL_ALLOC_SIZE_OFFSET) */ +static inline size_t afl_alloc_bufsize(void *buf) { + + return afl_alloc_bufptr(buf)->complete_size - AFL_ALLOC_SIZE_OFFSET; } @@ -706,45 +655,71 @@ static inline void *maybe_grow(void **buf, size_t *size, size_t size_needed) { It will realloc *buf otherwise. *size will grow exponentially as per: https://blog.mozilla.org/nnethercote/2014/11/04/please-grow-your-buffers-exponentially/ - Will FATAL if size_needed is <1. + Will return NULL and free *buf if size_needed is <1 or realloc failed. @return For convenience, this function returns *buf. */ -static inline void *ck_maybe_grow(void **buf, size_t *size, - size_t size_needed) { +static inline void *afl_realloc(void **buf, size_t size_needed) { + + struct afl_alloc_buf *new_buf = NULL; - /* Oops. found a bug? */ - if (unlikely(size_needed < 1)) { FATAL("cannot grow to non-positive size"); } + size_t current_size = 0; + size_t next_size = 0; + + if (likely(*buf)) { + + /* the size is always stored at buf - 1*size_t */ + new_buf = afl_alloc_bufptr(*buf); + current_size = new_buf->complete_size; + + } + + size_needed += AFL_ALLOC_SIZE_OFFSET; /* No need to realloc */ - if (likely(*size >= size_needed)) { return *buf; } + if (likely(current_size >= size_needed)) { return *buf; } /* No initial size was set */ - if (size_needed < INITIAL_GROWTH_SIZE) { size_needed = INITIAL_GROWTH_SIZE; } + if (size_needed < INITIAL_GROWTH_SIZE) { - /* grow exponentially */ - size_t next_size = next_pow2(size_needed); + next_size = INITIAL_GROWTH_SIZE; - /* handle overflow */ - if (!next_size) { next_size = size_needed; } + } else { + + /* grow exponentially */ + next_size = next_pow2(size_needed); + + /* handle overflow: fall back to the original size_needed */ + if (unlikely(!next_size)) { next_size = size_needed; } + + } /* alloc */ - *buf = ck_realloc(*buf, next_size); - *size = next_size; + new_buf = realloc(new_buf, next_size); + if (unlikely(!new_buf)) { + *buf = NULL; + return NULL; + + } + + new_buf->complete_size = next_size; + *buf = (void *)(new_buf->buf); return *buf; } +static inline void afl_free(void *buf) { + + if (buf) { free(afl_alloc_bufptr(buf)); } + +} + /* Swaps buf1 ptr and buf2 ptr, as well as their sizes */ -static inline void swap_bufs(void **buf1, size_t *size1, void **buf2, - size_t *size2) { +static inline void afl_swap_bufs(void **buf1, void **buf2) { - void * scratch_buf = *buf1; - size_t scratch_size = *size1; + void *scratch_buf = *buf1; *buf1 = *buf2; - *size1 = *size2; *buf2 = scratch_buf; - *size2 = scratch_size; } diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c index 17f02984..88262a98 100644 --- a/src/afl-fuzz-extras.c +++ b/src/afl-fuzz-extras.c @@ -152,8 +152,10 @@ void load_extras_file(afl_state_t *afl, u8 *fname, u32 *min_len, u32 *max_len, /* Okay, let's allocate memory and copy data between "...", handling \xNN escaping, \\, and \". 
*/ - afl->extras = ck_realloc_block( - afl->extras, (afl->extras_cnt + 1) * sizeof(struct extra_data)); + afl->extras = + afl_realloc((void **)&afl->extras, + (afl->extras_cnt + 1) * sizeof(struct extra_data)); + if (unlikely(!afl->extras)) { PFATAL("alloc"); } wptr = afl->extras[afl->extras_cnt].data = ck_alloc(rptr - lptr); @@ -296,8 +298,10 @@ void load_extras(afl_state_t *afl, u8 *dir) { if (min_len > st.st_size) { min_len = st.st_size; } if (max_len < st.st_size) { max_len = st.st_size; } - afl->extras = ck_realloc_block( - afl->extras, (afl->extras_cnt + 1) * sizeof(struct extra_data)); + afl->extras = + afl_realloc((void **)&afl->extras, + (afl->extras_cnt + 1) * sizeof(struct extra_data)); + if (unlikely(!afl->extras)) { PFATAL("alloc"); } afl->extras[afl->extras_cnt].data = ck_alloc(st.st_size); afl->extras[afl->extras_cnt].len = st.st_size; diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c index 0fa646f9..22578df9 100644 --- a/src/afl-fuzz-mutators.c +++ b/src/afl-fuzz-mutators.c @@ -122,9 +122,8 @@ void destroy_custom_mutators(afl_state_t *afl) { if (el->post_process_buf) { - ck_free(el->post_process_buf); + afl_free(el->post_process_buf); el->post_process_buf = NULL; - el->post_process_size = 0; } diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 0a4be320..3bf0c195 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -364,8 +364,6 @@ static void locate_diffs(u8 *ptr1, u8 *ptr2, u32 len, s32 *first, s32 *last) { #endif /* !IGNORE_FINDS */ -#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size - /* Take the current entry from the queue, fuzz it for a while. This function is a tad too long... returns 0 if fuzzed successfully, 1 if skipped or bailed out. */ @@ -384,9 +382,6 @@ u8 fuzz_one_original(afl_state_t *afl) { u8 a_collect[MAX_AUTO_EXTRA]; u32 a_len = 0; -/* Not pretty, but saves a lot of writing */ -#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size - #ifdef IGNORE_FINDS /* In IGNORE_FINDS mode, skip any entries that weren't in the @@ -484,7 +479,8 @@ u8 fuzz_one_original(afl_state_t *afl) { single byte anyway, so it wouldn't give us any performance or memory usage benefits. */ - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + out_buf = afl_realloc(AFL_BUF_PARAM(out), len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } afl->subseq_tmouts = 0; @@ -800,7 +796,8 @@ u8 fuzz_one_original(afl_state_t *afl) { /* Initialize effector map for the next step (see comments below). Always flag first and last byte as doing something. 
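The call sites are converted mechanically from ck_maybe_grow(BUF_PARAMS(x), n) to afl_realloc(AFL_BUF_PARAM(x), n) plus an explicit NULL check. AFL_BUF_PARAM itself is not shown in this patch; it presumably reduces the removed two-argument helper to a single pointer argument now that the size lives in the allocation header. A hedged sketch of the pattern (the macro body is an assumption, not taken from the diff):

/* hypothetical expansion, mirroring the removed BUF_PARAMS() helpers minus
   the size pointer; the real definition lives in afl-fuzz.h */
#define AFL_BUF_PARAM(name) ((void **)&afl->name##_buf)

/* old: out_buf = ck_maybe_grow(BUF_PARAMS(out), len);            */
/* new: size is tracked by the header, failure is checked inline: */
out_buf = afl_realloc(AFL_BUF_PARAM(out), len);
if (unlikely(!out_buf)) { PFATAL("alloc"); }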
*/ - eff_map = ck_maybe_grow(BUF_PARAMS(eff), EFF_ALEN(len)); + eff_map = afl_realloc(AFL_BUF_PARAM(eff), EFF_ALEN(len)); + if (unlikely(!eff_map)) { PFATAL("alloc"); } eff_map[0] = 1; if (EFF_APOS(len - 1) != 0) { @@ -1557,7 +1554,8 @@ skip_interest: orig_hit_cnt = new_hit_cnt; - ex_tmp = ck_maybe_grow(BUF_PARAMS(ex), len + MAX_DICT_FILE); + ex_tmp = afl_realloc(AFL_BUF_PARAM(ex), len + MAX_DICT_FILE); + if (unlikely(!ex_tmp)) { PFATAL("alloc"); } for (i = 0; i <= (u32)len; ++i) { @@ -1733,7 +1731,8 @@ custom_mutator_stage: fd = open(target->fname, O_RDONLY); if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); } - new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), target->len); + new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), target->len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } ck_read(fd, new_buf, target->len, target->fname); close(fd); @@ -1908,7 +1907,8 @@ havoc_stage: temp_len = new_len; if (out_buf != custom_havoc_buf) { - ck_maybe_grow(BUF_PARAMS(out), temp_len); + afl_realloc(AFL_BUF_PARAM(out), temp_len); + if (unlikely(!afl->out_buf)) { PFATAL("alloc"); } memcpy(out_buf, custom_havoc_buf, temp_len); } @@ -2147,7 +2147,8 @@ havoc_stage: clone_to = rand_below(afl, temp_len); new_buf = - ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len); + afl_realloc(AFL_BUF_PARAM(out_scratch), temp_len + clone_len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } /* Head */ @@ -2172,7 +2173,7 @@ havoc_stage: memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, temp_len - clone_to); - swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch)); + afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); out_buf = new_buf; new_buf = NULL; temp_len += clone_len; @@ -2287,7 +2288,8 @@ havoc_stage: if (temp_len + extra_len >= MAX_FILE) { break; } - out_buf = ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len); + out_buf = afl_realloc(AFL_BUF_PARAM(out), temp_len + extra_len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } /* Tail */ memmove(out_buf + insert_at + extra_len, out_buf + insert_at, @@ -2343,7 +2345,8 @@ havoc_stage: } u32 new_len = target->len; - u8 *new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), new_len); + u8 *new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), new_len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } ck_read(fd, new_buf, new_len, target->fname); @@ -2383,7 +2386,8 @@ havoc_stage: clone_to = rand_below(afl, temp_len); u8 *temp_buf = - ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len); + afl_realloc(AFL_BUF_PARAM(out_scratch), temp_len + clone_len); + if (unlikely(!temp_buf)) { PFATAL("alloc"); } /* Head */ @@ -2397,7 +2401,7 @@ havoc_stage: memcpy(temp_buf + clone_to + clone_len, out_buf + clone_to, temp_len - clone_to); - swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch)); + afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); out_buf = temp_buf; temp_len += clone_len; @@ -2418,7 +2422,8 @@ havoc_stage: /* out_buf might have been mangled a bit, so let's restore it to its original size and shape. 
*/ - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + out_buf = afl_realloc(AFL_BUF_PARAM(out), len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } temp_len = len; memcpy(out_buf, in_buf, len); @@ -2513,7 +2518,8 @@ retry_splicing: if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); } - new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), target->len); + new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } ck_read(fd, new_buf, target->len, target->fname); @@ -2535,10 +2541,11 @@ retry_splicing: len = target->len; memcpy(new_buf, in_buf, split_at); - swap_bufs(BUF_PARAMS(in), BUF_PARAMS(in_scratch)); + afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); in_buf = new_buf; - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + out_buf = afl_realloc(AFL_BUF_PARAM(out), len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } memcpy(out_buf, in_buf, len); goto custom_mutator_stage; @@ -2679,7 +2686,8 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { single byte anyway, so it wouldn't give us any performance or memory usage benefits. */ - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + out_buf = afl_realloc(AFL_BUF_PARAM(out), len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } afl->subseq_tmouts = 0; @@ -3001,7 +3009,8 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { /* Initialize effector map for the next step (see comments below). Always flag first and last byte as doing something. */ - eff_map = ck_maybe_grow(BUF_PARAMS(eff), EFF_ALEN(len)); + eff_map = afl_realloc(AFL_BUF_PARAM(eff), EFF_ALEN(len)); + if (unlikely(!eff_map)) { PFATAL("alloc"); } eff_map[0] = 1; if (EFF_APOS(len - 1) != 0) { @@ -3758,7 +3767,8 @@ skip_interest: orig_hit_cnt = new_hit_cnt; - ex_tmp = ck_maybe_grow(BUF_PARAMS(ex), len + MAX_DICT_FILE); + ex_tmp = afl_realloc(AFL_BUF_PARAM(ex), len + MAX_DICT_FILE); + if (unlikely(!ex_tmp)) { PFATAL("alloc"); } for (i = 0; i <= (u32)len; ++i) { @@ -4196,8 +4206,9 @@ pacemaker_fuzzing: clone_to = rand_below(afl, temp_len); - new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), - temp_len + clone_len); + new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), + temp_len + clone_len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } /* Head */ @@ -4223,7 +4234,7 @@ pacemaker_fuzzing: memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, temp_len - clone_to); - swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch)); + afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); out_buf = new_buf; temp_len += clone_len; MOpt_globals.cycles_v2[STAGE_Clone75] += 1; @@ -4340,7 +4351,8 @@ pacemaker_fuzzing: if (temp_len + extra_len >= MAX_FILE) break; - out_buf = ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len); + out_buf = afl_realloc(AFL_BUF_PARAM(out), temp_len + extra_len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } /* Tail */ memmove(out_buf + insert_at + extra_len, out_buf + insert_at, @@ -4373,7 +4385,8 @@ pacemaker_fuzzing: /* out_buf might have been mangled a bit, so let's restore it to its original size and shape. 
*/ - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + out_buf = afl_realloc(AFL_BUF_PARAM(out), len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } temp_len = len; memcpy(out_buf, in_buf, len); @@ -4518,7 +4531,8 @@ pacemaker_fuzzing: if (fd < 0) { PFATAL("Unable to open '%s'", target->fname); } - new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), target->len); + new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } ck_read(fd, new_buf, target->len, target->fname); @@ -4545,9 +4559,10 @@ pacemaker_fuzzing: len = target->len; memcpy(new_buf, in_buf, split_at); - swap_bufs(BUF_PARAMS(in), BUF_PARAMS(in_scratch)); + afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); in_buf = new_buf; - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + out_buf = afl_realloc(AFL_BUF_PARAM(out), len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } memcpy(out_buf, in_buf, len); goto havoc_stage_puppet; @@ -4880,5 +4895,3 @@ u8 fuzz_one(afl_state_t *afl) { } -#undef BUF_PARAMS - diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c index a077469e..e540f548 100644 --- a/src/afl-fuzz-python.c +++ b/src/afl-fuzz-python.c @@ -40,9 +40,7 @@ static void *unsupported(afl_state_t *afl, unsigned int seed) { /* sorry for this makro... it just fills in `&py_mutator->something_buf, &py_mutator->something_size`. */ - #define BUF_PARAMS(name) \ - (void **)&((py_mutator_t *)py_mutator)->name##_buf, \ - &((py_mutator_t *)py_mutator)->name##_size + #define BUF_PARAMS(name) (void **)&((py_mutator_t *)py_mutator)->name##_buf static size_t fuzz_py(void *py_mutator, u8 *buf, size_t buf_size, u8 **out_buf, u8 *add_buf, size_t add_buf_size, size_t max_size) { @@ -97,7 +95,8 @@ static size_t fuzz_py(void *py_mutator, u8 *buf, size_t buf_size, u8 **out_buf, mutated_size = PyByteArray_Size(py_value); - *out_buf = ck_maybe_grow(BUF_PARAMS(fuzz), mutated_size); + *out_buf = afl_realloc(BUF_PARAMS(fuzz), mutated_size); + if (unlikely(!out_buf)) { PFATAL("alloc"); } memcpy(*out_buf, PyByteArray_AsString(py_value), mutated_size); Py_DECREF(py_value); @@ -317,7 +316,6 @@ struct custom_mutator *load_custom_mutator_py(afl_state_t *afl, mutator = ck_alloc(sizeof(struct custom_mutator)); mutator->post_process_buf = NULL; - mutator->post_process_size = 0; mutator->name = module_name; ACTF("Loading Python mutator library from '%s'...", module_name); @@ -419,7 +417,11 @@ size_t post_process_py(void *py_mutator, u8 *buf, size_t buf_size, py_out_buf_size = PyByteArray_Size(py_value); - ck_maybe_grow(BUF_PARAMS(post_process), py_out_buf_size); + if (unlikely(!afl_realloc(BUF_PARAMS(post_process), py_out_buf_size))) { + + PFATAL("alloc"); + + } memcpy(py->post_process_buf, PyByteArray_AsString(py_value), py_out_buf_size); @@ -527,7 +529,8 @@ size_t trim_py(void *py_mutator, u8 **out_buf) { if (py_value != NULL) { ret = PyByteArray_Size(py_value); - *out_buf = ck_maybe_grow(BUF_PARAMS(trim), ret); + *out_buf = afl_realloc(BUF_PARAMS(trim), ret); + if (unlikely(!out_buf)) { PFATAL("alloc"); } memcpy(*out_buf, PyByteArray_AsString(py_value), ret); Py_DECREF(py_value); @@ -592,7 +595,8 @@ size_t havoc_mutation_py(void *py_mutator, u8 *buf, size_t buf_size, } else { /* A new buf is needed... 
*/ - *out_buf = ck_maybe_grow(BUF_PARAMS(havoc), mutated_size); + *out_buf = afl_realloc(BUF_PARAMS(havoc), mutated_size); + if (unlikely(!out_buf)) { PFATAL("alloc"); } } diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index f35df914..0c472845 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -26,8 +26,6 @@ #include #include -#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size - /* Mark deterministic checks as done for a particular queue entry. We use the .state file to avoid repeating deterministic fuzzing when resuming aborted scans. */ @@ -248,8 +246,9 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { } - struct queue_entry **queue_buf = ck_maybe_grow( - BUF_PARAMS(queue), afl->queued_paths * sizeof(struct queue_entry *)); + struct queue_entry **queue_buf = afl_realloc( + AFL_BUF_PARAM(queue), afl->queued_paths * sizeof(struct queue_entry *)); + if (unlikely(!queue_buf)) { PFATAL("alloc"); } queue_buf[afl->queued_paths - 1] = q; afl->last_path_time = get_cur_time(); diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index f21dd0b0..1ae6ab54 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -313,8 +313,6 @@ static unsigned long long strntoull(const char *str, size_t sz, char **end, } -#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size - static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, u64 pattern, u64 repl, u64 o_pattern, u32 idx, u8 *orig_buf, u8 *buf, u32 len, u8 do_reverse, @@ -358,7 +356,8 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, size_t old_len = endptr - buf_8; size_t num_len = snprintf(NULL, 0, "%lld", num); - u8 *new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), len + num_len); + u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } memcpy(new_buf, buf, idx); snprintf(new_buf + idx, num_len, "%lld", num); @@ -371,7 +370,8 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, size_t old_len = endptr - buf_8; size_t num_len = snprintf(NULL, 0, "%llu", unum); - u8 *new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), len + num_len); + u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } memcpy(new_buf, buf, idx); snprintf(new_buf + idx, num_len, "%llu", unum); diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index d3f823c9..d71ec339 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -135,8 +135,6 @@ write_to_testcase(afl_state_t *afl, void *mem, u32 len) { } -#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size - /* The same, but with an adjustable gap. Used for trimming. */ static void write_with_gap(afl_state_t *afl, u8 *mem, u32 len, u32 skip_at, @@ -149,7 +147,8 @@ static void write_with_gap(afl_state_t *afl, u8 *mem, u32 len, u32 skip_at, This memory is used to carry out the post_processing(if present) after copying the testcase by removing the gaps. This can break though */ - u8 *mem_trimmed = ck_maybe_grow(BUF_PARAMS(out_scratch), len - skip_len + 1); + u8 *mem_trimmed = afl_realloc(AFL_BUF_PARAM(out_scratch), len - skip_len + 1); + if (unlikely(!mem_trimmed)) { PFATAL("alloc"); } ssize_t new_size = len - skip_len; void * new_mem = mem; @@ -288,8 +287,6 @@ static void write_with_gap(afl_state_t *afl, u8 *mem, u32 len, u32 skip_at, } -#undef BUF_PARAMS - /* Calibrate a new test case. 
This is done when processing the input directory to warn about flaky or otherwise problematic test cases early on; and when new paths are discovered to detect variable behavior and so on. */ diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index d4de91a4..e68e7786 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -421,13 +421,13 @@ void afl_state_deinit(afl_state_t *afl) { if (afl->pass_stats) { ck_free(afl->pass_stats); } if (afl->orig_cmp_map) { ck_free(afl->orig_cmp_map); } - if (afl->queue_buf) { free(afl->queue_buf); } - if (afl->out_buf) { free(afl->out_buf); } - if (afl->out_scratch_buf) { free(afl->out_scratch_buf); } - if (afl->eff_buf) { free(afl->eff_buf); } - if (afl->in_buf) { free(afl->in_buf); } - if (afl->in_scratch_buf) { free(afl->in_scratch_buf); } - if (afl->ex_buf) { free(afl->ex_buf); } + afl_free(afl->queue_buf); + afl_free(afl->out_buf); + afl_free(afl->out_scratch_buf); + afl_free(afl->eff_buf); + afl_free(afl->in_buf); + afl_free(afl->in_scratch_buf); + afl_free(afl->ex_buf); ck_free(afl->virgin_bits); ck_free(afl->virgin_tmout); diff --git a/test/unittests/unit_maybe_alloc.c b/test/unittests/unit_maybe_alloc.c index 889ced8a..e452e2f2 100644 --- a/test/unittests/unit_maybe_alloc.c +++ b/test/unittests/unit_maybe_alloc.c @@ -42,7 +42,24 @@ int __wrap_printf(const char *format, ...) { return 1; } -#define BUF_PARAMS (void **)&buf, &size +#define VOID_BUF (void **)&buf + +static void *create_fake_maybe_grow_of(size_t size) { + + size += AFL_ALLOC_SIZE_OFFSET; + + // fake a realloc buf + + struct afl_alloc_buf *buf = malloc(size); + if (!buf) { + perror("Could not allocate fake buf"); + return NULL; + } + buf->complete_size = size; // The size + void *actual_buf = (void *)(buf->buf); + return actual_buf; + +} /* static int setup(void **state) { @@ -52,29 +69,55 @@ static int setup(void **state) { } */ +static void test_pow2(void **state) { + (void)state; + + assert_int_equal(next_pow2(64), 64); + assert_int_equal(next_pow2(63), 64); + assert_int_not_equal(next_pow2(65), 65); + assert_int_equal(next_pow2(0x100), 0x100); + assert_int_equal(next_pow2(0x180), 0x200); + assert_int_equal(next_pow2(108), 0x80); + assert_int_equal(next_pow2(0), 0); + assert_int_equal(next_pow2(1), 1); + assert_int_equal(next_pow2(2), 2); + assert_int_equal(next_pow2(3), 4); + assert_int_equal(next_pow2(0xFFFFFF), 0x1000000); + assert_int_equal(next_pow2(0xFFFFFFF), 0x10000000); + assert_int_equal(next_pow2(0xFFFFFF0), 0x10000000); + assert_int_equal(next_pow2(SIZE_MAX), 0); + assert_int_equal(next_pow2(-1), 0); + assert_int_equal(next_pow2(-2), 0); + +} + static void test_null_allocs(void **state) { (void)state; void *buf = NULL; - size_t size = 0; - void *ptr = ck_maybe_grow(BUF_PARAMS, 100); + void *ptr = afl_realloc(VOID_BUF, 100); + if (unlikely(!buf)) { PFATAL("alloc"); } + size_t size = afl_alloc_bufsize(buf); assert_true(buf == ptr); assert_true(size >= 100); - ck_free(ptr); + afl_free(ptr); } static void test_nonpow2_size(void **state) { (void)state; - char *buf = ck_alloc(150); - size_t size = 150; + char *buf = create_fake_maybe_grow_of(150); + buf[140] = '5'; - char *ptr = ck_maybe_grow(BUF_PARAMS, 160); + + char *ptr = afl_realloc(VOID_BUF, 160); + if (unlikely(!ptr)) { PFATAL("alloc"); } + size_t size = afl_alloc_bufsize(buf); assert_ptr_equal(buf, ptr); assert_true(size >= 160); assert_true(buf[140] == '5'); - ck_free(ptr); + afl_free(ptr); } @@ -83,32 +126,37 @@ static void test_zero_size(void **state) { char *buf = NULL; size_t size = 0; - 
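Switching afl_state_deinit() from free() to afl_free() above is not cosmetic: the pointers handed out by afl_realloc() point AFL_ALLOC_SIZE_OFFSET bytes past the start of the underlying allocation, so passing them to plain free() would be invalid. A worked example of the arithmetic, assuming an 8-byte size_t and a request already above the initial growth size (the numbers are illustrative):

/* afl_realloc(&buf, 100):
 *
 *   internal request   = 100 + AFL_ALLOC_SIZE_OFFSET (8)  = 108
 *   allocated size     = next_pow2(108)                   = 128
 *   afl_alloc_bufsize  = 128 - 8                          = 120
 *
 * layout of the single allocation:
 *
 *   [ size_t complete_size = 128 ][ ....... 120 usable bytes ....... ]
 *   ^                             ^
 *   afl_alloc_bufptr(buf)         buf   (what callers see and index)
 *
 * hence free(buf) would pass a pointer that malloc never returned, while
 * afl_free(buf) frees afl_alloc_bufptr(buf), i.e. the real allocation.
 */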
assert_non_null(maybe_grow(BUF_PARAMS, 0)); - free(buf); + char *new_buf = afl_realloc(VOID_BUF, 0); + assert_non_null(new_buf); + assert_ptr_equal(buf, new_buf); + afl_free(buf); buf = NULL; size = 0; - char *ptr = ck_maybe_grow(BUF_PARAMS, 100); + char *ptr = afl_realloc(VOID_BUF, 100); + if (unlikely(!ptr)) { PFATAL("alloc"); } + size = afl_alloc_bufsize(buf); assert_non_null(ptr); assert_ptr_equal(buf, ptr); assert_true(size >= 100); - expect_assert_failure(ck_maybe_grow(BUF_PARAMS, 0)); - - ck_free(ptr); + afl_free(ptr); } + static void test_unchanged_size(void **state) { (void)state; - void *buf = ck_alloc(100); - size_t size = 100; - void *buf_before = buf; - void *buf_after = ck_maybe_grow(BUF_PARAMS, 100); - assert_ptr_equal(buf, buf_after); + // fake a realloc buf + void *actual_buf = create_fake_maybe_grow_of(100); + + void *buf_before = actual_buf; + void *buf_after = afl_realloc(&actual_buf, 100); + if (unlikely(!buf_after)) { PFATAL("alloc"); } + assert_ptr_equal(actual_buf, buf_after); assert_ptr_equal(buf_after, buf_before); - ck_free(buf); + afl_free(buf_after); } @@ -118,29 +166,35 @@ static void test_grow_multiple(void **state) { char *buf = NULL; size_t size = 0; - char *ptr = ck_maybe_grow(BUF_PARAMS, 100); + char *ptr = afl_realloc(VOID_BUF, 100); + if (unlikely(!ptr)) { PFATAL("alloc"); } + size = afl_alloc_bufsize(ptr); assert_ptr_equal(ptr, buf); assert_true(size >= 100); - assert_int_equal(size, next_pow2(size)); + assert_int_equal(size, next_pow2(size) - AFL_ALLOC_SIZE_OFFSET); buf[50] = '5'; - ptr = (char *)ck_maybe_grow(BUF_PARAMS, 1000); + ptr = (char *)afl_realloc(VOID_BUF, 1000); + if (unlikely(!ptr)) { PFATAL("alloc"); } + size = afl_alloc_bufsize(ptr); assert_ptr_equal(ptr, buf); assert_true(size >= 100); - assert_int_equal(size, next_pow2(size)); + assert_int_equal(size, next_pow2(size) - AFL_ALLOC_SIZE_OFFSET); buf[500] = '5'; - ptr = (char *)ck_maybe_grow(BUF_PARAMS, 10000); + ptr = (char *)afl_realloc(VOID_BUF, 10000); + if (unlikely(!ptr)) { PFATAL("alloc"); } + size = afl_alloc_bufsize(ptr); assert_ptr_equal(ptr, buf); assert_true(size >= 10000); - assert_int_equal(size, next_pow2(size)); + assert_int_equal(size, next_pow2(size) - AFL_ALLOC_SIZE_OFFSET); buf[5000] = '5'; assert_int_equal(buf[50], '5'); assert_int_equal(buf[500], '5'); assert_int_equal(buf[5000], '5'); - ck_free(buf); + afl_free(buf); } @@ -157,6 +211,7 @@ int main(int argc, char **argv) { (void)argv; const struct CMUnitTest tests[] = { + cmocka_unit_test(test_pow2), cmocka_unit_test(test_null_allocs), cmocka_unit_test(test_nonpow2_size), cmocka_unit_test(test_zero_size), -- cgit 1.4.1 From 1301552101af899557a93a7535d8a57874fe6edf Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Sun, 23 Aug 2020 01:48:36 +0200 Subject: added AFL_MAX_DET_EXTRAS env var --- include/afl-fuzz.h | 5 +++-- include/envs.h | 1 + src/afl-forkserver.c | 2 +- src/afl-fuzz-extras.c | 8 ++++---- src/afl-fuzz-one.c | 16 ++++++++-------- src/afl-fuzz-state.c | 7 +++++++ src/afl-fuzz.c | 19 +++++++++++++++++-- 7 files changed, 41 insertions(+), 17 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 1deeddd3..148e6e84 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -353,7 +353,7 @@ typedef struct afl_env_vars { afl_cal_fast, afl_cycle_schedules, afl_expand_havoc; u8 *afl_tmpdir, *afl_custom_mutator_library, *afl_python_module, *afl_path, - *afl_hang_tmout, *afl_skip_crashes, *afl_preload; + *afl_hang_tmout, *afl_skip_crashes, *afl_preload, 
*afl_max_det_extras; } afl_env_vars_t; @@ -506,7 +506,8 @@ typedef struct afl_state { useless_at_start, /* Number of useless starting paths */ var_byte_count, /* Bitmap bytes with var behavior */ current_entry, /* Current queue entry ID */ - havoc_div; /* Cycle count divisor for havoc */ + havoc_div, /* Cycle count divisor for havoc */ + max_det_extras; /* deterministic extra count (dicts)*/ u64 total_crashes, /* Total number of crashes */ unique_crashes, /* Crashes with unique signatures */ diff --git a/include/envs.h b/include/envs.h index 96ae91ba..4d50d0ff 100644 --- a/include/envs.h +++ b/include/envs.h @@ -102,6 +102,7 @@ static char *afl_environment_variables[] = { "AFL_NO_X86", // not really an env but we dont want to warn on it "AFL_MAP_SIZE", "AFL_MAPSIZE", + "AFL_MAX_DET_EXTRAS", "AFL_PATH", "AFL_PERFORMANCE_FILE", "AFL_PRELOAD", diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c index 52a14602..9d9e81cd 100644 --- a/src/afl-forkserver.c +++ b/src/afl-forkserver.c @@ -115,7 +115,7 @@ void afl_fsrv_init_dup(afl_forkserver_t *fsrv_to, afl_forkserver_t *from) { fsrv_to->out_file = NULL; fsrv_to->init_child_func = fsrv_exec_child; - //Note: do not copy ->add_extra_func + // Note: do not copy ->add_extra_func list_append(&fsrv_list, fsrv_to); diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c index 1452c55e..03c5152a 100644 --- a/src/afl-fuzz-extras.c +++ b/src/afl-fuzz-extras.c @@ -248,10 +248,10 @@ static void extras_check_and_sort(afl_state_t *afl, u32 min_len, u32 max_len, } - if (afl->extras_cnt > MAX_DET_EXTRAS) { + if (afl->extras_cnt > afl->max_det_extras) { WARNF("More than %d tokens - will use them probabilistically.", - MAX_DET_EXTRAS); + afl->max_det_extras); } @@ -403,10 +403,10 @@ void add_extra(afl_state_t *afl, u8 *mem, u32 len) { /* We only want to print this once */ - if (afl->extras_cnt == MAX_DET_EXTRAS + 1) { + if (afl->extras_cnt == afl->max_det_extras + 1) { WARNF("More than %d tokens - will use them probabilistically.", - MAX_DET_EXTRAS); + afl->max_det_extras); } diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 3bf0c195..c0c036db 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1509,13 +1509,13 @@ skip_interest: for (j = 0; j < afl->extras_cnt; ++j) { - /* Skip extras probabilistically if afl->extras_cnt > MAX_DET_EXTRAS. Also - skip them if there's no room to insert the payload, if the token + /* Skip extras probabilistically if afl->extras_cnt > AFL_MAX_DET_EXTRAS. + Also skip them if there's no room to insert the payload, if the token is redundant, or if its entire span has no bytes set in the effector map. */ - if ((afl->extras_cnt > MAX_DET_EXTRAS && - rand_below(afl, afl->extras_cnt) >= MAX_DET_EXTRAS) || + if ((afl->extras_cnt > afl->max_det_extras && + rand_below(afl, afl->extras_cnt) >= afl->max_det_extras) || afl->extras[j].len > len - i || !memcmp(afl->extras[j].data, out_buf + i, afl->extras[j].len) || !memchr(eff_map + EFF_APOS(i), 1, @@ -3722,13 +3722,13 @@ skip_interest: for (j = 0; j < afl->extras_cnt; ++j) { - /* Skip extras probabilistically if afl->extras_cnt > MAX_DET_EXTRAS. Also - skip them if there's no room to insert the payload, if the token + /* Skip extras probabilistically if afl->extras_cnt > AFL_MAX_DET_EXTRAS. + Also skip them if there's no room to insert the payload, if the token is redundant, or if its entire span has no bytes set in the effector map. 
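The check above keeps the deterministic dictionary stage bounded once a large extras list is loaded: every token is still considered at each position, but it is only used when rand_below(afl, extras_cnt) falls below max_det_extras, i.e. with probability max_det_extras / extras_cnt. A worked example with illustrative numbers:

/* say extras_cnt = 800 tokens and afl->max_det_extras = 200 (whatever
 * MAX_DET_EXTRAS or AFL_MAX_DET_EXTRAS resolves to):
 *
 *   per token:   use it iff rand_below(afl, 800) < 200   ->  p = 0.25
 *   per offset:  expected tokens actually tried ~= 800 * 0.25 = 200
 *
 * so the work per position stays near the configured limit regardless of
 * how many tokens the dictionary holds. */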
*/ - if ((afl->extras_cnt > MAX_DET_EXTRAS && - rand_below(afl, afl->extras_cnt) >= MAX_DET_EXTRAS) || + if ((afl->extras_cnt > afl->max_det_extras && + rand_below(afl, afl->extras_cnt) >= afl->max_det_extras) || afl->extras[j].len > len - i || !memcmp(afl->extras[j].data, out_buf + i, afl->extras[j].len) || !memchr(eff_map + EFF_APOS(i), 1, diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index dd0e316c..74798584 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -349,6 +349,13 @@ void read_afl_environment(afl_state_t *afl, char **envp) { afl->afl_env.afl_preload = (u8 *)get_afl_env(afl_environment_variables[i]); + } else if (!strncmp(env, "AFL_MAX_DET_EXTRAS", + + afl_environment_variable_len)) { + + afl->afl_env.afl_max_det_extras = + (u8 *)get_afl_env(afl_environment_variables[i]); + } } else { diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 5dd092f2..664cc076 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -177,6 +177,8 @@ static void usage(u8 *argv0, int more_help) { "AFL_IMPORT_FIRST: sync and import test cases from other fuzzer instances first\n" "AFL_MAP_SIZE: the shared memory size for that target. must be >= the size\n" " the target was compiled for\n" + "AFL_MAX_DET_EXTRAS: if the dict/extras file contains more tokens than this threshold,\n" + " the tokens will sometimes be skipped during fuzzing.\n" "AFL_NO_AFFINITY: do not check for an unused cpu core to use for fuzzing\n" "AFL_NO_ARITH: skip arithmetic mutations in deterministic stage\n" "AFL_NO_CPU_RED: avoid red color for showing very high cpu usage\n" @@ -949,8 +951,21 @@ int main(int argc, char **argv_orig, char **envp) { if (afl->afl_env.afl_hang_tmout) { - afl->hang_tmout = atoi(afl->afl_env.afl_hang_tmout); - if (!afl->hang_tmout) { FATAL("Invalid value of AFL_HANG_TMOUT"); } + s32 hang_tmout = atoi(afl->afl_env.afl_hang_tmout); + if (hang_tmout < 1) { FATAL("Invalid value for AFL_HANG_TMOUT"); } + afl->hang_tmout = (u32)hang_tmout; + + } + + if (afl->afl_env.afl_max_det_extras) { + + s32 max_det_extras = atoi(afl->afl_env.afl_max_det_extras); + if (max_det_extras < 1) { FATAL("Invalid value for AFL_MAX_DET_EXTRAS"); } + afl->max_det_extras = (u32)max_det_extras; + + } else { + + afl->max_det_extras = MAX_DET_EXTRAS; } -- cgit 1.4.1 From c7f0d3066875bca0ec28e9429df40293339dc05c Mon Sep 17 00:00:00 2001 From: van Hauser Date: Mon, 24 Aug 2020 17:32:41 +0200 Subject: added afl_custom_fuzz_count --- GNUmakefile | 12 ++--- docs/Changelog.md | 3 ++ docs/custom_mutators.md | 9 ++++ include/afl-fuzz.h | 20 ++++++++ src/afl-forkserver.c | 2 +- src/afl-fuzz-extras.c | 4 +- src/afl-fuzz-mutators.c | 5 ++ src/afl-fuzz-one.c | 129 +++++++++++++++++++++++++++--------------------- src/afl-fuzz-python.c | 44 +++++++++++++++++ 9 files changed, 162 insertions(+), 66 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/GNUmakefile b/GNUmakefile index 3c5e10ed..cae172dd 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -37,18 +37,18 @@ MANPAGES=$(foreach p, $(PROGS) $(SH_PROGS), $(p).8) afl-as.8 ASAN_OPTIONS=detect_leaks=0 ifeq "$(findstring android, $(shell $(CC) --version 2>/dev/null))" "" -ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -flto=full -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" + ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -flto=full -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" CFLAGS_FLTO ?= -flto=full -else - ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - 
-flto=thin -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" - CFLAGS_FLTO ?= -flto=thin else - ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -flto -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" + ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -flto=thin -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" + CFLAGS_FLTO ?= -flto=thin + else + ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -flto -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" CFLAGS_FLTO ?= -flto + endif endif endif endif -endif ifeq "$(shell echo 'int main() {return 0; }' | $(CC) -fno-move-loop-invariants -fdisable-tree-cunrolli -x c - -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" SPECIAL_PERFORMANCE += -fno-move-loop-invariants -fdisable-tree-cunrolli diff --git a/docs/Changelog.md b/docs/Changelog.md index f7bc9600..45fbd528 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -18,6 +18,9 @@ sending a mail to . dict entries without recompiling. - AFL_FORKSRV_INIT_TMOUT env variable added to control the time to wait for the forkserver to come up without the need to increase the overall timeout. + - custom mutators: + - added afl_custom_fuzz_count/fuzz_count function to allow specifying the + number of fuzz attempts for custom_fuzz - llvm_mode: - Ported SanCov to LTO, and made it the default for LTO. better instrumentation locations diff --git a/docs/custom_mutators.md b/docs/custom_mutators.md index a22c809b..75dbea21 100644 --- a/docs/custom_mutators.md +++ b/docs/custom_mutators.md @@ -32,6 +32,7 @@ performed with the custom mutator. C/C++: ```c void *afl_custom_init(afl_t *afl, unsigned int seed); +uint32_t afl_custom_fuzz_count(void *data, const u8 *buf, size_t buf_size); size_t afl_custom_fuzz(void *data, uint8_t *buf, size_t buf_size, u8 **out_buf, uint8_t *add_buf, size_t add_buf_size, size_t max_size); size_t afl_custom_post_process(void *data, uint8_t *buf, size_t buf_size, uint8_t **out_buf); int32_t afl_custom_init_trim(void *data, uint8_t *buf, size_t buf_size); @@ -49,6 +50,9 @@ Python: def init(seed): pass +def fuzz_count(buf, add_buf, max_size): + return cnt + def fuzz(buf, add_buf, max_size): return mutated_out @@ -88,6 +92,11 @@ def queue_new_entry(filename_new_queue, filename_orig_queue): This method determines whether the custom fuzzer should fuzz the current queue entry or not +- `fuzz_count` (optional): + + This method can be used to instruct afl-fuzz how often to perform a fuzz + attempt on this input data. + - `fuzz` (optional): This method performs custom mutations on a given input. It also accepts an diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 1f1dda3a..01aa1a73 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -288,6 +288,7 @@ enum { enum { /* 00 */ PY_FUNC_INIT, + /* 01 */ PY_FUNC_FUZZ_COUNT, /* 01 */ PY_FUNC_FUZZ, /* 02 */ PY_FUNC_POST_PROCESS, /* 03 */ PY_FUNC_INIT_TRIM, @@ -679,6 +680,24 @@ struct custom_mutator { */ void *(*afl_custom_init)(afl_state_t *afl, unsigned int seed); + /** + * This method is called just before fuzzing a queue entry with the custom + * mutator, and receives the initial buffer. It should return the number of + * fuzzes to perform. + * + * A value of 0 means no fuzzing of this queue entry. + * + * The function is now allowed to change the data. 
+ * + * (Optional) + * + * @param data pointer returned in afl_custom_init for this fuzz case + * @param buf Buffer containing the test case + * @param buf_size Size of the test case + * @return The amount of fuzzes to perform on this queue entry, 0 = skip + */ + u32 (*afl_custom_fuzz_count)(void *data, const u8 *buf, size_t buf_size); + /** * Perform custom mutations on a given input * @@ -867,6 +886,7 @@ u8 trim_case_custom(afl_state_t *, struct queue_entry *q, u8 *in_buf, struct custom_mutator *load_custom_mutator_py(afl_state_t *, char *); void finalize_py_module(void *); +u32 fuzz_count_py(void *, const u8 *, size_t); size_t post_process_py(void *, u8 *, size_t, u8 **); s32 init_trim_py(void *, u8 *, size_t); s32 post_trim_py(void *, u8); diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c index c496975f..72f3dc3b 100644 --- a/src/afl-forkserver.c +++ b/src/afl-forkserver.c @@ -634,7 +634,7 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv, if (fsrv->add_extra_func == NULL || fsrv->afl_ptr == NULL) { - // this is not afl-fuzz - we deny and return + // this is not afl-fuzz - or it is cmplog - we deny and return if (fsrv->use_shmem_fuzz) { status = (FS_OPT_ENABLED | FS_OPT_SHDMEM_FUZZ); diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c index 8cc2425f..d6c368d1 100644 --- a/src/afl-fuzz-extras.c +++ b/src/afl-fuzz-extras.c @@ -251,7 +251,7 @@ static void extras_check_and_sort(afl_state_t *afl, u32 min_len, u32 max_len, if (afl->extras_cnt > afl->max_det_extras) { OKF("More than %d tokens - will use them probabilistically.", - afl->max_det_extras); + afl->max_det_extras); } @@ -406,7 +406,7 @@ void add_extra(afl_state_t *afl, u8 *mem, u32 len) { if (afl->extras_cnt == afl->max_det_extras + 1) { OKF("More than %d tokens - will use them probabilistically.", - afl->max_det_extras); + afl->max_det_extras); } diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c index 22578df9..d24b7db9 100644 --- a/src/afl-fuzz-mutators.c +++ b/src/afl-fuzz-mutators.c @@ -166,6 +166,11 @@ struct custom_mutator *load_custom_mutator(afl_state_t *afl, const char *fn) { } + /* "afl_custom_fuzz_count", optional */ + mutator->afl_custom_fuzz_count = dlsym(dh, "afl_custom_fuzz_count"); + if (!mutator->afl_custom_fuzz_count) + ACTF("optional symbol 'afl_custom_fuzz_count' not found."); + /* "afl_custom_deinit", optional for backward compatibility */ mutator->afl_custom_deinit = dlsym(dh, "afl_custom_deinit"); if (!mutator->afl_custom_deinit) diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index c0c036db..03c0d3a1 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1672,7 +1672,7 @@ custom_mutator_stage: if (afl->stage_max < HAVOC_MIN) { afl->stage_max = HAVOC_MIN; } - const u32 max_seed_size = MAX_FILE; + const u32 max_seed_size = MAX_FILE, saved_max = afl->stage_max; orig_hit_cnt = afl->queued_paths + afl->unique_crashes; @@ -1680,104 +1680,119 @@ custom_mutator_stage: if (el->afl_custom_fuzz) { + if (el->afl_custom_fuzz_count) + afl->stage_max = el->afl_custom_fuzz_count(el->data, out_buf, len); + else + afl->stage_max = saved_max; + has_custom_fuzz = true; afl->stage_short = el->name_short; - for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; - ++afl->stage_cur) { + if (afl->stage_max) { - struct queue_entry *target; - u32 tid; - u8 * new_buf; + for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; + ++afl->stage_cur) { - retry_external_pick: - /* Pick a random other queue entry for passing to external API */ + struct queue_entry *target; + u32 tid; + u8 * 
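For reference, a minimal custom mutator that exercises the new hook, built as a shared object and loaded through AFL_CUSTOM_MUTATOR_LIBRARY. This is a hedged sketch only: the struct name, the count thresholds, and the byte-flip mutation are illustrative, and afl_custom_init takes the afl state as an opaque pointer here to keep the example self-contained.

#include <stdint.h>
#include <stdlib.h>

typedef struct my_mutator { unsigned int seed; } my_mutator_t;

void *afl_custom_init(void *afl, unsigned int seed) {

  (void)afl;
  my_mutator_t *data = calloc(1, sizeof(my_mutator_t));
  if (!data) { return NULL; }
  data->seed = seed;
  srand(seed);
  return data;

}

/* new hook: ask for more fuzz attempts on larger inputs, none on empty
   ones; returning 0 makes afl-fuzz skip the custom stage for this entry */
uint32_t afl_custom_fuzz_count(void *data, const uint8_t *buf, size_t buf_size) {

  (void)data;
  if (!buf_size) { return 0; }
  return buf_size > 1024 ? 512 : 128;

}

size_t afl_custom_fuzz(void *data, uint8_t *buf, size_t buf_size,
                       uint8_t **out_buf, uint8_t *add_buf,
                       size_t add_buf_size, size_t max_size) {

  (void)data; (void)add_buf; (void)add_buf_size;

  /* trivial in-place mutation: flip one byte; a real mutator must also
     respect max_size whenever it grows the buffer */
  if (buf_size && buf_size <= max_size) { buf[rand() % buf_size] ^= 0xff; }
  *out_buf = buf;
  return buf_size;

}

void afl_custom_deinit(void *data) { free(data); }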
new_buf; - do { + retry_external_pick: + /* Pick a random other queue entry for passing to external API */ - tid = rand_below(afl, afl->queued_paths); + do { - } while (tid == afl->current_entry && afl->queued_paths > 1); + tid = rand_below(afl, afl->queued_paths); - target = afl->queue; + } while (tid == afl->current_entry && afl->queued_paths > 1); - while (tid >= 100) { + target = afl->queue; - target = target->next_100; - tid -= 100; + while (tid >= 100) { - } - - while (tid--) { + target = target->next_100; + tid -= 100; - target = target->next; + } - } + while (tid--) { - /* Make sure that the target has a reasonable length. */ + target = target->next; - while (target && (target->len < 2 || target == afl->queue_cur) && - afl->queued_paths > 3) { + } - target = target->next; - ++afl->splicing_with; + /* Make sure that the target has a reasonable length. */ - } + while (target && (target->len < 2 || target == afl->queue_cur) && + afl->queued_paths > 3) { - if (!target) { goto retry_external_pick; } + target = target->next; + ++afl->splicing_with; - /* Read the additional testcase into a new buffer. */ - fd = open(target->fname, O_RDONLY); - if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); } + } - new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), target->len); - if (unlikely(!new_buf)) { PFATAL("alloc"); } - ck_read(fd, new_buf, target->len, target->fname); - close(fd); + if (!target) { goto retry_external_pick; } - u8 *mutated_buf = NULL; + /* Read the additional testcase into a new buffer. */ + fd = open(target->fname, O_RDONLY); + if (unlikely(fd < 0)) { - size_t mutated_size = - el->afl_custom_fuzz(el->data, out_buf, len, &mutated_buf, new_buf, - target->len, max_seed_size); + PFATAL("Unable to open '%s'", target->fname); - if (unlikely(!mutated_buf)) { + } - FATAL("Error in custom_fuzz. Size returned: %zd", mutated_size); + new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), target->len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } + ck_read(fd, new_buf, target->len, target->fname); + close(fd); - } + u8 *mutated_buf = NULL; - if (mutated_size > 0) { + size_t mutated_size = + el->afl_custom_fuzz(el->data, out_buf, len, &mutated_buf, new_buf, + target->len, max_seed_size); - if (common_fuzz_stuff(afl, mutated_buf, (u32)mutated_size)) { + if (unlikely(!mutated_buf)) { - goto abandon_entry; + FATAL("Error in custom_fuzz. Size returned: %zd", mutated_size); } - /* If we're finding new stuff, let's run for a bit longer, limits - permitting. */ - - if (afl->queued_paths != havoc_queued) { + if (mutated_size > 0) { - if (perf_score <= afl->havoc_max_mult * 100) { + if (common_fuzz_stuff(afl, mutated_buf, (u32)mutated_size)) { - afl->stage_max *= 2; - perf_score *= 2; + goto abandon_entry; } - havoc_queued = afl->queued_paths; + /* If we're finding new stuff, let's run for a bit longer, limits + permitting. */ + + if (afl->queued_paths != havoc_queued) { + + if (perf_score <= afl->havoc_max_mult * 100) { + + afl->stage_max *= 2; + perf_score *= 2; + + } + + havoc_queued = afl->queued_paths; + + } } - } + /* `(afl->)out_buf` may have been changed by the call to custom_fuzz + */ + /* TODO: Only do this when `mutated_buf` == `out_buf`? Branch vs + * Memcpy. + */ + memcpy(out_buf, in_buf, len); - /* `(afl->)out_buf` may have been changed by the call to custom_fuzz */ - /* TODO: Only do this when `mutated_buf` == `out_buf`? Branch vs Memcpy. 
- */ - memcpy(out_buf, in_buf, len); + } } diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c index e540f548..68540dd7 100644 --- a/src/afl-fuzz-python.c +++ b/src/afl-fuzz-python.c @@ -347,6 +347,12 @@ struct custom_mutator *load_custom_mutator_py(afl_state_t *afl, } + if (py_functions[PY_FUNC_FUZZ_COUNT]) { + + mutator->afl_custom_fuzz_count = fuzz_count_py; + + } + if (py_functions[PY_FUNC_POST_TRIM]) { mutator->afl_custom_post_trim = post_trim_py; @@ -477,6 +483,44 @@ s32 init_trim_py(void *py_mutator, u8 *buf, size_t buf_size) { } +u32 fuzz_count_py(void *py_mutator, const u8 *buf, size_t buf_size) { + + PyObject *py_args, *py_value; + + py_args = PyTuple_New(1); + py_value = PyByteArray_FromStringAndSize(buf, buf_size); + if (!py_value) { + + Py_DECREF(py_args); + FATAL("Failed to convert arguments"); + + } + + PyTuple_SetItem(py_args, 0, py_value); + + py_value = PyObject_CallObject( + ((py_mutator_t *)py_mutator)->py_functions[PY_FUNC_FUZZ_COUNT], py_args); + Py_DECREF(py_args); + + if (py_value != NULL) { + + #if PY_MAJOR_VERSION >= 3 + u32 retcnt = (u32)PyLong_AsLong(py_value); + #else + u32 retcnt = PyInt_AsLong(py_value); + #endif + Py_DECREF(py_value); + return retcnt; + + } else { + + PyErr_Print(); + FATAL("Call failed"); + + } + +} + s32 post_trim_py(void *py_mutator, u8 success) { PyObject *py_args, *py_value; -- cgit 1.4.1 From 9a6a32775f03e6fbd6df131742bee4c30bcd94a6 Mon Sep 17 00:00:00 2001 From: h1994st Date: Thu, 27 Aug 2020 00:32:53 -0400 Subject: Prevent afl-fuzz from modifying stage_max during fuzzing --- src/afl-fuzz-one.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 03c0d3a1..bf568c38 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1767,19 +1767,23 @@ custom_mutator_stage: } - /* If we're finding new stuff, let's run for a bit longer, limits - permitting. */ + if (!el->afl_custom_fuzz_count) { - if (afl->queued_paths != havoc_queued) { + /* If we're finding new stuff, let's run for a bit longer, limits + permitting. 
*/ - if (perf_score <= afl->havoc_max_mult * 100) { + if (afl->queued_paths != havoc_queued) { - afl->stage_max *= 2; - perf_score *= 2; + if (perf_score <= afl->havoc_max_mult * 100) { - } + afl->stage_max *= 2; + perf_score *= 2; + + } - havoc_queued = afl->queued_paths; + havoc_queued = afl->queued_paths; + + } } -- cgit 1.4.1 From 4b3ad5f037ee9a36aa057bf55a69acca1f573922 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sat, 5 Sep 2020 12:32:10 +0200 Subject: add cull queue, -i subdir traversal --- GNUmakefile | 230 ++++++++++++++++++++++++-------------------------- GNUmakefile.llvm | 7 +- README.md | 179 +++++++++++++++++++++++---------------- include/afl-fuzz.h | 6 +- src/afl-fuzz-extras.c | 113 ++++++++++++++++++++----- src/afl-fuzz-init.c | 108 +++++++++++++++++++++--- src/afl-fuzz-one.c | 31 +------ src/afl-fuzz-queue.c | 7 -- src/afl-fuzz.c | 33 +++++--- 9 files changed, 437 insertions(+), 277 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/GNUmakefile b/GNUmakefile index e1f6da95..0046a481 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -24,30 +24,31 @@ BIN_PATH = $(PREFIX)/bin HELPER_PATH = $(PREFIX)/lib/afl DOC_PATH = $(PREFIX)/share/doc/afl MISC_PATH = $(PREFIX)/share/afl -MAN_PATH = $(PREFIX)/share/man/man8 +MAN_PATH = $(PREFIX)/man/man8 PROGNAME = afl VERSION = $(shell grep '^$(HASH)define VERSION ' ../config.h | cut -d '"' -f2) # PROGS intentionally omit afl-as, which gets installed elsewhere. -PROGS = afl-gcc afl-g++ afl-fuzz afl-showmap afl-tmin afl-gotcpu afl-analyze +PROGS = afl-fuzz afl-showmap afl-tmin afl-gotcpu afl-analyze SH_PROGS = afl-plot afl-cmin afl-cmin.bash afl-whatsup afl-system-config MANPAGES=$(foreach p, $(PROGS) $(SH_PROGS), $(p).8) afl-as.8 +ASAN_OPTIONS=detect_leaks=0 ifeq "$(findstring android, $(shell $(CC) --version 2>/dev/null))" "" - ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -flto=full -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" +ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -flto=full -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" CFLAGS_FLTO ?= -flto=full - else - ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -flto=thin -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" +else + ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -flto=thin -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" CFLAGS_FLTO ?= -flto=thin - else - ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -flto -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" + else + ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -flto -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" CFLAGS_FLTO ?= -flto - endif endif endif endif +endif ifeq "$(shell echo 'int main() {return 0; }' | $(CC) -fno-move-loop-invariants -fdisable-tree-cunrolli -x c - -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" SPECIAL_PERFORMANCE += -fno-move-loop-invariants -fdisable-tree-cunrolli @@ -61,10 +62,7 @@ ifneq "$(shell uname)" "Darwin" endif endif # OS X does not like _FORTIFY_SOURCE=2 - # _FORTIFY_SOURCE=2 does not like -O0 - ifndef DEBUG - CFLAGS_OPT += -D_FORTIFY_SOURCE=2 - endif + CFLAGS_OPT += -D_FORTIFY_SOURCE=2 endif ifeq "$(shell uname)" "SunOS" @@ -206,10 +204,7 @@ else endif ifneq "$(filter Linux GNU%,$(shell uname))" "" - # _FORTIFY_SOURCE=2 does not like -O0 - ifndef DEBUG override CFLAGS += -D_FORTIFY_SOURCE=2 - 
endif LDFLAGS += -ldl -lrt endif @@ -223,11 +218,7 @@ ifneq "$(findstring NetBSD, $(shell uname))" "" LDFLAGS += -lpthread endif -ifeq "$(findstring clang, $(shell $(CC) --version 2>/dev/null))" "" - TEST_CC = afl-gcc -else - TEST_CC = afl-clang -endif +TEST_CC = afl-gcc COMM_HDR = include/alloc-inl.h include/config.h include/debug.h include/types.h @@ -277,28 +268,47 @@ ifdef TEST_MMAP LDFLAGS += -Wno-deprecated-declarations endif -all: test_x86 test_shm test_python ready $(PROGS) afl-as test_build all_done +.PHONY: all +all: test_x86 test_shm test_python ready $(PROGS) afl-as llvm gcc_plugin test_build all_done + +.PHONY: llvm +llvm: + -$(MAKE) -f GNUmakefile.llvm + @test -e afl-cc || { echo "[-] Compiling afl-cc failed. You seem not to have a working compiler." ; exit 1; } -man: afl-gcc all $(MANPAGES) +.PHONY: gcc_plugin +gcc_plugin: + -$(MAKE) -f GNUmakefile.gcc_plugin +.PHONY: man +man: $(MANPAGES) + +.PHONY: test +test: tests + +.PHONY: tests tests: source-only @cd test ; ./test-all.sh @rm -f test/errors +.PHONY: performance-tests performance-tests: performance-test +.PHONY: test-performance test-performance: performance-test +.PHONY: performance-test performance-test: source-only @cd test ; ./test-performance.sh # hint: make targets are also listed in the top level README.md +.PHONY: help help: @echo "HELP --- the following make targets exist:" @echo "==========================================" @echo "all: just the main afl++ binaries" @echo "binary-only: everything for binary-only fuzzing: qemu_mode, unicorn_mode, libdislocator, libtokencap" - @echo "source-only: everything for source code fuzzing: llvm_mode, gcc_plugin, libdislocator, libtokencap" + @echo "source-only: everything for source code fuzzing: gcc_plugin, libdislocator, libtokencap" @echo "distrib: everything (for both binary-only and source code fuzzing)" @echo "man: creates simple man pages from the help option of the programs" @echo "install: installs everything you have compiled with the build option above" @@ -322,8 +332,8 @@ help: @echo "==========================================" @echo e.g.: make ASAN_BUILD=1 +.PHONY: test_x86 ifndef AFL_NO_X86 - test_x86: @echo "[*] Checking for the default compiler cc..." @type $(CC) >/dev/null || ( echo; echo "Oops, looks like there is no compiler '"$(CC)"' in your path."; echo; echo "Don't panic! You can restart with '"$(_)" CC='."; echo; exit 1 ) @@ -332,148 +342,129 @@ test_x86: @echo "[*] Checking for the ability to compile x86 code..." @echo 'main() { __asm__("xorb %al, %al"); }' | $(CC) $(CFLAGS) -w -x c - -o .test1 || ( echo; echo "Oops, looks like your compiler can't generate x86 code."; echo; echo "Don't panic! You can use the LLVM or QEMU mode, but see docs/INSTALL first."; echo "(To ignore this error, set AFL_NO_X86=1 and try again.)"; echo; exit 1 ) @rm -f .test1 - else - test_x86: @echo "[!] Note: skipping x86 compilation checks (AFL_NO_X86 set)." - endif - +.PHONY: test_shm ifeq "$(SHMAT_OK)" "1" - test_shm: @echo "[+] shmat seems to be working." @rm -f .test2 - else - test_shm: @echo "[-] shmat seems not to be working, switching to mmap implementation" - endif - +.PHONY: test_python ifeq "$(PYTHON_OK)" "1" - test_python: @rm -f .test 2> /dev/null @echo "[+] $(PYTHON_VERSION) support seems to be working." 
- else - test_python: @echo "[-] You seem to need to install the package python3-dev, python2-dev or python-dev (and perhaps python[23]-apt), but it is optional so we continue" - endif - +.PHONY: ready ready: @echo "[+] Everything seems to be working, ready to compile." -afl-g++: afl-gcc - -afl-gcc: src/afl-gcc.c $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $(CPPFLAGS) src/$@.c -o $@ $(LDFLAGS) - set -e; for i in afl-g++ afl-clang afl-clang++; do ln -sf afl-gcc $$i; done - afl-as: src/afl-as.c include/afl-as.h $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $(CPPFLAGS) src/$@.c -o $@ $(LDFLAGS) - ln -sf afl-as as + $(CC) $(CFLAGS) src/$@.c -o $@ $(LDFLAGS) + @ln -sf afl-as as src/afl-performance.o : $(COMM_HDR) src/afl-performance.c include/hash.h - $(CC) $(CFLAGS) $(CPPFLAGS) -Iinclude $(SPECIAL_PERFORMANCE) -O3 -fno-unroll-loops -c src/afl-performance.c -o src/afl-performance.o + $(CC) -Iinclude $(SPECIAL_PERFORMANCE) -O3 -fno-unroll-loops -c src/afl-performance.c -o src/afl-performance.o src/afl-common.o : $(COMM_HDR) src/afl-common.c include/common.h - $(CC) $(CFLAGS) $(CFLAGS_FLTO) $(CPPFLAGS) -c src/afl-common.c -o src/afl-common.o + $(CC) $(CFLAGS) $(CFLAGS_FLTO) -c src/afl-common.c -o src/afl-common.o src/afl-forkserver.o : $(COMM_HDR) src/afl-forkserver.c include/forkserver.h - $(CC) $(CFLAGS) $(CFLAGS_FLTO) $(CPPFLAGS) -c src/afl-forkserver.c -o src/afl-forkserver.o + $(CC) $(CFLAGS) $(CFLAGS_FLTO) -c src/afl-forkserver.c -o src/afl-forkserver.o src/afl-sharedmem.o : $(COMM_HDR) src/afl-sharedmem.c include/sharedmem.h - $(CC) $(CFLAGS) $(CFLAGS_FLTO) $(CPPFLAGS) -c src/afl-sharedmem.c -o src/afl-sharedmem.o + $(CC) $(CFLAGS) $(CFLAGS_FLTO) -c src/afl-sharedmem.c -o src/afl-sharedmem.o afl-fuzz: $(COMM_HDR) include/afl-fuzz.h $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o | test_x86 - $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(AFL_FUZZ_FILES) $(CPPFLAGS) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o $@ $(PYFLAGS) $(LDFLAGS) + $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o $@ $(PYFLAGS) $(LDFLAGS) afl-showmap: src/afl-showmap.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(CPPFLAGS) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o -o $@ $(LDFLAGS) + $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o -o $@ $(LDFLAGS) afl-tmin: src/afl-tmin.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(CPPFLAGS) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o $@ $(LDFLAGS) + $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o $@ $(LDFLAGS) afl-analyze: src/afl-analyze.c src/afl-common.o src/afl-sharedmem.o src/afl-performance.o $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(CPPFLAGS) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-performance.o -o $@ $(LDFLAGS) + $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-performance.o -o $@ $(LDFLAGS) afl-gotcpu: src/afl-gotcpu.c src/afl-common.o $(COMM_HDR) | test_x86 - $(CC) 
$(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(CPPFLAGS) src/$@.c src/afl-common.o -o $@ $(LDFLAGS) + $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-common.o -o $@ $(LDFLAGS) +.PHONY: document +document: afl-fuzz-document # document all mutations and only do one run (use with only one input file!) -document: $(COMM_HDR) include/afl-fuzz.h $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-performance.o | test_x86 - $(CC) -D_DEBUG=\"1\" -D_AFL_DOCUMENT_MUTATIONS $(CFLAGS) $(CFLAGS_FLTO) $(AFL_FUZZ_FILES) $(CPPFLAGS) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.c src/afl-performance.o -o afl-fuzz-document $(PYFLAGS) $(LDFLAGS) +afl-fuzz-document: $(COMM_HDR) include/afl-fuzz.h $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-performance.o | test_x86 + $(CC) -D_DEBUG=\"1\" -D_AFL_DOCUMENT_MUTATIONS $(CFLAGS) $(CFLAGS_FLTO) $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.c src/afl-performance.o -o afl-fuzz-document $(PYFLAGS) $(LDFLAGS) test/unittests/unit_maybe_alloc.o : $(COMM_HDR) include/alloc-inl.h test/unittests/unit_maybe_alloc.c $(AFL_FUZZ_FILES) - @$(CC) $(CFLAGS) $(ASAN_CFLAGS) $(CPPFLAGS) -c test/unittests/unit_maybe_alloc.c -o test/unittests/unit_maybe_alloc.o + @$(CC) $(CFLAGS) $(ASAN_CFLAGS) -c test/unittests/unit_maybe_alloc.c -o test/unittests/unit_maybe_alloc.o unit_maybe_alloc: test/unittests/unit_maybe_alloc.o - @$(CC) $(CFLAGS) $(CPPFLAGS) -Wl,--wrap=exit -Wl,--wrap=printf test/unittests/unit_maybe_alloc.o -o test/unittests/unit_maybe_alloc $(LDFLAGS) $(ASAN_LDFLAGS) -lcmocka + @$(CC) $(CFLAGS) -Wl,--wrap=exit -Wl,--wrap=printf test/unittests/unit_maybe_alloc.o -o test/unittests/unit_maybe_alloc $(LDFLAGS) $(ASAN_LDFLAGS) -lcmocka ./test/unittests/unit_maybe_alloc test/unittests/unit_hash.o : $(COMM_HDR) include/alloc-inl.h test/unittests/unit_hash.c $(AFL_FUZZ_FILES) src/afl-performance.o - @$(CC) $(CFLAGS) $(ASAN_CFLAGS) $(CPPFLAGS) -c test/unittests/unit_hash.c -o test/unittests/unit_hash.o + @$(CC) $(CFLAGS) $(ASAN_CFLAGS) -c test/unittests/unit_hash.c -o test/unittests/unit_hash.o unit_hash: test/unittests/unit_hash.o src/afl-performance.o - @$(CC) $(CFLAGS) $(CPPFLAGS) -Wl,--wrap=exit -Wl,--wrap=printf $^ -o test/unittests/unit_hash $(LDFLAGS) $(ASAN_LDFLAGS) -lcmocka + @$(CC) $(CFLAGS) -Wl,--wrap=exit -Wl,--wrap=printf $^ -o test/unittests/unit_hash $(LDFLAGS) $(ASAN_LDFLAGS) -lcmocka ./test/unittests/unit_hash test/unittests/unit_rand.o : $(COMM_HDR) include/alloc-inl.h test/unittests/unit_rand.c $(AFL_FUZZ_FILES) src/afl-performance.o - @$(CC) $(CFLAGS) $(ASAN_CFLAGS) $(CPPFLAGS) -c test/unittests/unit_rand.c -o test/unittests/unit_rand.o + @$(CC) $(CFLAGS) $(ASAN_CFLAGS) -c test/unittests/unit_rand.c -o test/unittests/unit_rand.o unit_rand: test/unittests/unit_rand.o src/afl-common.o src/afl-performance.o - @$(CC) $(CFLAGS) $(ASAN_CFLAGS) $(CPPFLAGS) -Wl,--wrap=exit -Wl,--wrap=printf $^ -o test/unittests/unit_rand $(LDFLAGS) $(ASAN_LDFLAGS) -lcmocka + @$(CC) $(CFLAGS) $(ASAN_CFLAGS) -Wl,--wrap=exit -Wl,--wrap=printf $^ -o test/unittests/unit_rand $(LDFLAGS) $(ASAN_LDFLAGS) -lcmocka ./test/unittests/unit_rand test/unittests/unit_list.o : $(COMM_HDR) include/list.h test/unittests/unit_list.c $(AFL_FUZZ_FILES) - @$(CC) $(CFLAGS) $(ASAN_CFLAGS) $(CPPFLAGS) -c test/unittests/unit_list.c -o test/unittests/unit_list.o + @$(CC) $(CFLAGS) $(ASAN_CFLAGS) -c test/unittests/unit_list.c -o test/unittests/unit_list.o unit_list: test/unittests/unit_list.o - @$(CC) $(CFLAGS) $(ASAN_CFLAGS) $(CPPFLAGS) 
-Wl,--wrap=exit -Wl,--wrap=printf test/unittests/unit_list.o -o test/unittests/unit_list $(LDFLAGS) $(ASAN_LDFLAGS) -lcmocka + @$(CC) $(CFLAGS) $(ASAN_CFLAGS) -Wl,--wrap=exit -Wl,--wrap=printf test/unittests/unit_list.o -o test/unittests/unit_list $(LDFLAGS) $(ASAN_LDFLAGS) -lcmocka ./test/unittests/unit_list test/unittests/unit_preallocable.o : $(COMM_HDR) include/alloc-inl.h test/unittests/unit_preallocable.c $(AFL_FUZZ_FILES) - @$(CC) $(CFLAGS) $(ASAN_CFLAGS) $(CPPFLAGS) -c test/unittests/unit_preallocable.c -o test/unittests/unit_preallocable.o + @$(CC) $(CFLAGS) $(ASAN_CFLAGS) -c test/unittests/unit_preallocable.c -o test/unittests/unit_preallocable.o unit_preallocable: test/unittests/unit_preallocable.o - @$(CC) $(CFLAGS) $(ASAN_CFLAGS) $(CPPFLAGS) -Wl,--wrap=exit -Wl,--wrap=printf test/unittests/unit_preallocable.o -o test/unittests/unit_preallocable $(LDFLAGS) $(ASAN_LDFLAGS) -lcmocka + @$(CC) $(CFLAGS) $(ASAN_CFLAGS) -Wl,--wrap=exit -Wl,--wrap=printf test/unittests/unit_preallocable.o -o test/unittests/unit_preallocable $(LDFLAGS) $(ASAN_LDFLAGS) -lcmocka ./test/unittests/unit_preallocable +.PHONY: unit_clean unit_clean: @rm -f ./test/unittests/unit_preallocable ./test/unittests/unit_list ./test/unittests/unit_maybe_alloc test/unittests/*.o +.PHONY: unit ifneq "$(shell uname)" "Darwin" - -unit: unit_maybe_alloc unit_preallocable unit_list unit_clean unit_rand unit_hash - +unit: unit_maybe_alloc unit_preallocable unit_list unit_clean unit_rand unit_hash else - unit: @echo [-] unit tests are skipped on Darwin \(lacks GNU linker feature --wrap\) - endif +.PHONY: code-format code-format: ./.custom-format.py -i src/*.c ./.custom-format.py -i include/*.h ./.custom-format.py -i libdislocator/*.c ./.custom-format.py -i libtokencap/*.c - ./.custom-format.py -i llvm_mode/*.c - ./.custom-format.py -i llvm_mode/*.h - ./.custom-format.py -i llvm_mode/*.cc - ./.custom-format.py -i gcc_plugin/*.c - @#./.custom-format.py -i gcc_plugin/*.h - ./.custom-format.py -i gcc_plugin/*.cc + ./.custom-format.py -i instrumentation/*.h + ./.custom-format.py -i instrumentation/*.cc + ./.custom-format.py -i instrumentation/*.c ./.custom-format.py -i custom_mutators/*/*.c @#./.custom-format.py -i custom_mutators/*/*.h # destroys input.h :-( ./.custom-format.py -i examples/*/*.c @@ -489,38 +480,40 @@ code-format: ./.custom-format.py -i *.c +.PHONY: test_build ifndef AFL_NO_X86 - -test_build: afl-gcc afl-as afl-showmap +test_build: afl-cc afl-as afl-showmap @echo "[*] Testing the CC wrapper and instrumentation output..." - @unset AFL_USE_ASAN AFL_USE_MSAN AFL_CC; AFL_DEBUG=1 AFL_INST_RATIO=100 AFL_AS_FORCE_INSTRUMENT=1 AFL_PATH=. ./$(TEST_CC) $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS) 2>&1 | grep 'afl-as' >/dev/null || (echo "Oops, afl-as did not get called from "$(TEST_CC)". This is normally achieved by "$(CC)" honoring the -B option."; exit 1 ) + @unset AFL_USE_ASAN AFL_USE_MSAN AFL_CC; AFL_DEBUG=1 AFL_INST_RATIO=100 AFL_PATH=. ./$(TEST_CC) $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS) 2>&1 | grep 'afl-as' >/dev/null || (echo "Oops, afl-as did not get called from "$(TEST_CC)". 
This is normally achieved by "$(CC)" honoring the -B option."; exit 1 ) ASAN_OPTIONS=detect_leaks=0 ./afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null echo 1 | ASAN_OPTIONS=detect_leaks=0 ./afl-showmap -m none -q -o .test-instr1 ./test-instr @rm -f test-instr @cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please post to https://github.com/AFLplusplus/AFLplusplus/issues to troubleshoot the issue."; echo; exit 1; fi + @echo @echo "[+] All right, the instrumentation seems to be working!" - else - -test_build: afl-gcc afl-as afl-showmap +test_build: afl-cc afl-as afl-showmap @echo "[!] Note: skipping build tests (you may need to use LLVM or QEMU mode)." - endif - +.PHONY: all_done all_done: test_build - @if [ ! "`type clang 2>/dev/null`" = "" ]; then echo "[+] LLVM users: see llvm_mode/README.md for a faster alternative to afl-gcc."; fi + @test -e afl-cc && echo "[+] Main compiler 'afl-cc' successfully built!" || { echo "[-] Main compiler 'afl-cc' failed to build, set up a working build environment first!" ; exit 1 ; } + @test -e cmplog-instructions-pass.so && echo "[+] LLVM mode for 'afl-cc' successfully built!" || echo "[-] LLVM mode for 'afl-cc' failed to build, likely you either do not have llvm installed or you have not set LLVM_CONFIG pointing to e.g. llvm-config-11. See instrumentation/README.llvm.md for how to do this. Highly recommended!" + @test -e SanitizerCoverageLTO.so && echo "[+] LLVM LTO mode for 'afl-cc' successfully built!" || echo "[-] LLVM LTO mode for 'afl-cc' failed to build, this would need LLVM 11+, see instrumentation/README.lto.md for how to build it" + @test -e afl-gcc-pass.so && echo "[+] gcc_plugin for 'afl-cc' successfully built!" || echo "[-] gcc_plugin for 'afl-cc' failed to build, unless you really need it that is fine - or read instrumentation/README.gcc_plugin.md for how to build it" @echo "[+] All done! Be sure to review the README.md - it's pretty short and useful." @if [ "`uname`" = "Darwin" ]; then printf "\nWARNING: Fuzzing on MacOS X is slow because of the unusually high overhead of\nfork() on this OS. Consider using Linux or *BSD. You can also use VirtualBox\n(virtualbox.org) to put AFL inside a Linux or *BSD VM.\n\n"; fi @! tty <&1 >/dev/null || printf "\033[0;30mNOTE: If you can read this, your terminal probably uses white background.\nThis will make the UI hard to read. 
See docs/status_screen.md for advice.\033[0m\n" 2>/dev/null .NOTPARALLEL: clean all +.PHONY: clean clean: - rm -f $(PROGS) libradamsa.so afl-fuzz-document afl-as as afl-g++ afl-clang afl-clang++ *.o src/*.o *~ a.out core core.[1-9][0-9]* *.stackdump .test .test1 .test2 test-instr .test-instr0 .test-instr1 afl-qemu-trace afl-gcc-fast afl-gcc-pass.so afl-gcc-rt.o afl-g++-fast ld *.so *.8 test/unittests/*.o test/unittests/unit_maybe_alloc test/unittests/preallocable .afl-* + rm -f $(PROGS) libradamsa.so afl-fuzz-document afl-as as afl-g++ afl-clang afl-clang++ *.o src/*.o *~ a.out core core.[1-9][0-9]* *.stackdump .test .test1 .test2 test-instr .test-instr0 .test-instr1 afl-qemu-trace afl-gcc-fast afl-gcc-pass.so afl-g++-fast ld *.so *.8 test/unittests/*.o test/unittests/unit_maybe_alloc test/unittests/preallocable .afl-* afl-gcc afl-g++ rm -rf out_dir qemu_mode/qemu-3.1.1 *.dSYM */*.dSYM - -$(MAKE) -C llvm_mode clean - -$(MAKE) -C gcc_plugin clean + -$(MAKE) -f GNUmakefile.llvm clean + -$(MAKE) -f GNUmakefile.gcc_plugin clean $(MAKE) -C libdislocator clean $(MAKE) -C libtokencap clean $(MAKE) -C examples/afl_network_proxy clean @@ -530,20 +523,22 @@ clean: $(MAKE) -C qemu_mode/libcompcov clean rm -rf qemu_mode/qemu-3.1.1 ifeq "$(IN_REPO)" "1" - test -e unicorn_mode/unicornafl/Makefile && $(MAKE) -C unicorn_mode/unicornafl clean || true + test -d unicorn_mode/unicornafl && $(MAKE) -C unicorn_mode/unicornafl clean || true else rm -rf qemu_mode/qemu-3.1.1.tar.xz rm -rf unicorn_mode/unicornafl endif +.PHONY: deepclean deepclean: clean rm -rf qemu_mode/qemu-3.1.1.tar.xz rm -rf unicorn_mode/unicornafl git reset --hard >/dev/null 2>&1 || true +.PHONY: distrib distrib: all - -$(MAKE) -C llvm_mode - -$(MAKE) -C gcc_plugin + -$(MAKE) -f GNUmakefile.llvm + -$(MAKE) -f GNUmakefile.gcc_plugin $(MAKE) -C libdislocator $(MAKE) -C libtokencap $(MAKE) -C examples/afl_network_proxy @@ -552,6 +547,7 @@ distrib: all -cd qemu_mode && sh ./build_qemu_support.sh cd unicorn_mode && unset CFLAGS && sh ./build_unicorn_support.sh +.PHONY: binary-only binary-only: all $(MAKE) -C libdislocator $(MAKE) -C libtokencap @@ -561,9 +557,10 @@ binary-only: all -cd qemu_mode && sh ./build_qemu_support.sh cd unicorn_mode && unset CFLAGS && sh ./build_unicorn_support.sh +.PHONY: source-only source-only: all - -$(MAKE) -C llvm_mode - -$(MAKE) -C gcc_plugin + -$(MAKE) -f GNUmakefile.llvm + -$(MAKE) -f GNUmakefile.gcc_plugin $(MAKE) -C libdislocator $(MAKE) -C libtokencap @#$(MAKE) -C examples/afl_network_proxy @@ -573,8 +570,7 @@ source-only: all %.8: % @echo .TH $* 8 $(BUILD_DATE) "afl++" > $@ @echo .SH NAME >> $@ - @printf "%s" ".B $* \- " >> $@ - @./$* -h 2>&1 | head -n 1 | sed -e "s/$$(printf '\e')[^m]*m//g" >> $@ + @echo .B $* >> $@ @echo >> $@ @echo .SH SYNOPSIS >> $@ @./$* -h 2>&1 | head -n 3 | tail -n 1 | sed 's/^\.\///' >> $@ @@ -590,30 +586,28 @@ source-only: all @echo .SH LICENSE >> $@ @echo Apache License Version 2.0, January 2004 >> $@ +.PHONY: install install: all $(MANPAGES) - install -d -m 755 $${DESTDIR}$(BIN_PATH) $${DESTDIR}$(HELPER_PATH) $${DESTDIR}$(DOC_PATH) $${DESTDIR}$(MISC_PATH) - rm -f $${DESTDIR}$(BIN_PATH)/afl-plot.sh + @install -d -m 755 $${DESTDIR}$(BIN_PATH) $${DESTDIR}$(HELPER_PATH) $${DESTDIR}$(DOC_PATH) $${DESTDIR}$(MISC_PATH) + @rm -f $${DESTDIR}$(BIN_PATH)/afl-plot.sh + @rm -f $${DESTDIR}$(BIN_PATH)/afl-as install -m 755 $(PROGS) $(SH_PROGS) $${DESTDIR}$(BIN_PATH) - rm -f $${DESTDIR}$(BIN_PATH)/afl-as - if [ -f afl-qemu-trace ]; then install -m 755 afl-qemu-trace $${DESTDIR}$(BIN_PATH); fi - if [ 
-f afl-gcc-fast ]; then set e; install -m 755 afl-gcc-fast $${DESTDIR}$(BIN_PATH); ln -sf afl-gcc-fast $${DESTDIR}$(BIN_PATH)/afl-g++-fast; install -m 755 afl-gcc-pass.so afl-gcc-rt.o $${DESTDIR}$(HELPER_PATH); fi - if [ -f afl-clang-fast ]; then $(MAKE) -C llvm_mode install; fi - if [ -f libdislocator.so ]; then set -e; install -m 755 libdislocator.so $${DESTDIR}$(HELPER_PATH); fi - if [ -f libtokencap.so ]; then set -e; install -m 755 libtokencap.so $${DESTDIR}$(HELPER_PATH); fi - if [ -f libcompcov.so ]; then set -e; install -m 755 libcompcov.so $${DESTDIR}$(HELPER_PATH); fi - if [ -f afl-fuzz-document ]; then set -e; install -m 755 afl-fuzz-document $${DESTDIR}$(BIN_PATH); fi - if [ -f socketfuzz32.so -o -f socketfuzz64.so ]; then $(MAKE) -C examples/socket_fuzzing install; fi - if [ -f argvfuzz32.so -o -f argvfuzz64.so ]; then $(MAKE) -C examples/argv_fuzzing install; fi - if [ -f examples/afl_network_proxy/afl-network-server ]; then $(MAKE) -C examples/afl_network_proxy install; fi - if [ -f libAFLDriver.a ]; then install -m 644 libAFLDriver.a $${DESTDIR}$(HELPER_PATH); fi - if [ -f libAFLQemuDriver.a ]; then install -m 644 libAFLQemuDriver.a $${DESTDIR}$(HELPER_PATH); fi - - set -e; ln -sf afl-gcc $${DESTDIR}$(BIN_PATH)/afl-g++ - set -e; if [ -f afl-clang-fast ] ; then ln -sf afl-clang-fast $${DESTDIR}$(BIN_PATH)/afl-clang ; ln -sf afl-clang-fast $${DESTDIR}$(BIN_PATH)/afl-clang++ ; else ln -sf afl-gcc $${DESTDIR}$(BIN_PATH)/afl-clang ; ln -sf afl-gcc $${DESTDIR}$(BIN_PATH)/afl-clang++; fi - - mkdir -m 0755 -p ${DESTDIR}$(MAN_PATH) + @if [ -f afl-qemu-trace ]; then install -m 755 afl-qemu-trace $${DESTDIR}$(BIN_PATH); fi + @if [ -f libdislocator.so ]; then set -e; install -m 755 libdislocator.so $${DESTDIR}$(HELPER_PATH); fi + @if [ -f libtokencap.so ]; then set -e; install -m 755 libtokencap.so $${DESTDIR}$(HELPER_PATH); fi + @if [ -f libcompcov.so ]; then set -e; install -m 755 libcompcov.so $${DESTDIR}$(HELPER_PATH); fi + @if [ -f afl-fuzz-document ]; then set -e; install -m 755 afl-fuzz-document $${DESTDIR}$(BIN_PATH); fi + @if [ -f socketfuzz32.so -o -f socketfuzz64.so ]; then $(MAKE) -C examples/socket_fuzzing install; fi + @if [ -f argvfuzz32.so -o -f argvfuzz64.so ]; then $(MAKE) -C examples/argv_fuzzing install; fi + @if [ -f examples/afl_network_proxy/afl-network-server ]; then $(MAKE) -C examples/afl_network_proxy install; fi + @if [ -f examples/aflpp_driver/libAFLDriver.a ]; then install -m 644 examples/aflpp_driver/libAFLDriver.a $${DESTDIR}$(HELPER_PATH); fi + @if [ -f examples/aflpp_driver/libAFLQemuDriver.a ]; then install -m 644 examples/aflpp_driver/libAFLQemuDriver.a $${DESTDIR}$(HELPER_PATH); fi + -$(MAKE) -f GNUmakefile.llvm install + -$(MAKE) -f GNUmakefile.gcc_plugin install + ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-gcc + ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-g++ + @mkdir -m 0755 -p ${DESTDIR}$(MAN_PATH) install -m0644 *.8 ${DESTDIR}$(MAN_PATH) - install -m 755 afl-as $${DESTDIR}$(HELPER_PATH) ln -sf afl-as $${DESTDIR}$(HELPER_PATH)/as install -m 644 docs/*.md $${DESTDIR}$(DOC_PATH) diff --git a/GNUmakefile.llvm b/GNUmakefile.llvm index d4502319..d76e0b28 100644 --- a/GNUmakefile.llvm +++ b/GNUmakefile.llvm @@ -419,17 +419,14 @@ document: ./afl-compiler-rt.o: instrumentation/afl-compiler-rt.o.c | test_deps $(CLANG_BIN) $(CLANG_CFL) $(CFLAGS_SAFE) $(CPPFLAGS) -O3 -Wno-unused-result -fPIC -c $< -o $@ - ln -sf ./afl-compiler-rt.o ./afl-llvm-rt.o ./afl-compiler-rt-32.o: instrumentation/afl-compiler-rt.o.c | test_deps @printf "[*] Building 32-bit variant of the 
runtime (-m32)... " @$(CLANG_BIN) $(CLANG_CFL) $(CFLAGS_SAFE) $(CPPFLAGS) -O3 -Wno-unused-result -m32 -fPIC -c $< -o $@ 2>/dev/null; if [ "$$?" = "0" ]; then echo "success!"; else echo "failed (that's fine)"; fi - @test -e ./afl-compiler-rt-32.o && ln -sf ./afl-compiler-rt-32.o ./afl-llvm-rt-32.o ./afl-compiler-rt-64.o: instrumentation/afl-compiler-rt.o.c | test_deps @printf "[*] Building 64-bit variant of the runtime (-m64)... " @$(CLANG_BIN) $(CLANG_CFL) $(CFLAGS_SAFE) $(CPPFLAGS) -O3 -Wno-unused-result -m64 -fPIC -c $< -o $@ 2>/dev/null; if [ "$$?" = "0" ]; then echo "success!"; else echo "failed (that's fine)"; fi - @test -e ./afl-compiler-rt-64.o && ln -sf ./afl-compiler-rt-64.o ./afl-llvm-rt-64.o .PHONY: test_build test_build: $(PROGS) @@ -454,8 +451,8 @@ install: all @if [ -f ./afl-compiler-rt.o ]; then set -e; install -m 755 ./afl-compiler-rt.o $${DESTDIR}$(HELPER_PATH); fi @if [ -f ./afl-lto ]; then set -e; ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-lto; ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-lto++; ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-clang-lto; ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-clang-lto++; install -m 755 ./afl-llvm-lto-instrumentation.so ./afl-llvm-rt-lto*.o ./afl-llvm-lto-instrumentlist.so $${DESTDIR}$(HELPER_PATH); fi @if [ -f ./afl-ld-lto ]; then set -e; install -m 755 ./afl-ld-lto $${DESTDIR}$(BIN_PATH); fi - @if [ -f ./afl-compiler-rt-32.o ]; then set -e; install -m 755 ./afl-compiler-rt-32.o $${DESTDIR}$(HELPER_PATH); ln -sf afl-compiler-rt-32.o $${DESTDIR}$(HELPER_PATH)/afl-llvm-rt-32.o; fi - @if [ -f ./afl-compiler-rt-64.o ]; then set -e; install -m 755 ./afl-compiler-rt-64.o $${DESTDIR}$(HELPER_PATH); ln -sf afl-compiler-rt-64.o $${DESTDIR}$(HELPER_PATH)/afl-llvm-rt-64.o; fi + @if [ -f ./afl-compiler-rt-32.o ]; then set -e; install -m 755 ./afl-compiler-rt-32.o $${DESTDIR}$(HELPER_PATH); fi + @if [ -f ./afl-compiler-rt-64.o ]; then set -e; install -m 755 ./afl-compiler-rt-64.o $${DESTDIR}$(HELPER_PATH); fi @if [ -f ./compare-transform-pass.so ]; then set -e; install -m 755 ./*.so $${DESTDIR}$(HELPER_PATH); fi @if [ -f ./compare-transform-pass.so ]; then set -e; ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-clang-fast ; ln -sf ./afl-c++ $${DESTDIR}$(BIN_PATH)/afl-clang-fast++ ; ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-clang ; ln -sf ./afl-c++ $${DESTDIR}$(BIN_PATH)/afl-clang++ ; fi @if [ -f ./SanitizerCoverageLTO.so ]; then set -e; ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-clang-lto ; ln -sf ./afl-c++ $${DESTDIR}$(BIN_PATH)/afl-clang-lto++ ; fi diff --git a/README.md b/README.md index 4cad6b47..96b34260 100644 --- a/README.md +++ b/README.md @@ -4,9 +4,9 @@ ![Travis State](https://api.travis-ci.com/AFLplusplus/AFLplusplus.svg?branch=stable) - Release Version: [2.68c](https://github.com/AFLplusplus/AFLplusplus/releases) + Release Version: [2.67c](https://github.com/AFLplusplus/AFLplusplus/releases) - Github Version: 3.00a + Github Version: 2.67d Repository: [https://github.com/AFLplusplus/AFLplusplus](https://github.com/AFLplusplus/AFLplusplus) @@ -22,6 +22,26 @@ afl++ is a superior fork to Google's afl - more speed, more and better mutations, more and better instrumentation, custom module support, etc. +## Major changes in afl++ 3.0 + +With afl++ 3.0 we introduced changes that break some previous afl and afl++ +behaviours: + + * There are no llvm_mode and gcc_plugin subdirectories anymore and there is + only one compiler: afl-cc. All previous compilers now symlink to this one + compiler. All instrumentation source code is now in the `instrumentation/` + folder. 
+ * qemu_mode got upgraded to QEMU 5.1, but to be able to build this a current + ninja build tool version and python3 setuptools are required. + qemu_mode also got new options like snapshotting, instrumenting specific + shared libraries, etc. and QEMU 5.1 supports more CPU targets so this is + worth it. + * When instrumenting targets, afl-cc will not supersede optimizations. This + allows to fuzz targets as same as they are built for debug or release. + * afl-fuzz' `-i` option now descends into subdirectories. + * afl-fuzz will skip over empty dictionaries and too large test cases instead + of failing. + ## Contents 1. [Features](#important-features-of-afl) @@ -39,7 +59,7 @@ with laf-intel and redqueen, unicorn mode, gcc plugin, full *BSD, Solaris and Android support and much, much, much more. - | Feature/Instrumentation | afl-gcc | llvm_mode | gcc_plugin | qemu_mode | unicorn_mode | + | Feature/Instrumentation | afl-gcc | llvm | gcc_plugin | qemu_mode | unicorn_mode | | -------------------------|:-------:|:---------:|:----------:|:----------------:|:------------:| | NeverZero | x86[_64]| x(1) | (2) | x | x | | Persistent Mode | | x | x | x86[_64]/arm[64] | x | @@ -47,9 +67,8 @@ | CmpLog | | x | | x86[_64]/arm[64] | | | Selective Instrumentation| | x | x | (x)(3) | | | Non-Colliding Coverage | | x(4) | | (x)(5) | | - | InsTrim | | x | | | | | Ngram prev_loc Coverage | | x(6) | | | | - | Context Coverage | | x | | | | + | Context Coverage | | x(6) | | | | | Auto Dictionary | | x(7) | | | | | Snapshot LKM Support | | x | | (x)(5) | | @@ -59,11 +78,11 @@ 4. with pcguard mode and LTO mode for LLVM >= 11 5. upcoming, development in the branch 6. not compatible with LTO instrumentation and needs at least LLVM >= 4.1 - 7. only in LTO mode with LLVM >= 11 + 7. automatic in LTO mode with LLVM >= 11, an extra pass for all LLVM version that writes to a file to use with afl-fuzz' `-x` Among others, the following features and patches have been integrated: - * NeverZero patch for afl-gcc, llvm_mode, qemu_mode and unicorn_mode which prevents a wrapping map value to zero, increases coverage + * NeverZero patch for afl-gcc, instrumentation, qemu_mode and unicorn_mode which prevents a wrapping map value to zero, increases coverage * Persistent mode, deferred forkserver and in-memory fuzzing for qemu_mode * Unicorn mode which allows fuzzing of binaries from completely different platforms (integration provided by domenukk) * The new CmpLog instrumentation for LLVM and QEMU inspired by [Redqueen](https://www.syssec.ruhr-uni-bochum.de/media/emma/veroeffentlichungen/2018/12/17/NDSS19-Redqueen.pdf) @@ -71,10 +90,9 @@ * AFLfast's power schedules by Marcel Böhme: [https://github.com/mboehme/aflfast](https://github.com/mboehme/aflfast) * The MOpt mutator: [https://github.com/puppet-meteor/MOpt-AFL](https://github.com/puppet-meteor/MOpt-AFL) * LLVM mode Ngram coverage by Adrian Herrera [https://github.com/adrianherrera/afl-ngram-pass](https://github.com/adrianherrera/afl-ngram-pass) - * InsTrim, a CFG llvm_mode instrumentation implementation: [https://github.com/csienslab/instrim](https://github.com/csienslab/instrim) * C. 
Holler's afl-fuzz Python mutator module: [https://github.com/choller/afl](https://github.com/choller/afl) * Custom mutator by a library (instead of Python) by kyakdan - * LAF-Intel/CompCov support for llvm_mode, qemu_mode and unicorn_mode (with enhanced capabilities) + * LAF-Intel/CompCov support for instrumentation, qemu_mode and unicorn_mode (with enhanced capabilities) * Radamsa and honggfuzz mutators (as custom mutators). * QBDI mode to fuzz android native libraries via Quarkslab's [QBDI](https://github.com/QBDI/QBDI) framework * Frida and ptrace mode to fuzz binary-only libraries, etc. @@ -88,7 +106,7 @@ send a mail to . See [docs/QuickStartGuide.md](docs/QuickStartGuide.md) if you don't have time to - read this file. + read this file - however this is not recommended! ## Branches @@ -105,13 +123,14 @@ ## Help wanted -We are happy to be part of [Google Summer of Code 2020](https://summerofcode.withgoogle.com/organizations/5100744400699392/)! :-) +We were happy to be part of [Google Summer of Code 2020](https://summerofcode.withgoogle.com/organizations/5100744400699392/) +and we will try to participate again in 2021! We have several ideas we would like to see in AFL++ to make it even better. However, we already work on so many things that we do not have the time for all the big ideas. -This can be your way to support and contribute to AFL++ - extend it to +This can be your way to support and contribute to AFL++ - extend it to do something cool. We have an idea list in [docs/ideas.md](docs/ideas.md). @@ -132,7 +151,7 @@ This image is automatically generated when a push to the stable repo happens. You will find your target source code in /src in the container. If you want to build afl++ yourself you have many options. -The easiest is to build and install everything: +The easiest choice is to build and install everything: ```shell sudo apt install build-essential libtool-bin python3-dev automake flex bison libglib2.0-dev libpixman-1-dev clang python3-setuptools llvm @@ -142,9 +161,9 @@ sudo make install It is recommended to install the newest available gcc, clang and llvm-dev possible in your distribution! -Note that "make distrib" also builds llvm_mode, qemu_mode, unicorn_mode and +Note that "make distrib" also builds instrumentation, qemu_mode, unicorn_mode and more. If you just want plain afl++ then do "make all", however compiling and -using at least llvm_mode is highly recommended for much better results - +using at least instrumentation is highly recommended for much better results - hence in this case ```shell @@ -156,7 +175,7 @@ These build targets exist: * all: just the main afl++ binaries * binary-only: everything for binary-only fuzzing: qemu_mode, unicorn_mode, libdislocator, libtokencap -* source-only: everything for source code fuzzing: llvm_mode, libdislocator, libtokencap +* source-only: everything for source code fuzzing: instrumentation, libdislocator, libtokencap * distrib: everything (for both binary-only and source code fuzzing) * man: creates simple man pages from the help option of the programs * install: installs everything you have compiled with the build options above @@ -212,18 +231,19 @@ If you have a binary-only target please skip to [#Instrumenting binary-only apps Fuzzing source code is a three-step process. -1. compile the target with a special compiler that prepares the target to be +1. Compile the target with a special compiler that prepares the target to be fuzzed efficiently. This step is called "instrumenting a target". 2. 
Prepare the fuzzing by selecting and optimizing the input corpus for the target. -3. perform the fuzzing of the target by randomly mutating input and assessing +3. Perform the fuzzing of the target by randomly mutating input and assessing if a generated input was processed in a new path in the target binary. ### 1. Instrumenting that target #### a) Selecting the best afl++ compiler for instrumenting the target -afl++ comes with different compilers and instrumentation options. +afl++ comes with a central compiler `afl-cc` that incorporates various different +kinds of compiler targets and and instrumentation options. The following evaluation flow will help you to select the best possible. It is highly recommended to have the newest llvm version possible installed, @@ -231,49 +251,62 @@ anything below 9 is not recommended. ``` +--------------------------------+ -| clang/clang++ 11+ is available | --> use afl-clang-lto and afl-clang-lto++ -+--------------------------------+ see [llvm/README.lto.md](llvm/README.lto.md) +| clang/clang++ 11+ is available | --> use LTO mode (afl-clang-lto/afl-clang-lto++) ++--------------------------------+ see [instrumentation/README.lto.md](instrumentation/README.lto.md) | - | if not, or if the target fails with afl-clang-lto/++ + | if not, or if the target fails with LTO afl-clang-lto/++ | v +---------------------------------+ -| clang/clang++ 3.3+ is available | --> use afl-clang-fast and afl-clang-fast++ -+---------------------------------+ see [llvm/README.md](llvm/README.md) +| clang/clang++ 3.3+ is available | --> use LLVM mode (afl-clang-fast/afl-clang-fast++) ++---------------------------------+ see [instrumentation/README.md](instrumentation/README.md) | - | if not, or if the target fails with afl-clang-fast/++ + | if not, or if the target fails with LLVM afl-clang-fast/++ | v +--------------------------------+ - | if you want to instrument only | -> use afl-gcc-fast and afl-gcc-fast++ - | parts of the target | see [gcc_plugin/README.md](gcc_plugin/README.md) and - +--------------------------------+ [gcc_plugin/README.instrument_list.md](gcc_plugin/README.instrument_list.md) + | if you want to instrument only | -> use GCC_PLUGIN mode (afl-gcc-fast/afl-g++-fast) + | parts of the target | see [instrumentation/README.gcc_plugin.md](instrumentation/README.gcc_plugin.md) and + +--------------------------------+ [instrumentation/README.instrument_list.md](instrumentation/README.instrument_list.md) | | if not, or if you do not have a gcc with plugin support | v - use afl-gcc and afl-g++ (or afl-clang and afl-clang++) + use GCC mode (afl-gcc/afl-g++) (or afl-clang/afl-clang++ for clang) ``` Clickable README links for the chosen compiler: - * [afl-clang-lto](llvm/README.lto.md) - * [afl-clang-fast](llvm/README.md) - * [afl-gcc-fast](gcc_plugin/README.md) - * afl-gcc has no README as it has no features + * [LTO mode - afl-clang-lto](instrumentation/README.lto.md) + * [LLVM mode - afl-clang-fast](instrumentation/README.md) + * [GCC_PLUGIN mode - afl-gcc-fast](instrumentation/README.gcc_plugin.md) + * GCC mode (afl-gcc) has no README as it has no own features + +You can select the mode for the afl-cc compiler by: + 1. passing --afl-MODE command line options to the compiler via CFLAGS/CXXFLAGS/CPPFLAGS + 2. use a symlink to afl-cc: afl-gcc, afl-g++, afl-clang, afl-clang++, + afl-clang-fast, afl-clang-fast++, afl-clang-lto, afl-clang-lto++, + afl-gcc-fast, afl-g++-fast + 3. 
using the environment variable AFL_CC_COMPILER with MODE + +MODE can be one of: LTO (afl-clang-lto*), LLVM (afl-clang-fast*), GCC_PLUGIN +(afl-g*-fast) or GCC (afl-gcc/afl-g++). + +Because no afl specific command-line options are accepted (beside the +--afl-MODE command), the compile-time tools make fairly broad use of environment +variables, which can be listed with `afl-cc -hh` or by reading [docs/env_variables.md](docs/env_variables.md). #### b) Selecting instrumentation options -The following options are available when you instrument with afl-clang-fast or -afl-clang-lto: +The following options are available when you instrument with LTO mode (afl-clang-fast/afl-clang-lto): * Splitting integer, string, float and switch comparisons so afl++ can easier solve these. This is an important option if you do not have a very good and large input corpus. This technique is called laf-intel or COMPCOV. To use this set the following environment variable before compiling the target: `export AFL_LLVM_LAF_ALL=1` - You can read more about this in [llvm/README.laf-intel.md](llvm/README.laf-intel.md) - * A different technique (and usually a better than laf-intel) is to + You can read more about this in [instrumentation/README.laf-intel.md](instrumentation/README.laf-intel.md) + * A different technique (and usually a better one than laf-intel) is to instrument the target so that any compare values in the target are sent to afl++ which then tries to put these values into the fuzzing data at different locations. This technique is very fast and good - if the target does not @@ -282,12 +315,13 @@ afl-clang-lto: If you want to use this technique, then you have to compile the target twice, once specifically with/for this mode, and pass this binary to afl-fuzz via the `-c` parameter. - Not that you can compile also just a cmplog binary and use that for both - however there will a performance penality. - You can read more about this in [llvm_mode/README.cmplog.md](llvm_mode/README.cmplog.md) + Note that you can compile also just a cmplog binary and use that for both + however there will be a performance penality. + You can read more about this in [instrumentation/README.cmplog.md](instrumentation/README.cmplog.md) -If you use afl-clang-fast, afl-clang-lto or afl-gcc-fast you have the option to -selectively only instrument parts of the target that you are interested in: +If you use LTO, LLVM or GCC_PLUGIN mode (afl-clang-fast/afl-clang-lto/afl-gcc-fast) + you have the option to selectively only instrument parts of the target that you +are interested in: * To instrument only those parts of the target that you are interested in create a file with all the filenames of the source code that should be @@ -299,29 +333,29 @@ selectively only instrument parts of the target that you are interested in: `export AFL_LLVM_DENYLIST=denylist.txt` - depending on if you want per default to instrument unless noted (DENYLIST) or not perform instrumentation unless requested (ALLOWLIST). - **NOTE:** In optimization functions might be inlined and then not match! - see [llvm_mode/README.instrument_list.md](llvm_mode/README.instrument_list.md) + **NOTE:** During optimization functions might be inlined and then would not match! 
+ See [instrumentation/README.instrument_list.md](instrumentation/README.instrument_list.md) For afl-clang-fast > 6.0 or if PCGUARD instrumentation is used then use the llvm sancov allow-list feature: [http://clang.llvm.org/docs/SanitizerCoverage.html](http://clang.llvm.org/docs/SanitizerCoverage.html) The llvm sancov format works with the allowlist/denylist feature of afl++ - however afl++ is more flexible in the format. + however afl++'s format is more flexible. There are many more options and modes available however these are most of the time less effective. See: - * [llvm_mode/README.ctx.md](llvm_mode/README.ctx.md) - * [llvm_mode/README.ngram.md](llvm_mode/README.ngram.md) - * [llvm_mode/README.instrim.md](llvm_mode/README.instrim.md) -afl++ employs never zero counting in its bitmap. You can read more about this + * [instrumentation/README.ctx.md](instrumentation/README.ctx.md) + * [instrumentation/README.ngram.md](instrumentation/README.ngram.md) + * [instrumentation/README.instrim.md](instrumentation/README.instrim.md) +afl++ performs "never zero" counting in its bitmap. You can read more about this here: - * [llvm_mode/README.neverzero.md](llvm_mode/README.neverzero.md) + * [instrumentation/README.neverzero.md](instrumentation/README.neverzero.md) #### c) Modify the target If the target has features that make fuzzing more difficult, e.g. checksums, HMAC, etc. then modify the source code so that this is removed. -This can even be done for productional source code be eliminating +This can even be done for operational source code by eliminating these checks within this specific defines: ``` @@ -332,13 +366,15 @@ these checks within this specific defines: #endif ``` +All afl++ compilers will set this preprocessor definition automatically. + #### d) Instrument the target In this step the target source code is compiled so that it can be fuzzed. Basically you have to tell the target build system that the selected afl++ compiler is used. Also - if possible - you should always configure the -build system that the target is compiled statically and not dynamically. +build system such that the target is compiled statically and not dynamically. How to do this is described below. Then build the target. (Usually with `make`) ##### configure For `configure` build systems this is usually done by: `CC=afl-clang-fast CXX=afl-clang-fast++ ./configure --disable-shared` Note that if you are using the (better) afl-clang-lto compiler you also have to -set AR to llvm-ar[-VERSION] and RANLIB to llvm-ranlib[-VERSION] - as it is -described in [llvm/README.lto.md](llvm/README.lto.md) +set AR to llvm-ar[-VERSION] and RANLIB to llvm-ranlib[-VERSION] - as is +described in [instrumentation/README.lto.md](instrumentation/README.lto.md). ##### cmake -For `configure` build systems this is usually done by: -`mkdir build; cd build; CC=afl-clang-fast CXX=afl-clang-fast++ cmake ..` - -Some cmake scripts require something like `-DCMAKE_CC=... -DCMAKE_CXX=...` -or `-DCMAKE_C_COMPILER=... DCMAKE_CPP_COMPILER=...` instead. +For `cmake` build systems this is usually done by: +`mkdir build; cd build; cmake -DCMAKE_C_COMPILER=afl-cc -DCMAKE_CXX_COMPILER=afl-c++ ..` Note that if you are using the (better) afl-clang-lto compiler you also have to -set AR to llvm-ar[-VERSION] and RANLIB to llvm-ranlib[-VERSION] - as it is -described in [llvm/README.lto.md](llvm/README.lto.md) +set AR to llvm-ar[-VERSION] and RANLIB to llvm-ranlib[-VERSION] - as is +described in [instrumentation/README.lto.md](instrumentation/README.lto.md). 
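As a minimal sketch of how the two notes above combine for an out-of-tree cmake build with the LTO compiler (the `llvm-ar-11`/`llvm-ranlib-11` names are assumptions - substitute whatever your llvm installation provides):

```shell
# hypothetical cmake-based target, instrumented with afl-clang-lto
mkdir build && cd build
cmake -DCMAKE_C_COMPILER=afl-clang-lto \
      -DCMAKE_CXX_COMPILER=afl-clang-lto++ \
      -DCMAKE_AR="$(command -v llvm-ar-11)" \
      -DCMAKE_RANLIB="$(command -v llvm-ranlib-11)" \
      ..
make
```

Passing the archiver and ranlib as cmake cache variables is one way to satisfy the AR/RANLIB requirement for LTO mode; for plain LLVM or GCC_PLUGIN mode the two extra -D options can simply be dropped.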
+ +##### meson + +For meson you have to set the afl++ compiler with the very first command! +`CC=afl-cc CXX=afl-c++ meson` ##### other build systems or if configure/cmake didn't work @@ -370,7 +408,7 @@ Sometimes cmake and configure do not pick up the afl++ compiler, or the ranlib/ar that is needed - because this was just not foreseen by the developer of the target. Or they have non-standard options. Figure out if there is a non-standard way to set this, otherwise set up the build normally and edit the -generated build environment afterwards manually to point to the right compiler +generated build environment afterwards manually to point it to the right compiler (and/or ranlib and ar). #### d) Better instrumentation @@ -383,12 +421,12 @@ This requires the usage of afl-clang-lto or afl-clang-fast. This is the so-called `persistent mode`, which is much, much faster but requires that you code a source file that is specifically calling the target functions that you want to fuzz, plus a few specific afl++ functions around -it. See [llvm_mode/README.persistent_mode.md](llvm_mode/README.persistent_mode.md) for details. +it. See [instrumentation/README.persistent_mode.md](instrumentation/README.persistent_mode.md) for details. Basically if you do not fuzz a target in persistent mode then you are just doing it for a hobby and not professionally :-) -### 2. Preparing the fuzzing +### 2. Preparing the fuzzing campaign As you fuzz the target with mutated input, having as diverse inputs for the target as possible improves the efficiency a lot. @@ -401,7 +439,7 @@ reported bugs, test suites, random downloads from the internet, unit test case data - from all kind of PNG software. If the input format is not known, you can also modify a target program to write -away normal data it receives and processes to a file and use these. +normal data it receives and processes to a file and use these. #### b) Making the input corpus unique @@ -415,7 +453,7 @@ the run afl-cmin like this: `afl-cmin -i INPUTS -o INPUTS_UNIQUE -- bin/target -d @@` Note that the INPUTFILE argument that the target program would read from has to be set as `@@`. -If the target reads from stdin instead, just omit the `@@` as this is the +If the target reads from stdin instead, just omit the `@@` as this is the default. #### c) Minimizing all corpus files @@ -432,7 +470,7 @@ for i in *; do done ``` -This can also be parallelized, e.g. with `parallel` +This step can also be parallelized, e.g. with `parallel` #### Done! @@ -456,7 +494,7 @@ before the start of afl-fuzz as this improves performance by a x2 speed increase #### a) Running afl-fuzz -Before to do even a test run of afl-fuzz execute `sudo afl-system-config` (on +Before you do even a test run of afl-fuzz execute `sudo afl-system-config` (on the host if you execute afl-fuzz in a docker container). This reconfigures the system for optimal speed - which afl-fuzz checks and bails otherwise. Set `export AFL_SKIP_CPUFREQ=1` for afl-fuzz to skip this check if you cannot @@ -588,7 +626,7 @@ then terminate it. The main node will pick it up and make it available to the other secondary nodes over time. Set `export AFL_NO_AFFINITY=1` if you have no free core. -Note that you in nearly all cases you can never reach full coverage. A lot of +Note that you in nearly all cases can never reach full coverage. A lot of functionality is usually behind options that were not activated or fuzz e.g. 
if you fuzz a library to convert image formats and your target is the png to tiff API then you will not touch any of the other library APIs and features. @@ -607,7 +645,7 @@ switch or honggfuzz. #### f) Improve the speed! - * Use [persistent mode](llvm_mode/README.persistent_mode.md) (x2-x20 speed increase) + * Use [persistent mode](instrumentation/README.persistent_mode.md) (x2-x20 speed increase) * If you do not use shmem persistent mode, use `AFL_TMPDIR` to point the input file on a tempfs location, see [docs/env_variables.md](docs/env_variables.md) * Linux: Use the [afl++ snapshot module](https://github.com/AFLplusplus/AFL-Snapshot-LKM) (x2 speed increase) * Linux: Improve kernel performance: modify `/etc/default/grub`, set `GRUB_CMDLINE_LINUX_DEFAULT="ibpb=off ibrs=off kpti=off l1tf=off mds=off mitigations=off no_stf_barrier noibpb noibrs nopcid nopti nospec_store_bypass_disable nospectre_v1 nospectre_v2 pcid=off pti=off spec_store_bypass_disable=off spectre_v2=off stf_barrier=off"`; then `update-grub` and `reboot` (warning: makes the system more insecure) @@ -1035,7 +1073,6 @@ without feedback, bug reports, or patches from: Andrea Biondo Vincent Le Garrec Khaled Yakdan Kuang-che Wu Josephine Calliotte Konrad Welc - Thomas Rooijakkers ``` Thank you! diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 1a05f4f4..4281c554 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -162,8 +162,7 @@ struct queue_entry { u8 *trace_mini; /* Trace bytes, if kept */ u32 tc_ref; /* Trace bytes ref count */ - struct queue_entry *next, /* Next element, if any */ - *next_100; /* 100 elements ahead */ + struct queue_entry *next; /* Next element, if any */ }; @@ -575,8 +574,7 @@ typedef struct afl_state { struct queue_entry *queue, /* Fuzzing queue (linked list) */ *queue_cur, /* Current offset within the queue */ - *queue_top, /* Top of the list */ - *q_prev100; /* Previous 100 marker */ + *queue_top; /* Top of the list */ // growing buf struct queue_entry **queue_buf; diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c index d6c368d1..58ce5b6f 100644 --- a/src/afl-fuzz-extras.c +++ b/src/afl-fuzz-extras.c @@ -101,7 +101,8 @@ void load_extras_file(afl_state_t *afl, u8 *fname, u32 *min_len, u32 *max_len, if (rptr < lptr || *rptr != '"') { - FATAL("Malformed name=\"value\" pair in line %u.", cur_line); + WARNF("Malformed name=\"value\" pair in line %u.", cur_line); + continue; } @@ -141,13 +142,19 @@ void load_extras_file(afl_state_t *afl, u8 *fname, u32 *min_len, u32 *max_len, if (*lptr != '"') { - FATAL("Malformed name=\"keyword\" pair in line %u.", cur_line); + WARNF("Malformed name=\"keyword\" pair in line %u.", cur_line); + continue; } ++lptr; - if (!*lptr) { FATAL("Empty keyword in line %u.", cur_line); } + if (!*lptr) { + + WARNF("Empty keyword in line %u.", cur_line); + continue; + + } /* Okay, let's allocate memory and copy data between "...", handling \xNN escaping, \\, and \". */ @@ -169,7 +176,9 @@ void load_extras_file(afl_state_t *afl, u8 *fname, u32 *min_len, u32 *max_len, case 1 ... 31: case 128 ... 
255: - FATAL("Non-printable characters in line %u.", cur_line); + WARNF("Non-printable characters in line %u.", cur_line); + continue; + break; case '\\': @@ -185,7 +194,8 @@ void load_extras_file(afl_state_t *afl, u8 *fname, u32 *min_len, u32 *max_len, if (*lptr != 'x' || !isxdigit(lptr[1]) || !isxdigit(lptr[2])) { - FATAL("Invalid escaping (not \\xNN) in line %u.", cur_line); + WARNF("Invalid escaping (not \\xNN) in line %u.", cur_line); + continue; } @@ -209,10 +219,11 @@ void load_extras_file(afl_state_t *afl, u8 *fname, u32 *min_len, u32 *max_len, if (afl->extras[afl->extras_cnt].len > MAX_DICT_FILE) { - FATAL( + WARNF( "Keyword too big in line %u (%s, limit is %s)", cur_line, stringify_mem_size(val_bufs[0], sizeof(val_bufs[0]), klen), stringify_mem_size(val_bufs[1], sizeof(val_bufs[1]), MAX_DICT_FILE)); + continue; } @@ -232,14 +243,19 @@ static void extras_check_and_sort(afl_state_t *afl, u32 min_len, u32 max_len, u8 val_bufs[2][STRINGIFY_VAL_SIZE_MAX]; - if (!afl->extras_cnt) { FATAL("No usable files in '%s'", dir); } + if (!afl->extras_cnt) { + + WARNF("No usable data in '%s'", dir); + return; + + } qsort(afl->extras, afl->extras_cnt, sizeof(struct extra_data), compare_extras_len); - OKF("Loaded %u extra tokens, size range %s to %s.", afl->extras_cnt, - stringify_mem_size(val_bufs[0], sizeof(val_bufs[0]), min_len), - stringify_mem_size(val_bufs[1], sizeof(val_bufs[1]), max_len)); + ACTF("Loaded %u extra tokens, size range %s to %s.", afl->extras_cnt, + stringify_mem_size(val_bufs[0], sizeof(val_bufs[0]), min_len), + stringify_mem_size(val_bufs[1], sizeof(val_bufs[1]), max_len)); if (max_len > 32) { @@ -250,8 +266,8 @@ static void extras_check_and_sort(afl_state_t *afl, u32 min_len, u32 max_len, if (afl->extras_cnt > afl->max_det_extras) { - OKF("More than %d tokens - will use them probabilistically.", - afl->max_det_extras); + WARNF("More than %d tokens - will use them probabilistically.", + afl->max_det_extras); } @@ -320,9 +336,10 @@ void load_extras(afl_state_t *afl, u8 *dir) { if (st.st_size > MAX_DICT_FILE) { WARNF( - "Extra '%s' is very big (%s, limit is %s)", fn, + "Extra '%s' is too big (%s, limit is %s)", fn, stringify_mem_size(val_bufs[0], sizeof(val_bufs[0]), st.st_size), stringify_mem_size(val_bufs[1], sizeof(val_bufs[1]), MAX_DICT_FILE)); + continue; } @@ -370,16 +387,74 @@ static inline u8 memcmp_nocase(u8 *m1, u8 *m2, u32 len) { } -/* Adds a new extra / dict entry. Used for LTO autodict. */ +/* Removes duplicates from the loaded extras. This can happen if multiple files + are loaded */ + +void dedup_extras(afl_state_t *afl) { + + if (afl->extras_cnt < 2) return; + + u32 i, j, orig_cnt = afl->extras_cnt; + + for (i = 0; i < afl->extras_cnt - 1; i++) { + + for (j = i + 1; j < afl->extras_cnt; j++) { + + restart_dedup: + + // if the goto was used we could be at the end of the list + if (j >= afl->extras_cnt || afl->extras[i].len != afl->extras[j].len) + break; + + if (memcmp(afl->extras[i].data, afl->extras[j].data, + afl->extras[i].len) == 0) { + + ck_free(afl->extras[j].data); + if (j + 1 < afl->extras_cnt) // not at the end of the list? + memmove((char *)&afl->extras[j], (char *)&afl->extras[j + 1], + (afl->extras_cnt - j - 1) * sizeof(struct extra_data)); + afl->extras_cnt--; + goto restart_dedup; // restart if several duplicates are in a row + + } + + } + + } + + if (afl->extras_cnt != orig_cnt) + afl->extras = afl_realloc((void **)&afl->extras, + afl->extras_cnt * sizeof(struct extra_data)); + +} + +/* Adds a new extra / dict entry. 
*/ void add_extra(afl_state_t *afl, u8 *mem, u32 len) { - u8 val_bufs[2][STRINGIFY_VAL_SIZE_MAX]; + u8 val_bufs[2][STRINGIFY_VAL_SIZE_MAX]; + u32 i, found = 0; + + for (i = 0; i < afl->extras_cnt; i++) { + + if (afl->extras[i].len == len) { + + if (memcmp(afl->extras[i].data, mem, len) == 0) return; + found = 1; + + } else { + + if (found) break; + + } + + } if (len > MAX_DICT_FILE) { - WARNF("Extra '%.*s' is very big (%s, limit is %s)", (int)len, mem, + WARNF("Extra '%.*s' is too big (%s, limit is %s)", (int)len, mem, stringify_mem_size(val_bufs[0], sizeof(val_bufs[0]), len), stringify_mem_size(val_bufs[1], sizeof(val_bufs[1]), MAX_DICT_FILE)); + return; } else if (len > 32) { @@ -405,8 +480,8 @@ void add_extra(afl_state_t *afl, u8 *mem, u32 len) { if (afl->extras_cnt == afl->max_det_extras + 1) { - OKF("More than %d tokens - will use them probabilistically.", - afl->max_det_extras); + WARNF("More than %d tokens - will use them probabilistically.", + afl->max_det_extras); } @@ -609,7 +684,7 @@ void load_auto(afl_state_t *afl) { } else { - OKF("No auto-generated dictionary tokens to reuse."); + ACTF("No auto-generated dictionary tokens to reuse."); } diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 102f04b9..713849a1 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -611,17 +611,17 @@ void read_foreign_testcases(afl_state_t *afl, int first) { /* Read all testcases from the input directory, then queue them for testing. Called at startup. */ -void read_testcases(afl_state_t *afl) { +void read_testcases(afl_state_t *afl, u8 *directory) { struct dirent **nl; - s32 nl_cnt; + s32 nl_cnt, subdirs = 1; u32 i; - u8 * fn1; - + u8 * fn1, *dir = directory; u8 val_buf[2][STRINGIFY_VAL_SIZE_MAX]; /* Auto-detect non-in-place resumption attempts. */ +if (dir == NULL) { fn1 = alloc_printf("%s/queue", afl->in_dir); if (!access(fn1, F_OK)) { @@ -632,16 +632,18 @@ void read_testcases(afl_state_t *afl) { ck_free(fn1); } + dir = afl->in_dir; +} - ACTF("Scanning '%s'...", afl->in_dir); + ACTF("Scanning '%s'...", dir); /* We use scandir() + alphasort() rather than readdir() because otherwise, the ordering of test cases would vary somewhat randomly and would be difficult to control. */ - nl_cnt = scandir(afl->in_dir, &nl, NULL, alphasort); + nl_cnt = scandir(dir, &nl, NULL, alphasort); - if (nl_cnt < 0) { + if (nl_cnt < 0 && directory == NULL) { if (errno == ENOENT || errno == ENOTDIR) { @@ -656,7 +658,7 @@ void read_testcases(afl_state_t *afl) { } - PFATAL("Unable to open '%s'", afl->in_dir); + PFATAL("Unable to open '%s'", dir); } @@ -674,19 +676,29 @@ void read_testcases(afl_state_t *afl) { u8 dfn[PATH_MAX]; snprintf(dfn, PATH_MAX, "%s/.state/deterministic_done/%s", afl->in_dir, nl[i]->d_name); - u8 *fn2 = alloc_printf("%s/%s", afl->in_dir, nl[i]->d_name); + u8 *fn2 = alloc_printf("%s/%s", dir, nl[i]->d_name); u8 passed_det = 0; - free(nl[i]); /* not tracked */ - if (lstat(fn2, &st) || access(fn2, R_OK)) { PFATAL("Unable to access '%s'", fn2); } - /* This also takes care of . and .. */ + /* obviously we want to skip "descending" into . and .. 
directories, + however it is a good idea to skip also directories that start with + a dot */ + if (subdirs && S_ISDIR(st.st_mode) && nl[i]->d_name[0] != '.') { + + free(nl[i]); /* not tracked */ + read_testcases(afl, fn2); + ck_free(fn2); + continue; + + } + + free(nl[i]); if (!S_ISREG(st.st_mode) || !st.st_size || strstr(fn2, "/README.txt")) { @@ -718,7 +730,7 @@ void read_testcases(afl_state_t *afl) { free(nl); /* not tracked */ - if (!afl->queued_paths) { + if (!afl->queued_paths && directory == NULL) { SAYF("\n" cLRD "[-] " cRST "Looks like there are no valid test cases in the input directory! The " @@ -985,6 +997,76 @@ void perform_dry_run(afl_state_t *afl) { } + /* Now we remove all entries from the queue that have a duplicate trace map */ + + q = afl->queue; + struct queue_entry *p, *prev = NULL; + int duplicates = 0; + +restart_outer_cull_loop: + + while (q) { + + if (q->cal_failed || !q->exec_cksum) continue; + + restart_inner_cull_loop: + + p = q->next; + + while (p) { + + if (!p->cal_failed && p->exec_cksum == q->exec_cksum) { + + duplicates = 1; + --afl->pending_not_fuzzed; + + // We do not remove any of the memory allocated because for + // splicing the data might still be interesting. + // We only decouple them from the linked list. + // This will result in some leaks at exit, but who cares. + + // we keep the shorter file + if (p->len >= q->len) { + + q->next = p->next; + goto restart_inner_cull_loop; + + } else { + + if (prev) + prev->next = q = p; + else + afl->queue = q = p; + goto restart_outer_cull_loop; + + } + + } + + p = p->next; + + } + + prev = q; + q = q->next; + + } + + if (duplicates) { + + afl->max_depth = 0; + q = afl->queue; + while (q) { + + if (q->depth > afl->max_depth) afl->max_depth = q->depth; + q = q->next; + + } + + afl->q_prev100 = afl->queue = afl->queue_top = afl->queue; + + } + OKF("All test cases processed."); } diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index bf568c38..5737c1f5 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1707,20 +1707,8 @@ custom_mutator_stage: } while (tid == afl->current_entry && afl->queued_paths > 1); - target = afl->queue; - - while (tid >= 100) { - - target = target->next_100; - tid -= 100; - - } - - while (tid--) { - - target = target->next; - - } + afl->splicing_with = tid; + target = afl->queue_buf[tid]; /* Make sure that the target has a reasonable length. */ @@ -4518,20 +4506,7 @@ pacemaker_fuzzing: } while (tid == afl->current_entry); afl->splicing_with = tid; - target = afl->queue; - - while (tid >= 100) { - - target = target->next_100; - tid -= 100; - - } - - while (tid--) { - - target = target->next; - - } + target = afl->queue_buf[tid]; /* Make sure that the target has a reasonable length. 
*/ diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index c6d8225f..db91813b 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -239,13 +239,6 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { afl->cycles_wo_finds = 0; - if (!(afl->queued_paths % 100)) { - - afl->q_prev100->next_100 = q; - afl->q_prev100 = q; - - } - struct queue_entry **queue_buf = afl_realloc( AFL_BUF_PARAM(queue), afl->queued_paths * sizeof(struct queue_entry *)); if (unlikely(!queue_buf)) { PFATAL("alloc"); } diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index c12d5db5..bfaa22e8 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -119,8 +119,8 @@ static void usage(u8 *argv0, int more_help) { "etc.)\n" " -d - quick & dirty mode (skips deterministic steps)\n" " -n - fuzz without instrumentation (non-instrumented mode)\n" - " -x dict_file - optional fuzzer dictionary (see README.md, its really " - "good!)\n\n" + " -x dict_file - fuzzer dictionary (see README.md, specify up to 4 " + "times)\n\n" "Testing settings:\n" " -s seed - use a fixed seed for the RNG\n" @@ -243,11 +243,11 @@ static int stricmp(char const *a, char const *b) { int main(int argc, char **argv_orig, char **envp) { - s32 opt; + s32 opt, i; u64 prev_queued = 0; u32 sync_interval_cnt = 0, seek_to, show_help = 0, map_size = MAP_SIZE; - u8 * extras_dir = 0; - u8 mem_limit_given = 0, exit_1 = 0, debug = 0; + u8 * extras_dir[4]; + u8 mem_limit_given = 0, exit_1 = 0, debug = 0, extras_dir_cnt = 0; char **use_argv; struct timeval tv; @@ -450,8 +450,13 @@ int main(int argc, char **argv_orig, char **envp) { case 'x': /* dictionary */ - if (extras_dir) { FATAL("Multiple -x options not supported"); } - extras_dir = optarg; + if (extras_dir_cnt >= 4) { + + FATAL("More than four -x options are not supported"); + + } + + extras_dir[extras_dir_cnt++] = optarg; break; case 't': { /* timeout */ @@ -828,10 +833,6 @@ int main(int argc, char **argv_orig, char **envp) { "Eißfeldt, Andrea Fioraldi and Dominik Maier"); OKF("afl++ is open source, get it at " "https://github.com/AFLplusplus/AFLplusplus"); - OKF("Power schedules from github.com/mboehme/aflfast"); - OKF("Python Mutator and llvm_mode instrument file list from " - "github.com/choller/afl"); - OKF("MOpt Mutator from github.com/puppet-meteor/MOpt-AFL"); if (afl->sync_id && afl->is_main_node && afl->afl_env.afl_custom_mutator_only) { @@ -1139,7 +1140,15 @@ int main(int argc, char **argv_orig, char **envp) { pivot_inputs(afl); - if (extras_dir) { load_extras(afl, extras_dir); } + if (extras_dir_cnt) { + + for (i = 0; i < extras_dir_cnt; i++) + load_extras(afl, extras_dir[i]); + + dedup_extras(afl); + OKF("Loaded a total of %u extras.", afl->extras_cnt); + + } if (!afl->timeout_given) { find_timeout(afl); } -- cgit 1.4.1 From 888d63748a3c6aafd974cb9d96cdb8d3916e82bb Mon Sep 17 00:00:00 2001 From: Vitalii Akolzin Date: Thu, 24 Sep 2020 18:25:32 +0300 Subject: Fix potential endless loop in custom_mutator_stage Co-authored-by: Ivan Gulakov --- include/afl-fuzz.h | 3 +++ src/afl-fuzz-one.c | 64 ++++++++++++++++++++++++++++++++-------------------- src/afl-fuzz-queue.c | 2 ++ src/afl-fuzz-state.c | 2 ++ 4 files changed, 46 insertions(+), 25 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 9404c417..0efd48ec 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -657,6 +657,9 @@ typedef struct afl_state { * they do not call another function */ u8 *map_tmp_buf; + /* queue entries ready for splicing count (len > 1) */ + u32 
ready_for_splicing_count; + } afl_state_t; struct custom_mutator { diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 5737c1f5..edae2a88 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1696,50 +1696,58 @@ custom_mutator_stage: struct queue_entry *target; u32 tid; - u8 * new_buf; + u8 * new_buf = NULL; + u32 target_len = 0; - retry_external_pick: - /* Pick a random other queue entry for passing to external API */ + if (afl->ready_for_splicing_count > 1 || + (afl->ready_for_splicing_count == 1 && + afl->queue_cur->len == 1)) { - do { + retry_external_pick: + /* Pick a random other queue entry for passing to external API */ - tid = rand_below(afl, afl->queued_paths); + do { - } while (tid == afl->current_entry && afl->queued_paths > 1); + tid = rand_below(afl, afl->queued_paths); - afl->splicing_with = tid; - target = afl->queue_buf[tid]; + } while (tid == afl->current_entry && afl->queued_paths > 1); - /* Make sure that the target has a reasonable length. */ + afl->splicing_with = tid; + target = afl->queue_buf[tid]; - while (target && (target->len < 2 || target == afl->queue_cur) && - afl->queued_paths > 3) { + /* Make sure that the target has a reasonable length. */ - target = target->next; - ++afl->splicing_with; + while (target && (target->len < 2 || target == afl->queue_cur) && + afl->queued_paths > 2) { - } + target = target->next; + ++afl->splicing_with; - if (!target) { goto retry_external_pick; } + } - /* Read the additional testcase into a new buffer. */ - fd = open(target->fname, O_RDONLY); - if (unlikely(fd < 0)) { + if (!target) { goto retry_external_pick; } - PFATAL("Unable to open '%s'", target->fname); + /* Read the additional testcase into a new buffer. */ + fd = open(target->fname, O_RDONLY); + if (unlikely(fd < 0)) { - } + PFATAL("Unable to open '%s'", target->fname); - new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), target->len); - if (unlikely(!new_buf)) { PFATAL("alloc"); } - ck_read(fd, new_buf, target->len, target->fname); - close(fd); + } + + new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), target->len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } + ck_read(fd, new_buf, target->len, target->fname); + close(fd); + target_len = target->len; + + } u8 *mutated_buf = NULL; size_t mutated_size = el->afl_custom_fuzz(el->data, out_buf, len, &mutated_buf, new_buf, - target->len, max_seed_size); + target_len, max_seed_size); if (unlikely(!mutated_buf)) { @@ -2738,6 +2746,8 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { if (!afl->non_instrumented_mode && !afl->queue_cur->trim_done) { + u32 old_len = afl->queue_cur->len; + u8 res = trim_case(afl, afl->queue_cur, in_buf); if (res == FSRV_RUN_ERROR) { @@ -2759,6 +2769,10 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { len = afl->queue_cur->len; + /* maybe current entry stop being ready for splicing */ + if (old_len > 1 && afl->queue_cur->len == 1) + afl->ready_for_splicing_count--; + } memcpy(out_buf, in_buf, len); diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index ddd08f1c..14aa34fc 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -234,6 +234,8 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { } + if (q->len > 1) afl->ready_for_splicing_count++; + ++afl->queued_paths; ++afl->pending_not_fuzzed; diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index ae45d571..9f68bb51 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -155,6 +155,8 @@ void 
afl_state_init(afl_state_t *afl, uint32_t map_size) { afl->stats_last_execs = 0; afl->stats_avg_exec = -1; + afl->ready_for_splicing_count = 0; + init_mopt_globals(afl); list_append(&afl_states, afl); -- cgit 1.4.1 From a75e7594f78454a11e3d93b3cb4878a21e4e943f Mon Sep 17 00:00:00 2001 From: Vitalii Akolzin Date: Thu, 24 Sep 2020 18:50:59 +0300 Subject: Add comments Co-authored-by: Ivan Gulakov --- src/afl-fuzz-one.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index edae2a88..8c1aa179 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1699,6 +1699,9 @@ custom_mutator_stage: u8 * new_buf = NULL; u32 target_len = 0; + /* check if splicing is possible (if the only entry has len > 1 + * check it is not current entry) + */ if (afl->ready_for_splicing_count > 1 || (afl->ready_for_splicing_count == 1 && afl->queue_cur->len == 1)) { @@ -2769,7 +2772,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { len = afl->queue_cur->len; - /* maybe current entry stop being ready for splicing */ + /* maybe current entry is not ready for splicing anymore */ if (old_len > 1 && afl->queue_cur->len == 1) afl->ready_for_splicing_count--; -- cgit 1.4.1 From 6b3b1775b6b274bc62f9c79f686fc79fa110d0a8 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Fri, 25 Sep 2020 12:03:24 +0200 Subject: improving on splice candidate check patch --- include/afl-fuzz.h | 4 ++-- include/config.h | 2 +- src/afl-fuzz-one.c | 33 +++++++++------------------------ src/afl-fuzz-queue.c | 2 +- src/afl-fuzz-state.c | 48 +++++++++++++----------------------------------- 5 files changed, 26 insertions(+), 63 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 0efd48ec..441ecc61 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -483,7 +483,7 @@ typedef struct afl_state { disable_trim, /* Never trim in fuzz_one */ shmem_testcase_mode, /* If sharedmem testcases are used */ expand_havoc, /* perform expensive havoc after no find */ - cycle_schedules; /* cycle power schedules ? */ + cycle_schedules; /* cycle power schedules? 
*/ u8 *virgin_bits, /* Regions yet untouched by fuzzing */ *virgin_tmout, /* Bits we haven't seen in tmouts */ @@ -657,7 +657,7 @@ typedef struct afl_state { * they do not call another function */ u8 *map_tmp_buf; - /* queue entries ready for splicing count (len > 1) */ + /* queue entries ready for splicing count (len > 4) */ u32 ready_for_splicing_count; } afl_state_t; diff --git a/include/config.h b/include/config.h index a01491e7..7c8e0c7d 100644 --- a/include/config.h +++ b/include/config.h @@ -136,7 +136,7 @@ two cycles where smaller blocks are favored: */ #define HAVOC_BLK_SMALL 32 -#define HAVOC_BLK_MEDIUM 128 +#define HAVOC_BLK_MEDIUM 128 #define HAVOC_BLK_LARGE 1500 /* Extra-large blocks, selected very rarely (<5% of the time): */ diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 8c1aa179..e96c4311 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1699,36 +1699,22 @@ custom_mutator_stage: u8 * new_buf = NULL; u32 target_len = 0; - /* check if splicing is possible (if the only entry has len > 1 - * check it is not current entry) - */ - if (afl->ready_for_splicing_count > 1 || - (afl->ready_for_splicing_count == 1 && - afl->queue_cur->len == 1)) { + /* check if splicing makes sense yet (enough entries) */ + if (likely(afl->ready_for_splicing_count > 1)) { - retry_external_pick: - /* Pick a random other queue entry for passing to external API */ + /* Pick a random other queue entry for passing to external API + that has the necessary length */ do { tid = rand_below(afl, afl->queued_paths); - } while (tid == afl->current_entry && afl->queued_paths > 1); - - afl->splicing_with = tid; - target = afl->queue_buf[tid]; - - /* Make sure that the target has a reasonable length. */ - - while (target && (target->len < 2 || target == afl->queue_cur) && - afl->queued_paths > 2) { - - target = target->next; - ++afl->splicing_with; + } while (unlikely(tid == afl->current_entry && - } + afl->queue_buf[tid]->len >= 4)); - if (!target) { goto retry_external_pick; } + target = afl->queue_buf[tid]; + afl->splicing_with = tid; /* Read the additional testcase into a new buffer. */ fd = open(target->fname, O_RDONLY); @@ -2773,8 +2759,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { len = afl->queue_cur->len; /* maybe current entry is not ready for splicing anymore */ - if (old_len > 1 && afl->queue_cur->len == 1) - afl->ready_for_splicing_count--; + if (unlikely(len <= 4 && old_len > 4)) afl->ready_for_splicing_count--; } diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 14aa34fc..53c3e984 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -234,7 +234,7 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { } - if (q->len > 1) afl->ready_for_splicing_count++; + if (likely(q->len > 4)) afl->ready_for_splicing_count++; ++afl->queued_paths; ++afl->pending_not_fuzzed; diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index 9f68bb51..5e0995fe 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -95,6 +95,11 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) { afl->stage_name = "init"; /* Name of the current fuzz stage */ afl->splicing_with = -1; /* Splicing with which test case? 
*/ afl->cpu_to_bind = -1; + afl->cal_cycles = CAL_CYCLES; + afl->cal_cycles_long = CAL_CYCLES_LONG; + afl->hang_tmout = EXEC_TIMEOUT; + afl->stats_update_freq = 1; + afl->stats_avg_exec = -1; #ifdef HAVE_AFFINITY afl->cpu_aff = -1; /* Selected CPU core */ @@ -115,48 +120,13 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) { // afl_state_t is not available in forkserver.c afl->fsrv.afl_ptr = (void *)afl; afl->fsrv.add_extra_func = (void (*)(void *, u8 *, u32)) & add_extra; - - afl->cal_cycles = CAL_CYCLES; - afl->cal_cycles_long = CAL_CYCLES_LONG; - afl->fsrv.exec_tmout = EXEC_TIMEOUT; - afl->hang_tmout = EXEC_TIMEOUT; - afl->fsrv.mem_limit = MEM_LIMIT; - - afl->stats_update_freq = 1; - afl->fsrv.dev_urandom_fd = -1; afl->fsrv.dev_null_fd = -1; - afl->fsrv.child_pid = -1; afl->fsrv.out_dir_fd = -1; - afl->cmplog_prev_timed_out = 0; - - /* statis file */ - afl->last_bitmap_cvg = 0; - afl->last_stability = 0; - afl->last_eps = 0; - - /* plot file saves from last run */ - afl->plot_prev_qp = 0; - afl->plot_prev_pf = 0; - afl->plot_prev_pnf = 0; - afl->plot_prev_ce = 0; - afl->plot_prev_md = 0; - afl->plot_prev_qc = 0; - afl->plot_prev_uc = 0; - afl->plot_prev_uh = 0; - - afl->stats_last_stats_ms = 0; - afl->stats_last_plot_ms = 0; - afl->stats_last_ms = 0; - afl->stats_last_execs = 0; - afl->stats_avg_exec = -1; - - afl->ready_for_splicing_count = 0; - init_mopt_globals(afl); list_append(&afl_states, afl); @@ -177,6 +147,14 @@ void read_afl_environment(afl_state_t *afl, char **envp) { WARNF("Potentially mistyped AFL environment variable: %s", env); issue_detected = 1; + } else if (strncmp(env, "USE_", 4) == 0) { + + WARNF( + "Potentially mistyped AFL environment variable: %s, did you mean " + "AFL_%s?", + env, env); + issue_detected = 1; + } else if (strncmp(env, "AFL_", 4) == 0) { int i = 0, match = 0; -- cgit 1.4.1 From e69b25e34be8028921389bbb114135c3028d0a3d Mon Sep 17 00:00:00 2001 From: van Hauser Date: Mon, 28 Sep 2020 10:13:00 +0200 Subject: increase havoc_stack_pow2 on no finds --- include/afl-fuzz.h | 1 + src/afl-fuzz-one.c | 4 ++-- src/afl-fuzz-state.c | 1 + src/afl-fuzz.c | 5 +++++ 4 files changed, 9 insertions(+), 2 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 441ecc61..aa278820 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -443,6 +443,7 @@ typedef struct afl_state { u8 cal_cycles, /* Calibration cycles defaults */ cal_cycles_long, /* Calibration cycles defaults */ + havoc_stack_pow2, /* HAVOC_STACK_POW2 */ no_unlink, /* do not unlink cur_input */ debug, /* Debug mode */ custom_only, /* Custom mutator only mode */ diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index e96c4311..c04b492b 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1884,7 +1884,7 @@ havoc_stage: for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) { - u32 use_stacking = 1 << (1 + rand_below(afl, HAVOC_STACK_POW2)); + u32 use_stacking = 1 << (1 + rand_below(afl, afl->havoc_stack_pow2)); afl->stage_cur_val = use_stacking; @@ -3970,7 +3970,7 @@ pacemaker_fuzzing: for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) { - u32 use_stacking = 1 << (1 + rand_below(afl, HAVOC_STACK_POW2)); + u32 use_stacking = 1 << (1 + rand_below(afl, afl->havoc_stack_pow2)); afl->stage_cur_val = use_stacking; diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index 5e0995fe..a8e56e60 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -95,6 +95,7 @@ void 
afl_state_init(afl_state_t *afl, uint32_t map_size) { afl->stage_name = "init"; /* Name of the current fuzz stage */ afl->splicing_with = -1; /* Splicing with which test case? */ afl->cpu_to_bind = -1; + afl->havoc_stack_pow2 = HAVOC_STACK_POW2; afl->cal_cycles = CAL_CYCLES; afl->cal_cycles_long = CAL_CYCLES_LONG; afl->hang_tmout = EXEC_TIMEOUT; diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 002be0be..28507857 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -1368,9 +1368,14 @@ int main(int argc, char **argv_orig, char **envp) { break; case 2: // if (!have_p) afl->schedule = EXPLOIT; + afl->havoc_stack_pow2++; afl->expand_havoc = 3; break; case 3: + afl->havoc_stack_pow2++; + afl->expand_havoc = 4; + break; + case 4: // nothing else currently break; -- cgit 1.4.1 From a4b60ca5b61c9bca5fa7b67528baeb3a8ea9320e Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Tue, 6 Oct 2020 15:37:59 +0200 Subject: testcase cache added --- include/afl-fuzz.h | 15 ++++++ include/config.h | 8 +++ src/afl-fuzz-one.c | 148 ++++++++++++++++++--------------------------------- src/afl-fuzz-queue.c | 65 ++++++++++++++++++++++ src/afl-fuzz.c | 3 +- 5 files changed, 142 insertions(+), 97 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index fb661ce5..46da8c7d 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -162,6 +162,9 @@ struct queue_entry { u8 *trace_mini; /* Trace bytes, if kept */ u32 tc_ref; /* Trace bytes ref count */ + u8 *testcase_buf; /* The testcase buffer, if loaded. */ + u32 testcase_refs; /* count of users of testcase buf */ + struct queue_entry *next; /* Next element, if any */ }; @@ -664,6 +667,11 @@ typedef struct afl_state { /* queue entries ready for splicing count (len > 4) */ u32 ready_for_splicing_count; + /* How many queue entries currently have cached testcases */ + u32 q_testcase_cache_count; + /* Refs to each queue entry with cached testcase (for eviction, if cache_count is too large) */ + struct queue_entry *q_testcase_cache[TESTCASE_CACHE_SIZE]; + } afl_state_t; struct custom_mutator { @@ -1101,5 +1109,12 @@ static inline u64 next_p2(u64 val) { } +/* Returns the testcase buf from the file behind this queue entry. + Increases the refcount. */ +u8 *queue_testcase_take(afl_state_t *afl, struct queue_entry *q); + +/* Tell afl that this testcase may be evicted from the cache */ +void queue_testcase_release(afl_state_t *afl, struct queue_entry *q); + #endif diff --git a/include/config.h b/include/config.h index 7c8e0c7d..38a734ce 100644 --- a/include/config.h +++ b/include/config.h @@ -295,6 +295,14 @@ #define RESEED_RNG 100000 +/* The amount of entries in the testcase cache, held in memory. +Decrease if RAM usage is high. */ +#define TESTCASE_CACHE_SIZE 2048 + +#if TESTCASE_CACHE_SIZE < 4 + #error "Dangerously low cache size: Set TESTCASE_CACHE_SIZE to 4 or more in config.h! +#endif + /* Maximum line length passed from GCC to 'as' and used for parsing configuration files: */ diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index c04b492b..20558618 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -370,7 +370,7 @@ static void locate_diffs(u8 *ptr1, u8 *ptr2, u32 len, s32 *first, s32 *last) { u8 fuzz_one_original(afl_state_t *afl) { - s32 len, fd, temp_len; + s32 len, temp_len; u32 j; u32 i; u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; @@ -453,28 +453,9 @@ u8 fuzz_one_original(afl_state_t *afl) { } - /* Map the test case into memory. 
*/ - - fd = open(afl->queue_cur->fname, O_RDONLY); - - if (unlikely(fd < 0)) { - - PFATAL("Unable to open '%s'", afl->queue_cur->fname); - - } - + orig_in = in_buf = queue_testcase_take(afl, afl->queue_cur); len = afl->queue_cur->len; - orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - - if (unlikely(orig_in == MAP_FAILED)) { - - PFATAL("Unable to mmap '%s' with len %d", afl->queue_cur->fname, len); - - } - - close(fd); - /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every single byte anyway, so it wouldn't give us any performance or memory usage benefits. */ @@ -1694,7 +1675,7 @@ custom_mutator_stage: for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) { - struct queue_entry *target; + struct queue_entry *target = NULL; u32 tid; u8 * new_buf = NULL; u32 target_len = 0; @@ -1717,17 +1698,7 @@ custom_mutator_stage: afl->splicing_with = tid; /* Read the additional testcase into a new buffer. */ - fd = open(target->fname, O_RDONLY); - if (unlikely(fd < 0)) { - - PFATAL("Unable to open '%s'", target->fname); - - } - - new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), target->len); - if (unlikely(!new_buf)) { PFATAL("alloc"); } - ck_read(fd, new_buf, target->len, target->fname); - close(fd); + new_buf = queue_testcase_take(afl, target); target_len = target->len; } @@ -1738,6 +1709,11 @@ custom_mutator_stage: el->afl_custom_fuzz(el->data, out_buf, len, &mutated_buf, new_buf, target_len, max_seed_size); + if (new_buf) { + queue_testcase_release(afl, target); + new_buf = NULL; + } + if (unlikely(!mutated_buf)) { FATAL("Error in custom_fuzz. Size returned: %zd", mutated_size); @@ -2320,51 +2296,44 @@ havoc_stage: /* Overwrite bytes with a randomly selected chunk from another testcase or insert that chunk. */ - if (afl->queued_paths < 4) break; + if (afl->queued_paths < 4) { break; } /* Pick a random queue entry and seek to it. */ u32 tid; - do + do { tid = rand_below(afl, afl->queued_paths); - while (tid == afl->current_entry); + } while (tid == afl->current_entry); struct queue_entry *target = afl->queue_buf[tid]; /* Make sure that the target has a reasonable length. */ - while (target && (target->len < 2 || target == afl->queue_cur)) + while (target && (target->len < 2 || target == afl->queue_cur)) { target = target->next; + } - if (!target) break; - - /* Read the testcase into a new buffer. */ - - fd = open(target->fname, O_RDONLY); - - if (unlikely(fd < 0)) { + if (!target) { break; } - PFATAL("Unable to open '%s'", target->fname); - - } u32 new_len = target->len; - u8 *new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), new_len); - if (unlikely(!new_buf)) { PFATAL("alloc"); } - - ck_read(fd, new_buf, new_len, target->fname); - close(fd); + /* Get the testcase contents for splicing. */ + u8 *new_buf = queue_testcase_take(afl, target); u8 overwrite = 0; - if (temp_len >= 2 && rand_below(afl, 2)) + if (temp_len >= 2 && rand_below(afl, 2)) { overwrite = 1; + } else if (temp_len + HAVOC_BLK_XL >= MAX_FILE) { - if (temp_len >= 2) + if (temp_len >= 2) { overwrite = 1; - else + } else { + queue_testcase_release(afl, target); + new_buf = NULL; break; + } } @@ -2411,6 +2380,9 @@ havoc_stage: } + /* We don't need this splice testcase anymore */ + queue_testcase_release(afl, target); + new_buf = NULL; break; } @@ -2516,24 +2488,16 @@ retry_splicing: if (!target) { goto retry_splicing; } - /* Read the testcase into a new buffer. 
*/ - - fd = open(target->fname, O_RDONLY); - - if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); } - + /* Get the testcase buffer */ + u8 *splice_buf = queue_testcase_take(afl, target); new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len); if (unlikely(!new_buf)) { PFATAL("alloc"); } - ck_read(fd, new_buf, target->len, target->fname); - - close(fd); - /* Find a suitable splicing location, somewhere between the first and the last differing byte. Bail out if the difference is just a single byte or so. */ - locate_diffs(in_buf, new_buf, MIN(len, (s64)target->len), &f_diff, &l_diff); + locate_diffs(in_buf, splice_buf, MIN(len, (s64)target->len), &f_diff, &l_diff); if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { goto retry_splicing; } @@ -2545,6 +2509,7 @@ retry_splicing: len = target->len; memcpy(new_buf, in_buf, split_at); + memcpy(new_buf + split_at, splice_buf + split_at, target->len - split_at); afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); in_buf = new_buf; @@ -2552,6 +2517,9 @@ retry_splicing: if (unlikely(!out_buf)) { PFATAL("alloc"); } memcpy(out_buf, in_buf, len); + queue_testcase_release(afl, target); + splice_buf = NULL; + goto custom_mutator_stage; /* ???: While integrating Python module, the author decided to jump to python stage, but the reason behind this is not clear.*/ @@ -2582,7 +2550,8 @@ abandon_entry: ++afl->queue_cur->fuzz_level; - munmap(orig_in, afl->queue_cur->len); + queue_testcase_release(afl, afl->queue_cur); + orig_in = NULL; return ret_val; @@ -2604,7 +2573,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { } - s32 len, fd, temp_len; + s32 len, temp_len; u32 i; u32 j; u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; @@ -2669,23 +2638,9 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { } /* Map the test case into memory. */ - - fd = open(afl->queue_cur->fname, O_RDONLY); - - if (fd < 0) { PFATAL("Unable to open '%s'", afl->queue_cur->fname); } - + orig_in = in_buf = queue_testcase_take(afl, afl->queue_cur); len = afl->queue_cur->len; - orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - - if (orig_in == MAP_FAILED) { - - PFATAL("Unable to mmap '%s'", afl->queue_cur->fname); - - } - - close(fd); - /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every single byte anyway, so it wouldn't give us any performance or memory usage benefits. */ @@ -4522,31 +4477,24 @@ pacemaker_fuzzing: if (!target) { goto retry_splicing_puppet; } /* Read the testcase into a new buffer. */ - - fd = open(target->fname, O_RDONLY); - - if (fd < 0) { PFATAL("Unable to open '%s'", target->fname); } - - new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len); - if (unlikely(!new_buf)) { PFATAL("alloc"); } - - ck_read(fd, new_buf, target->len, target->fname); - - close(fd); + u8 *splicing_buf = queue_testcase_take(afl, target); /* Find a suitable splicin g location, somewhere between the first and the last differing byte. Bail out if the difference is just a single byte or so. */ - locate_diffs(in_buf, new_buf, MIN(len, (s32)target->len), &f_diff, + locate_diffs(in_buf, splicing_buf, MIN(len, (s32)target->len), &f_diff, &l_diff); if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { + queue_testcase_release(afl, target); goto retry_splicing_puppet; } + new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len); + /* Split somewhere between the first and last differing byte. 
*/ split_at = f_diff + rand_below(afl, l_diff - f_diff); @@ -4555,12 +4503,16 @@ pacemaker_fuzzing: len = target->len; memcpy(new_buf, in_buf, split_at); + memcpy(new_buf + split_at, splicing_buf + split_at, target->len - split_at); afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); in_buf = new_buf; out_buf = afl_realloc(AFL_BUF_PARAM(out), len); if (unlikely(!out_buf)) { PFATAL("alloc"); } memcpy(out_buf, in_buf, len); + queue_testcase_release(afl, target); + splicing_buf = NULL; + goto havoc_stage_puppet; } /* if splice_cycle */ @@ -4594,7 +4546,8 @@ pacemaker_fuzzing: // if (afl->queue_cur->favored) --afl->pending_favored; // } - munmap(orig_in, afl->queue_cur->len); + queue_testcase_release(afl, afl->queue_cur); + orig_in = NULL; if (afl->key_puppet == 1) { @@ -4730,6 +4683,9 @@ pacemaker_fuzzing: } /* block */ + queue_testcase_release(afl, afl->queue_cur); + orig_in = NULL; + return ret_val; } diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 0d7d0314..e2387aaa 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -220,6 +220,7 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { q->depth = afl->cur_depth + 1; q->passed_det = passed_det; q->trace_mini = NULL; + q->testcase_buf = NULL; if (q->depth > afl->max_depth) { afl->max_depth = q->depth; } @@ -767,3 +768,67 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) { } +/* Tell afl that this testcase may be evicted from the cache */ +inline void queue_testcase_release(afl_state_t *afl, struct queue_entry *q) { + (void) afl; + q->testcase_refs--; + if (unlikely(q->testcase_refs < 0)) { FATAL("Testcase refcount smaller than 0"); } +} + +/* Returns the testcase buf from the file behind this queue entry. + Increases the refcount. */ +u8 *queue_testcase_take(afl_state_t *afl, struct queue_entry *q) { + if (!q->testcase_buf) { + u32 tid = 0; + /* Buf not cached, let's do that now */ + + if (likely(afl->q_testcase_cache_count == TESTCASE_CACHE_SIZE)) { + /* Cache full. We neet to evict one to map one. + Get a random one which is not in use */ + do { + + tid = rand_below(afl, afl->q_testcase_cache_count); + + } while (afl->q_testcase_cache[tid]->testcase_refs > 0); + + struct queue_entry *old_cached = afl->q_testcase_cache[tid]; + /* free the current buf from cache */ + munmap(old_cached->testcase_buf, old_cached->len); + old_cached->testcase_buf = NULL; + + } else { + tid = afl->q_testcase_cache_count; + afl->q_testcase_cache_count++; + } + + /* Map the test case into memory. 
*/ + + int fd = open(q->fname, O_RDONLY); + + if (unlikely(fd < 0)) { + + PFATAL("Unable to open '%s'", q->fname); + + } + + u32 len = q->len; + + q->testcase_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + + if (unlikely(q->testcase_buf == MAP_FAILED)) { + + PFATAL("Unable to mmap '%s' with len %d", q->fname, len); + + } + + close(fd); + + /* Register us as cached */ + afl->q_testcase_cache[tid] = q; + + } + q->testcase_refs++; + if (!q->testcase_buf) { FATAL("Testcase buf is NULL, this should never happen"); } + return q->testcase_buf; + +} diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 2f8aa3fd..dd9aaa8f 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -1152,8 +1152,9 @@ int main(int argc, char **argv_orig, char **envp) { if (extras_dir_cnt) { - for (i = 0; i < extras_dir_cnt; i++) + for (i = 0; i < extras_dir_cnt; i++) { load_extras(afl, extras_dir[i]); + } dedup_extras(afl); OKF("Loaded a total of %u extras.", afl->extras_cnt); -- cgit 1.4.1 From 74dc227c4412d0121c9b972e5d89db89f54c6b3a Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Tue, 6 Oct 2020 15:38:36 +0200 Subject: code format --- include/afl-fuzz.h | 5 +++-- include/config.h | 3 ++- src/afl-fuzz-one.c | 22 +++++++++++++++++----- src/afl-fuzz-queue.c | 34 ++++++++++++++++++++++++---------- src/afl-fuzz.c | 2 ++ 5 files changed, 48 insertions(+), 18 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 46da8c7d..5ab787e0 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -163,7 +163,7 @@ struct queue_entry { u32 tc_ref; /* Trace bytes ref count */ u8 *testcase_buf; /* The testcase buffer, if loaded. */ - u32 testcase_refs; /* count of users of testcase buf */ + u32 testcase_refs; /* count of users of testcase buf */ struct queue_entry *next; /* Next element, if any */ @@ -669,7 +669,8 @@ typedef struct afl_state { /* How many queue entries currently have cached testcases */ u32 q_testcase_cache_count; - /* Refs to each queue entry with cached testcase (for eviction, if cache_count is too large) */ + /* Refs to each queue entry with cached testcase (for eviction, if cache_count + * is too large) */ struct queue_entry *q_testcase_cache[TESTCASE_CACHE_SIZE]; } afl_state_t; diff --git a/include/config.h b/include/config.h index 38a734ce..ec378036 100644 --- a/include/config.h +++ b/include/config.h @@ -300,7 +300,8 @@ Decrease if RAM usage is high. */ #define TESTCASE_CACHE_SIZE 2048 #if TESTCASE_CACHE_SIZE < 4 - #error "Dangerously low cache size: Set TESTCASE_CACHE_SIZE to 4 or more in config.h! + #error \ + "Dangerously low cache size: Set TESTCASE_CACHE_SIZE to 4 or more in config.h! #endif /* Maximum line length passed from GCC to 'as' and used for parsing diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 20558618..a5f77f11 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1710,8 +1710,10 @@ custom_mutator_stage: target_len, max_seed_size); if (new_buf) { + queue_testcase_release(afl, target); new_buf = NULL; + } if (unlikely(!mutated_buf)) { @@ -2302,7 +2304,9 @@ havoc_stage: u32 tid; do { + tid = rand_below(afl, afl->queued_paths); + } while (tid == afl->current_entry); struct queue_entry *target = afl->queue_buf[tid]; @@ -2310,12 +2314,13 @@ havoc_stage: /* Make sure that the target has a reasonable length. */ while (target && (target->len < 2 || target == afl->queue_cur)) { + target = target->next; + } if (!target) { break; } - u32 new_len = target->len; /* Get the testcase contents for splicing. 
*/ @@ -2323,16 +2328,21 @@ havoc_stage: u8 overwrite = 0; if (temp_len >= 2 && rand_below(afl, 2)) { + overwrite = 1; - } - else if (temp_len + HAVOC_BLK_XL >= MAX_FILE) { + + } else if (temp_len + HAVOC_BLK_XL >= MAX_FILE) { if (temp_len >= 2) { + overwrite = 1; + } else { + queue_testcase_release(afl, target); new_buf = NULL; break; + } } @@ -2497,7 +2507,8 @@ retry_splicing: the last differing byte. Bail out if the difference is just a single byte or so. */ - locate_diffs(in_buf, splice_buf, MIN(len, (s64)target->len), &f_diff, &l_diff); + locate_diffs(in_buf, splice_buf, MIN(len, (s64)target->len), &f_diff, + &l_diff); if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { goto retry_splicing; } @@ -4503,7 +4514,8 @@ pacemaker_fuzzing: len = target->len; memcpy(new_buf, in_buf, split_at); - memcpy(new_buf + split_at, splicing_buf + split_at, target->len - split_at); + memcpy(new_buf + split_at, splicing_buf + split_at, + target->len - split_at); afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); in_buf = new_buf; out_buf = afl_realloc(AFL_BUF_PARAM(out), len); diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index e2387aaa..721f9ac7 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -770,24 +770,33 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) { /* Tell afl that this testcase may be evicted from the cache */ inline void queue_testcase_release(afl_state_t *afl, struct queue_entry *q) { - (void) afl; + + (void)afl; q->testcase_refs--; - if (unlikely(q->testcase_refs < 0)) { FATAL("Testcase refcount smaller than 0"); } + if (unlikely(q->testcase_refs < 0)) { + + FATAL("Testcase refcount smaller than 0"); + + } + } /* Returns the testcase buf from the file behind this queue entry. Increases the refcount. */ u8 *queue_testcase_take(afl_state_t *afl, struct queue_entry *q) { + if (!q->testcase_buf) { + u32 tid = 0; /* Buf not cached, let's do that now */ if (likely(afl->q_testcase_cache_count == TESTCASE_CACHE_SIZE)) { + /* Cache full. We neet to evict one to map one. Get a random one which is not in use */ do { - tid = rand_below(afl, afl->q_testcase_cache_count); + tid = rand_below(afl, afl->q_testcase_cache_count); } while (afl->q_testcase_cache[tid]->testcase_refs > 0); @@ -795,21 +804,19 @@ u8 *queue_testcase_take(afl_state_t *afl, struct queue_entry *q) { /* free the current buf from cache */ munmap(old_cached->testcase_buf, old_cached->len); old_cached->testcase_buf = NULL; - + } else { + tid = afl->q_testcase_cache_count; afl->q_testcase_cache_count++; + } /* Map the test case into memory. 
*/ int fd = open(q->fname, O_RDONLY); - if (unlikely(fd < 0)) { - - PFATAL("Unable to open '%s'", q->fname); - - } + if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", q->fname); } u32 len = q->len; @@ -827,8 +834,15 @@ u8 *queue_testcase_take(afl_state_t *afl, struct queue_entry *q) { afl->q_testcase_cache[tid] = q; } + q->testcase_refs++; - if (!q->testcase_buf) { FATAL("Testcase buf is NULL, this should never happen"); } + if (!q->testcase_buf) { + + FATAL("Testcase buf is NULL, this should never happen"); + + } + return q->testcase_buf; } + diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index dd9aaa8f..9b7c1445 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -1153,7 +1153,9 @@ int main(int argc, char **argv_orig, char **envp) { if (extras_dir_cnt) { for (i = 0; i < extras_dir_cnt; i++) { + load_extras(afl, extras_dir[i]); + } dedup_extras(afl); -- cgit 1.4.1 From 2d5fadc1e6a684b5e3e527a64b614f6b1ba8415f Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Tue, 6 Oct 2020 16:45:25 +0200 Subject: hunting ref underflow --- src/afl-fuzz-one.c | 3 --- src/afl-fuzz-queue.c | 11 +++++++++-- 2 files changed, 9 insertions(+), 5 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index a5f77f11..f25ab4ee 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -4695,9 +4695,6 @@ pacemaker_fuzzing: } /* block */ - queue_testcase_release(afl, afl->queue_cur); - orig_in = NULL; - return ret_val; } diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 58e026f5..0b491202 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -837,10 +837,17 @@ u8 *queue_testcase_take(afl_state_t *afl, struct queue_entry *q) { } q->testcase_refs++; - if (!q->testcase_buf) { + if (unlikely(!q->testcase_buf || !q->testcase_refs)) { + if (!q->testcase_buf) { + + FATAL("Testcase buf is NULL, this should never happen"); - FATAL("Testcase buf is NULL, this should never happen"); + } + if (!q->testcase_refs) { + FATAL("Testcase ref overflow. Missing a testcase release somwhere?"); + + } } return q->testcase_buf; -- cgit 1.4.1 From 6a397d6111a21ebbf736237609c1c69d47c40f03 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Sun, 11 Oct 2020 14:31:31 +0200 Subject: add new seed selection algo and make it the default --- docs/Changelog.md | 4 ++ include/afl-fuzz.h | 14 +++++- src/afl-fuzz-init.c | 8 ++++ src/afl-fuzz-one.c | 10 ++++- src/afl-fuzz-queue.c | 124 +++++++++++++++++++++++++++++++++++++++++++++++++++ src/afl-fuzz.c | 100 +++++++++++++++++++++++++++++------------ 6 files changed, 227 insertions(+), 33 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/docs/Changelog.md b/docs/Changelog.md index 9eb47e18..f15f1d93 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -17,6 +17,10 @@ sending a mail to . - memory limits are now disabled by default, set them with -m if required - deterministic fuzzing is now disabled by default and can be enabled with -D. It is still enabled by default for -M. + - a new seed selection was implemented that uses weighted randoms based on + a schedule performance score, which is much better that the previous + walk the whole queue approach. Select the old mode with -Z (auto enabled + with -M) - statsd support by Edznux, thanks a lot! 
- Marcel Boehme submitted a patch that improves all AFFast schedules :) - reading testcases from -i now descends into subdirectories diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index e9d148e9..45de197d 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -151,7 +151,8 @@ struct queue_entry { favored, /* Currently favored? */ fs_redundant, /* Marked as redundant in the fs? */ fully_colorized, /* Do not run redqueen stage again */ - is_ascii; /* Is the input just ascii text? */ + is_ascii, /* Is the input just ascii text? */ + disabled; /* Is disabled from fuzz selection */ u32 bitmap_size, /* Number of bits set in bitmap */ fuzz_level, /* Number of fuzzing iterations */ @@ -165,6 +166,8 @@ struct queue_entry { u8 *trace_mini; /* Trace bytes, if kept */ u32 tc_ref; /* Trace bytes ref count */ + double perf_score; /* performance score */ + struct queue_entry *next; /* Next element, if any */ }; @@ -488,12 +491,17 @@ typedef struct afl_state { disable_trim, /* Never trim in fuzz_one */ shmem_testcase_mode, /* If sharedmem testcases are used */ expand_havoc, /* perform expensive havoc after no find */ - cycle_schedules; /* cycle power schedules? */ + cycle_schedules, /* cycle power schedules? */ + old_seed_selection; /* use vanilla afl seed selection */ u8 *virgin_bits, /* Regions yet untouched by fuzzing */ *virgin_tmout, /* Bits we haven't seen in tmouts */ *virgin_crash; /* Bits we haven't seen in crashes */ + double *alias_probability; /* alias weighted probabilities */ + u32 * alias_table; /* alias weighted random lookup table */ + u32 active_paths; /* enabled entries in the queue */ + u8 *var_bytes; /* Bytes that appear to be variable */ #define N_FUZZ_SIZE (1 << 21) @@ -1009,6 +1017,8 @@ void find_timeout(afl_state_t *); double get_runnable_processes(void); void nuke_resume_dir(afl_state_t *); int check_main_node_exists(afl_state_t *); +u32 select_next_queue_entry(afl_state_t *afl); +void create_alias_table(afl_state_t *afl); void setup_dirs_fds(afl_state_t *); void setup_cmdline_file(afl_state_t *, char **); void setup_stdio_file(afl_state_t *); diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 65478a78..881bf10f 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -959,6 +959,8 @@ void perform_dry_run(afl_state_t *afl) { /* Remove from fuzzing queue but keep for splicing */ struct queue_entry *p = afl->queue; + p->disabled = 1; + p->perf_score = 0; while (p && p->next != q) p = p->next; @@ -968,6 +970,7 @@ void perform_dry_run(afl_state_t *afl) { afl->queue = q->next; --afl->pending_not_fuzzed; + --afl->active_paths; afl->max_depth = 0; p = afl->queue; @@ -1054,6 +1057,7 @@ restart_outer_cull_loop: duplicates = 1; --afl->pending_not_fuzzed; + afl->active_paths--; // We do not remove any of the memory allocated because for // splicing the data might still be interesting. 
@@ -1063,11 +1067,15 @@ restart_outer_cull_loop: // we keep the shorter file if (p->len >= q->len) { + p->disabled = 1; + p->perf_score = 0; q->next = p->next; goto restart_inner_cull_loop; } else { + q->disabled = 1; + q->perf_score = 0; if (prev) prev->next = q = p; else diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index c04b492b..6ef728e0 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -554,7 +554,10 @@ u8 fuzz_one_original(afl_state_t *afl) { * PERFORMANCE SCORE * *********************/ - orig_perf = perf_score = calculate_score(afl, afl->queue_cur); + if (likely(!afl->old_seed_selection)) + orig_perf = perf_score = afl->queue_cur->perf_score; + else + orig_perf = perf_score = calculate_score(afl, afl->queue_cur); if (unlikely(perf_score == 0)) { goto abandon_entry; } @@ -2769,7 +2772,10 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { * PERFORMANCE SCORE * *********************/ - orig_perf = perf_score = calculate_score(afl, afl->queue_cur); + if (likely(!afl->old_seed_selection)) + orig_perf = perf_score = afl->queue_cur->perf_score; + else + orig_perf = perf_score = calculate_score(afl, afl->queue_cur); if (unlikely(afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized)) { diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 0d7d0314..d608e890 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -27,6 +27,129 @@ #include #include +inline u32 select_next_queue_entry(afl_state_t *afl) { + + u32 r = rand_below(afl, 0xffffffff); + u32 s = r % afl->queued_paths; + // fprintf(stderr, "select: r=%u s=%u ... r < prob[s]=%f ? s=%u : + // alias[%u]=%u\n", r, s, afl->alias_probability[s], s, s, + // afl->alias_table[s]); + return (r < afl->alias_probability[s] ? s : afl->alias_table[s]); + +} + +void create_alias_table(afl_state_t *afl) { + + u32 n = afl->queued_paths, i = 0, a, g; + + afl->alias_table = + (u32 *)afl_realloc((void **)&afl->alias_table, n * sizeof(u32)); + afl->alias_probability = (double *)afl_realloc( + (void **)&afl->alias_probability, n * sizeof(double)); + double *P = (double *)afl_realloc(AFL_BUF_PARAM(out), n * sizeof(double)); + int * S = (u32 *)afl_realloc(AFL_BUF_PARAM(out_scratch), n * sizeof(u32)); + int * L = (u32 *)afl_realloc(AFL_BUF_PARAM(in_scratch), n * sizeof(u32)); + + if (!P || !S || !L) FATAL("could not aquire memory for alias table"); + memset((void *)afl->alias_table, 0, n * sizeof(u32)); + memset((void *)afl->alias_probability, 0, n * sizeof(double)); + + double sum = 0; + + for (i = 0; i < n; i++) { + + struct queue_entry *q = afl->queue_buf[i]; + + if (!q->disabled) q->perf_score = calculate_score(afl, q); + + sum += q->perf_score; + /* + if (afl->debug) + fprintf(stderr, "entry %u: score=%f %s (sum: %f)\n", i, q->perf_score, + q->disabled ? 
"disabled" : "", sum); + */ + + } + + for (i = 0; i < n; i++) { + + struct queue_entry *q = afl->queue_buf[i]; + + P[i] = q->perf_score * n / sum; + + } + + int nS = 0, nL = 0, s; + for (s = (s32)n - 1; s >= 0; --s) { + + if (P[s] < 1) + S[nS++] = s; + else + L[nL++] = s; + + } + + while (nS && nL) { + + a = S[--nS]; + g = L[--nL]; + afl->alias_probability[a] = P[a]; + afl->alias_table[a] = g; + P[g] = P[g] + P[a] - 1; + if (P[g] < 1) + S[nS++] = g; + else + L[nL++] = g; + + } + + while (nL) + afl->alias_probability[L[--nL]] = 1; + + while (nS) + afl->alias_probability[S[--nS]] = 1; + + /* + if (afl->debug) { + + fprintf(stderr, " %-3s %-3s %-9s\n", "entry", "alias", "prob"); + for (u32 i = 0; i < n; ++i) + fprintf(stderr, " %3i %3i %9.7f\n", i, afl->alias_table[i], + afl->alias_probability[i]); + + } + + int prob = 0; + fprintf(stderr, "Alias:"); + for (i = 0; i < n; i++) { + + fprintf(stderr, " [%u]=%u", i, afl->alias_table[i]); + if (afl->alias_table[i] >= n) + prob = i; + + } + + fprintf(stderr, "\n"); + + if (prob) { + + fprintf(stderr, "PROBLEM! alias[%u] = %u\n", prob, + afl->alias_table[prob]); + + for (i = 0; i < n; i++) { + + struct queue_entry *q = afl->queue_buf[i]; + + fprintf(stderr, "%u: score=%f\n", i, q->perf_score); + + } + + } + + */ + +} + /* Mark deterministic checks as done for a particular queue entry. We use the .state file to avoid repeating deterministic fuzzing when resuming aborted scans. */ @@ -237,6 +360,7 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { if (likely(q->len > 4)) afl->ready_for_splicing_count++; ++afl->queued_paths; + ++afl->active_paths; ++afl->pending_not_fuzzed; afl->cycles_wo_finds = 0; diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 24df2997..004adffe 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -115,6 +115,8 @@ static void usage(u8 *argv0, int more_help) { " if using QEMU, just use -c 0.\n\n" "Fuzzing behavior settings:\n" + " -Z - sequential queue selection instead of weighted " + "random\n" " -N - do not unlink the fuzzing input file (for devices " "etc.)\n" " -n - fuzz without instrumentation (non-instrumented mode)\n" @@ -131,8 +133,7 @@ static void usage(u8 *argv0, int more_help) { "Other stuff:\n" " -M/-S id - distributed mode (see docs/parallel_fuzzing.md)\n" - " use -D to force -S secondary to perform deterministic " - "fuzzing\n" + " -M auto-sets -D and -Z (use -d to disable -D)\n" " -F path - sync to a foreign fuzzer queue directory (requires " "-M, can\n" " be specified up to %u times)\n" @@ -250,7 +251,7 @@ int main(int argc, char **argv_orig, char **envp) { s32 opt, i; u64 prev_queued = 0; - u32 sync_interval_cnt = 0, seek_to, show_help = 0, map_size = MAP_SIZE; + u32 sync_interval_cnt = 0, seek_to = 0, show_help = 0, map_size = MAP_SIZE; u8 *extras_dir[4]; u8 mem_limit_given = 0, exit_1 = 0, debug = 0, extras_dir_cnt = 0 /*, have_p = 0*/; @@ -287,10 +288,14 @@ int main(int argc, char **argv_orig, char **envp) { while ((opt = getopt( argc, argv, - "+b:c:i:I:o:f:F:m:t:T:dDnCB:S:M:x:QNUWe:p:s:V:E:L:hRP:")) > 0) { + "+b:c:i:I:o:f:F:m:t:T:dDnCB:S:M:x:QNUWe:p:s:V:E:L:hRP:Z")) > 0) { switch (opt) { + case 'Z': + afl->old_seed_selection = 1; + break; + case 'I': afl->infoexec = optarg; break; @@ -355,14 +360,16 @@ int main(int argc, char **argv_orig, char **envp) { afl->schedule = RARE; - } else if (!stricmp(optarg, "explore") || !stricmp(optarg, "afl")) { - - afl->schedule = EXPLORE; + } else if (!stricmp(optarg, "explore") || !stricmp(optarg, "afl") || - } else if (!stricmp(optarg, "seek") || 
!stricmp(optarg, "default") || + !stricmp(optarg, "default") || !stricmp(optarg, "normal")) { + afl->schedule = EXPLORE; + + } else if (!stricmp(optarg, "seek")) { + afl->schedule = SEEK; } else { @@ -404,7 +411,8 @@ int main(int argc, char **argv_orig, char **envp) { if (afl->sync_id) { FATAL("Multiple -S or -M options not supported"); } afl->sync_id = ck_strdup(optarg); - afl->skip_deterministic = 0; + afl->skip_deterministic = 0; // force determinsitic fuzzing + afl->old_seed_selection = 1; // force old queue walking seed selection if ((c = strchr(afl->sync_id, ':'))) { @@ -1131,8 +1139,10 @@ int main(int argc, char **argv_orig, char **envp) { if (afl->is_secondary_node && check_main_node_exists(afl) == 0) { - WARNF("no -M main node found. You need to run one main instance!"); - sleep(3); + WARNF( + "no -M main node found. It is recommended to run exactly one main " + "instance."); + sleep(1); } @@ -1302,7 +1312,7 @@ int main(int argc, char **argv_orig, char **envp) { show_init_stats(afl); - seek_to = find_start_position(afl); + if (unlikely(afl->old_seed_selection)) seek_to = find_start_position(afl); write_stats_file(afl, 0, 0, 0); maybe_update_plot_file(afl, 0, 0); @@ -1324,28 +1334,37 @@ int main(int argc, char **argv_orig, char **envp) { // real start time, we reset, so this works correctly with -V afl->start_time = get_cur_time(); + u32 runs_in_current_cycle = (u32)-1; + u32 prev_queued_paths = 0; + while (1) { u8 skipped_fuzz; cull_queue(afl); - if (!afl->queue_cur) { + if (unlikely((!afl->old_seed_selection && + runs_in_current_cycle > afl->queued_paths) || + (afl->old_seed_selection && !afl->queue_cur))) { ++afl->queue_cycle; - afl->current_entry = 0; + runs_in_current_cycle = 0; afl->cur_skipped_paths = 0; - afl->queue_cur = afl->queue; - if (seek_to) { + if (unlikely(afl->old_seed_selection)) { - afl->current_entry = seek_to; - afl->queue_cur = afl->queue_buf[seek_to]; - seek_to = 0; + afl->current_entry = 0; + afl->queue_cur = afl->queue; - } + if (unlikely(seek_to)) { - // show_stats(afl); + afl->current_entry = seek_to; + afl->queue_cur = afl->queue_buf[seek_to]; + seek_to = 0; + + } + + } if (unlikely(afl->not_on_tty)) { @@ -1366,9 +1385,11 @@ int main(int argc, char **argv_orig, char **envp) { switch (afl->expand_havoc) { case 0: + // this adds extra splicing mutation options to havoc mode afl->expand_havoc = 1; break; case 1: + // add MOpt mutator if (afl->limit_time_sig == 0 && !afl->custom_only && !afl->python_only) { @@ -1381,25 +1402,26 @@ int main(int argc, char **argv_orig, char **envp) { break; case 2: // if (!have_p) afl->schedule = EXPLOIT; + // increase havoc mutations per fuzz attempt afl->havoc_stack_pow2++; afl->expand_havoc = 3; break; case 3: + // further increase havoc mutations per fuzz attempt afl->havoc_stack_pow2++; afl->expand_havoc = 4; break; case 4: + // if not in sync mode, enable deterministic mode? 
+ // if (!afl->sync_dir) afl->skip_deterministic = 0; + afl->expand_havoc = 5; + break; + case 5: // nothing else currently break; } - if (afl->expand_havoc) { - - } else - - afl->expand_havoc = 1; - } else { afl->use_splicing = 1; @@ -1470,6 +1492,22 @@ int main(int argc, char **argv_orig, char **envp) { } + if (likely(!afl->old_seed_selection)) { + + ++runs_in_current_cycle; + if (unlikely(prev_queued_paths < afl->queued_paths)) { + + // we have new queue entries since the last run, recreate alias table + prev_queued_paths = afl->queued_paths; + create_alias_table(afl); + + } + + afl->current_entry = select_next_queue_entry(afl); + afl->queue_cur = afl->queue_buf[afl->current_entry]; + + } + skipped_fuzz = fuzz_one(afl); if (!skipped_fuzz && !afl->stop_soon && afl->sync_id) { @@ -1490,8 +1528,12 @@ int main(int argc, char **argv_orig, char **envp) { if (afl->stop_soon) { break; } - afl->queue_cur = afl->queue_cur->next; - ++afl->current_entry; + if (unlikely(afl->old_seed_selection)) { + + afl->queue_cur = afl->queue_cur->next; + ++afl->current_entry; + + } } -- cgit 1.4.1 From b7e0490bcdaa7fa792a9dccfa5983e03af92730e Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Mon, 12 Oct 2020 03:44:34 +0200 Subject: Revert "Merge branch 'memcache_marc' into dev" This reverts commit c03fbcedaa68db5324423975a34331287426f7c2, reversing changes made to dab017dddaaab6d836a590f7bba3eea3549758d2. --- include/afl-fuzz.h | 16 ------ include/config.h | 9 --- src/afl-fuzz-one.c | 157 +++++++++++++++++++++++++++++++-------------------- src/afl-fuzz-queue.c | 87 ---------------------------- src/afl-fuzz.c | 5 +- 5 files changed, 97 insertions(+), 177 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index a3e87129..45de197d 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -168,9 +168,6 @@ struct queue_entry { double perf_score; /* performance score */ - u8 *testcase_buf; /* The testcase buffer, if loaded. */ - u32 testcase_refs; /* count of users of testcase buf */ - struct queue_entry *next; /* Next element, if any */ }; @@ -689,12 +686,6 @@ typedef struct afl_state { /* queue entries ready for splicing count (len > 4) */ u32 ready_for_splicing_count; - /* How many queue entries currently have cached testcases */ - u32 q_testcase_cache_count; - /* Refs to each queue entry with cached testcase (for eviction, if cache_count - * is too large) */ - struct queue_entry *q_testcase_cache[TESTCASE_CACHE_SIZE]; - } afl_state_t; struct custom_mutator { @@ -1141,12 +1132,5 @@ static inline u64 next_p2(u64 val) { } -/* Returns the testcase buf from the file behind this queue entry. - Increases the refcount. */ -u8 *queue_testcase_take(afl_state_t *afl, struct queue_entry *q); - -/* Tell afl that this testcase may be evicted from the cache */ -void queue_testcase_release(afl_state_t *afl, struct queue_entry *q); - #endif diff --git a/include/config.h b/include/config.h index 3f498275..7dd045e3 100644 --- a/include/config.h +++ b/include/config.h @@ -295,15 +295,6 @@ #define RESEED_RNG 100000 -/* The amount of entries in the testcase cache, held in memory. -Decrease if RAM usage is high. */ -#define TESTCASE_CACHE_SIZE 3072 - -#if TESTCASE_CACHE_SIZE < 4 - #error \ - "Dangerously low cache size: Set TESTCASE_CACHE_SIZE to 4 or more in config.h!" 
-#endif - /* Maximum line length passed from GCC to 'as' and used for parsing configuration files: */ diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index ebe541a2..6ef728e0 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -370,7 +370,7 @@ static void locate_diffs(u8 *ptr1, u8 *ptr2, u32 len, s32 *first, s32 *last) { u8 fuzz_one_original(afl_state_t *afl) { - s32 len, temp_len; + s32 len, fd, temp_len; u32 j; u32 i; u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; @@ -453,9 +453,28 @@ u8 fuzz_one_original(afl_state_t *afl) { } - orig_in = in_buf = queue_testcase_take(afl, afl->queue_cur); + /* Map the test case into memory. */ + + fd = open(afl->queue_cur->fname, O_RDONLY); + + if (unlikely(fd < 0)) { + + PFATAL("Unable to open '%s'", afl->queue_cur->fname); + + } + len = afl->queue_cur->len; + orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + + if (unlikely(orig_in == MAP_FAILED)) { + + PFATAL("Unable to mmap '%s' with len %d", afl->queue_cur->fname, len); + + } + + close(fd); + /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every single byte anyway, so it wouldn't give us any performance or memory usage benefits. */ @@ -1678,7 +1697,7 @@ custom_mutator_stage: for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) { - struct queue_entry *target = NULL; + struct queue_entry *target; u32 tid; u8 * new_buf = NULL; u32 target_len = 0; @@ -1701,7 +1720,17 @@ custom_mutator_stage: afl->splicing_with = tid; /* Read the additional testcase into a new buffer. */ - new_buf = queue_testcase_take(afl, target); + fd = open(target->fname, O_RDONLY); + if (unlikely(fd < 0)) { + + PFATAL("Unable to open '%s'", target->fname); + + } + + new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), target->len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } + ck_read(fd, new_buf, target->len, target->fname); + close(fd); target_len = target->len; } @@ -1712,13 +1741,6 @@ custom_mutator_stage: el->afl_custom_fuzz(el->data, out_buf, len, &mutated_buf, new_buf, target_len, max_seed_size); - if (new_buf) { - - queue_testcase_release(afl, target); - new_buf = NULL; - - } - if (unlikely(!mutated_buf)) { FATAL("Error in custom_fuzz. Size returned: %zd", mutated_size); @@ -2301,53 +2323,52 @@ havoc_stage: /* Overwrite bytes with a randomly selected chunk from another testcase or insert that chunk. */ - if (afl->queued_paths < 4) { break; } + if (afl->queued_paths < 4) break; /* Pick a random queue entry and seek to it. */ u32 tid; - do { - + do tid = rand_below(afl, afl->queued_paths); - - } while (tid == afl->current_entry); + while (tid == afl->current_entry); struct queue_entry *target = afl->queue_buf[tid]; /* Make sure that the target has a reasonable length. */ - while (target && (target->len < 2 || target == afl->queue_cur)) { - + while (target && (target->len < 2 || target == afl->queue_cur)) target = target->next; - } + if (!target) break; - if (!target) { break; } + /* Read the testcase into a new buffer. */ - u32 new_len = target->len; + fd = open(target->fname, O_RDONLY); - /* Get the testcase contents for splicing. 
*/ - u8 *new_buf = queue_testcase_take(afl, target); + if (unlikely(fd < 0)) { - u8 overwrite = 0; - if (temp_len >= 2 && rand_below(afl, 2)) { + PFATAL("Unable to open '%s'", target->fname); - overwrite = 1; + } - } else if (temp_len + HAVOC_BLK_XL >= MAX_FILE) { + u32 new_len = target->len; + u8 *new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), new_len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } - if (temp_len >= 2) { + ck_read(fd, new_buf, new_len, target->fname); - overwrite = 1; + close(fd); - } else { + u8 overwrite = 0; + if (temp_len >= 2 && rand_below(afl, 2)) + overwrite = 1; + else if (temp_len + HAVOC_BLK_XL >= MAX_FILE) { - queue_testcase_release(afl, target); - new_buf = NULL; + if (temp_len >= 2) + overwrite = 1; + else break; - } - } if (overwrite) { @@ -2393,9 +2414,6 @@ havoc_stage: } - /* We don't need this splice testcase anymore */ - queue_testcase_release(afl, target); - new_buf = NULL; break; } @@ -2501,17 +2519,24 @@ retry_splicing: if (!target) { goto retry_splicing; } - /* Get the testcase buffer */ - u8 *splice_buf = queue_testcase_take(afl, target); + /* Read the testcase into a new buffer. */ + + fd = open(target->fname, O_RDONLY); + + if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); } + new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len); if (unlikely(!new_buf)) { PFATAL("alloc"); } + ck_read(fd, new_buf, target->len, target->fname); + + close(fd); + /* Find a suitable splicing location, somewhere between the first and the last differing byte. Bail out if the difference is just a single byte or so. */ - locate_diffs(in_buf, splice_buf, MIN(len, (s64)target->len), &f_diff, - &l_diff); + locate_diffs(in_buf, new_buf, MIN(len, (s64)target->len), &f_diff, &l_diff); if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { goto retry_splicing; } @@ -2523,7 +2548,6 @@ retry_splicing: len = target->len; memcpy(new_buf, in_buf, split_at); - memcpy(new_buf + split_at, splice_buf + split_at, target->len - split_at); afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); in_buf = new_buf; @@ -2531,9 +2555,6 @@ retry_splicing: if (unlikely(!out_buf)) { PFATAL("alloc"); } memcpy(out_buf, in_buf, len); - queue_testcase_release(afl, target); - splice_buf = NULL; - goto custom_mutator_stage; /* ???: While integrating Python module, the author decided to jump to python stage, but the reason behind this is not clear.*/ @@ -2564,8 +2585,7 @@ abandon_entry: ++afl->queue_cur->fuzz_level; - queue_testcase_release(afl, afl->queue_cur); - orig_in = NULL; + munmap(orig_in, afl->queue_cur->len); return ret_val; @@ -2587,7 +2607,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { } - s32 len, temp_len; + s32 len, fd, temp_len; u32 i; u32 j; u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; @@ -2652,9 +2672,23 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { } /* Map the test case into memory. */ - orig_in = in_buf = queue_testcase_take(afl, afl->queue_cur); + + fd = open(afl->queue_cur->fname, O_RDONLY); + + if (fd < 0) { PFATAL("Unable to open '%s'", afl->queue_cur->fname); } + len = afl->queue_cur->len; + orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + + if (orig_in == MAP_FAILED) { + + PFATAL("Unable to mmap '%s'", afl->queue_cur->fname); + + } + + close(fd); + /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every single byte anyway, so it wouldn't give us any performance or memory usage benefits. 
*/ @@ -4494,24 +4528,31 @@ pacemaker_fuzzing: if (!target) { goto retry_splicing_puppet; } /* Read the testcase into a new buffer. */ - u8 *splicing_buf = queue_testcase_take(afl, target); + + fd = open(target->fname, O_RDONLY); + + if (fd < 0) { PFATAL("Unable to open '%s'", target->fname); } + + new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } + + ck_read(fd, new_buf, target->len, target->fname); + + close(fd); /* Find a suitable splicin g location, somewhere between the first and the last differing byte. Bail out if the difference is just a single byte or so. */ - locate_diffs(in_buf, splicing_buf, MIN(len, (s32)target->len), &f_diff, + locate_diffs(in_buf, new_buf, MIN(len, (s32)target->len), &f_diff, &l_diff); if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { - queue_testcase_release(afl, target); goto retry_splicing_puppet; } - new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len); - /* Split somewhere between the first and last differing byte. */ split_at = f_diff + rand_below(afl, l_diff - f_diff); @@ -4520,17 +4561,12 @@ pacemaker_fuzzing: len = target->len; memcpy(new_buf, in_buf, split_at); - memcpy(new_buf + split_at, splicing_buf + split_at, - target->len - split_at); afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); in_buf = new_buf; out_buf = afl_realloc(AFL_BUF_PARAM(out), len); if (unlikely(!out_buf)) { PFATAL("alloc"); } memcpy(out_buf, in_buf, len); - queue_testcase_release(afl, target); - splicing_buf = NULL; - goto havoc_stage_puppet; } /* if splice_cycle */ @@ -4564,8 +4600,7 @@ pacemaker_fuzzing: // if (afl->queue_cur->favored) --afl->pending_favored; // } - queue_testcase_release(afl, afl->queue_cur); - orig_in = NULL; + munmap(orig_in, afl->queue_cur->len); if (afl->key_puppet == 1) { diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index a034b168..d608e890 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -343,7 +343,6 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { q->depth = afl->cur_depth + 1; q->passed_det = passed_det; q->trace_mini = NULL; - q->testcase_buf = NULL; if (q->depth > afl->max_depth) { afl->max_depth = q->depth; } @@ -892,89 +891,3 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) { } -/* Tell afl that this testcase may be evicted from the cache */ -inline void queue_testcase_release(afl_state_t *afl, struct queue_entry *q) { - - (void)afl; - if (unlikely(q->testcase_refs == 0)) { - - FATAL("Testcase refcount reduced past 0"); - - } - - q->testcase_refs--; - -} - -/* Returns the testcase buf from the file behind this queue entry. - Increases the refcount. */ -u8 *queue_testcase_take(afl_state_t *afl, struct queue_entry *q) { - - if (!q->testcase_buf) { - - u32 tid = 0; - /* Buf not cached, let's do that now */ - - if (likely(afl->q_testcase_cache_count == TESTCASE_CACHE_SIZE)) { - - /* Cache full. We neet to evict one to map one. - Get a random one which is not in use */ - do { - - tid = rand_below(afl, afl->q_testcase_cache_count); - - } while (afl->q_testcase_cache[tid]->testcase_refs > 0); - - struct queue_entry *old_cached = afl->q_testcase_cache[tid]; - /* free the current buf from cache */ - munmap(old_cached->testcase_buf, old_cached->len); - old_cached->testcase_buf = NULL; - - } else { - - tid = afl->q_testcase_cache_count; - afl->q_testcase_cache_count++; - - } - - /* Map the test case into memory. 
*/ - - int fd = open(q->fname, O_RDONLY); - - if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", q->fname); } - - u32 len = q->len; - - q->testcase_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - - if (unlikely(q->testcase_buf == MAP_FAILED)) { - - PFATAL("Unable to mmap '%s' with len %d", q->fname, len); - - } - - close(fd); - - /* Register us as cached */ - afl->q_testcase_cache[tid] = q; - - } - - q->testcase_refs++; - if (unlikely(!q->testcase_buf || !q->testcase_refs)) { - if (!q->testcase_buf) { - - FATAL("Testcase buf is NULL, this should never happen"); - - } - if (!q->testcase_refs) { - - FATAL("Testcase ref overflow. Missing a testcase release somwhere?"); - - } - } - - return q->testcase_buf; - -} - diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index cb5eb37a..d42a0d36 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -1174,12 +1174,9 @@ int main(int argc, char **argv_orig, char **envp) { if (extras_dir_cnt) { - for (i = 0; i < extras_dir_cnt; i++) { - + for (i = 0; i < extras_dir_cnt; i++) load_extras(afl, extras_dir[i]); - } - dedup_extras(afl); OKF("Loaded a total of %u extras.", afl->extras_cnt); -- cgit 1.4.1 From d6da5605c80d65091375c08ae5389d14d671500a Mon Sep 17 00:00:00 2001 From: van Hauser Date: Mon, 12 Oct 2020 04:03:42 +0200 Subject: fix splicing selection --- src/afl-fuzz-one.c | 47 ++++++++--------------------------------------- 1 file changed, 8 insertions(+), 39 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 6ef728e0..fc092f8d 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1712,9 +1712,9 @@ custom_mutator_stage: tid = rand_below(afl, afl->queued_paths); - } while (unlikely(tid == afl->current_entry && + } while (unlikely(tid == afl->current_entry || - afl->queue_buf[tid]->len >= 4)); + afl->queue_buf[tid]->len < 4)); target = afl->queue_buf[tid]; afl->splicing_with = tid; @@ -1872,7 +1872,7 @@ havoc_stage: u32 r_max, r; - if (unlikely(afl->expand_havoc)) { + if (unlikely(afl->expand_havoc && afl->ready_for_splicing_count > 1)) { /* add expensive havoc cases here, they are activated after a full cycle without finds happened */ @@ -2323,24 +2323,15 @@ havoc_stage: /* Overwrite bytes with a randomly selected chunk from another testcase or insert that chunk. */ - if (afl->queued_paths < 4) break; - /* Pick a random queue entry and seek to it. */ u32 tid; do tid = rand_below(afl, afl->queued_paths); - while (tid == afl->current_entry); + while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4); struct queue_entry *target = afl->queue_buf[tid]; - /* Make sure that the target has a reasonable length. */ - - while (target && (target->len < 2 || target == afl->queue_cur)) - target = target->next; - - if (!target) break; - /* Read the testcase into a new buffer. */ fd = open(target->fname, O_RDONLY); @@ -2480,7 +2471,7 @@ havoc_stage: retry_splicing: if (afl->use_splicing && splice_cycle++ < SPLICE_CYCLES && - afl->queued_paths > 1 && afl->queue_cur->len > 1) { + afl->ready_for_splicing_count > 1 && afl->queue_cur->len >= 4) { struct queue_entry *target; u32 tid, split_at; @@ -2503,22 +2494,11 @@ retry_splicing: tid = rand_below(afl, afl->queued_paths); - } while (tid == afl->current_entry); + } while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4); afl->splicing_with = tid; target = afl->queue_buf[tid]; - /* Make sure that the target has a reasonable length. 
*/ - - while (target && (target->len < 2 || target == afl->queue_cur)) { - - target = target->next; - ++afl->splicing_with; - - } - - if (!target) { goto retry_splicing; } - /* Read the testcase into a new buffer. */ fd = open(target->fname, O_RDONLY); @@ -4487,7 +4467,7 @@ pacemaker_fuzzing: if (afl->use_splicing && splice_cycle++ < (u32)afl->SPLICE_CYCLES_puppet && - afl->queued_paths > 1 && afl->queue_cur->len > 1) { + afl->ready_for_splicing_count > 1 && afl->queue_cur->len >= 4) { struct queue_entry *target; u32 tid, split_at; @@ -4511,22 +4491,11 @@ pacemaker_fuzzing: tid = rand_below(afl, afl->queued_paths); - } while (tid == afl->current_entry); + } while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4); afl->splicing_with = tid; target = afl->queue_buf[tid]; - /* Make sure that the target has a reasonable length. */ - - while (target && (target->len < 2 || target == afl->queue_cur)) { - - target = target->next; - ++afl->splicing_with; - - } - - if (!target) { goto retry_splicing_puppet; } - /* Read the testcase into a new buffer. */ fd = open(target->fname, O_RDONLY); -- cgit 1.4.1 From 56ac3fcdc511d124ad058412021ead21bbbcf4bf Mon Sep 17 00:00:00 2001 From: van Hauser Date: Wed, 14 Oct 2020 15:30:30 +0200 Subject: configurable testcache with malloc (#581) * cache item number to cache memory size * reload testcase if trimming changed the size * fix splicing selection * slim splicing * import sync fix * write testcache stats to fuzzer_stats * fix new seed selection algo * malloc+read instead of mmap * fix * testcache is configurable now and no reference counts * fixes compilation, test script * fixes * switch TEST_CC to afl-cc in makefile * code format * fix * fix crash * fix crash * fix env help output * remove unnecessary pointer resets * fix endless loop bug * actually use the cache if set * one more fix * increase default cache entries, add default cache size value to config.h Co-authored-by: hexcoder- --- GNUmakefile | 4 +- include/afl-fuzz.h | 31 +++++++- include/config.h | 9 +++ include/envs.h | 1 + src/afl-fuzz-init.c | 4 +- src/afl-fuzz-one.c | 169 +++++++++---------------------------------- src/afl-fuzz-queue.c | 167 ++++++++++++++++++++++++++++++++++++++---- src/afl-fuzz-run.c | 6 ++ src/afl-fuzz-state.c | 8 ++ src/afl-fuzz-stats.c | 7 +- src/afl-fuzz.c | 29 ++++++-- src/afl-performance.c | 2 +- test/test-custom-mutators.sh | 2 +- 13 files changed, 276 insertions(+), 163 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/GNUmakefile b/GNUmakefile index c885a935..80b7b68b 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -223,8 +223,6 @@ ifneq "$(findstring OpenBSD, $(shell uname))" "" LDFLAGS += -lpthread endif -TEST_CC = afl-gcc - COMM_HDR = include/alloc-inl.h include/config.h include/debug.h include/types.h ifeq "$(shell echo '$(HASH)include @int main() {return 0; }' | tr @ '\n' | $(CC) $(CFLAGS) -x c - -o .test $(PYTHON_INCLUDE) $(LDFLAGS) $(PYTHON_LIB) 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" @@ -488,7 +486,7 @@ code-format: ifndef AFL_NO_X86 test_build: afl-cc afl-as afl-showmap @echo "[*] Testing the CC wrapper and instrumentation output..." - @unset AFL_USE_ASAN AFL_USE_MSAN AFL_CC; AFL_DEBUG=1 AFL_INST_RATIO=100 AFL_PATH=. ./$(TEST_CC) $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS) 2>&1 | grep 'afl-as' >/dev/null || (echo "Oops, afl-as did not get called from "$(TEST_CC)". 
This is normally achieved by "$(CC)" honoring the -B option."; exit 1 ) + @unset AFL_MAP_SIZE AFL_USE_UBSAN AFL_USE_CFISAN AFL_USE_ASAN AFL_USE_MSAN AFL_CC; AFL_INST_RATIO=100 AFL_PATH=. ./afl-cc $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS) 2>&1 || (echo "Oops, afl-cc failed"; exit 1 ) ASAN_OPTIONS=detect_leaks=0 ./afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null echo 1 | ASAN_OPTIONS=detect_leaks=0 ./afl-showmap -m none -q -o .test-instr1 ./test-instr @rm -f test-instr diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 85597150..940c5602 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -168,6 +168,8 @@ struct queue_entry { double perf_score; /* performance score */ + u8 *testcase_buf; /* The testcase buffer, if loaded. */ + struct queue_entry *next; /* Next element, if any */ }; @@ -363,7 +365,7 @@ typedef struct afl_env_vars { u8 *afl_tmpdir, *afl_custom_mutator_library, *afl_python_module, *afl_path, *afl_hang_tmout, *afl_forksrv_init_tmout, *afl_skip_crashes, *afl_preload, *afl_max_det_extras, *afl_statsd_host, *afl_statsd_port, - *afl_statsd_tags_flavor; + *afl_statsd_tags_flavor, *afl_testcache_size; } afl_env_vars_t; @@ -675,6 +677,9 @@ typedef struct afl_state { u8 *in_scratch_buf; u8 *ex_buf; + + u8 *testcase_buf, *splicecase_buf; + u32 custom_mutators_count; list_t custom_mutator_list; @@ -686,6 +691,22 @@ typedef struct afl_state { /* queue entries ready for splicing count (len > 4) */ u32 ready_for_splicing_count; + /* This is the user specified maximum size to use for the testcase cache */ + u64 q_testcase_max_cache_size; + + /* How much of the testcase cache is used so far */ + u64 q_testcase_cache_size; + + /* highest cache count so far */ + u32 q_testcase_max_cache_count; + + /* How many queue entries currently have cached testcases */ + u32 q_testcase_cache_count; + + /* Refs to each queue entry with cached testcase (for eviction, if cache_count + * is too large) */ + struct queue_entry *q_testcase_cache[TESTCASE_ENTRIES]; + } afl_state_t; struct custom_mutator { @@ -1135,5 +1156,13 @@ static inline u64 next_p2(u64 val) { } +/* Returns the testcase buf from the file behind this queue entry. + Increases the refcount. */ +u8 *queue_testcase_get(afl_state_t *afl, struct queue_entry *q); + +/* If trimming changes the testcase size we have to reload it */ +void queue_testcase_retake(afl_state_t *afl, struct queue_entry *q, + u32 old_len); + #endif diff --git a/include/config.h b/include/config.h index 7dd045e3..b4f3a775 100644 --- a/include/config.h +++ b/include/config.h @@ -295,6 +295,15 @@ #define RESEED_RNG 100000 +/* The maximum number of testcases to cache */ + +#define TESTCASE_ENTRIES 16384 + +/* The default maximum testcase cache size in MB, 0 = disable. + A value between 50 and 250 is a good default value. 
*/ + +#define TESTCASE_CACHE 0 + /* Maximum line length passed from GCC to 'as' and used for parsing configuration files: */ diff --git a/include/envs.h b/include/envs.h index 51520312..a1b3ad12 100644 --- a/include/envs.h +++ b/include/envs.h @@ -139,6 +139,7 @@ static char *afl_environment_variables[] = { "AFL_STATSD_HOST", "AFL_STATSD_PORT", "AFL_STATSD_TAGS_FLAVOR", + "AFL_TESTCACHE_SIZE", "AFL_TMIN_EXACT", "AFL_TMPDIR", "AFL_TOKEN_FILE", diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 881bf10f..607b652f 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -1045,7 +1045,7 @@ restart_outer_cull_loop: while (q) { - if (q->cal_failed || !q->exec_cksum) continue; + if (q->cal_failed || !q->exec_cksum) { goto next_entry; } restart_inner_cull_loop: @@ -1090,6 +1090,8 @@ restart_outer_cull_loop: } + next_entry: + prev = q; q = q->next; diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index fc092f8d..154e4b45 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -370,7 +370,7 @@ static void locate_diffs(u8 *ptr1, u8 *ptr2, u32 len, s32 *first, s32 *last) { u8 fuzz_one_original(afl_state_t *afl) { - s32 len, fd, temp_len; + s32 len, temp_len; u32 j; u32 i; u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; @@ -453,32 +453,9 @@ u8 fuzz_one_original(afl_state_t *afl) { } - /* Map the test case into memory. */ - - fd = open(afl->queue_cur->fname, O_RDONLY); - - if (unlikely(fd < 0)) { - - PFATAL("Unable to open '%s'", afl->queue_cur->fname); - - } - + orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur); len = afl->queue_cur->len; - orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - - if (unlikely(orig_in == MAP_FAILED)) { - - PFATAL("Unable to mmap '%s' with len %d", afl->queue_cur->fname, len); - - } - - close(fd); - - /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every - single byte anyway, so it wouldn't give us any performance or memory usage - benefits. */ - out_buf = afl_realloc(AFL_BUF_PARAM(out), len); if (unlikely(!out_buf)) { PFATAL("alloc"); } @@ -526,6 +503,7 @@ u8 fuzz_one_original(afl_state_t *afl) { !afl->disable_trim)) { u8 res = trim_case(afl, afl->queue_cur, in_buf); + orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur); if (unlikely(res == FSRV_RUN_ERROR)) { @@ -1720,17 +1698,7 @@ custom_mutator_stage: afl->splicing_with = tid; /* Read the additional testcase into a new buffer. */ - fd = open(target->fname, O_RDONLY); - if (unlikely(fd < 0)) { - - PFATAL("Unable to open '%s'", target->fname); - - } - - new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), target->len); - if (unlikely(!new_buf)) { PFATAL("alloc"); } - ck_read(fd, new_buf, target->len, target->fname); - close(fd); + new_buf = queue_testcase_get(afl, target); target_len = target->len; } @@ -2182,7 +2150,6 @@ havoc_stage: afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); out_buf = new_buf; - new_buf = NULL; temp_len += clone_len; } @@ -2326,43 +2293,21 @@ havoc_stage: /* Pick a random queue entry and seek to it. */ u32 tid; - do - tid = rand_below(afl, afl->queued_paths); - while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4); - - struct queue_entry *target = afl->queue_buf[tid]; - - /* Read the testcase into a new buffer. 
*/ - - fd = open(target->fname, O_RDONLY); - - if (unlikely(fd < 0)) { - - PFATAL("Unable to open '%s'", target->fname); - - } - - u32 new_len = target->len; - u8 *new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), new_len); - if (unlikely(!new_buf)) { PFATAL("alloc"); } - - ck_read(fd, new_buf, new_len, target->fname); + do { - close(fd); + tid = rand_below(afl, afl->queued_paths); - u8 overwrite = 0; - if (temp_len >= 2 && rand_below(afl, 2)) - overwrite = 1; - else if (temp_len + HAVOC_BLK_XL >= MAX_FILE) { + } while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4); - if (temp_len >= 2) - overwrite = 1; - else - break; + /* Get the testcase for splicing. */ + struct queue_entry *target = afl->queue_buf[tid]; + u32 new_len = target->len; + u8 * new_buf = queue_testcase_get(afl, target); - } + if ((temp_len >= 2 && rand_below(afl, 2)) || + temp_len + HAVOC_BLK_XL >= MAX_FILE) { - if (overwrite) { + /* overwrite mode */ u32 copy_from, copy_to, copy_len; @@ -2376,15 +2321,16 @@ havoc_stage: } else { + /* insert mode */ + u32 clone_from, clone_to, clone_len; clone_len = choose_block_len(afl, new_len); clone_from = rand_below(afl, new_len - clone_len + 1); + clone_to = rand_below(afl, temp_len + 1); - clone_to = rand_below(afl, temp_len); - - u8 *temp_buf = - afl_realloc(AFL_BUF_PARAM(out_scratch), temp_len + clone_len); + u8 *temp_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), + temp_len + clone_len + 1); if (unlikely(!temp_buf)) { PFATAL("alloc"); } /* Head */ @@ -2496,21 +2442,10 @@ retry_splicing: } while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4); + /* Get the testcase */ afl->splicing_with = tid; target = afl->queue_buf[tid]; - - /* Read the testcase into a new buffer. */ - - fd = open(target->fname, O_RDONLY); - - if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); } - - new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len); - if (unlikely(!new_buf)) { PFATAL("alloc"); } - - ck_read(fd, new_buf, target->len, target->fname); - - close(fd); + new_buf = queue_testcase_get(afl, target); /* Find a suitable splicing location, somewhere between the first and the last differing byte. Bail out if the difference is just a single @@ -2527,18 +2462,16 @@ retry_splicing: /* Do the thing. */ len = target->len; - memcpy(new_buf, in_buf, split_at); - afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); - in_buf = new_buf; + afl->in_scratch_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), len); + memcpy(afl->in_scratch_buf, in_buf, split_at); + memcpy(afl->in_scratch_buf + split_at, new_buf, len - split_at); + in_buf = afl->in_scratch_buf; out_buf = afl_realloc(AFL_BUF_PARAM(out), len); if (unlikely(!out_buf)) { PFATAL("alloc"); } memcpy(out_buf, in_buf, len); goto custom_mutator_stage; - /* ???: While integrating Python module, the author decided to jump to - python stage, but the reason behind this is not clear.*/ - // goto havoc_stage; } @@ -2564,9 +2497,7 @@ abandon_entry: } ++afl->queue_cur->fuzz_level; - - munmap(orig_in, afl->queue_cur->len); - + orig_in = NULL; return ret_val; #undef FLIP_BIT @@ -2587,7 +2518,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { } - s32 len, fd, temp_len; + s32 len, temp_len; u32 i; u32 j; u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; @@ -2652,32 +2583,11 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { } /* Map the test case into memory. 
*/ - - fd = open(afl->queue_cur->fname, O_RDONLY); - - if (fd < 0) { PFATAL("Unable to open '%s'", afl->queue_cur->fname); } - + orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur); len = afl->queue_cur->len; - - orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - - if (orig_in == MAP_FAILED) { - - PFATAL("Unable to mmap '%s'", afl->queue_cur->fname); - - } - - close(fd); - - /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every - single byte anyway, so it wouldn't give us any performance or memory usage - benefits. */ - out_buf = afl_realloc(AFL_BUF_PARAM(out), len); if (unlikely(!out_buf)) { PFATAL("alloc"); } - afl->subseq_tmouts = 0; - afl->cur_depth = afl->queue_cur->depth; /******************************************* @@ -2721,6 +2631,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { u32 old_len = afl->queue_cur->len; u8 res = trim_case(afl, afl->queue_cur, in_buf); + orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur); if (res == FSRV_RUN_ERROR) { @@ -4497,17 +4408,7 @@ pacemaker_fuzzing: target = afl->queue_buf[tid]; /* Read the testcase into a new buffer. */ - - fd = open(target->fname, O_RDONLY); - - if (fd < 0) { PFATAL("Unable to open '%s'", target->fname); } - - new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len); - if (unlikely(!new_buf)) { PFATAL("alloc"); } - - ck_read(fd, new_buf, target->len, target->fname); - - close(fd); + new_buf = queue_testcase_get(afl, target); /* Find a suitable splicin g location, somewhere between the first and the last differing byte. Bail out if the difference is just a single @@ -4529,9 +4430,11 @@ pacemaker_fuzzing: /* Do the thing. */ len = target->len; - memcpy(new_buf, in_buf, split_at); - afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); - in_buf = new_buf; + afl->in_scratch_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), len); + memcpy(afl->in_scratch_buf, in_buf, split_at); + memcpy(afl->in_scratch_buf + split_at, new_buf, len - split_at); + in_buf = afl->in_scratch_buf; + out_buf = afl_realloc(AFL_BUF_PARAM(out), len); if (unlikely(!out_buf)) { PFATAL("alloc"); } memcpy(out_buf, in_buf, len); @@ -4569,7 +4472,7 @@ pacemaker_fuzzing: // if (afl->queue_cur->favored) --afl->pending_favored; // } - munmap(orig_in, afl->queue_cur->len); + orig_in = NULL; if (afl->key_puppet == 1) { diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index f224d851..c634328f 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -31,11 +31,12 @@ inline u32 select_next_queue_entry(afl_state_t *afl) { - u32 s = rand_below(afl, afl->queued_paths); + u32 s = rand_below(afl, afl->queued_paths); double p = rand_next_percent(afl); /* fprintf(stderr, "select: p=%f s=%u ... p < prob[s]=%f ? s=%u : alias[%u]=%u" - " ==> %u\n", p, s, afl->alias_probability[s], s, s, afl->alias_table[s], p < afl->alias_probability[s] ? s : afl->alias_table[s]); + " ==> %u\n", p, s, afl->alias_probability[s], s, s, afl->alias_table[s], p < + afl->alias_probability[s] ? s : afl->alias_table[s]); */ return (p < afl->alias_probability[s] ? 
s : afl->alias_table[s]); @@ -55,7 +56,7 @@ void create_alias_table(afl_state_t *afl) { int * S = (u32 *)afl_realloc(AFL_BUF_PARAM(out_scratch), n * sizeof(u32)); int * L = (u32 *)afl_realloc(AFL_BUF_PARAM(in_scratch), n * sizeof(u32)); - if (!P || !S || !L) FATAL("could not aquire memory for alias table"); + if (!P || !S || !L) { FATAL("could not aquire memory for alias table"); } memset((void *)afl->alias_table, 0, n * sizeof(u32)); memset((void *)afl->alias_probability, 0, n * sizeof(double)); @@ -65,7 +66,7 @@ void create_alias_table(afl_state_t *afl) { struct queue_entry *q = afl->queue_buf[i]; - if (!q->disabled) q->perf_score = calculate_score(afl, q); + if (!q->disabled) { q->perf_score = calculate_score(afl, q); } sum += q->perf_score; @@ -74,19 +75,23 @@ void create_alias_table(afl_state_t *afl) { for (i = 0; i < n; i++) { struct queue_entry *q = afl->queue_buf[i]; - - P[i] = q->perf_score * n / sum; + P[i] = (q->perf_score * n) / sum; } int nS = 0, nL = 0, s; for (s = (s32)n - 1; s >= 0; --s) { - if (P[s] < 1) + if (P[s] < 1) { + S[nS++] = s; - else + + } else { + L[nL++] = s; + } + } while (nS && nL) { @@ -96,11 +101,16 @@ void create_alias_table(afl_state_t *afl) { afl->alias_probability[a] = P[a]; afl->alias_table[a] = g; P[g] = P[g] + P[a] - 1; - if (P[g] < 1) + if (P[g] < 1) { + S[nS++] = g; - else + + } else { + L[nL++] = g; + } + } while (nL) @@ -110,11 +120,10 @@ void create_alias_table(afl_state_t *afl) { afl->alias_probability[S[--nS]] = 1; /* - fprintf(stderr, " %-3s %-3s %-9s %-9s\n", "entry", "alias", "prob", "perf"); - for (u32 i = 0; i < n; ++i) - fprintf(stderr, " %3i %3i %9.7f %9.7f\n", i, afl->alias_table[i], - afl->alias_probability[i], afl->queue_buf[i]->perf_score); - + fprintf(stderr, " entry alias probability perf_score\n"); + for (u32 i = 0; i < n; ++i) + fprintf(stderr, " %5u %5u %11u %0.9f\n", i, afl->alias_table[i], + afl->alias_probability[i], afl->queue_buf[i]->perf_score); */ } @@ -860,3 +869,131 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) { } +void queue_testcase_retake(afl_state_t *afl, struct queue_entry *q, + u32 old_len) { + + if (likely(q->testcase_buf)) { + + free(q->testcase_buf); + int fd = open(q->fname, O_RDONLY); + + if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", q->fname); } + + u32 len = q->len; + q->testcase_buf = malloc(len); + + if (unlikely(!q->testcase_buf)) { + + PFATAL("Unable to mmap '%s' with len %d", q->fname, len); + + } + + close(fd); + afl->q_testcase_cache_size = afl->q_testcase_cache_size + q->len - old_len; + + } + +} + +/* Returns the testcase buf from the file behind this queue entry. + Increases the refcount. 
*/ +inline u8 *queue_testcase_get(afl_state_t *afl, struct queue_entry *q) { + + u32 len = q->len; + + /* first handle if no testcase cache is configured */ + + if (unlikely(!afl->q_testcase_max_cache_size)) { + + u8 *buf; + + if (q == afl->queue_cur) { + + buf = afl_realloc((void **)&afl->testcase_buf, len); + + } else { + + buf = afl_realloc((void **)&afl->splicecase_buf, len); + + } + + if (unlikely(!buf)) { + + PFATAL("Unable to malloc '%s' with len %u", q->fname, len); + + } + + int fd = open(q->fname, O_RDONLY); + + if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", q->fname); } + + ck_read(fd, buf, len, q->fname); + close(fd); + return buf; + + } + + /* now handle the testcase cache */ + + if (unlikely(!q->testcase_buf)) { + + /* Buf not cached, let's load it */ + u32 tid = 0; + + while (unlikely(afl->q_testcase_cache_size + len >= + afl->q_testcase_max_cache_size || + afl->q_testcase_cache_count >= TESTCASE_ENTRIES - 1)) { + + /* Cache full. We neet to evict one to map one. + Get a random one which is not in use */ + + do { + + tid = rand_below(afl, afl->q_testcase_max_cache_count); + + } while (afl->q_testcase_cache[tid] == NULL || + + afl->q_testcase_cache[tid] == afl->queue_cur); + + struct queue_entry *old_cached = afl->q_testcase_cache[tid]; + free(old_cached->testcase_buf); + old_cached->testcase_buf = NULL; + afl->q_testcase_cache_size -= old_cached->len; + afl->q_testcase_cache[tid] = NULL; + --afl->q_testcase_cache_count; + + } + + while (likely(afl->q_testcase_cache[tid] != NULL)) + ++tid; + + /* Map the test case into memory. */ + + int fd = open(q->fname, O_RDONLY); + + if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", q->fname); } + + q->testcase_buf = malloc(len); + + if (unlikely(!q->testcase_buf)) { + + PFATAL("Unable to malloc '%s' with len %u", q->fname, len); + + } + + ck_read(fd, q->testcase_buf, len, q->fname); + close(fd); + + /* Register testcase as cached */ + afl->q_testcase_cache[tid] = q; + afl->q_testcase_cache_size += q->len; + ++afl->q_testcase_cache_count; + if (tid >= afl->q_testcase_max_cache_count) + afl->q_testcase_max_cache_count = tid + 1; + + } + + return q->testcase_buf; + +} + diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index ee22b0f6..ab870319 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -692,6 +692,8 @@ void sync_fuzzers(afl_state_t *afl) { u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) { + u32 orig_len = q->len; + /* Custom mutator trimmer */ if (afl->custom_mutators_count) { @@ -709,6 +711,8 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) { }); + if (orig_len != q->len) { queue_testcase_retake(afl, q, orig_len); } + if (custom_trimmed) return trimmed_case; } @@ -842,6 +846,8 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) { close(fd); + if (orig_len != q->len) queue_testcase_retake(afl, q, orig_len); + memcpy(afl->fsrv.trace_bits, afl->clean_trace, afl->fsrv.map_size); update_bitmap_score(afl, q); diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index a0a2795e..0824b77f 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -103,6 +103,7 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) { afl->stats_avg_exec = -1; afl->skip_deterministic = 1; afl->use_splicing = 1; + afl->q_testcase_max_cache_size = TESTCASE_CACHE * 1024000; #ifdef HAVE_AFFINITY afl->cpu_aff = -1; /* Selected CPU core */ @@ -353,6 +354,13 @@ void read_afl_environment(afl_state_t *afl, char **envp) { afl->afl_env.afl_forksrv_init_tmout = (u8 
*)get_afl_env(afl_environment_variables[i]); + } else if (!strncmp(env, "AFL_TESTCACHE_SIZE", + + afl_environment_variable_len)) { + + afl->afl_env.afl_testcache_size = + (u8 *)get_afl_env(afl_environment_variables[i]); + } else if (!strncmp(env, "AFL_STATSD_HOST", afl_environment_variable_len)) { diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index 76f24977..4f0cab4c 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -165,6 +165,8 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability, "edges_found : %u\n" "var_byte_count : %u\n" "havoc_expansion : %u\n" + "testcache_size : %llu\n" + "testcache_count : %u\n" "afl_banner : %s\n" "afl_version : " VERSION "\n" @@ -198,8 +200,9 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability, #else -1, #endif - t_bytes, afl->var_byte_count, afl->expand_havoc, afl->use_banner, - afl->unicorn_mode ? "unicorn" : "", + t_bytes, afl->var_byte_count, afl->expand_havoc, + afl->q_testcase_cache_size, afl->q_testcase_cache_count, + afl->use_banner, afl->unicorn_mode ? "unicorn" : "", afl->fsrv.qemu_mode ? "qemu " : "", afl->non_instrumented_mode ? " non_instrumented " : "", afl->no_forkserver ? "no_fsrv " : "", afl->crash_mode ? "crash " : "", diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 6498eb30..a59abb7d 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -196,11 +196,13 @@ static void usage(u8 *argv0, int more_help) { "AFL_SKIP_BIN_CHECK: skip the check, if the target is an executable\n" "AFL_SKIP_CPUFREQ: do not warn about variable cpu clocking\n" "AFL_SKIP_CRASHES: during initial dry run do not terminate for crashing inputs\n" - "AFL_STATSD: enables StatsD metrics collection" - "AFL_STATSD_HOST: change default statsd host (default 127.0.0.1)" - "AFL_STATSD_PORT: change default statsd port (default: 8125)" - "AFL_STATSD_TAGS_FLAVOR: change default statsd tags format (default will disable tags)." - " Supported formats are: 'dogstatsd', 'librato', 'signalfx' and 'influxdb'" + "AFL_STATSD: enables StatsD metrics collection\n" + "AFL_STATSD_HOST: change default statsd host (default 127.0.0.1)\n" + "AFL_STATSD_PORT: change default statsd port (default: 8125)\n" + "AFL_STATSD_TAGS_FLAVOR: set statsd tags format (default: disable tags)\n" + " Supported formats are: 'dogstatsd', 'librato', 'signalfx'\n" + " and 'influxdb'\n" + "AFL_TESTCACHE_SIZE: use a cache for testcases, improves performance (in MB)\n" "AFL_TMPDIR: directory to use for input file generation (ramdisk recommended)\n" //"AFL_PERSISTENT: not supported anymore -> no effect, just a warning\n" //"AFL_DEFER_FORKSRV: not supported anymore -> no effect, just a warning\n" @@ -885,7 +887,7 @@ int main(int argc, char **argv_orig, char **envp) { auto_sync = 1; afl->sync_id = ck_strdup("default"); afl->is_secondary_node = 1; - OKF("no -M/-S set, autoconfiguring for \"-S %s\"", afl->sync_id); + OKF("No -M/-S set, autoconfiguring for \"-S %s\"", afl->sync_id); } @@ -1006,6 +1008,21 @@ int main(int argc, char **argv_orig, char **envp) { } + if (afl->afl_env.afl_testcache_size) { + + afl->q_testcase_max_cache_size = + (u64)atoi(afl->afl_env.afl_testcache_size) * 1024000; + OKF("Enabled testcache with %llu MB", + afl->q_testcase_max_cache_size / 1024000); + + } else { + + ACTF( + "No testcache was configured. 
it is recommended to use a testcache, it " + "improves performance: set AFL_TESTCACHE_SIZE=(value in MB)"); + + } + if (afl->afl_env.afl_forksrv_init_tmout) { afl->fsrv.init_tmout = atoi(afl->afl_env.afl_forksrv_init_tmout); diff --git a/src/afl-performance.c b/src/afl-performance.c index 6fa95dea..e070a05e 100644 --- a/src/afl-performance.c +++ b/src/afl-performance.c @@ -71,7 +71,7 @@ inline uint64_t rand_next(afl_state_t *afl) { inline double rand_next_percent(afl_state_t *afl) { - return (double)(((double)rand_next(afl)) / (double) 0xffffffffffffffff); + return (double)(((double)rand_next(afl)) / (double)0xffffffffffffffff); } diff --git a/test/test-custom-mutators.sh b/test/test-custom-mutators.sh index f7677ac5..d4d21048 100755 --- a/test/test-custom-mutators.sh +++ b/test/test-custom-mutators.sh @@ -1,4 +1,4 @@ -f#!/bin/sh +#!/bin/sh . ./test-pre.sh -- cgit 1.4.1 From 24e0c9cf65428efce181eaecc8c69ee030b8dfcc Mon Sep 17 00:00:00 2001 From: van Hauser Date: Thu, 15 Oct 2020 10:22:40 +0200 Subject: add missing swap bufs --- src/afl-fuzz-one.c | 1 + 1 file changed, 1 insertion(+) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 154e4b45..9c5e2e3c 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -2465,6 +2465,7 @@ retry_splicing: afl->in_scratch_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), len); memcpy(afl->in_scratch_buf, in_buf, split_at); memcpy(afl->in_scratch_buf + split_at, new_buf, len - split_at); + afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); in_buf = afl->in_scratch_buf; out_buf = afl_realloc(AFL_BUF_PARAM(out), len); -- cgit 1.4.1 From 0139b8cdcb81ec1ed873f182946b686a04f46ac6 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Thu, 15 Oct 2020 10:28:11 +0200 Subject: add missing swap bufs --- src/afl-fuzz-one.c | 1 + 1 file changed, 1 insertion(+) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 9c5e2e3c..cbfbbc58 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -4434,6 +4434,7 @@ pacemaker_fuzzing: afl->in_scratch_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), len); memcpy(afl->in_scratch_buf, in_buf, split_at); memcpy(afl->in_scratch_buf + split_at, new_buf, len - split_at); + afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); in_buf = afl->in_scratch_buf; out_buf = afl_realloc(AFL_BUF_PARAM(out), len); -- cgit 1.4.1 From d1e18f9edf43dc71ab81619eeed7a0f5fa0bb15f Mon Sep 17 00:00:00 2001 From: van Hauser Date: Thu, 15 Oct 2020 12:20:33 +0200 Subject: fix afl_swap_bufs usage? 
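
afl_realloc()/afl_swap_bufs() track ownership of the growable "in" and "in_scratch" buffers, so after a splice the scratch allocation has to become the tracked "in" buffer (and the old one the new scratch) before later reallocs or frees touch the pointers. The following is only a minimal standalone sketch of that pointer-swap idea; grow_buf() and swap_bufs() are simplified stand-ins and not AFL++'s actual afl_realloc()/afl_swap_bufs() implementations.

/* Sketch: build a spliced input in a scratch buffer, then swap which
   tracked pointer owns which allocation. Illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow *buf to at least size bytes (toy version, no size caching). */
static void *grow_buf(void **buf, size_t size) {
  void *p = realloc(*buf, size);
  if (p) { *buf = p; }
  return p;
}

/* Swap which allocation each tracked pointer refers to. */
static void swap_bufs(void **a, void **b) {
  void *tmp = *a;
  *a = *b;
  *b = tmp;
}

int main(void) {
  void *in = NULL, *in_scratch = NULL;
  const char *cur = "AAAAAAAA";    /* current testcase          */
  const char *other = "BBBBBBBB";  /* testcase we splice with   */
  size_t len = 8, split_at = 4;

  if (!grow_buf(&in, len) || !grow_buf(&in_scratch, len)) { return 1; }
  memcpy(in, cur, len);

  /* Build the spliced input in the scratch buffer: head of the current
     case, tail of the other case. */
  memcpy(in_scratch, in, split_at);
  memcpy((char *)in_scratch + split_at, other + split_at, len - split_at);

  /* Swap so the tracked "in" slot owns the spliced data and the old
     input becomes the scratch buffer for the next round. */
  swap_bufs(&in, &in_scratch);

  printf("%.*s\n", (int)len, (char *)in);  /* prints AAAABBBB */

  free(in);
  free(in_scratch);
  return 0;
}

The ordering matters: if the local working pointer is taken from the scratch slot either before or after the swap inconsistently, later code frees or reallocs the wrong allocation, which is the kind of mismatch the commits above adjust.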
--- src/afl-fuzz-one.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index cbfbbc58..1899193e 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -2149,7 +2149,6 @@ havoc_stage: temp_len - clone_to); afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); - out_buf = new_buf; temp_len += clone_len; } @@ -2346,7 +2345,6 @@ havoc_stage: temp_len - clone_to); afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); - out_buf = temp_buf; temp_len += clone_len; } @@ -2465,8 +2463,8 @@ retry_splicing: afl->in_scratch_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), len); memcpy(afl->in_scratch_buf, in_buf, split_at); memcpy(afl->in_scratch_buf + split_at, new_buf, len - split_at); - afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); in_buf = afl->in_scratch_buf; + afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); out_buf = afl_realloc(AFL_BUF_PARAM(out), len); if (unlikely(!out_buf)) { PFATAL("alloc"); } @@ -4142,7 +4140,6 @@ pacemaker_fuzzing: temp_len - clone_to); afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); - out_buf = new_buf; temp_len += clone_len; MOpt_globals.cycles_v2[STAGE_Clone75] += 1; @@ -4434,8 +4431,8 @@ pacemaker_fuzzing: afl->in_scratch_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), len); memcpy(afl->in_scratch_buf, in_buf, split_at); memcpy(afl->in_scratch_buf + split_at, new_buf, len - split_at); - afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); in_buf = afl->in_scratch_buf; + afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); out_buf = afl_realloc(AFL_BUF_PARAM(out), len); if (unlikely(!out_buf)) { PFATAL("alloc"); } -- cgit 1.4.1 From ea0851c654285cc33ac25637d2054044ee6ee2ee Mon Sep 17 00:00:00 2001 From: van Hauser Date: Thu, 15 Oct 2020 12:54:18 +0200 Subject: fix previous commit --- src/afl-fuzz-one.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 1899193e..02550d36 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -2148,6 +2148,7 @@ havoc_stage: memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, temp_len - clone_to); + out_buf = new_buf; afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); temp_len += clone_len; @@ -2344,6 +2345,7 @@ havoc_stage: memcpy(temp_buf + clone_to + clone_len, out_buf + clone_to, temp_len - clone_to); + out_buf = temp_buf; afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); temp_len += clone_len; @@ -4139,6 +4141,7 @@ pacemaker_fuzzing: memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, temp_len - clone_to); + out_buf = new_buf; afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); temp_len += clone_len; MOpt_globals.cycles_v2[STAGE_Clone75] += 1; -- cgit 1.4.1 From ac1c3b87015dd2c9b1bae0198f7925816aa63aec Mon Sep 17 00:00:00 2001 From: van Hauser Date: Mon, 19 Oct 2020 11:34:57 +0200 Subject: mini improvements --- TODO.md | 1 + src/afl-fuzz-one.c | 2 +- src/afl-fuzz-queue.c | 8 ++++---- src/afl-fuzz.c | 56 +++++++++++++++++++++++++++------------------------- src/afl-showmap.c | 10 ++++++---- 5 files changed, 41 insertions(+), 36 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/TODO.md b/TODO.md index 6b43d6be..7e203d26 100644 --- a/TODO.md +++ b/TODO.md @@ -7,6 +7,7 @@ - afl-plot to support multiple plot_data - afl_custom_fuzz_splice_optin() - intel-pt tracer + - own sancov for llvm 12 ## Further down the road diff --git 
a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 02550d36..1e63abc7 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -2540,7 +2540,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { #else - if (afl->pending_favored) { + if (likely(afl->pending_favored)) { /* If we have any favored, non-fuzzed new arrivals in the queue, possibly skip to them at the expense of already-fuzzed or non-favored diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index e0df7206..7f157121 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -179,9 +179,9 @@ void mark_as_variable(afl_state_t *afl, struct queue_entry *q) { void mark_as_redundant(afl_state_t *afl, struct queue_entry *q, u8 state) { - u8 fn[PATH_MAX]; + if (likely(state == q->fs_redundant)) { return; } - if (state == q->fs_redundant) { return; } + u8 fn[PATH_MAX]; q->fs_redundant = state; @@ -521,13 +521,13 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) { void cull_queue(afl_state_t *afl) { + if (likely(!afl->score_changed || afl->non_instrumented_mode)) { return; } + struct queue_entry *q; u32 len = (afl->fsrv.map_size >> 3); u32 i; u8 * temp_v = afl->map_tmp_buf; - if (afl->non_instrumented_mode || !afl->score_changed) { return; } - afl->score_changed = 0; memset(temp_v, 255, len); diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 9a82edeb..7215ecec 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -1378,10 +1378,9 @@ int main(int argc, char **argv_orig, char **envp) { u32 runs_in_current_cycle = (u32)-1; u32 prev_queued_paths = 0; + u8 skipped_fuzz; - while (1) { - - u8 skipped_fuzz; + while (likely(!afl->stop_soon)) { cull_queue(afl); @@ -1418,8 +1417,8 @@ int main(int argc, char **argv_orig, char **envp) { /* If we had a full queue cycle with no new finds, try recombination strategies next. 
*/ - if (afl->queued_paths == prev_queued && - (get_cur_time() - afl->start_time) >= 3600) { + if (unlikely(afl->queued_paths == prev_queued && + (get_cur_time() - afl->start_time) >= 3600)) { if (afl->use_splicing) { @@ -1534,46 +1533,49 @@ int main(int argc, char **argv_orig, char **envp) { } - if (likely(!afl->old_seed_selection)) { + ++runs_in_current_cycle; - ++runs_in_current_cycle; - if (unlikely(prev_queued_paths < afl->queued_paths)) { + do { - // we have new queue entries since the last run, recreate alias table - prev_queued_paths = afl->queued_paths; - create_alias_table(afl); + if (likely(!afl->old_seed_selection)) { - } + if (unlikely(prev_queued_paths < afl->queued_paths)) { - afl->current_entry = select_next_queue_entry(afl); - afl->queue_cur = afl->queue_buf[afl->current_entry]; + // we have new queue entries since the last run, recreate alias table + prev_queued_paths = afl->queued_paths; + create_alias_table(afl); - } + } - skipped_fuzz = fuzz_one(afl); + afl->current_entry = select_next_queue_entry(afl); + afl->queue_cur = afl->queue_buf[afl->current_entry]; - if (!skipped_fuzz && !afl->stop_soon && afl->sync_id) { + } - if (unlikely(afl->is_main_node)) { + skipped_fuzz = fuzz_one(afl); - if (!(sync_interval_cnt++ % (SYNC_INTERVAL / 3))) { sync_fuzzers(afl); } + if (unlikely(!afl->stop_soon && exit_1)) { afl->stop_soon = 2; } - } else { + if (unlikely(afl->old_seed_selection)) { - if (!(sync_interval_cnt++ % SYNC_INTERVAL)) { sync_fuzzers(afl); } + afl->queue_cur = afl->queue_cur->next; + ++afl->current_entry; } - } + } while (skipped_fuzz && afl->queue_cur && !afl->stop_soon); - if (!afl->stop_soon && exit_1) { afl->stop_soon = 2; } + if (!afl->stop_soon && afl->sync_id) { - if (afl->stop_soon) { break; } + if (unlikely(afl->is_main_node)) { - if (unlikely(afl->old_seed_selection)) { + if (!(sync_interval_cnt++ % (SYNC_INTERVAL / 3))) { sync_fuzzers(afl); } - afl->queue_cur = afl->queue_cur->next; - ++afl->current_entry; + } else { + + if (!(sync_interval_cnt++ % SYNC_INTERVAL)) { sync_fuzzers(afl); } + + } } diff --git a/src/afl-showmap.c b/src/afl-showmap.c index bd0d1a29..4b357254 100644 --- a/src/afl-showmap.c +++ b/src/afl-showmap.c @@ -209,10 +209,10 @@ static u32 write_results_to_file(afl_forkserver_t *fsrv, u8 *outfile) { if (!outfile) { FATAL("Output filename not set (Bug in AFL++?)"); } - if (cmin_mode && (fsrv->last_run_timed_out - || (!caa && child_crashed != cco))) { + if (cmin_mode && + (fsrv->last_run_timed_out || (!caa && child_crashed != cco))) { - return ret; + return ret; } @@ -298,7 +298,8 @@ static void showmap_run_target_forkserver(afl_forkserver_t *fsrv, u8 *mem, if (!quiet_mode) { SAYF(cRST "-- Program output ends --\n"); } - if (!fsrv->last_run_timed_out && !stop_soon && WIFSIGNALED(fsrv->child_status)) { + if (!fsrv->last_run_timed_out && !stop_soon && + WIFSIGNALED(fsrv->child_status)) { child_crashed = 1; @@ -1202,6 +1203,7 @@ int main(int argc, char **argv_orig, char **envp) { ret = child_crashed * 2 + fsrv->last_run_timed_out; } + if (fsrv->target_path) { ck_free(fsrv->target_path); } afl_fsrv_deinit(fsrv); -- cgit 1.4.1 From fe705bb9567341427ce1ea39d5fc6b19fdee1646 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Wed, 28 Oct 2020 14:32:53 +0100 Subject: expand havoc if not new findings in the last 5 seconds --- src/afl-fuzz-one.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index bf568c38..2e186b90 100644 --- 
a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1884,16 +1884,22 @@ havoc_stage: u32 r_max, r; + r_max = 15 + ((afl->extras_cnt + afl->a_extras_cnt) ? 2 : 0); + if (unlikely(afl->expand_havoc)) { /* add expensive havoc cases here, they are activated after a full cycle without finds happened */ - r_max = 16 + ((afl->extras_cnt + afl->a_extras_cnt) ? 2 : 0); + r_max += 1; - } else { + } + + if (unlikely(get_cur_time() - afl->last_path_time > 5000)) { + + /* add expensive havoc cases here if there is no findings in the last 5s */ - r_max = 15 + ((afl->extras_cnt + afl->a_extras_cnt) ? 2 : 0); + r_max += 1; } -- cgit 1.4.1 From b5686eb63e1fcd6dac49cc458c50e52b51709f8c Mon Sep 17 00:00:00 2001 From: van Hauser Date: Thu, 29 Oct 2020 00:05:28 +0100 Subject: fixes two huge bugs --- src/afl-fuzz-one.c | 5 +++-- src/afl-fuzz-run.c | 3 ++- src/afl-fuzz-stats.c | 3 ++- 3 files changed, 7 insertions(+), 4 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 35ff5466..0f3393d2 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1850,8 +1850,9 @@ havoc_stage: r_max += 1; } - - if (unlikely(get_cur_time() - afl->last_path_time > 5000)) { + + if (unlikely(get_cur_time() - afl->last_path_time > 5000 && + afl->ready_for_splicing_count > 1)) { /* add expensive havoc cases here if there is no findings in the last 5s */ diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index dfd3abfb..fb259b5d 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -587,9 +587,10 @@ void sync_fuzzers(afl_state_t *afl) { u8 entry[12]; sprintf(entry, "id:%06u", next_min_accept); + while (m < n) { - if (memcmp(namelist[m]->d_name, entry, 9)) { + if (strcmp(namelist[m]->d_name, entry)) { m++; diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index 6841eb88..321bbb35 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -1154,7 +1154,8 @@ void show_init_stats(afl_state_t *afl) { } else { - OKF("-t option specified. We'll use an exec timeout of %s ms.", afl->fsrv.exec_tmout); + ACTF("-t option specified. 
We'll use an exec timeout of %d ms.", + afl->fsrv.exec_tmout); } -- cgit 1.4.1 From 0fd98ae8b070b05a72b2c47a76f4ea145f9d51c2 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Sun, 1 Nov 2020 21:34:08 +0100 Subject: added mutation introspection make target --- GNUmakefile | 6 + README.md | 1 + docs/Changelog.md | 2 + include/afl-fuzz.h | 6 + include/alloc-inl.h | 36 ++ instrumentation/SanitizerCoveragePCGUARD.so.cc | 14 +- src/afl-fuzz-bitmap.c | 11 + src/afl-fuzz-extras.c | 16 +- src/afl-fuzz-one.c | 503 ++++++++++++++++++++++++- src/afl-fuzz.c | 17 + 10 files changed, 592 insertions(+), 20 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/GNUmakefile b/GNUmakefile index c8d155e4..764c9baa 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -110,6 +110,11 @@ ifdef PROFILING LDFLAGS += -pg endif +ifdef INTROSPECTION + $(info Compiling with introspection documentation) + CFLAGS_OPT += -DINTROSPECTION=1 +endif + ifneq "$(shell uname -m)" "x86_64" ifneq "$(patsubst i%86,i386,$(shell uname -m))" "i386" ifneq "$(shell uname -m)" "amd64" @@ -348,6 +353,7 @@ help: @echo ASAN_BUILD - compiles with memory sanitizer for debug purposes @echo DEBUG - no optimization, -ggdb3, all warnings and -Werror @echo PROFILING - compile afl-fuzz with profiling information + @echo INTROSPECTION - compile afl-fuzz with mutation introspection @echo NO_PYTHON - disable python support @echo NO_SPLICING - disables splicing mutation in afl-fuzz, not recommended for normal fuzzing @echo AFL_NO_X86 - if compiling on non-intel/amd platforms diff --git a/README.md b/README.md index 7c3b6ecf..d954a236 100644 --- a/README.md +++ b/README.md @@ -211,6 +211,7 @@ These build options exist: * ASAN_BUILD - compiles with memory sanitizer for debug purposes * DEBUG - no optimization, -ggdb3, all warnings and -Werror * PROFILING - compile with profiling information (gprof) +* INTROSPECTION - compile afl-fuzz with mutation introspection * NO_PYTHON - disable python support * NO_SPLICING - disables splicing mutation in afl-fuzz, not recommended for normal fuzzing * AFL_NO_X86 - if compiling on non-intel/amd platforms diff --git a/docs/Changelog.md b/docs/Changelog.md index 798a056f..f11a1178 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -35,6 +35,8 @@ sending a mail to . skipped. They are used for splicing though. 
- set the default power schedule to the superiour "seek" schedule - added NO_SPLICING compile option and makefile define + - added INTROSPECTION make target that writes all mutations to + out/NAME/introspection.txt - print special compile time options used in help output - instrumentation - We received an enhanced gcc_plugin module from AdaCore, thank you diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 85b31795..5ff7672b 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -717,6 +717,12 @@ typedef struct afl_state { * is too large) */ struct queue_entry **q_testcase_cache; +#ifdef INTROSPECTION + char mutation[8072]; + char m_tmp[4096]; + FILE *introspection_file; +#endif + } afl_state_t; struct custom_mutator { diff --git a/include/alloc-inl.h b/include/alloc-inl.h index 36e47810..d7aa51a7 100644 --- a/include/alloc-inl.h +++ b/include/alloc-inl.h @@ -708,6 +708,42 @@ static inline void *afl_realloc(void **buf, size_t size_needed) { } +/* afl_realloc_exact uses afl alloc buffers but sets it to a specific size */ + +static inline void *afl_realloc_exact(void **buf, size_t size_needed) { + + struct afl_alloc_buf *new_buf = NULL; + + size_t current_size = 0; + + if (likely(*buf)) { + + /* the size is always stored at buf - 1*size_t */ + new_buf = (struct afl_alloc_buf *)afl_alloc_bufptr(*buf); + current_size = new_buf->complete_size; + + } + + size_needed += AFL_ALLOC_SIZE_OFFSET; + + /* No need to realloc */ + if (unlikely(current_size == size_needed)) { return *buf; } + + /* alloc */ + new_buf = (struct afl_alloc_buf *)realloc(new_buf, size_needed); + if (unlikely(!new_buf)) { + + *buf = NULL; + return NULL; + + } + + new_buf->complete_size = size_needed; + *buf = (void *)(new_buf->buf); + return *buf; + +} + static inline void afl_free(void *buf) { if (buf) { free(afl_alloc_bufptr(buf)); } diff --git a/instrumentation/SanitizerCoveragePCGUARD.so.cc b/instrumentation/SanitizerCoveragePCGUARD.so.cc index 97e8d32b..2f87e4f9 100644 --- a/instrumentation/SanitizerCoveragePCGUARD.so.cc +++ b/instrumentation/SanitizerCoveragePCGUARD.so.cc @@ -247,13 +247,13 @@ SanitizerCoverageOptions OverrideFromCL(SanitizerCoverageOptions Options) { Options.CoverageType = SanitizerCoverageOptions::SCK_Edge; // std::max(Options.CoverageType, // CLOpts.CoverageType); - Options.IndirectCalls = true; // CLOpts.IndirectCalls; - Options.TraceCmp = false; //|= ClCMPTracing; - Options.TraceDiv = false; //|= ClDIVTracing; - Options.TraceGep = false; //|= ClGEPTracing; - Options.TracePC = false; //|= ClTracePC; - Options.TracePCGuard = true; // |= ClTracePCGuard; - Options.Inline8bitCounters = 0; //|= ClInline8bitCounters; + Options.IndirectCalls = true; // CLOpts.IndirectCalls; + Options.TraceCmp = false; //|= ClCMPTracing; + Options.TraceDiv = false; //|= ClDIVTracing; + Options.TraceGep = false; //|= ClGEPTracing; + Options.TracePC = false; //|= ClTracePC; + Options.TracePCGuard = true; // |= ClTracePCGuard; + Options.Inline8bitCounters = 0; //|= ClInline8bitCounters; // Options.InlineBoolFlag = 0; //|= ClInlineBoolFlag; Options.PCTable = false; //|= ClCreatePCTable; Options.NoPrune = false; //|= !ClPruneBlocks; diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c index 2653b9fd..735420c3 100644 --- a/src/afl-fuzz-bitmap.c +++ b/src/afl-fuzz-bitmap.c @@ -587,6 +587,11 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) { add_to_queue(afl, queue_fn, len, 0); +#ifdef INTROSPECTION + fprintf(afl->introspection_file, "QUEUE %s = %s\n", afl->mutation, + afl->queue_top->fname); 
+#endif + if (hnb == 2) { afl->queue_top->has_new_cov = 1; @@ -659,6 +664,9 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) { } ++afl->unique_tmouts; +#ifdef INTROSPECTION + fprintf(afl->introspection_file, "UNIQUE_TIMEOUT %s\n", afl->mutation); +#endif /* Before saving, we make sure that it's a genuine hang by re-running the target with a more generous timeout (unless the default timeout @@ -742,6 +750,9 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) { #endif /* ^!SIMPLE_FILES */ ++afl->unique_crashes; +#ifdef INTROSPECTION + fprintf(afl->introspection_file, "UNIQUE_CRASH %s\n", afl->mutation); +#endif if (unlikely(afl->infoexec)) { // if the user wants to be informed on new crashes - do that diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c index adec986e..171cce96 100644 --- a/src/afl-fuzz-extras.c +++ b/src/afl-fuzz-extras.c @@ -423,8 +423,8 @@ void dedup_extras(afl_state_t *afl) { } if (afl->extras_cnt != orig_cnt) - afl->extras = ck_realloc((void **)&afl->extras, - afl->extras_cnt * sizeof(struct extra_data)); + afl->extras = afl_realloc_exact( + (void **)&afl->extras, afl->extras_cnt * sizeof(struct extra_data)); } @@ -462,16 +462,8 @@ void add_extra(afl_state_t *afl, u8 *mem, u32 len) { } - if (afl->extras) { - - afl->extras = ck_realloc((void **)&afl->extras, - (afl->extras_cnt + 1) * sizeof(struct extra_data)); - - } else { - - afl->extras = ck_alloc((afl->extras_cnt + 1) * sizeof(struct extra_data)); - - } + afl->extras = afl_realloc((void **)&afl->extras, + (afl->extras_cnt + 1) * sizeof(struct extra_data)); if (unlikely(!afl->extras)) { PFATAL("alloc"); } diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 0f3393d2..5337b7f8 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -609,6 +609,11 @@ u8 fuzz_one_original(afl_state_t *afl) { FLIP_BIT(out_buf, afl->stage_cur); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT1 %u", + afl->queue_cur->fname, afl->stage_cur); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } FLIP_BIT(out_buf, afl->stage_cur); @@ -718,6 +723,11 @@ u8 fuzz_one_original(afl_state_t *afl) { FLIP_BIT(out_buf, afl->stage_cur); FLIP_BIT(out_buf, afl->stage_cur + 1); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT2 %u", + afl->queue_cur->fname, afl->stage_cur); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } FLIP_BIT(out_buf, afl->stage_cur); @@ -747,6 +757,11 @@ u8 fuzz_one_original(afl_state_t *afl) { FLIP_BIT(out_buf, afl->stage_cur + 2); FLIP_BIT(out_buf, afl->stage_cur + 3); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT4 %u", + afl->queue_cur->fname, afl->stage_cur); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } FLIP_BIT(out_buf, afl->stage_cur); @@ -802,6 +817,11 @@ u8 fuzz_one_original(afl_state_t *afl) { out_buf[afl->stage_cur] ^= 0xFF; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT8 %u", + afl->queue_cur->fname, afl->stage_cur); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } /* We also use this stage to pull off a simple trick: we identify @@ -889,6 +909,11 @@ u8 fuzz_one_original(afl_state_t *afl) { *(u16 *)(out_buf + i) ^= 0xFFFF; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT16 %u", + afl->queue_cur->fname, afl->stage_cur); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto 
abandon_entry; } ++afl->stage_cur; @@ -927,6 +952,11 @@ u8 fuzz_one_original(afl_state_t *afl) { *(u32 *)(out_buf + i) ^= 0xFFFFFFFF; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT32 %u", + afl->queue_cur->fname, afl->stage_cur); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -985,6 +1015,11 @@ skip_bitflip: afl->stage_cur_val = j; out_buf[i] = orig + j; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH8+ %u %u", + afl->queue_cur->fname, i, j); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -1001,6 +1036,11 @@ skip_bitflip: afl->stage_cur_val = -j; out_buf[i] = orig - j; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH8- %u %u", + afl->queue_cur->fname, i, j); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -1065,6 +1105,11 @@ skip_bitflip: afl->stage_cur_val = j; *(u16 *)(out_buf + i) = orig + j; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH16+ %u %u", + afl->queue_cur->fname, i, j); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -1079,6 +1124,11 @@ skip_bitflip: afl->stage_cur_val = -j; *(u16 *)(out_buf + i) = orig - j; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH16- %u %u", + afl->queue_cur->fname, i, j); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -1097,6 +1147,11 @@ skip_bitflip: afl->stage_cur_val = j; *(u16 *)(out_buf + i) = SWAP16(SWAP16(orig) + j); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH16+BE %u %u", + afl->queue_cur->fname, i, j); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -1111,6 +1166,11 @@ skip_bitflip: afl->stage_cur_val = -j; *(u16 *)(out_buf + i) = SWAP16(SWAP16(orig) - j); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH16-BE %u %u", + afl->queue_cur->fname, i, j); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -1174,6 +1234,11 @@ skip_bitflip: afl->stage_cur_val = j; *(u32 *)(out_buf + i) = orig + j; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH32+ %u %u", + afl->queue_cur->fname, i, j); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -1188,6 +1253,11 @@ skip_bitflip: afl->stage_cur_val = -j; *(u32 *)(out_buf + i) = orig - j; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH32- %u %u", + afl->queue_cur->fname, i, j); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -1206,6 +1276,11 @@ skip_bitflip: afl->stage_cur_val = j; *(u32 *)(out_buf + i) = SWAP32(SWAP32(orig) + j); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH32+BE %u %u", + afl->queue_cur->fname, i, j); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -1220,6 +1295,11 @@ skip_bitflip: afl->stage_cur_val = -j; *(u32 *)(out_buf + i) = SWAP32(SWAP32(orig) - j); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH32-BE %u %u", + afl->queue_cur->fname, i, j); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -1287,6 
+1367,11 @@ skip_arith: afl->stage_cur_val = interesting_8[j]; out_buf[i] = interesting_8[j]; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s INTERESTING8 %u %u", + afl->queue_cur->fname, i, j); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } out_buf[i] = orig; @@ -1342,6 +1427,11 @@ skip_arith: *(u16 *)(out_buf + i) = interesting_16[j]; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s INTERESTING16 %u %u", + afl->queue_cur->fname, i, j); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -1358,6 +1448,11 @@ skip_arith: afl->stage_val_type = STAGE_VAL_BE; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), + "%s INTERESTING16BE %u %u", afl->queue_cur->fname, i, j); +#endif + *(u16 *)(out_buf + i) = SWAP16(interesting_16[j]); if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -1421,6 +1516,11 @@ skip_arith: *(u32 *)(out_buf + i) = interesting_32[j]; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s INTERESTING32 %u %u", + afl->queue_cur->fname, i, j); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -1437,6 +1537,11 @@ skip_arith: afl->stage_val_type = STAGE_VAL_BE; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), + "%s INTERESTING32BE %u %u", afl->queue_cur->fname, i, j); +#endif + *(u32 *)(out_buf + i) = SWAP32(interesting_32[j]); if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -1510,6 +1615,12 @@ skip_interest: last_len = afl->extras[j].len; memcpy(out_buf + i, afl->extras[j].data, last_len); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), + "%s EXTRAS overwrite %u %u:%s", afl->queue_cur->fname, i, j, + afl->extras[j].data); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -1557,6 +1668,12 @@ skip_interest: /* Copy tail */ memcpy(ex_tmp + i + afl->extras[j].len, out_buf + i, len - i); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), + "%s EXTRAS insert %u %u:%s", afl->queue_cur->fname, i, j, + afl->extras[j].data); +#endif + if (common_fuzz_stuff(afl, ex_tmp, len + afl->extras[j].len)) { goto abandon_entry; @@ -1614,6 +1731,12 @@ skip_user_extras: last_len = afl->a_extras[j].len; memcpy(out_buf + i, afl->a_extras[j].data, last_len); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), + "%s AUTO_EXTRAS overwrite %u %u:%s", afl->queue_cur->fname, i, j, + afl->a_extras[j].data); +#endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -1675,7 +1798,7 @@ custom_mutator_stage: for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) { - struct queue_entry *target; + struct queue_entry *target = NULL; u32 tid; u8 * new_buf = NULL; u32 target_len = 0; @@ -1717,6 +1840,12 @@ custom_mutator_stage: if (mutated_size > 0) { +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s CUSTOM %s", + afl->queue_cur->fname, + target != NULL ? 
(char *)target->fname : "none"); +#endif + if (common_fuzz_stuff(afl, mutated_buf, (u32)mutated_size)) { goto abandon_entry; @@ -1866,6 +1995,11 @@ havoc_stage: afl->stage_cur_val = use_stacking; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s HAVOC %u", + afl->queue_cur->fname, use_stacking); +#endif + for (i = 0; i < use_stacking; ++i) { if (afl->custom_mutators_count) { @@ -1909,6 +2043,10 @@ havoc_stage: /* Flip a single bit somewhere. Spooky! */ +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " FLIP_BIT1"); + strcat(afl->mutation, afl->m_tmp); +#endif FLIP_BIT(out_buf, rand_below(afl, temp_len << 3)); break; @@ -1916,6 +2054,10 @@ havoc_stage: /* Set byte to interesting value. */ +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING8"); + strcat(afl->mutation, afl->m_tmp); +#endif out_buf[rand_below(afl, temp_len)] = interesting_8[rand_below(afl, sizeof(interesting_8))]; break; @@ -1928,11 +2070,19 @@ havoc_stage: if (rand_below(afl, 2)) { +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING16"); + strcat(afl->mutation, afl->m_tmp); +#endif *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) = interesting_16[rand_below(afl, sizeof(interesting_16) >> 1)]; } else { +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING16BE"); + strcat(afl->mutation, afl->m_tmp); +#endif *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) = SWAP16( interesting_16[rand_below(afl, sizeof(interesting_16) >> 1)]); @@ -1948,11 +2098,19 @@ havoc_stage: if (rand_below(afl, 2)) { +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING32"); + strcat(afl->mutation, afl->m_tmp); +#endif *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) = interesting_32[rand_below(afl, sizeof(interesting_32) >> 2)]; } else { +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING32BE"); + strcat(afl->mutation, afl->m_tmp); +#endif *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) = SWAP32( interesting_32[rand_below(afl, sizeof(interesting_32) >> 2)]); @@ -1964,6 +2122,10 @@ havoc_stage: /* Randomly subtract from byte. */ +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH8-"); + strcat(afl->mutation, afl->m_tmp); +#endif out_buf[rand_below(afl, temp_len)] -= 1 + rand_below(afl, ARITH_MAX); break; @@ -1971,6 +2133,10 @@ havoc_stage: /* Randomly add to byte. 
*/ +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH8+"); + strcat(afl->mutation, afl->m_tmp); +#endif out_buf[rand_below(afl, temp_len)] += 1 + rand_below(afl, ARITH_MAX); break; @@ -1984,6 +2150,10 @@ havoc_stage: u32 pos = rand_below(afl, temp_len - 1); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16-_%u", pos); + strcat(afl->mutation, afl->m_tmp); +#endif *(u16 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); } else { @@ -1991,6 +2161,11 @@ havoc_stage: u32 pos = rand_below(afl, temp_len - 1); u16 num = 1 + rand_below(afl, ARITH_MAX); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16-BE_%u_%u", pos, + num); + strcat(afl->mutation, afl->m_tmp); +#endif *(u16 *)(out_buf + pos) = SWAP16(SWAP16(*(u16 *)(out_buf + pos)) - num); @@ -2008,6 +2183,10 @@ havoc_stage: u32 pos = rand_below(afl, temp_len - 1); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16+_%u", pos); + strcat(afl->mutation, afl->m_tmp); +#endif *(u16 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); } else { @@ -2015,6 +2194,11 @@ havoc_stage: u32 pos = rand_below(afl, temp_len - 1); u16 num = 1 + rand_below(afl, ARITH_MAX); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16+BE_%u_%u", pos, + num); + strcat(afl->mutation, afl->m_tmp); +#endif *(u16 *)(out_buf + pos) = SWAP16(SWAP16(*(u16 *)(out_buf + pos)) + num); @@ -2032,6 +2216,10 @@ havoc_stage: u32 pos = rand_below(afl, temp_len - 3); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32-_%u", pos); + strcat(afl->mutation, afl->m_tmp); +#endif *(u32 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); } else { @@ -2039,6 +2227,11 @@ havoc_stage: u32 pos = rand_below(afl, temp_len - 3); u32 num = 1 + rand_below(afl, ARITH_MAX); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32-BE_%u_%u", pos, + num); + strcat(afl->mutation, afl->m_tmp); +#endif *(u32 *)(out_buf + pos) = SWAP32(SWAP32(*(u32 *)(out_buf + pos)) - num); @@ -2056,6 +2249,10 @@ havoc_stage: u32 pos = rand_below(afl, temp_len - 3); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32+_%u", pos); + strcat(afl->mutation, afl->m_tmp); +#endif *(u32 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); } else { @@ -2063,6 +2260,11 @@ havoc_stage: u32 pos = rand_below(afl, temp_len - 3); u32 num = 1 + rand_below(afl, ARITH_MAX); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32+BE_%u_%u", pos, + num); + strcat(afl->mutation, afl->m_tmp); +#endif *(u32 *)(out_buf + pos) = SWAP32(SWAP32(*(u32 *)(out_buf + pos)) + num); @@ -2076,6 +2278,10 @@ havoc_stage: why not. We use XOR with 1-255 to eliminate the possibility of a no-op. */ +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " RAND8"); + strcat(afl->mutation, afl->m_tmp); +#endif out_buf[rand_below(afl, temp_len)] ^= 1 + rand_below(afl, 255); break; @@ -2095,6 +2301,11 @@ havoc_stage: del_from = rand_below(afl, temp_len - del_len + 1); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " DEL_%u_%u", del_from, + del_len); + strcat(afl->mutation, afl->m_tmp); +#endif memmove(out_buf + del_from, out_buf + del_from + del_len, temp_len - del_from - del_len); @@ -2128,6 +2339,12 @@ havoc_stage: clone_to = rand_below(afl, temp_len); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " CLONE_%s_%u_%u_%u", + actually_clone ? 
"clone" : "insert", clone_from, clone_to, + clone_len); + strcat(afl->mutation, afl->m_tmp); +#endif new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), temp_len + clone_len); if (unlikely(!new_buf)) { PFATAL("alloc"); } @@ -2181,12 +2398,23 @@ havoc_stage: if (copy_from != copy_to) { +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " OVERWRITE_COPY_%u_%u_%u", copy_from, copy_to, + copy_len); + strcat(afl->mutation, afl->m_tmp); +#endif memmove(out_buf + copy_to, out_buf + copy_from, copy_len); } } else { +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " OVERWRITE_FIXED_%u_%u_%u", copy_from, copy_to, copy_len); + strcat(afl->mutation, afl->m_tmp); +#endif memset(out_buf + copy_to, rand_below(afl, 2) ? rand_below(afl, 256) : out_buf[rand_below(afl, temp_len)], @@ -2222,6 +2450,12 @@ havoc_stage: if ((s32)extra_len > temp_len) { break; } insert_at = rand_below(afl, temp_len - extra_len + 1); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " AUTO_EXTRA_OVERWRITE_%u_%u_%s", insert_at, extra_len, + afl->a_extras[use_extra].data); + strcat(afl->mutation, afl->m_tmp); +#endif memcpy(out_buf + insert_at, afl->a_extras[use_extra].data, extra_len); @@ -2236,6 +2470,12 @@ havoc_stage: if ((s32)extra_len > temp_len) { break; } insert_at = rand_below(afl, temp_len - extra_len + 1); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " EXTRA_OVERWRITE_%u_%u_%s", insert_at, extra_len, + afl->a_extras[use_extra].data); + strcat(afl->mutation, afl->m_tmp); +#endif memcpy(out_buf + insert_at, afl->extras[use_extra].data, extra_len); @@ -2258,12 +2498,23 @@ havoc_stage: use_extra = rand_below(afl, afl->a_extras_cnt); extra_len = afl->a_extras[use_extra].len; ptr = afl->a_extras[use_extra].data; +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " AUTO_EXTRA_INSERT_%u_%u_%s", insert_at, extra_len, + ptr); + strcat(afl->mutation, afl->m_tmp); +#endif } else { use_extra = rand_below(afl, afl->extras_cnt); extra_len = afl->extras[use_extra].len; ptr = afl->extras[use_extra].data; +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " EXTRA_INSERT_%u_%u_%s", insert_at, extra_len, ptr); + strcat(afl->mutation, afl->m_tmp); +#endif } @@ -2324,6 +2575,12 @@ havoc_stage: copy_from = rand_below(afl, new_len - copy_len + 1); copy_to = rand_below(afl, temp_len - copy_len + 1); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " SPLICE_OVERWRITE_%u_%u_%u_%s", copy_from, copy_to, + copy_len, target->fname); + strcat(afl->mutation, afl->m_tmp); +#endif memmove(out_buf + copy_to, new_buf + copy_from, copy_len); } else { @@ -2340,6 +2597,12 @@ havoc_stage: temp_len + clone_len + 1); if (unlikely(!temp_buf)) { PFATAL("alloc"); } +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " SPLICE_INSERT_%u_%u_%u_%s", clone_from, clone_to, + clone_len, target->fname); + strcat(afl->mutation, afl->m_tmp); +#endif /* Head */ memcpy(temp_buf, out_buf, clone_to); @@ -2755,6 +3018,10 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { FLIP_BIT(out_buf, afl->stage_cur); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT1 %u", + afl->queue_cur->fname, afl->stage_cur); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } FLIP_BIT(out_buf, afl->stage_cur); @@ -2864,6 +3131,10 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { FLIP_BIT(out_buf, afl->stage_cur); FLIP_BIT(out_buf, afl->stage_cur + 
1); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT2 %u", + afl->queue_cur->fname, afl->stage_cur); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } FLIP_BIT(out_buf, afl->stage_cur); @@ -2893,6 +3164,10 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { FLIP_BIT(out_buf, afl->stage_cur + 2); FLIP_BIT(out_buf, afl->stage_cur + 3); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT4 %u", + afl->queue_cur->fname, afl->stage_cur); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } FLIP_BIT(out_buf, afl->stage_cur); @@ -2948,6 +3223,10 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { out_buf[afl->stage_cur] ^= 0xFF; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT8 %u", + afl->queue_cur->fname, afl->stage_cur); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } /* We also use this stage to pull off a simple trick: we identify @@ -3035,6 +3314,10 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { *(u16 *)(out_buf + i) ^= 0xFFFF; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT16 %u", + afl->queue_cur->fname, afl->stage_cur); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3073,6 +3356,10 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { *(u32 *)(out_buf + i) ^= 0xFFFFFFFF; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT32 %u", + afl->queue_cur->fname, afl->stage_cur); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3131,6 +3418,10 @@ skip_bitflip: afl->stage_cur_val = j; out_buf[i] = orig + j; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH8+ %u %u", + afl->queue_cur->fname, i, j); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3147,6 +3438,10 @@ skip_bitflip: afl->stage_cur_val = -j; out_buf[i] = orig - j; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH8- %u %u", + afl->queue_cur->fname, i, j); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3211,6 +3506,10 @@ skip_bitflip: afl->stage_cur_val = j; *(u16 *)(out_buf + i) = orig + j; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH16+ %u %u", + afl->queue_cur->fname, i, j); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3225,6 +3524,10 @@ skip_bitflip: afl->stage_cur_val = -j; *(u16 *)(out_buf + i) = orig - j; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH16- %u %u", + afl->queue_cur->fname, i, j); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3243,6 +3546,10 @@ skip_bitflip: afl->stage_cur_val = j; *(u16 *)(out_buf + i) = SWAP16(SWAP16(orig) + j); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), + "%s MOPT_ARITH16+BE %u %u", afl->queue_cur->fname, i, j); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3257,6 +3564,10 @@ skip_bitflip: afl->stage_cur_val = -j; *(u16 *)(out_buf + i) = SWAP16(SWAP16(orig) - j); +#ifdef INTROSPECTION + 
snprintf(afl->mutation, sizeof(afl->mutation), + "%s MOPT_ARITH16-BE %u %u", afl->queue_cur->fname, i, j); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3320,6 +3631,10 @@ skip_bitflip: afl->stage_cur_val = j; *(u32 *)(out_buf + i) = orig + j; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH32+ %u %u", + afl->queue_cur->fname, i, j); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3334,6 +3649,10 @@ skip_bitflip: afl->stage_cur_val = -j; *(u32 *)(out_buf + i) = orig - j; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH32- %u %u", + afl->queue_cur->fname, i, j); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3352,6 +3671,10 @@ skip_bitflip: afl->stage_cur_val = j; *(u32 *)(out_buf + i) = SWAP32(SWAP32(orig) + j); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), + "%s MOPT_ARITH32+BE %u %u", afl->queue_cur->fname, i, j); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3366,6 +3689,10 @@ skip_bitflip: afl->stage_cur_val = -j; *(u32 *)(out_buf + i) = SWAP32(SWAP32(orig) - j); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), + "%s MOPT_ARITH32-BE %u %u", afl->queue_cur->fname, i, j); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3433,6 +3760,10 @@ skip_arith: afl->stage_cur_val = interesting_8[j]; out_buf[i] = interesting_8[j]; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), + "%s MOPT_INTERESTING8 %u %u", afl->queue_cur->fname, i, j); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } out_buf[i] = orig; @@ -3488,6 +3819,10 @@ skip_arith: *(u16 *)(out_buf + i) = interesting_16[j]; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), + "%s MOPT_INTERESTING16 %u %u", afl->queue_cur->fname, i, j); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3504,6 +3839,10 @@ skip_arith: afl->stage_val_type = STAGE_VAL_BE; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), + "%s MOPT_INTERESTING16BE %u %u", afl->queue_cur->fname, i, j); +#endif *(u16 *)(out_buf + i) = SWAP16(interesting_16[j]); if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3567,6 +3906,10 @@ skip_arith: *(u32 *)(out_buf + i) = interesting_32[j]; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), + "%s MOPT_INTERESTING32 %u %u", afl->queue_cur->fname, i, j); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3583,6 +3926,10 @@ skip_arith: afl->stage_val_type = STAGE_VAL_BE; +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), + "%s MOPT_INTERESTING32BE %u %u", afl->queue_cur->fname, i, j); +#endif *(u32 *)(out_buf + i) = SWAP32(interesting_32[j]); if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3656,6 +4003,11 @@ skip_interest: last_len = afl->extras[j].len; memcpy(out_buf + i, afl->extras[j].data, last_len); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), + "%s MOPT_EXTRAS overwrite %u %u:%s", afl->queue_cur->fname, i, j, + afl->extras[j].data); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3703,6 +4055,11 @@ skip_interest: 
/* Copy tail */ memcpy(ex_tmp + i + afl->extras[j].len, out_buf + i, len - i); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), + "%s MOPT_EXTRAS insert %u %u:%s", afl->queue_cur->fname, i, j, + afl->extras[j].data); +#endif if (common_fuzz_stuff(afl, ex_tmp, len + afl->extras[j].len)) { goto abandon_entry; @@ -3759,6 +4116,11 @@ skip_user_extras: last_len = afl->a_extras[j].len; memcpy(out_buf + i, afl->a_extras[j].data, last_len); +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), + "%s MOPT_AUTO_EXTRAS overwrite %u %u:%s", afl->queue_cur->fname, + i, j, afl->a_extras[j].data); +#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3885,6 +4247,11 @@ pacemaker_fuzzing: } +#ifdef INTROSPECTION + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_HAVOC %u", + afl->queue_cur->fname, use_stacking); +#endif + for (i = 0; i < use_stacking; ++i) { switch (select_algorithm(afl)) { @@ -3893,6 +4260,10 @@ pacemaker_fuzzing: /* Flip a single bit somewhere. Spooky! */ FLIP_BIT(out_buf, rand_below(afl, temp_len << 3)); MOpt_globals.cycles_v2[STAGE_FLIP1] += 1; +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " FLIP_BIT1"); + strcat(afl->mutation, afl->m_tmp); +#endif break; case 1: @@ -3901,6 +4272,10 @@ pacemaker_fuzzing: FLIP_BIT(out_buf, temp_len_puppet); FLIP_BIT(out_buf, temp_len_puppet + 1); MOpt_globals.cycles_v2[STAGE_FLIP2] += 1; +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " FLIP_BIT2"); + strcat(afl->mutation, afl->m_tmp); +#endif break; case 2: @@ -3911,24 +4286,40 @@ pacemaker_fuzzing: FLIP_BIT(out_buf, temp_len_puppet + 2); FLIP_BIT(out_buf, temp_len_puppet + 3); MOpt_globals.cycles_v2[STAGE_FLIP4] += 1; +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " FLIP_BIT4"); + strcat(afl->mutation, afl->m_tmp); +#endif break; case 3: if (temp_len < 4) { break; } out_buf[rand_below(afl, temp_len)] ^= 0xFF; MOpt_globals.cycles_v2[STAGE_FLIP8] += 1; +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " FLIP_BIT8"); + strcat(afl->mutation, afl->m_tmp); +#endif break; case 4: if (temp_len < 8) { break; } *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) ^= 0xFFFF; MOpt_globals.cycles_v2[STAGE_FLIP16] += 1; +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " FLIP_BIT16"); + strcat(afl->mutation, afl->m_tmp); +#endif break; case 5: if (temp_len < 8) { break; } *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) ^= 0xFFFFFFFF; MOpt_globals.cycles_v2[STAGE_FLIP32] += 1; +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " FLIP_BIT32"); + strcat(afl->mutation, afl->m_tmp); +#endif break; case 6: @@ -3937,6 +4328,10 @@ pacemaker_fuzzing: out_buf[rand_below(afl, temp_len)] += 1 + rand_below(afl, ARITH_MAX); MOpt_globals.cycles_v2[STAGE_ARITH8] += 1; +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH+-"); + strcat(afl->mutation, afl->m_tmp); +#endif break; case 7: @@ -3946,11 +4341,20 @@ pacemaker_fuzzing: u32 pos = rand_below(afl, temp_len - 1); *(u16 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16-%u", pos); + strcat(afl->mutation, afl->m_tmp); +#endif } else { u32 pos = rand_below(afl, temp_len - 1); u16 num = 1 + rand_below(afl, ARITH_MAX); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16BE-%u-%u", + pos, num); + strcat(afl->mutation, afl->m_tmp); +#endif *(u16 *)(out_buf + pos) = SWAP16(SWAP16(*(u16 
*)(out_buf + pos)) - num); @@ -3960,12 +4364,21 @@ pacemaker_fuzzing: if (rand_below(afl, 2)) { u32 pos = rand_below(afl, temp_len - 1); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16+%u", pos); + strcat(afl->mutation, afl->m_tmp); +#endif *(u16 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); } else { u32 pos = rand_below(afl, temp_len - 1); u16 num = 1 + rand_below(afl, ARITH_MAX); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16BE+%u-%u", + pos, num); + strcat(afl->mutation, afl->m_tmp); +#endif *(u16 *)(out_buf + pos) = SWAP16(SWAP16(*(u16 *)(out_buf + pos)) + num); @@ -3980,12 +4393,21 @@ pacemaker_fuzzing: if (rand_below(afl, 2)) { u32 pos = rand_below(afl, temp_len - 3); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32-%u", pos); + strcat(afl->mutation, afl->m_tmp); +#endif *(u32 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); } else { u32 pos = rand_below(afl, temp_len - 3); u32 num = 1 + rand_below(afl, ARITH_MAX); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32BE-%u-%u", + pos, num); + strcat(afl->mutation, afl->m_tmp); +#endif *(u32 *)(out_buf + pos) = SWAP32(SWAP32(*(u32 *)(out_buf + pos)) - num); @@ -3996,12 +4418,21 @@ pacemaker_fuzzing: if (rand_below(afl, 2)) { u32 pos = rand_below(afl, temp_len - 3); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32+%u", pos); + strcat(afl->mutation, afl->m_tmp); +#endif *(u32 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); } else { u32 pos = rand_below(afl, temp_len - 3); u32 num = 1 + rand_below(afl, ARITH_MAX); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32BE+%u-%u", + pos, num); + strcat(afl->mutation, afl->m_tmp); +#endif *(u32 *)(out_buf + pos) = SWAP32(SWAP32(*(u32 *)(out_buf + pos)) + num); @@ -4016,6 +4447,10 @@ pacemaker_fuzzing: out_buf[rand_below(afl, temp_len)] = interesting_8[rand_below(afl, sizeof(interesting_8))]; MOpt_globals.cycles_v2[STAGE_INTEREST8] += 1; +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING8"); + strcat(afl->mutation, afl->m_tmp); +#endif break; case 10: @@ -4023,12 +4458,20 @@ pacemaker_fuzzing: if (temp_len < 8) { break; } if (rand_below(afl, 2)) { +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING16"); + strcat(afl->mutation, afl->m_tmp); +#endif *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) = interesting_16[rand_below(afl, sizeof(interesting_16) >> 1)]; } else { +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING16BE"); + strcat(afl->mutation, afl->m_tmp); +#endif *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) = SWAP16(interesting_16[rand_below( afl, sizeof(interesting_16) >> 1)]); @@ -4045,12 +4488,20 @@ pacemaker_fuzzing: if (rand_below(afl, 2)) { +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING32"); + strcat(afl->mutation, afl->m_tmp); +#endif *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) = interesting_32[rand_below(afl, sizeof(interesting_32) >> 2)]; } else { +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING32BE"); + strcat(afl->mutation, afl->m_tmp); +#endif *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) = SWAP32(interesting_32[rand_below( afl, sizeof(interesting_32) >> 2)]); @@ -4068,6 +4519,10 @@ pacemaker_fuzzing: out_buf[rand_below(afl, temp_len)] ^= 1 + rand_below(afl, 255); MOpt_globals.cycles_v2[STAGE_RANDOMBYTE] += 1; +#ifdef INTROSPECTION + snprintf(afl->m_tmp, 
sizeof(afl->m_tmp), " RAND8"); + strcat(afl->mutation, afl->m_tmp); +#endif break; case 13: { @@ -4091,6 +4546,11 @@ pacemaker_fuzzing: temp_len -= del_len; MOpt_globals.cycles_v2[STAGE_DELETEBYTE] += 1; +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " DEL-%u%u", del_from, + del_len); + strcat(afl->mutation, afl->m_tmp); +#endif break; } @@ -4120,6 +4580,12 @@ pacemaker_fuzzing: clone_to = rand_below(afl, temp_len); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " CLONE_%s_%u_%u_%u", + actually_clone ? "clone" : "insert", clone_from, + clone_to, clone_len); + strcat(afl->mutation, afl->m_tmp); +#endif new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), temp_len + clone_len); if (unlikely(!new_buf)) { PFATAL("alloc"); } @@ -4175,12 +4641,24 @@ pacemaker_fuzzing: if (copy_from != copy_to) { +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " OVERWRITE_COPY_%u_%u_%u", copy_from, copy_to, + copy_len); + strcat(afl->mutation, afl->m_tmp); +#endif memmove(out_buf + copy_to, out_buf + copy_from, copy_len); } } else { +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " OVERWRITE_FIXED_%u_%u_%u", copy_from, copy_to, + copy_len); + strcat(afl->mutation, afl->m_tmp); +#endif memset(out_buf + copy_to, rand_below(afl, 2) ? rand_below(afl, 256) : out_buf[rand_below(afl, temp_len)], @@ -4212,6 +4690,12 @@ pacemaker_fuzzing: if (extra_len > (u32)temp_len) break; u32 insert_at = rand_below(afl, temp_len - extra_len + 1); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " AUTO_EXTRA_OVERWRITE_%u_%u_%s", insert_at, extra_len, + afl->a_extras[use_extra].data); + strcat(afl->mutation, afl->m_tmp); +#endif memcpy(out_buf + insert_at, afl->a_extras[use_extra].data, extra_len); @@ -4225,6 +4709,12 @@ pacemaker_fuzzing: if (extra_len > (u32)temp_len) break; u32 insert_at = rand_below(afl, temp_len - extra_len + 1); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " EXTRA_OVERWRITE_%u_%u_%s", insert_at, extra_len, + afl->a_extras[use_extra].data); + strcat(afl->mutation, afl->m_tmp); +#endif memcpy(out_buf + insert_at, afl->extras[use_extra].data, extra_len); @@ -4254,12 +4744,23 @@ pacemaker_fuzzing: use_extra = rand_below(afl, afl->a_extras_cnt); extra_len = afl->a_extras[use_extra].len; ptr = afl->a_extras[use_extra].data; +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " AUTO_EXTRA_INSERT_%u_%u_%s", insert_at, extra_len, + ptr); + strcat(afl->mutation, afl->m_tmp); +#endif } else { use_extra = rand_below(afl, afl->extras_cnt); extra_len = afl->extras[use_extra].len; ptr = afl->extras[use_extra].data; +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " EXTRA_INSERT_%u_%u_%s", insert_at, extra_len, ptr); + strcat(afl->mutation, afl->m_tmp); +#endif } diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index cad26841..575e6b74 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -236,6 +236,10 @@ static void usage(u8 *argv0, int more_help) { SAYF("Compiled with PROFILING\n\n"); #endif +#ifdef INTROSPECTION + SAYF("Compiled with INTROSPECTION\n\n"); +#endif + #ifdef _DEBUG SAYF("Compiled with _DEBUG\n\n"); #endif @@ -1462,6 +1466,19 @@ int main(int argc, char **argv_orig, char **envp) { u32 prev_queued_paths = 0; u8 skipped_fuzz; + #ifdef INTROSPECTION + char ifn[4096]; + snprintf(ifn, sizeof(ifn), "%s/introspection.txt", afl->out_dir); + if ((afl->introspection_file = fopen(ifn, "w")) == NULL) { + + PFATAL("could not create '%s'", ifn); + + } + + setvbuf(afl->introspection_file, NULL, 
_IONBF, 0); + OKF("Writing mutation introspection to '%s'", ifn); + #endif + while (likely(!afl->stop_soon)) { cull_queue(afl); -- cgit 1.4.1 From 416e01d3c6277988ed39e67f9404bd7a6034820b Mon Sep 17 00:00:00 2001 From: van Hauser Date: Mon, 2 Nov 2020 11:04:35 +0100 Subject: match mopt to havoc --- docs/Changelog.md | 6 +- include/afl-fuzz.h | 3 +- src/afl-fuzz-one.c | 209 ++++++++++++++++++++++++++++++++++++++++------------- 3 files changed, 164 insertions(+), 54 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/docs/Changelog.md b/docs/Changelog.md index f11a1178..50c1d48a 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -21,11 +21,11 @@ sending a mail to . a schedule performance score, which is much better that the previous walk the whole queue approach. Select the old mode with -Z (auto enabled with -M) - - statsd support by Edznux, thanks a lot! + - rpc.statsd support by Edznux, thanks a lot! - Marcel Boehme submitted a patch that improves all AFFast schedules :) - not specifying -M or -S will now auto-set "-S default" - reading testcases from -i now descends into subdirectories - - allow up to 4 -x command line options + - allow up to 4 times the -x command line option - loaded extras now have a duplicate protection - If test cases are too large we do a partial read on the maximum supported size @@ -33,7 +33,7 @@ sending a mail to . for fuzzing but still be used for splicing - crashing seeds are now not prohibiting a run anymore but are skipped. They are used for splicing though. - - set the default power schedule to the superiour "seek" schedule + - update MOpt for expanded havoc modes - added NO_SPLICING compile option and makefile define - added INTROSPECTION make target that writes all mutations to out/NAME/introspection.txt diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 5ff7672b..e59d5f90 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -231,7 +231,7 @@ enum { }; -#define operator_num 18 +#define operator_num 19 #define swarm_num 5 #define period_core 500000 @@ -247,6 +247,7 @@ enum { #define STAGE_OverWrite75 15 #define STAGE_OverWriteExtra 16 #define STAGE_InsertExtra 17 +#define STAGE_Splice 18 #define period_pilot 50000 enum { diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 5337b7f8..e4016773 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -29,11 +29,9 @@ /* MOpt */ -static int select_algorithm(afl_state_t *afl) { +static int select_algorithm(afl_state_t *afl, u32 max_algorithm) { - int i_puppet, j_puppet = 0, operator_number = operator_num; - - if (!afl->extras_cnt && !afl->a_extras_cnt) operator_number -= 2; + int i_puppet, j_puppet = 0, operator_number = max_algorithm; double range_sele = (double)afl->probability_now[afl->swarm_now][operator_number - 1]; @@ -502,6 +500,8 @@ u8 fuzz_one_original(afl_state_t *afl) { if (unlikely(!afl->non_instrumented_mode && !afl->queue_cur->trim_done && !afl->disable_trim)) { + u32 old_len = afl->queue_cur->len; + u8 res = trim_case(afl, afl->queue_cur, in_buf); orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur); @@ -524,6 +524,9 @@ u8 fuzz_one_original(afl_state_t *afl) { len = afl->queue_cur->len; + /* maybe current entry is not ready for splicing anymore */ + if (unlikely(len <= 4 && old_len > 4)) afl->ready_for_splicing_count--; + } memcpy(out_buf, in_buf, len); @@ -537,7 +540,7 @@ u8 fuzz_one_original(afl_state_t *afl) { else orig_perf = perf_score = calculate_score(afl, afl->queue_cur); - if (unlikely(perf_score == 0)) { goto abandon_entry; } + if 
(unlikely(perf_score <= 0)) { goto abandon_entry; } if (unlikely(afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized)) { @@ -1976,7 +1979,7 @@ havoc_stage: /* add expensive havoc cases here, they are activated after a full cycle without finds happened */ - r_max += 1; + r_max++; } @@ -1985,7 +1988,7 @@ havoc_stage: /* add expensive havoc cases here if there is no findings in the last 5s */ - r_max += 1; + r_max++; } @@ -2396,7 +2399,7 @@ havoc_stage: if (likely(rand_below(afl, 4))) { - if (copy_from != copy_to) { + if (likely(copy_from != copy_to)) { #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), @@ -2445,11 +2448,10 @@ havoc_stage: u32 use_extra = rand_below(afl, afl->a_extras_cnt); u32 extra_len = afl->a_extras[use_extra].len; - u32 insert_at; if ((s32)extra_len > temp_len) { break; } - insert_at = rand_below(afl, temp_len - extra_len + 1); + u32 insert_at = rand_below(afl, temp_len - extra_len + 1); #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), " AUTO_EXTRA_OVERWRITE_%u_%u_%s", insert_at, extra_len, @@ -2465,11 +2467,10 @@ havoc_stage: u32 use_extra = rand_below(afl, afl->extras_cnt); u32 extra_len = afl->extras[use_extra].len; - u32 insert_at; if ((s32)extra_len > temp_len) { break; } - insert_at = rand_below(afl, temp_len - extra_len + 1); + u32 insert_at = rand_below(afl, temp_len - extra_len + 1); #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), " EXTRA_OVERWRITE_%u_%u_%s", insert_at, extra_len, @@ -2816,7 +2817,8 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { possibly skip to them at the expense of already-fuzzed or non-favored cases. */ - if ((afl->queue_cur->was_fuzzed || !afl->queue_cur->favored) && + if (((afl->queue_cur->was_fuzzed > 0 || afl->queue_cur->fuzz_level > 0) || + !afl->queue_cur->favored) && rand_below(afl, 100) < SKIP_TO_NEW_PROB) { return 1; @@ -2831,13 +2833,14 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { The odds of skipping stuff are higher for already-fuzzed inputs and lower for never-fuzzed entries. */ - if (afl->queue_cycle > 1 && !afl->queue_cur->was_fuzzed) { + if (afl->queue_cycle > 1 && + (afl->queue_cur->fuzz_level == 0 || afl->queue_cur->was_fuzzed)) { - if (rand_below(afl, 100) < SKIP_NFAV_NEW_PROB) { return 1; } + if (likely(rand_below(afl, 100) < SKIP_NFAV_NEW_PROB)) { return 1; } } else { - if (rand_below(afl, 100) < SKIP_NFAV_OLD_PROB) { return 1; } + if (likely(rand_below(afl, 100) < SKIP_NFAV_OLD_PROB)) { return 1; } } @@ -2856,16 +2859,19 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { /* Map the test case into memory. 
*/ orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur); len = afl->queue_cur->len; + out_buf = afl_realloc(AFL_BUF_PARAM(out), len); if (unlikely(!out_buf)) { PFATAL("alloc"); } + afl->subseq_tmouts = 0; + afl->cur_depth = afl->queue_cur->depth; /******************************************* * CALIBRATION (only if failed earlier on) * *******************************************/ - if (afl->queue_cur->cal_failed) { + if (unlikely(afl->queue_cur->cal_failed)) { u8 res = FSRV_RUN_TMOUT; @@ -2897,7 +2903,8 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { * TRIMMING * ************/ - if (!afl->non_instrumented_mode && !afl->queue_cur->trim_done) { + if (unlikely(!afl->non_instrumented_mode && !afl->queue_cur->trim_done && + !afl->disable_trim)) { u32 old_len = afl->queue_cur->len; @@ -2939,6 +2946,8 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { else orig_perf = perf_score = calculate_score(afl, afl->queue_cur); + if (unlikely(perf_score <= 0)) { goto abandon_entry; } + if (unlikely(afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized)) { if (input_to_state_stage(afl, in_buf, out_buf, len, @@ -2968,8 +2977,8 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { this entry ourselves (was_fuzzed), or if it has gone through deterministic testing in earlier, resumed runs (passed_det). */ - if (afl->skip_deterministic || afl->queue_cur->was_fuzzed || - afl->queue_cur->passed_det) { + if (likely(afl->skip_deterministic || afl->queue_cur->was_fuzzed || + afl->queue_cur->passed_det)) { goto havoc_stage; @@ -2978,8 +2987,9 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { /* Skip deterministic fuzzing if exec path checksum puts this out of scope for this main instance. */ - if (afl->main_node_max && (afl->queue_cur->exec_cksum % afl->main_node_max) != - afl->main_node_id - 1) { + if (unlikely(afl->main_node_max && + (afl->queue_cur->exec_cksum % afl->main_node_max) != + afl->main_node_id - 1)) { goto havoc_stage; @@ -4008,6 +4018,7 @@ skip_interest: "%s MOPT_EXTRAS overwrite %u %u:%s", afl->queue_cur->fname, i, j, afl->extras[j].data); #endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -4060,6 +4071,7 @@ skip_interest: "%s MOPT_EXTRAS insert %u %u:%s", afl->queue_cur->fname, i, j, afl->extras[j].data); #endif + if (common_fuzz_stuff(afl, ex_tmp, len + afl->extras[j].len)) { goto abandon_entry; @@ -4099,7 +4111,8 @@ skip_user_extras: afl->stage_cur_byte = i; - for (j = 0; j < MIN(afl->a_extras_cnt, (u32)USE_AUTO_EXTRAS); ++j) { + u32 min_extra_len = MIN(afl->a_extras_cnt, (u32)USE_AUTO_EXTRAS); + for (j = 0; j < min_extra_len; ++j) { /* See the comment in the earlier code; extras are sorted by size. */ @@ -4121,6 +4134,7 @@ skip_user_extras: "%s MOPT_AUTO_EXTRAS overwrite %u %u:%s", afl->queue_cur->fname, i, j, afl->a_extras[j].data); #endif + if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -4234,6 +4248,19 @@ pacemaker_fuzzing: havoc_queued = afl->queued_paths; + u32 r_max; + + r_max = 15 + ((afl->extras_cnt + afl->a_extras_cnt) ? 
2 : 0); + + if (unlikely(afl->expand_havoc && afl->ready_for_splicing_count > 1)) { + + /* add expensive havoc cases here, they are activated after a full + cycle without finds happened */ + + ++r_max; + + } + for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) { @@ -4254,12 +4281,12 @@ pacemaker_fuzzing: for (i = 0; i < use_stacking; ++i) { - switch (select_algorithm(afl)) { + switch (select_algorithm(afl, r_max)) { case 0: /* Flip a single bit somewhere. Spooky! */ FLIP_BIT(out_buf, rand_below(afl, temp_len << 3)); - MOpt_globals.cycles_v2[STAGE_FLIP1] += 1; + MOpt_globals.cycles_v2[STAGE_FLIP1]++; #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), " FLIP_BIT1"); strcat(afl->mutation, afl->m_tmp); @@ -4271,7 +4298,7 @@ pacemaker_fuzzing: temp_len_puppet = rand_below(afl, (temp_len << 3) - 1); FLIP_BIT(out_buf, temp_len_puppet); FLIP_BIT(out_buf, temp_len_puppet + 1); - MOpt_globals.cycles_v2[STAGE_FLIP2] += 1; + MOpt_globals.cycles_v2[STAGE_FLIP2]++; #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), " FLIP_BIT2"); strcat(afl->mutation, afl->m_tmp); @@ -4285,7 +4312,7 @@ pacemaker_fuzzing: FLIP_BIT(out_buf, temp_len_puppet + 1); FLIP_BIT(out_buf, temp_len_puppet + 2); FLIP_BIT(out_buf, temp_len_puppet + 3); - MOpt_globals.cycles_v2[STAGE_FLIP4] += 1; + MOpt_globals.cycles_v2[STAGE_FLIP4]++; #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), " FLIP_BIT4"); strcat(afl->mutation, afl->m_tmp); @@ -4295,7 +4322,7 @@ pacemaker_fuzzing: case 3: if (temp_len < 4) { break; } out_buf[rand_below(afl, temp_len)] ^= 0xFF; - MOpt_globals.cycles_v2[STAGE_FLIP8] += 1; + MOpt_globals.cycles_v2[STAGE_FLIP8]++; #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), " FLIP_BIT8"); strcat(afl->mutation, afl->m_tmp); @@ -4305,7 +4332,7 @@ pacemaker_fuzzing: case 4: if (temp_len < 8) { break; } *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) ^= 0xFFFF; - MOpt_globals.cycles_v2[STAGE_FLIP16] += 1; + MOpt_globals.cycles_v2[STAGE_FLIP16]++; #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), " FLIP_BIT16"); strcat(afl->mutation, afl->m_tmp); @@ -4315,7 +4342,7 @@ pacemaker_fuzzing: case 5: if (temp_len < 8) { break; } *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) ^= 0xFFFFFFFF; - MOpt_globals.cycles_v2[STAGE_FLIP32] += 1; + MOpt_globals.cycles_v2[STAGE_FLIP32]++; #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), " FLIP_BIT32"); strcat(afl->mutation, afl->m_tmp); @@ -4327,7 +4354,7 @@ pacemaker_fuzzing: 1 + rand_below(afl, ARITH_MAX); out_buf[rand_below(afl, temp_len)] += 1 + rand_below(afl, ARITH_MAX); - MOpt_globals.cycles_v2[STAGE_ARITH8] += 1; + MOpt_globals.cycles_v2[STAGE_ARITH8]++; #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH+-"); strcat(afl->mutation, afl->m_tmp); @@ -4384,7 +4411,7 @@ pacemaker_fuzzing: } - MOpt_globals.cycles_v2[STAGE_ARITH16] += 1; + MOpt_globals.cycles_v2[STAGE_ARITH16]++; break; case 8: @@ -4438,7 +4465,7 @@ pacemaker_fuzzing: } - MOpt_globals.cycles_v2[STAGE_ARITH32] += 1; + MOpt_globals.cycles_v2[STAGE_ARITH32]++; break; case 9: @@ -4446,7 +4473,7 @@ pacemaker_fuzzing: if (temp_len < 4) { break; } out_buf[rand_below(afl, temp_len)] = interesting_8[rand_below(afl, sizeof(interesting_8))]; - MOpt_globals.cycles_v2[STAGE_INTEREST8] += 1; + MOpt_globals.cycles_v2[STAGE_INTEREST8]++; #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING8"); strcat(afl->mutation, afl->m_tmp); @@ -4478,7 +4505,7 @@ pacemaker_fuzzing: } - 
MOpt_globals.cycles_v2[STAGE_INTEREST16] += 1; + MOpt_globals.cycles_v2[STAGE_INTEREST16]++; break; case 11: @@ -4508,7 +4535,7 @@ pacemaker_fuzzing: } - MOpt_globals.cycles_v2[STAGE_INTEREST32] += 1; + MOpt_globals.cycles_v2[STAGE_INTEREST32]++; break; case 12: @@ -4518,7 +4545,7 @@ pacemaker_fuzzing: possibility of a no-op. */ out_buf[rand_below(afl, temp_len)] ^= 1 + rand_below(afl, 255); - MOpt_globals.cycles_v2[STAGE_RANDOMBYTE] += 1; + MOpt_globals.cycles_v2[STAGE_RANDOMBYTE]++; #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), " RAND8"); strcat(afl->mutation, afl->m_tmp); @@ -4541,16 +4568,16 @@ pacemaker_fuzzing: del_from = rand_below(afl, temp_len - del_len + 1); - memmove(out_buf + del_from, out_buf + del_from + del_len, - temp_len - del_from - del_len); - - temp_len -= del_len; - MOpt_globals.cycles_v2[STAGE_DELETEBYTE] += 1; #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), " DEL-%u%u", del_from, del_len); strcat(afl->mutation, afl->m_tmp); #endif + memmove(out_buf + del_from, out_buf + del_from + del_len, + temp_len - del_from - del_len); + + temp_len -= del_len; + MOpt_globals.cycles_v2[STAGE_DELETEBYTE]++; break; } @@ -4566,7 +4593,7 @@ pacemaker_fuzzing: u32 clone_from, clone_to, clone_len; u8 *new_buf; - if (actually_clone) { + if (likely(actually_clone)) { clone_len = choose_block_len(afl, temp_len); clone_from = rand_below(afl, temp_len - clone_len + 1); @@ -4617,7 +4644,7 @@ pacemaker_fuzzing: out_buf = new_buf; afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); temp_len += clone_len; - MOpt_globals.cycles_v2[STAGE_Clone75] += 1; + MOpt_globals.cycles_v2[STAGE_Clone75]++; } @@ -4637,9 +4664,9 @@ pacemaker_fuzzing: copy_from = rand_below(afl, temp_len - copy_len + 1); copy_to = rand_below(afl, temp_len - copy_len + 1); - if (rand_below(afl, 4)) { + if (likely(rand_below(afl, 4))) { - if (copy_from != copy_to) { + if (likely(copy_from != copy_to)) { #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), @@ -4666,7 +4693,7 @@ pacemaker_fuzzing: } - MOpt_globals.cycles_v2[STAGE_OverWrite75] += 1; + MOpt_globals.cycles_v2[STAGE_OverWrite75]++; break; } /* case 15 */ @@ -4721,7 +4748,7 @@ pacemaker_fuzzing: } afl->stage_cycles_puppet_v2[afl->swarm_now] - [STAGE_OverWriteExtra] += 1; + [STAGE_OverWriteExtra]++; break; @@ -4783,11 +4810,93 @@ pacemaker_fuzzing: } + default: { + + if (unlikely(afl->ready_for_splicing_count < 2)) break; + + u32 tid; + do { + + tid = rand_below(afl, afl->queued_paths); + + } while (tid == afl->current_entry || + + afl->queue_buf[tid]->len < 4); + + /* Get the testcase for splicing. 
*/ + struct queue_entry *target = afl->queue_buf[tid]; + u32 new_len = target->len; + u8 * new_buf = queue_testcase_get(afl, target); + + if ((temp_len >= 2 && rand_below(afl, 2)) || + temp_len + HAVOC_BLK_XL >= MAX_FILE) { + + /* overwrite mode */ + + u32 copy_from, copy_to, copy_len; + + copy_len = choose_block_len(afl, new_len - 1); + if ((s32)copy_len > temp_len) copy_len = temp_len; + + copy_from = rand_below(afl, new_len - copy_len + 1); + copy_to = rand_below(afl, temp_len - copy_len + 1); + +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " SPLICE_OVERWRITE_%u_%u_%u_%s", copy_from, copy_to, + copy_len, target->fname); + strcat(afl->mutation, afl->m_tmp); +#endif + memmove(out_buf + copy_to, new_buf + copy_from, copy_len); + + } else { + + /* insert mode */ + + u32 clone_from, clone_to, clone_len; + + clone_len = choose_block_len(afl, new_len); + clone_from = rand_below(afl, new_len - clone_len + 1); + clone_to = rand_below(afl, temp_len + 1); + + u8 *temp_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), + temp_len + clone_len + 1); + if (unlikely(!temp_buf)) { PFATAL("alloc"); } + +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " SPLICE_INSERT_%u_%u_%u_%s", clone_from, clone_to, + clone_len, target->fname); + strcat(afl->mutation, afl->m_tmp); +#endif + /* Head */ + + memcpy(temp_buf, out_buf, clone_to); + + /* Inserted part */ + + memcpy(temp_buf + clone_to, new_buf + clone_from, clone_len); + + /* Tail */ + memcpy(temp_buf + clone_to + clone_len, out_buf + clone_to, + temp_len - clone_to); + + out_buf = temp_buf; + afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); + temp_len += clone_len; + + } + + afl->stage_cycles_puppet_v2[afl->swarm_now][STAGE_Splice]++; + break; + + } // end of default: + } /* switch select_algorithm() */ } /* for i=0; i < use_stacking */ - *MOpt_globals.pTime += 1; + ++*MOpt_globals.pTime; u64 temp_total_found = afl->queued_paths + afl->unique_crashes; @@ -5138,7 +5247,7 @@ u8 pilot_fuzzing(afl_state_t *afl) { void pso_updating(afl_state_t *afl) { - afl->g_now += 1; + afl->g_now++; if (afl->g_now > afl->g_max) { afl->g_now = 0; } afl->w_now = (afl->w_init - afl->w_end) * (afl->g_max - afl->g_now) / (afl->g_max) + -- cgit 1.4.1 From a728e8f9a5518017cde12bdb6ba795a6bcb47b0a Mon Sep 17 00:00:00 2001 From: van Hauser Date: Fri, 6 Nov 2020 16:42:02 +0100 Subject: better scriptable output from introspection --- README.md | 2 +- src/afl-fuzz-one.c | 191 +++++++++++++++++++++++++---------------------------- 2 files changed, 91 insertions(+), 102 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/README.md b/README.md index f955aedd..d7c5694e 100644 --- a/README.md +++ b/README.md @@ -69,7 +69,7 @@ behaviours and defaults: ## Important features of afl++ - afl++ supports llvm up to version 12, very fast binary fuzzing with QEMU 3.1 + afl++ supports llvm up to version 12, very fast binary fuzzing with QEMU 5.1 with laf-intel and redqueen, unicorn mode, gcc plugin, full *BSD, Solaris and Android support and much, much, much more. 
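The diff below reworks the INTROSPECTION format strings so that each log entry becomes a single whitespace-separated record: the first field is the queue file, the second is the stage plus its parameter joined by '-' (e.g. HAVOC-12), and every further field names one stacked mutation with its arguments appended after '-' or '_' separators instead of spaces, with raw extras data no longer embedded. A minimal post-processing sketch follows; it is a hypothetical helper, not shipped with afl++, and it assumes exactly this layout of out/NAME/introspection.txt when tallying how often each stacked havoc operator fired.

/* mut_stats.c - hypothetical helper, not part of afl++.
   Counts how often each stacked havoc operator appears in an
   introspection.txt written with the dash-separated format below.
   Assumed record layout:
   "<queue file> <STAGE>-<n> <OP[-args]> <OP[-args]> ..."        */

#include <stdio.h>
#include <string.h>

#define MAX_OPS 64
#define MAX_LINE 65536

static struct { char name[32]; unsigned long hits; } ops[MAX_OPS];
static int n_ops;

int main(int argc, char **argv) {

  if (argc != 2) {
    fprintf(stderr, "usage: %s introspection.txt\n", argv[0]);
    return 1;
  }

  FILE *f = fopen(argv[1], "r");
  if (!f) { perror(argv[1]); return 1; }

  static char line[MAX_LINE];

  while (fgets(line, sizeof(line), f)) {

    if (!strtok(line, " \t\n")) continue;   /* queue file name      */
    if (!strtok(NULL, " \t\n")) continue;   /* stage, e.g. HAVOC-12 */

    char *tok;
    while ((tok = strtok(NULL, " \t\n"))) { /* one stacked operator */

      char  *sep = strchr(tok, '-');        /* cut the "-<args>" tail */
      size_t len = sep ? (size_t)(sep - tok) : strlen(tok);
      if (!len || len >= sizeof(ops[0].name)) continue;

      int i;
      for (i = 0; i < n_ops; i++)
        if (!strncmp(ops[i].name, tok, len) && !ops[i].name[len]) break;

      if (i == n_ops) {                     /* first time we see this op */
        if (n_ops == MAX_OPS) continue;
        memcpy(ops[n_ops].name, tok, len);
        ops[n_ops].name[len] = 0;
        n_ops++;
      }

      ops[i].hits++;
    }

  }

  fclose(f);

  for (int i = 0; i < n_ops; i++)
    printf("%10lu %s\n", ops[i].hits, ops[i].name);

  return 0;
}

Because the operator arguments (offsets, lengths) stay attached after the first separator, the same tokenization can feed more detailed statistics, for example per-operator position histograms, without any further changes to the log format.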
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index e4016773..91bbced6 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -613,7 +613,7 @@ u8 fuzz_one_original(afl_state_t *afl) { FLIP_BIT(out_buf, afl->stage_cur); #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT1 %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT1-%u", afl->queue_cur->fname, afl->stage_cur); #endif @@ -727,7 +727,7 @@ u8 fuzz_one_original(afl_state_t *afl) { FLIP_BIT(out_buf, afl->stage_cur + 1); #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT2 %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT2-%u", afl->queue_cur->fname, afl->stage_cur); #endif @@ -761,7 +761,7 @@ u8 fuzz_one_original(afl_state_t *afl) { FLIP_BIT(out_buf, afl->stage_cur + 3); #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT4 %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT4-%u", afl->queue_cur->fname, afl->stage_cur); #endif @@ -821,7 +821,7 @@ u8 fuzz_one_original(afl_state_t *afl) { out_buf[afl->stage_cur] ^= 0xFF; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT8 %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT8-%u", afl->queue_cur->fname, afl->stage_cur); #endif @@ -913,7 +913,7 @@ u8 fuzz_one_original(afl_state_t *afl) { *(u16 *)(out_buf + i) ^= 0xFFFF; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT16 %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT16-%u", afl->queue_cur->fname, afl->stage_cur); #endif @@ -956,7 +956,7 @@ u8 fuzz_one_original(afl_state_t *afl) { *(u32 *)(out_buf + i) ^= 0xFFFFFFFF; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT32 %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s FLIP_BIT32-%u", afl->queue_cur->fname, afl->stage_cur); #endif @@ -1019,7 +1019,7 @@ skip_bitflip: out_buf[i] = orig + j; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH8+ %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH8+-%u-%u", afl->queue_cur->fname, i, j); #endif @@ -1040,7 +1040,7 @@ skip_bitflip: out_buf[i] = orig - j; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH8- %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH8--%u-%u", afl->queue_cur->fname, i, j); #endif @@ -1109,7 +1109,7 @@ skip_bitflip: *(u16 *)(out_buf + i) = orig + j; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH16+ %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH16+-%u-%u", afl->queue_cur->fname, i, j); #endif @@ -1128,7 +1128,7 @@ skip_bitflip: *(u16 *)(out_buf + i) = orig - j; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH16- %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH16--%u-%u", afl->queue_cur->fname, i, j); #endif @@ -1151,7 +1151,7 @@ skip_bitflip: *(u16 *)(out_buf + i) = SWAP16(SWAP16(orig) + j); #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH16+BE %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH16+BE-%u-%u", afl->queue_cur->fname, i, j); #endif @@ -1170,7 +1170,7 @@ skip_bitflip: *(u16 *)(out_buf + i) = SWAP16(SWAP16(orig) - j); #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH16-BE %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH16_BE-%u-%u", afl->queue_cur->fname, 
i, j); #endif @@ -1238,7 +1238,7 @@ skip_bitflip: *(u32 *)(out_buf + i) = orig + j; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH32+ %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH32+-%u-%u", afl->queue_cur->fname, i, j); #endif @@ -1257,7 +1257,7 @@ skip_bitflip: *(u32 *)(out_buf + i) = orig - j; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH32- %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH32_-%u-%u", afl->queue_cur->fname, i, j); #endif @@ -1280,7 +1280,7 @@ skip_bitflip: *(u32 *)(out_buf + i) = SWAP32(SWAP32(orig) + j); #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH32+BE %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH32+BE-%u-%u", afl->queue_cur->fname, i, j); #endif @@ -1299,7 +1299,7 @@ skip_bitflip: *(u32 *)(out_buf + i) = SWAP32(SWAP32(orig) - j); #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH32-BE %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s ARITH32_BE-%u-%u", afl->queue_cur->fname, i, j); #endif @@ -1371,7 +1371,7 @@ skip_arith: out_buf[i] = interesting_8[j]; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s INTERESTING8 %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s INTERESTING8_%u_%u", afl->queue_cur->fname, i, j); #endif @@ -1431,7 +1431,7 @@ skip_arith: *(u16 *)(out_buf + i) = interesting_16[j]; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s INTERESTING16 %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s INTERESTING16_%u_%u", afl->queue_cur->fname, i, j); #endif @@ -1453,7 +1453,7 @@ skip_arith: #ifdef INTROSPECTION snprintf(afl->mutation, sizeof(afl->mutation), - "%s INTERESTING16BE %u %u", afl->queue_cur->fname, i, j); + "%s INTERESTING16BE_%u_%u", afl->queue_cur->fname, i, j); #endif *(u16 *)(out_buf + i) = SWAP16(interesting_16[j]); @@ -1520,7 +1520,7 @@ skip_arith: *(u32 *)(out_buf + i) = interesting_32[j]; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s INTERESTING32 %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s INTERESTING32_%u_%u", afl->queue_cur->fname, i, j); #endif @@ -1542,7 +1542,7 @@ skip_arith: #ifdef INTROSPECTION snprintf(afl->mutation, sizeof(afl->mutation), - "%s INTERESTING32BE %u %u", afl->queue_cur->fname, i, j); + "%s INTERESTING32BE_%u_%u", afl->queue_cur->fname, i, j); #endif *(u32 *)(out_buf + i) = SWAP32(interesting_32[j]); @@ -1620,8 +1620,7 @@ skip_interest: #ifdef INTROSPECTION snprintf(afl->mutation, sizeof(afl->mutation), - "%s EXTRAS overwrite %u %u:%s", afl->queue_cur->fname, i, j, - afl->extras[j].data); + "%s EXTRAS_overwrite-%u-%u", afl->queue_cur->fname, i, j); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -1672,9 +1671,8 @@ skip_interest: memcpy(ex_tmp + i + afl->extras[j].len, out_buf + i, len - i); #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), - "%s EXTRAS insert %u %u:%s", afl->queue_cur->fname, i, j, - afl->extras[j].data); + snprintf(afl->mutation, sizeof(afl->mutation), "%s EXTRAS_insert-%u-%u", + afl->queue_cur->fname, i, j); #endif if (common_fuzz_stuff(afl, ex_tmp, len + afl->extras[j].len)) { @@ -1736,8 +1734,7 @@ skip_user_extras: #ifdef INTROSPECTION snprintf(afl->mutation, sizeof(afl->mutation), - "%s AUTO_EXTRAS overwrite %u %u:%s", afl->queue_cur->fname, i, j, - afl->a_extras[j].data); + "%s AUTO_EXTRAS_overwrite-%u-%u", afl->queue_cur->fname, i, j); 
#endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -1844,7 +1841,7 @@ custom_mutator_stage: if (mutated_size > 0) { #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s CUSTOM %s", + snprintf(afl->mutation, sizeof(afl->mutation), "%s CUSTOM-%s", afl->queue_cur->fname, target != NULL ? (char *)target->fname : "none"); #endif @@ -1999,7 +1996,7 @@ havoc_stage: afl->stage_cur_val = use_stacking; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s HAVOC %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s HAVOC-%u", afl->queue_cur->fname, use_stacking); #endif @@ -2126,7 +2123,7 @@ havoc_stage: /* Randomly subtract from byte. */ #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH8-"); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH8_"); strcat(afl->mutation, afl->m_tmp); #endif out_buf[rand_below(afl, temp_len)] -= 1 + rand_below(afl, ARITH_MAX); @@ -2154,7 +2151,7 @@ havoc_stage: u32 pos = rand_below(afl, temp_len - 1); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16-_%u", pos); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16_-%u", pos); strcat(afl->mutation, afl->m_tmp); #endif *(u16 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); @@ -2165,7 +2162,7 @@ havoc_stage: u16 num = 1 + rand_below(afl, ARITH_MAX); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16-BE_%u_%u", pos, + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16_BE-%u_%u", pos, num); strcat(afl->mutation, afl->m_tmp); #endif @@ -2187,7 +2184,7 @@ havoc_stage: u32 pos = rand_below(afl, temp_len - 1); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16+_%u", pos); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16+-%u", pos); strcat(afl->mutation, afl->m_tmp); #endif *(u16 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); @@ -2198,7 +2195,7 @@ havoc_stage: u16 num = 1 + rand_below(afl, ARITH_MAX); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16+BE_%u_%u", pos, + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16+BE-%u_%u", pos, num); strcat(afl->mutation, afl->m_tmp); #endif @@ -2220,7 +2217,7 @@ havoc_stage: u32 pos = rand_below(afl, temp_len - 3); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32-_%u", pos); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32_-%u", pos); strcat(afl->mutation, afl->m_tmp); #endif *(u32 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); @@ -2231,7 +2228,7 @@ havoc_stage: u32 num = 1 + rand_below(afl, ARITH_MAX); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32-BE_%u_%u", pos, + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32_BE-%u-%u", pos, num); strcat(afl->mutation, afl->m_tmp); #endif @@ -2253,7 +2250,7 @@ havoc_stage: u32 pos = rand_below(afl, temp_len - 3); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32+_%u", pos); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32+-%u", pos); strcat(afl->mutation, afl->m_tmp); #endif *(u32 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); @@ -2264,7 +2261,7 @@ havoc_stage: u32 num = 1 + rand_below(afl, ARITH_MAX); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32+BE_%u_%u", pos, + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32+BE-%u-%u", pos, num); strcat(afl->mutation, afl->m_tmp); #endif @@ -2305,7 +2302,7 @@ havoc_stage: del_from = rand_below(afl, temp_len - del_len + 1); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " 
DEL_%u_%u", del_from, + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " DEL-%u-%u", del_from, del_len); strcat(afl->mutation, afl->m_tmp); #endif @@ -2343,7 +2340,7 @@ havoc_stage: clone_to = rand_below(afl, temp_len); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " CLONE_%s_%u_%u_%u", + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " CLONE-%s-%u-%u-%u", actually_clone ? "clone" : "insert", clone_from, clone_to, clone_len); strcat(afl->mutation, afl->m_tmp); @@ -2403,7 +2400,7 @@ havoc_stage: #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " OVERWRITE_COPY_%u_%u_%u", copy_from, copy_to, + " OVERWRITE_COPY-%u-%u-%u", copy_from, copy_to, copy_len); strcat(afl->mutation, afl->m_tmp); #endif @@ -2415,7 +2412,7 @@ havoc_stage: #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " OVERWRITE_FIXED_%u_%u_%u", copy_from, copy_to, copy_len); + " OVERWRITE_FIXED-%u-%u-%u", copy_from, copy_to, copy_len); strcat(afl->mutation, afl->m_tmp); #endif memset(out_buf + copy_to, @@ -2454,8 +2451,7 @@ havoc_stage: u32 insert_at = rand_below(afl, temp_len - extra_len + 1); #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " AUTO_EXTRA_OVERWRITE_%u_%u_%s", insert_at, extra_len, - afl->a_extras[use_extra].data); + " AUTO_EXTRA_OVERWRITE-%u-%u", insert_at, extra_len); strcat(afl->mutation, afl->m_tmp); #endif memcpy(out_buf + insert_at, afl->a_extras[use_extra].data, @@ -2473,8 +2469,7 @@ havoc_stage: u32 insert_at = rand_below(afl, temp_len - extra_len + 1); #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " EXTRA_OVERWRITE_%u_%u_%s", insert_at, extra_len, - afl->a_extras[use_extra].data); + " EXTRA_OVERWRITE-%u-%u", insert_at, extra_len); strcat(afl->mutation, afl->m_tmp); #endif memcpy(out_buf + insert_at, afl->extras[use_extra].data, @@ -2501,8 +2496,7 @@ havoc_stage: ptr = afl->a_extras[use_extra].data; #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " AUTO_EXTRA_INSERT_%u_%u_%s", insert_at, extra_len, - ptr); + " AUTO_EXTRA_INSERT-%u-%u", insert_at, extra_len); strcat(afl->mutation, afl->m_tmp); #endif @@ -2512,8 +2506,8 @@ havoc_stage: extra_len = afl->extras[use_extra].len; ptr = afl->extras[use_extra].data; #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " EXTRA_INSERT_%u_%u_%s", insert_at, extra_len, ptr); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " EXTRA_INSERT-%u-%u", + insert_at, extra_len); strcat(afl->mutation, afl->m_tmp); #endif @@ -2578,7 +2572,7 @@ havoc_stage: #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " SPLICE_OVERWRITE_%u_%u_%u_%s", copy_from, copy_to, + " SPLICE_OVERWRITE-%u-%u-%u-%s", copy_from, copy_to, copy_len, target->fname); strcat(afl->mutation, afl->m_tmp); #endif @@ -2600,7 +2594,7 @@ havoc_stage: #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " SPLICE_INSERT_%u_%u_%u_%s", clone_from, clone_to, + " SPLICE_INSERT-%u-%u-%u-%s", clone_from, clone_to, clone_len, target->fname); strcat(afl->mutation, afl->m_tmp); #endif @@ -3029,7 +3023,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { FLIP_BIT(out_buf, afl->stage_cur); #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT1 %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT1-%u", afl->queue_cur->fname, afl->stage_cur); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -3142,7 +3136,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { FLIP_BIT(out_buf, 
afl->stage_cur + 1); #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT2 %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT2-%u", afl->queue_cur->fname, afl->stage_cur); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -3175,7 +3169,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { FLIP_BIT(out_buf, afl->stage_cur + 3); #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT4 %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT4-%u", afl->queue_cur->fname, afl->stage_cur); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -3234,7 +3228,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { out_buf[afl->stage_cur] ^= 0xFF; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT8 %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT8-%u", afl->queue_cur->fname, afl->stage_cur); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -3325,7 +3319,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { *(u16 *)(out_buf + i) ^= 0xFFFF; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT16 %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT16-%u", afl->queue_cur->fname, afl->stage_cur); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -3367,7 +3361,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { *(u32 *)(out_buf + i) ^= 0xFFFFFFFF; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT32 %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_FLIP_BIT32-%u", afl->queue_cur->fname, afl->stage_cur); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -3429,7 +3423,7 @@ skip_bitflip: out_buf[i] = orig + j; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH8+ %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH8+-%u-%u", afl->queue_cur->fname, i, j); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -3449,7 +3443,7 @@ skip_bitflip: out_buf[i] = orig - j; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH8- %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH8_-%u-%u", afl->queue_cur->fname, i, j); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -3517,7 +3511,7 @@ skip_bitflip: *(u16 *)(out_buf + i) = orig + j; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH16+ %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH16+-%u-%u", afl->queue_cur->fname, i, j); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -3535,7 +3529,7 @@ skip_bitflip: *(u16 *)(out_buf + i) = orig - j; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH16- %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH16_-%u-%u", afl->queue_cur->fname, i, j); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -3558,7 +3552,7 @@ skip_bitflip: #ifdef INTROSPECTION snprintf(afl->mutation, sizeof(afl->mutation), - "%s MOPT_ARITH16+BE %u %u", afl->queue_cur->fname, i, j); + "%s MOPT_ARITH16+BE-%u-%u", afl->queue_cur->fname, i, j); #endif if 
(common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3576,7 +3570,7 @@ skip_bitflip: #ifdef INTROSPECTION snprintf(afl->mutation, sizeof(afl->mutation), - "%s MOPT_ARITH16-BE %u %u", afl->queue_cur->fname, i, j); + "%s MOPT_ARITH16_BE+%u+%u", afl->queue_cur->fname, i, j); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3642,7 +3636,7 @@ skip_bitflip: *(u32 *)(out_buf + i) = orig + j; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH32+ %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH32+-%u-%u", afl->queue_cur->fname, i, j); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -3660,7 +3654,7 @@ skip_bitflip: *(u32 *)(out_buf + i) = orig - j; #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH32- %u %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_ARITH32_-%u-%u", afl->queue_cur->fname, i, j); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -3683,7 +3677,7 @@ skip_bitflip: #ifdef INTROSPECTION snprintf(afl->mutation, sizeof(afl->mutation), - "%s MOPT_ARITH32+BE %u %u", afl->queue_cur->fname, i, j); + "%s MOPT_ARITH32+BE-%u-%u", afl->queue_cur->fname, i, j); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3701,7 +3695,7 @@ skip_bitflip: #ifdef INTROSPECTION snprintf(afl->mutation, sizeof(afl->mutation), - "%s MOPT_ARITH32-BE %u %u", afl->queue_cur->fname, i, j); + "%s MOPT_ARITH32_BE-%u-%u", afl->queue_cur->fname, i, j); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3772,7 +3766,7 @@ skip_arith: #ifdef INTROSPECTION snprintf(afl->mutation, sizeof(afl->mutation), - "%s MOPT_INTERESTING8 %u %u", afl->queue_cur->fname, i, j); + "%s MOPT_INTERESTING8-%u-%u", afl->queue_cur->fname, i, j); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -3831,7 +3825,7 @@ skip_arith: #ifdef INTROSPECTION snprintf(afl->mutation, sizeof(afl->mutation), - "%s MOPT_INTERESTING16 %u %u", afl->queue_cur->fname, i, j); + "%s MOPT_INTERESTING16-%u-%u", afl->queue_cur->fname, i, j); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3851,7 +3845,7 @@ skip_arith: #ifdef INTROSPECTION snprintf(afl->mutation, sizeof(afl->mutation), - "%s MOPT_INTERESTING16BE %u %u", afl->queue_cur->fname, i, j); + "%s MOPT_INTERESTING16BE-%u-%u", afl->queue_cur->fname, i, j); #endif *(u16 *)(out_buf + i) = SWAP16(interesting_16[j]); if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -3918,7 +3912,7 @@ skip_arith: #ifdef INTROSPECTION snprintf(afl->mutation, sizeof(afl->mutation), - "%s MOPT_INTERESTING32 %u %u", afl->queue_cur->fname, i, j); + "%s MOPT_INTERESTING32-%u-%u", afl->queue_cur->fname, i, j); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } ++afl->stage_cur; @@ -3938,7 +3932,7 @@ skip_arith: #ifdef INTROSPECTION snprintf(afl->mutation, sizeof(afl->mutation), - "%s MOPT_INTERESTING32BE %u %u", afl->queue_cur->fname, i, j); + "%s MOPT_INTERESTING32BE-%u-%u", afl->queue_cur->fname, i, j); #endif *(u32 *)(out_buf + i) = SWAP32(interesting_32[j]); if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -4015,8 +4009,7 @@ skip_interest: #ifdef INTROSPECTION snprintf(afl->mutation, sizeof(afl->mutation), - "%s MOPT_EXTRAS overwrite %u %u:%s", afl->queue_cur->fname, i, j, - 
afl->extras[j].data); + "%s MOPT_EXTRAS_overwrite-%u-%u", afl->queue_cur->fname, i, j); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -4068,8 +4061,7 @@ skip_interest: #ifdef INTROSPECTION snprintf(afl->mutation, sizeof(afl->mutation), - "%s MOPT_EXTRAS insert %u %u:%s", afl->queue_cur->fname, i, j, - afl->extras[j].data); + "%s MOPT_EXTRAS_insert-%u-%u", afl->queue_cur->fname, i, j); #endif if (common_fuzz_stuff(afl, ex_tmp, len + afl->extras[j].len)) { @@ -4131,8 +4123,8 @@ skip_user_extras: #ifdef INTROSPECTION snprintf(afl->mutation, sizeof(afl->mutation), - "%s MOPT_AUTO_EXTRAS overwrite %u %u:%s", afl->queue_cur->fname, - i, j, afl->a_extras[j].data); + "%s MOPT_AUTO_EXTRAS_overwrite-%u-%u", afl->queue_cur->fname, i, + j); #endif if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; } @@ -4275,7 +4267,7 @@ pacemaker_fuzzing: } #ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_HAVOC %u", + snprintf(afl->mutation, sizeof(afl->mutation), "%s MOPT_HAVOC-%u", afl->queue_cur->fname, use_stacking); #endif @@ -4356,7 +4348,7 @@ pacemaker_fuzzing: 1 + rand_below(afl, ARITH_MAX); MOpt_globals.cycles_v2[STAGE_ARITH8]++; #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH+-"); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH8"); strcat(afl->mutation, afl->m_tmp); #endif break; @@ -4392,7 +4384,7 @@ pacemaker_fuzzing: u32 pos = rand_below(afl, temp_len - 1); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16+%u", pos); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16+-%u", pos); strcat(afl->mutation, afl->m_tmp); #endif *(u16 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); @@ -4402,7 +4394,7 @@ pacemaker_fuzzing: u32 pos = rand_below(afl, temp_len - 1); u16 num = 1 + rand_below(afl, ARITH_MAX); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16BE+%u-%u", + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16BE+-%u-%u", pos, num); strcat(afl->mutation, afl->m_tmp); #endif @@ -4421,7 +4413,7 @@ pacemaker_fuzzing: u32 pos = rand_below(afl, temp_len - 3); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32-%u", pos); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32_-%u", pos); strcat(afl->mutation, afl->m_tmp); #endif *(u32 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); @@ -4431,7 +4423,7 @@ pacemaker_fuzzing: u32 pos = rand_below(afl, temp_len - 3); u32 num = 1 + rand_below(afl, ARITH_MAX); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32BE-%u-%u", + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32BE_-%u-%u", pos, num); strcat(afl->mutation, afl->m_tmp); #endif @@ -4446,7 +4438,7 @@ pacemaker_fuzzing: u32 pos = rand_below(afl, temp_len - 3); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32+%u", pos); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32+-%u", pos); strcat(afl->mutation, afl->m_tmp); #endif *(u32 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); @@ -4456,7 +4448,7 @@ pacemaker_fuzzing: u32 pos = rand_below(afl, temp_len - 3); u32 num = 1 + rand_below(afl, ARITH_MAX); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32BE+%u-%u", + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32BE+-%u-%u", pos, num); strcat(afl->mutation, afl->m_tmp); #endif @@ -4608,7 +4600,7 @@ pacemaker_fuzzing: clone_to = rand_below(afl, temp_len); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " CLONE_%s_%u_%u_%u", + snprintf(afl->m_tmp, 
sizeof(afl->m_tmp), " CLONE_%s-%u-%u-%u", actually_clone ? "clone" : "insert", clone_from, clone_to, clone_len); strcat(afl->mutation, afl->m_tmp); @@ -4670,7 +4662,7 @@ pacemaker_fuzzing: #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " OVERWRITE_COPY_%u_%u_%u", copy_from, copy_to, + " OVERWRITE_COPY-%u-%u-%u", copy_from, copy_to, copy_len); strcat(afl->mutation, afl->m_tmp); #endif @@ -4682,7 +4674,7 @@ pacemaker_fuzzing: #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " OVERWRITE_FIXED_%u_%u_%u", copy_from, copy_to, + " OVERWRITE_FIXED-%u-%u-%u", copy_from, copy_to, copy_len); strcat(afl->mutation, afl->m_tmp); #endif @@ -4719,8 +4711,7 @@ pacemaker_fuzzing: u32 insert_at = rand_below(afl, temp_len - extra_len + 1); #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " AUTO_EXTRA_OVERWRITE_%u_%u_%s", insert_at, extra_len, - afl->a_extras[use_extra].data); + " AUTO_EXTRA_OVERWRITE-%u-%u", insert_at, extra_len); strcat(afl->mutation, afl->m_tmp); #endif memcpy(out_buf + insert_at, afl->a_extras[use_extra].data, @@ -4738,8 +4729,7 @@ pacemaker_fuzzing: u32 insert_at = rand_below(afl, temp_len - extra_len + 1); #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " EXTRA_OVERWRITE_%u_%u_%s", insert_at, extra_len, - afl->a_extras[use_extra].data); + " EXTRA_OVERWRITE-%u-%u", insert_at, extra_len); strcat(afl->mutation, afl->m_tmp); #endif memcpy(out_buf + insert_at, afl->extras[use_extra].data, @@ -4773,8 +4763,7 @@ pacemaker_fuzzing: ptr = afl->a_extras[use_extra].data; #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " AUTO_EXTRA_INSERT_%u_%u_%s", insert_at, extra_len, - ptr); + " AUTO_EXTRA_INSERT-%u-%u", insert_at, extra_len); strcat(afl->mutation, afl->m_tmp); #endif @@ -4784,8 +4773,8 @@ pacemaker_fuzzing: extra_len = afl->extras[use_extra].len; ptr = afl->extras[use_extra].data; #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " EXTRA_INSERT_%u_%u_%s", insert_at, extra_len, ptr); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " EXTRA_INSERT-%u-%u", + insert_at, extra_len); strcat(afl->mutation, afl->m_tmp); #endif @@ -4843,7 +4832,7 @@ pacemaker_fuzzing: #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " SPLICE_OVERWRITE_%u_%u_%u_%s", copy_from, copy_to, + " SPLICE_OVERWRITE-%u-%u-%u-%s", copy_from, copy_to, copy_len, target->fname); strcat(afl->mutation, afl->m_tmp); #endif @@ -4865,7 +4854,7 @@ pacemaker_fuzzing: #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " SPLICE_INSERT_%u_%u_%u_%s", clone_from, clone_to, + " SPLICE_INSERT-%u-%u-%u-%s", clone_from, clone_to, clone_len, target->fname); strcat(afl->mutation, afl->m_tmp); #endif -- cgit 1.4.1 From 8e1047f5efaece663bba9b8ef86d181198db5101 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Tue, 10 Nov 2020 14:08:21 +0100 Subject: support custom mutator introspection --- docs/Changelog.md | 1 + docs/custom_mutators.md | 10 ++++++ include/afl-fuzz.h | 33 ++++++++++++++------ src/afl-fuzz-bitmap.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++--- src/afl-fuzz-mutators.c | 7 +++++ src/afl-fuzz-one.c | 8 +++++ src/afl-fuzz-python.c | 33 ++++++++++++++++++++ 7 files changed, 159 insertions(+), 14 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/docs/Changelog.md b/docs/Changelog.md index 50c1d48a..a69f2ff4 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -37,6 +37,7 @@ sending a mail to . 
- added NO_SPLICING compile option and makefile define - added INTROSPECTION make target that writes all mutations to out/NAME/introspection.txt + - added INTROSPECTION support for custom modules - print special compile time options used in help output - instrumentation - We received an enhanced gcc_plugin module from AdaCore, thank you diff --git a/docs/custom_mutators.md b/docs/custom_mutators.md index 81ee9de4..2516e511 100644 --- a/docs/custom_mutators.md +++ b/docs/custom_mutators.md @@ -42,6 +42,7 @@ size_t afl_custom_havoc_mutation(void *data, unsigned char *buf, size_t buf_size unsigned char afl_custom_havoc_mutation_probability(void *data); unsigned char afl_custom_queue_get(void *data, const unsigned char *filename); void afl_custom_queue_new_entry(void *data, const unsigned char *filename_new_queue, const unsigned int *filename_orig_queue); +const char* afl_custom_introspection(my_mutator_t *data); void afl_custom_deinit(void *data); ``` @@ -81,6 +82,9 @@ def queue_new_entry(filename_new_queue, filename_orig_queue): pass ``` +def introspection(): + return string + ### Custom Mutation - `init`: @@ -130,6 +134,12 @@ def queue_new_entry(filename_new_queue, filename_orig_queue): This methods is called after adding a new test case to the queue. +- `introspection` (optional): + + This method is called after a new queue entry, crash or timeout is + discovered if compiled with INTROSPECTION. The custom mutator can then + return a string (const char *) that reports the exact mutations used. + - `deinit`: The last method to be called, deinitializing the state. diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index e59d5f90..c355263b 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -310,6 +310,7 @@ enum { /* 09 */ PY_FUNC_HAVOC_MUTATION_PROBABILITY, /* 10 */ PY_FUNC_QUEUE_GET, /* 11 */ PY_FUNC_QUEUE_NEW_ENTRY, + /* 12 */ PY_FUNC_INTROSPECTION, PY_FUNC_COUNT }; @@ -684,6 +685,8 @@ typedef struct afl_state { u32 custom_mutators_count; + struct custom_mutator *current_custom_fuzz; + list_t custom_mutator_list; /* this is a fixed buffer of size map_size that can be used by any function if @@ -747,6 +750,15 @@ struct custom_mutator { */ void *(*afl_custom_init)(afl_state_t *afl, unsigned int seed); + /** + * When afl-fuzz was compiled with INTROSPECTION=1 then custom mutators can + * also give introspection information back with this function. + * + * @param data pointer returned in afl_custom_init for this fuzz case + * @return pointer to a text string (const char*) + */ + const char *(*afl_custom_introspection)(void *data); + /** * This method is called just before fuzzing a queue entry with the custom * mutator, and receives the initial buffer. 
It should return the number of @@ -953,16 +965,17 @@ u8 trim_case_custom(afl_state_t *, struct queue_entry *q, u8 *in_buf, struct custom_mutator *load_custom_mutator_py(afl_state_t *, char *); void finalize_py_module(void *); -u32 fuzz_count_py(void *, const u8 *, size_t); -size_t post_process_py(void *, u8 *, size_t, u8 **); -s32 init_trim_py(void *, u8 *, size_t); -s32 post_trim_py(void *, u8); -size_t trim_py(void *, u8 **); -size_t havoc_mutation_py(void *, u8 *, size_t, u8 **, size_t); -u8 havoc_mutation_probability_py(void *); -u8 queue_get_py(void *, const u8 *); -void queue_new_entry_py(void *, const u8 *, const u8 *); -void deinit_py(void *); +u32 fuzz_count_py(void *, const u8 *, size_t); +size_t post_process_py(void *, u8 *, size_t, u8 **); +s32 init_trim_py(void *, u8 *, size_t); +s32 post_trim_py(void *, u8); +size_t trim_py(void *, u8 **); +size_t havoc_mutation_py(void *, u8 *, size_t, u8 **, size_t); +u8 havoc_mutation_probability_py(void *); +u8 queue_get_py(void *, const u8 *); +const char *introspection_py(void *); +void queue_new_entry_py(void *, const u8 *, const u8 *); +void deinit_py(void *); #endif diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c index 735420c3..132499d6 100644 --- a/src/afl-fuzz-bitmap.c +++ b/src/afl-fuzz-bitmap.c @@ -588,8 +588,32 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) { add_to_queue(afl, queue_fn, len, 0); #ifdef INTROSPECTION - fprintf(afl->introspection_file, "QUEUE %s = %s\n", afl->mutation, - afl->queue_top->fname); + if (afl->mutation[0] != 0) { + + fprintf(afl->introspection_file, "QUEUE %s = %s\n", afl->mutation, + afl->queue_top->fname); + + } else if (afl->custom_mutators_count && afl->current_custom_fuzz) { + + LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, { + + if (afl->current_custom_fuzz == el && el->afl_custom_introspection) { + + const char *ptr = el->afl_custom_introspection(el->data); + + if (ptr != NULL && *ptr != 0) { + + fprintf(afl->introspection_file, "QUEUE CUSTOM %s = %s\n", ptr, + afl->queue_top->fname); + + } + + } + + }); + + } + #endif if (hnb == 2) { @@ -665,7 +689,32 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) { ++afl->unique_tmouts; #ifdef INTROSPECTION - fprintf(afl->introspection_file, "UNIQUE_TIMEOUT %s\n", afl->mutation); + if (afl->mutation[0] != 0) { + + fprintf(afl->introspection_file, "UNIQUE_TIMEOUT %s\n", afl->mutation); + + } else if (afl->custom_mutators_count && afl->current_custom_fuzz) { + + LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, { + + if (afl->current_custom_fuzz == el && el->afl_custom_introspection) { + + const char *ptr = el->afl_custom_introspection(el->data); + + if (ptr != NULL && *ptr != 0) { + + fprintf(afl->introspection_file, + "UNIQUE_TIMEOUT CUSTOM %s = %s\n", ptr, + afl->queue_top->fname); + + } + + } + + }); + + } + #endif /* Before saving, we make sure that it's a genuine hang by re-running @@ -751,7 +800,31 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) { ++afl->unique_crashes; #ifdef INTROSPECTION - fprintf(afl->introspection_file, "UNIQUE_CRASH %s\n", afl->mutation); + if (afl->mutation[0] != 0) { + + fprintf(afl->introspection_file, "UNIQUE_CRASH %s\n", afl->mutation); + + } else if (afl->custom_mutators_count && afl->current_custom_fuzz) { + + LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, { + + if (afl->current_custom_fuzz == el && el->afl_custom_introspection) { + + const char *ptr = el->afl_custom_introspection(el->data); + + if 
(ptr != NULL && *ptr != 0) { + + fprintf(afl->introspection_file, "UNIQUE_CRASH CUSTOM %s = %s\n", + ptr, afl->queue_top->fname); + + } + + } + + }); + + } + #endif if (unlikely(afl->infoexec)) { diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c index c4d7233c..1d14f657 100644 --- a/src/afl-fuzz-mutators.c +++ b/src/afl-fuzz-mutators.c @@ -166,6 +166,13 @@ struct custom_mutator *load_custom_mutator(afl_state_t *afl, const char *fn) { } + /* "afl_custom_introspection", optional */ +#ifdef INTROSPECTION + mutator->afl_custom_introspection = dlsym(dh, "afl_custom_introspection"); + if (!mutator->afl_custom_introspection) + ACTF("optional symbol 'afl_custom_introspection' not found."); +#endif + /* "afl_custom_fuzz_count", optional */ mutator->afl_custom_fuzz_count = dlsym(dh, "afl_custom_fuzz_count"); if (!mutator->afl_custom_fuzz_count) diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 91bbced6..64365ebb 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1780,10 +1780,16 @@ custom_mutator_stage: orig_hit_cnt = afl->queued_paths + afl->unique_crashes; +#ifdef INTROSPECTION + afl->mutation[0] = 0; +#endif + LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, { if (el->afl_custom_fuzz) { + afl->current_custom_fuzz = el; + if (el->afl_custom_fuzz_count) afl->stage_max = el->afl_custom_fuzz_count(el->data, out_buf, len); else @@ -1889,6 +1895,8 @@ custom_mutator_stage: }); + afl->current_custom_fuzz = NULL; + if (!has_custom_fuzz) goto havoc_stage; new_hit_cnt = afl->queued_paths + afl->unique_crashes; diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c index adb92649..fe16bc46 100644 --- a/src/afl-fuzz-python.c +++ b/src/afl-fuzz-python.c @@ -163,6 +163,8 @@ static py_mutator_t *init_py_module(afl_state_t *afl, u8 *module_name) { PyObject_GetAttrString(py_module, "queue_get"); py_functions[PY_FUNC_QUEUE_NEW_ENTRY] = PyObject_GetAttrString(py_module, "queue_new_entry"); + py_functions[PY_FUNC_INTROSPECTION] = + PyObject_GetAttrString(py_module, "introspection"); py_functions[PY_FUNC_DEINIT] = PyObject_GetAttrString(py_module, "deinit"); if (!py_functions[PY_FUNC_DEINIT]) FATAL("deinit function not found in python module"); @@ -381,6 +383,15 @@ struct custom_mutator *load_custom_mutator_py(afl_state_t *afl, } + #ifdef INTROSPECTION + if (py_functions[PY_FUNC_INTROSPECTION]) { + + mutator->afl_custom_introspection = introspection_py; + + } + + #endif + OKF("Python mutator '%s' installed successfully.", module_name); /* Initialize the custom mutator */ @@ -679,6 +690,28 @@ u8 havoc_mutation_probability_py(void *py_mutator) { } +const char *introspection_py(void *py_mutator) { + + PyObject *py_args, *py_value; + + py_args = PyTuple_New(0); + py_value = PyObject_CallObject( + ((py_mutator_t *)py_mutator)->py_functions[PY_FUNC_INTROSPECTION], + py_args); + Py_DECREF(py_args); + + if (py_value == NULL) { + + return NULL; + + } else { + + return PyByteArray_AsString(py_value); + + } + +} + u8 queue_get_py(void *py_mutator, const u8 *filename) { PyObject *py_args, *py_value; -- cgit 1.4.1 From 1dfd7df7c0e28b6f356bda714bedfc612bf2db75 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Tue, 10 Nov 2020 14:16:36 +0100 Subject: small fix to actually document custom mutator introspection --- src/afl-fuzz-one.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 64365ebb..0adc3719 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1846,12 +1846,6 @@ custom_mutator_stage: if 
(mutated_size > 0) { -#ifdef INTROSPECTION - snprintf(afl->mutation, sizeof(afl->mutation), "%s CUSTOM-%s", - afl->queue_cur->fname, - target != NULL ? (char *)target->fname : "none"); -#endif - if (common_fuzz_stuff(afl, mutated_buf, (u32)mutated_size)) { goto abandon_entry; -- cgit 1.4.1 From a19b3022d93195d3703817c728817d7e071e89fe Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Thu, 3 Dec 2020 19:22:44 +0100 Subject: afl_custom_describe api added --- docs/Changelog.md | 10 ++++--- include/afl-fuzz.h | 14 +++++++++ src/afl-fuzz-bitmap.c | 63 +++++++++++++++++++++++++++++--------- src/afl-fuzz-mutators.c | 80 ++++++++++++++++++++++++++++++++++++++++--------- src/afl-fuzz-one.c | 9 ++++-- unicorn_mode/unicornafl | 2 +- 6 files changed, 143 insertions(+), 35 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/docs/Changelog.md b/docs/Changelog.md index 02728f10..5201eb8b 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -22,18 +22,18 @@ sending a mail to . a schedule performance score, which is much better that the previous walk the whole queue approach. Select the old mode with -Z (auto enabled with -M) - - rpc.statsd support by Edznux, thanks a lot! + - rpc.statsd support, for stats and charts, by Edznux, thanks a lot! - Marcel Boehme submitted a patch that improves all AFFast schedules :) - not specifying -M or -S will now auto-set "-S default" - reading testcases from -i now descends into subdirectories - - allow up to 4 times the -x command line option - - loaded extras now have a duplicate protection + - allow the -x command line option up to 4 times + - loaded extras now have a duplication protection - If test cases are too large we do a partial read on the maximum supported size - longer seeds with the same trace information will now be ignored for fuzzing but still be used for splicing - crashing seeds are now not prohibiting a run anymore but are - skipped. They are used for splicing though. + skipped - they are used for splicing, though - update MOpt for expanded havoc modes - setting the env var AFL_NO_AUTODICT will not load an LTO autodictionary - added NO_SPLICING compile option and makefile define @@ -42,6 +42,8 @@ sending a mail to . - print special compile time options used in help output - when using -c cmplog, one of the childs was not killed, fixed - somewhere we broke -n dumb fuzzing, fixed + - added afl_custom_describe to the custom mutator API to allow for easy + mutation reproduction on crashing inputs - instrumentation - We received an enhanced gcc_plugin module from AdaCore, thank you very much!! diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 62d76323..92465e7e 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -798,6 +798,20 @@ struct custom_mutator { size_t (*afl_custom_fuzz)(void *data, u8 *buf, size_t buf_size, u8 **out_buf, u8 *add_buf, size_t add_buf_size, size_t max_size); + /** + * Describe the current testcase, generated by the last mutation. + * This will be called, for example, to give the written testcase a name + * after a crash ocurred. It can help to reproduce crashing mutations. + * + * (Optional) + * + * @param data pointer returned in afl_custom_init for this fuzz case + * @param[in] max_size Maximum size of the mutated output. The mutation must + * not produce data larger than max_size. + * @return A valid ptr to a 0-terminated string, or NULL on error. 
+ */ + const char *(*afl_custom_describe)(void *data, size_t max_size); + /** * A post-processing function to use right before AFL writes the test case to * disk in order to execute the target. diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c index 2d14b04e..a78bf374 100644 --- a/src/afl-fuzz-bitmap.c +++ b/src/afl-fuzz-bitmap.c @@ -425,7 +425,7 @@ void minimize_bits(afl_state_t *afl, u8 *dst, u8 *src) { /* Construct a file name for a new test case, capturing the operation that led to its discovery. Returns a ptr to afl->describe_op_buf_256. */ -u8 *describe_op(afl_state_t *afl, u8 hnb) { +u8 *describe_op(afl_state_t *afl, u8 new_bits) { u8 *ret = afl->describe_op_buf_256; @@ -445,29 +445,64 @@ u8 *describe_op(afl_state_t *afl, u8 hnb) { sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - afl->start_time); - sprintf(ret + strlen(ret), ",op:%s", afl->stage_short); + if (afl->current_custom_fuzz && + afl->current_custom_fuzz->afl_custom_describe) { - if (afl->stage_cur_byte >= 0) { + /* We are currently in a custom mutator that supports afl_custom_describe, + * use it! */ - sprintf(ret + strlen(ret), ",pos:%d", afl->stage_cur_byte); + size_t len_current = strlen(ret); + ret[len_current++] = ','; + ret[len_current++] = '\0'; - if (afl->stage_val_type != STAGE_VAL_NONE) { + size_t size_left = + sizeof(afl->describe_op_buf_256) - len_current - strlen(",+cov") - 2; + assert(size_left > 0); - sprintf(ret + strlen(ret), ",val:%s%+d", - (afl->stage_val_type == STAGE_VAL_BE) ? "be:" : "", - afl->stage_cur_val); + const char *custom_description = + afl->current_custom_fuzz->afl_custom_describe( + afl->current_custom_fuzz->data, size_left); + if (!custom_description || !custom_description[0]) { + + DEBUGF("Error getting a description from afl_custom_describe"); + /* Take the stage name as description fallback */ + sprintf(ret + len_current, "op:%s", afl->stage_short); + + } else { + + /* We got a proper custom description, use it */ + strncat(ret + len_current, custom_description, size_left); } } else { - sprintf(ret + strlen(ret), ",rep:%d", afl->stage_cur_val); + /* Normal testcase descriptions start here */ + sprintf(ret + strlen(ret), ",op:%s", afl->stage_short); + + if (afl->stage_cur_byte >= 0) { + + sprintf(ret + strlen(ret), ",pos:%d", afl->stage_cur_byte); + + if (afl->stage_val_type != STAGE_VAL_NONE) { + + sprintf(ret + strlen(ret), ",val:%s%+d", + (afl->stage_val_type == STAGE_VAL_BE) ? "be:" : "", + afl->stage_cur_val); + + } + + } else { + + sprintf(ret + strlen(ret), ",rep:%d", afl->stage_cur_val); + + } } } - if (hnb == 2) { strcat(ret, ",+cov"); } + if (new_bits == 2) { strcat(ret, ",+cov"); } return ret; @@ -540,7 +575,7 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) { if (unlikely(len == 0)) { return 0; } u8 *queue_fn = ""; - u8 hnb = '\0'; + u8 new_bits = '\0'; s32 fd; u8 keeping = 0, res; u64 cksum = 0; @@ -566,7 +601,7 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) { /* Keep only if there are new bits in the map, add to queue for future fuzzing, etc. 
*/ - if (!(hnb = has_new_bits(afl, afl->virgin_bits))) { + if (!(new_bits = has_new_bits(afl, afl->virgin_bits))) { if (unlikely(afl->crash_mode)) { ++afl->total_crashes; } return 0; @@ -576,7 +611,7 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) { #ifndef SIMPLE_FILES queue_fn = alloc_printf("%s/queue/id:%06u,%s", afl->out_dir, - afl->queued_paths, describe_op(afl, hnb)); + afl->queued_paths, describe_op(afl, new_bits)); #else @@ -619,7 +654,7 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) { #endif - if (hnb == 2) { + if (new_bits == 2) { afl->queue_top->has_new_cov = 1; ++afl->queued_with_cov; diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c index 1d14f657..0c85458e 100644 --- a/src/afl-fuzz-mutators.c +++ b/src/afl-fuzz-mutators.c @@ -151,7 +151,11 @@ struct custom_mutator *load_custom_mutator(afl_state_t *afl, const char *fn) { /* Mutator */ /* "afl_custom_init", optional for backward compatibility */ mutator->afl_custom_init = dlsym(dh, "afl_custom_init"); - if (!mutator->afl_custom_init) FATAL("Symbol 'afl_custom_init' not found."); + if (!mutator->afl_custom_init) { + + FATAL("Symbol 'afl_custom_init' not found."); + + } /* "afl_custom_fuzz" or "afl_custom_mutator", required */ mutator->afl_custom_fuzz = dlsym(dh, "afl_custom_fuzz"); @@ -161,49 +165,74 @@ struct custom_mutator *load_custom_mutator(afl_state_t *afl, const char *fn) { WARNF("Symbol 'afl_custom_fuzz' not found. Try 'afl_custom_mutator'."); mutator->afl_custom_fuzz = dlsym(dh, "afl_custom_mutator"); - if (!mutator->afl_custom_fuzz) + if (!mutator->afl_custom_fuzz) { + WARNF("Symbol 'afl_custom_mutator' not found."); + } + } /* "afl_custom_introspection", optional */ #ifdef INTROSPECTION mutator->afl_custom_introspection = dlsym(dh, "afl_custom_introspection"); - if (!mutator->afl_custom_introspection) + if (!mutator->afl_custom_introspection) { + ACTF("optional symbol 'afl_custom_introspection' not found."); + + } + #endif /* "afl_custom_fuzz_count", optional */ mutator->afl_custom_fuzz_count = dlsym(dh, "afl_custom_fuzz_count"); - if (!mutator->afl_custom_fuzz_count) + if (!mutator->afl_custom_fuzz_count) { + ACTF("optional symbol 'afl_custom_fuzz_count' not found."); + } + /* "afl_custom_deinit", optional for backward compatibility */ mutator->afl_custom_deinit = dlsym(dh, "afl_custom_deinit"); - if (!mutator->afl_custom_deinit) + if (!mutator->afl_custom_deinit) { + FATAL("Symbol 'afl_custom_deinit' not found."); + } + /* "afl_custom_post_process", optional */ mutator->afl_custom_post_process = dlsym(dh, "afl_custom_post_process"); - if (!mutator->afl_custom_post_process) + if (!mutator->afl_custom_post_process) { + ACTF("optional symbol 'afl_custom_post_process' not found."); + } + u8 notrim = 0; /* "afl_custom_init_trim", optional */ mutator->afl_custom_init_trim = dlsym(dh, "afl_custom_init_trim"); - if (!mutator->afl_custom_init_trim) + if (!mutator->afl_custom_init_trim) { + ACTF("optional symbol 'afl_custom_init_trim' not found."); + } + /* "afl_custom_trim", optional */ mutator->afl_custom_trim = dlsym(dh, "afl_custom_trim"); - if (!mutator->afl_custom_trim) + if (!mutator->afl_custom_trim) { + ACTF("optional symbol 'afl_custom_trim' not found."); + } + /* "afl_custom_post_trim", optional */ mutator->afl_custom_post_trim = dlsym(dh, "afl_custom_post_trim"); - if (!mutator->afl_custom_post_trim) + if (!mutator->afl_custom_post_trim) { + ACTF("optional symbol 'afl_custom_post_trim' not found."); + } + if (notrim) { mutator->afl_custom_init_trim = 
NULL; @@ -217,31 +246,54 @@ struct custom_mutator *load_custom_mutator(afl_state_t *afl, const char *fn) { /* "afl_custom_havoc_mutation", optional */ mutator->afl_custom_havoc_mutation = dlsym(dh, "afl_custom_havoc_mutation"); - if (!mutator->afl_custom_havoc_mutation) + if (!mutator->afl_custom_havoc_mutation) { + ACTF("optional symbol 'afl_custom_havoc_mutation' not found."); + } + /* "afl_custom_havoc_mutation", optional */ mutator->afl_custom_havoc_mutation_probability = dlsym(dh, "afl_custom_havoc_mutation_probability"); - if (!mutator->afl_custom_havoc_mutation_probability) + if (!mutator->afl_custom_havoc_mutation_probability) { + ACTF("optional symbol 'afl_custom_havoc_mutation_probability' not found."); + } + /* "afl_custom_queue_get", optional */ mutator->afl_custom_queue_get = dlsym(dh, "afl_custom_queue_get"); - if (!mutator->afl_custom_queue_get) + if (!mutator->afl_custom_queue_get) { + ACTF("optional symbol 'afl_custom_queue_get' not found."); + } + /* "afl_custom_queue_new_entry", optional */ mutator->afl_custom_queue_new_entry = dlsym(dh, "afl_custom_queue_new_entry"); - if (!mutator->afl_custom_queue_new_entry) + if (!mutator->afl_custom_queue_new_entry) { + ACTF("optional symbol 'afl_custom_queue_new_entry' not found"); + } + + /* "afl_custom_describe", optional */ + mutator->afl_custom_describe = dlsym(dh, "afl_custom_describe"); + if (!mutator->afl_custom_describe) { + + ACTF("Symbol 'afl_custom_describe' not found."); + + } + OKF("Custom mutator '%s' installed successfully.", fn); /* Initialize the custom mutator */ - if (mutator->afl_custom_init) + if (mutator->afl_custom_init) { + mutator->data = mutator->afl_custom_init(afl, rand_below(afl, 0xFFFFFFFF)); + } + mutator->stacked_custom = (mutator && mutator->afl_custom_havoc_mutation); mutator->stacked_custom_prob = 6; // like one of the default mutations in havoc diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 0adc3719..ca48f72a 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1790,11 +1790,16 @@ custom_mutator_stage: afl->current_custom_fuzz = el; - if (el->afl_custom_fuzz_count) + if (el->afl_custom_fuzz_count) { + afl->stage_max = el->afl_custom_fuzz_count(el->data, out_buf, len); - else + + } else { + afl->stage_max = saved_max; + } + has_custom_fuzz = true; afl->stage_short = el->name_short; diff --git a/unicorn_mode/unicornafl b/unicorn_mode/unicornafl index f44ec48f..8cca4801 160000 --- a/unicorn_mode/unicornafl +++ b/unicorn_mode/unicornafl @@ -1 +1 @@ -Subproject commit f44ec48f8d5929f243522c1152b5b3c0985a5548 +Subproject commit 8cca4801adb767dce7cf72202d7d25bdb420cf7d -- cgit 1.4.1 From 06ec5ab3d723bf7f0a2ee76be8b12c09fa870a9d Mon Sep 17 00:00:00 2001 From: Marcel Boehme Date: Mon, 7 Dec 2020 21:32:25 +0000 Subject: Sampling next seed by weight (hit_count, bitmap_size, exec_us) --- include/afl-fuzz.h | 3 ++- src/afl-fuzz-one.c | 6 ++++-- src/afl-fuzz-queue.c | 39 ++++++++++++++++++++++++++++++++------- 3 files changed, 38 insertions(+), 10 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index bdf44def..6ce032df 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -168,7 +168,8 @@ struct queue_entry { u8 *trace_mini; /* Trace bytes, if kept */ u32 tc_ref; /* Trace bytes ref count */ - double perf_score; /* performance score */ + double perf_score, /* performance score */ + weight; u8 *testcase_buf; /* The testcase buffer, if loaded. 
*/ diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index ca48f72a..a48afffb 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -445,8 +445,10 @@ u8 fuzz_one_original(afl_state_t *afl) { if (unlikely(afl->not_on_tty)) { - ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", - afl->current_entry, afl->queued_paths, afl->unique_crashes); + ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found, perf_score=%0.0f, exec_us=%llu, hits=%u, map=%u)...", + afl->current_entry, afl->queued_paths, afl->unique_crashes, + afl->queue_cur->perf_score, afl->queue_cur->exec_us, + afl->n_fuzz[afl->queue_cur->n_fuzz_entry], afl->queue_cur->bitmap_size); fflush(stdout); } diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index f35b4f57..1e997c55 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -42,6 +42,21 @@ inline u32 select_next_queue_entry(afl_state_t *afl) { } +double compute_weight(afl_state_t *afl, struct queue_entry *q, double avg_exec_us, double avg_bitmap_size) { + + u32 hits = afl->n_fuzz[q->n_fuzz_entry]; + if (hits == 0) hits = 1; + + double weight = 1.0; + weight *= avg_exec_us / q->exec_us; + weight *= log(q->bitmap_size) / avg_bitmap_size; + weight /= log10(hits) + 1; + + if (q->favored) weight *= 5; + + return weight; +} + /* create the alias table that allows weighted random selection - expensive */ void create_alias_table(afl_state_t *afl) { @@ -65,25 +80,35 @@ void create_alias_table(afl_state_t *afl) { memset((void *)afl->alias_table, 0, n * sizeof(u32)); memset((void *)afl->alias_probability, 0, n * sizeof(double)); - double sum = 0; - + double avg_exec_us = 0.0; + double avg_bitmap_size = 0.0; for (i = 0; i < n; i++) { struct queue_entry *q = afl->queue_buf[i]; - - if (!q->disabled) { q->perf_score = calculate_score(afl, q); } - - sum += q->perf_score; + avg_exec_us += q->exec_us; + avg_bitmap_size += log(q->bitmap_size); } + avg_exec_us /= afl->queued_paths; + avg_bitmap_size /= afl->queued_paths; + double sum = 0; for (i = 0; i < n; i++) { struct queue_entry *q = afl->queue_buf[i]; - P[i] = (q->perf_score * n) / sum; + + if (!q->disabled) { + q->weight = compute_weight(afl, q, avg_exec_us, avg_bitmap_size); + q->perf_score = calculate_score(afl, q); + } + + sum += q->weight; } + for (i = 0; i < n; i++) + P[i] = (afl->queue_buf[i]->weight * n) / sum; + int nS = 0, nL = 0, s; for (s = (s32)n - 1; s >= 0; --s) { -- cgit 1.4.1 From 46156957bd120dc8d8bcd9da72f83574902c654f Mon Sep 17 00:00:00 2001 From: van Hauser Date: Tue, 8 Dec 2020 11:07:11 +0100 Subject: fix aflfast changes --- include/afl-fuzz.h | 2 +- src/afl-fuzz-one.c | 11 +++-- src/afl-fuzz-queue.c | 73 ++++++++++++++++++++++++---------- utils/aflpp_driver/aflpp_qemu_driver.c | 2 +- 4 files changed, 61 insertions(+), 27 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 6ce032df..2f2d31d3 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -169,7 +169,7 @@ struct queue_entry { u32 tc_ref; /* Trace bytes ref count */ double perf_score, /* performance score */ - weight; + weight; u8 *testcase_buf; /* The testcase buffer, if loaded. 
*/ diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index a48afffb..e6fa6064 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -445,10 +445,13 @@ u8 fuzz_one_original(afl_state_t *afl) { if (unlikely(afl->not_on_tty)) { - ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found, perf_score=%0.0f, exec_us=%llu, hits=%u, map=%u)...", - afl->current_entry, afl->queued_paths, afl->unique_crashes, - afl->queue_cur->perf_score, afl->queue_cur->exec_us, - afl->n_fuzz[afl->queue_cur->n_fuzz_entry], afl->queue_cur->bitmap_size); + ACTF( + "Fuzzing test case #%u (%u total, %llu uniq crashes found, " + "perf_score=%0.0f, exec_us=%llu, hits=%u, map=%u)...", + afl->current_entry, afl->queued_paths, afl->unique_crashes, + afl->queue_cur->perf_score, afl->queue_cur->exec_us, + likely(afl->n_fuzz) ? afl->n_fuzz[afl->queue_cur->n_fuzz_entry] : 0, + afl->queue_cur->bitmap_size); fflush(stdout); } diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 1e997c55..071e4a4c 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -42,7 +42,8 @@ inline u32 select_next_queue_entry(afl_state_t *afl) { } -double compute_weight(afl_state_t *afl, struct queue_entry *q, double avg_exec_us, double avg_bitmap_size) { +double compute_weight(afl_state_t *afl, struct queue_entry *q, + double avg_exec_us, double avg_bitmap_size) { u32 hits = afl->n_fuzz[q->n_fuzz_entry]; if (hits == 0) hits = 1; @@ -55,13 +56,15 @@ double compute_weight(afl_state_t *afl, struct queue_entry *q, double avg_exec_u if (q->favored) weight *= 5; return weight; + } /* create the alias table that allows weighted random selection - expensive */ void create_alias_table(afl_state_t *afl) { - u32 n = afl->queued_paths, i = 0, a, g; + u32 n = afl->queued_paths, i = 0, a, g; + double sum = 0; afl->alias_table = (u32 *)afl_realloc((void **)&afl->alias_table, n * sizeof(u32)); @@ -80,34 +83,62 @@ void create_alias_table(afl_state_t *afl) { memset((void *)afl->alias_table, 0, n * sizeof(u32)); memset((void *)afl->alias_probability, 0, n * sizeof(double)); - double avg_exec_us = 0.0; - double avg_bitmap_size = 0.0; - for (i = 0; i < n; i++) { + if (likely(afl->schedule >= FAST && afl->schedule <= RARE)) { - struct queue_entry *q = afl->queue_buf[i]; - avg_exec_us += q->exec_us; - avg_bitmap_size += log(q->bitmap_size); + double avg_exec_us = 0.0; + double avg_bitmap_size = 0.0; + for (i = 0; i < n; i++) { - } - avg_exec_us /= afl->queued_paths; - avg_bitmap_size /= afl->queued_paths; + struct queue_entry *q = afl->queue_buf[i]; + avg_exec_us += q->exec_us; + avg_bitmap_size += log(q->bitmap_size); - double sum = 0; - for (i = 0; i < n; i++) { + } + + avg_exec_us /= afl->queued_paths; + avg_bitmap_size /= afl->queued_paths; + + for (i = 0; i < n; i++) { - struct queue_entry *q = afl->queue_buf[i]; + struct queue_entry *q = afl->queue_buf[i]; + + if (!q->disabled) { + + q->weight = compute_weight(afl, q, avg_exec_us, avg_bitmap_size); + q->perf_score = calculate_score(afl, q); + + } + + sum += q->weight; - if (!q->disabled) { - q->weight = compute_weight(afl, q, avg_exec_us, avg_bitmap_size); - q->perf_score = calculate_score(afl, q); } - sum += q->weight; + for (i = 0; i < n; i++) { - } + P[i] = (afl->queue_buf[i]->weight * n) / sum; + + } + + } else { + + for (i = 0; i < n; i++) { + + struct queue_entry *q = afl->queue_buf[i]; + + if (!q->disabled) { q->perf_score = calculate_score(afl, q); } + + sum += q->perf_score; - for (i = 0; i < n; i++) - P[i] = (afl->queue_buf[i]->weight * n) / sum; + } + + for (i = 0; i < n; i++) { 
+ + struct queue_entry *q = afl->queue_buf[i]; + P[i] = (q->perf_score * n) / sum; + + } + + } int nS = 0, nL = 0, s; for (s = (s32)n - 1; s >= 0; --s) { diff --git a/utils/aflpp_driver/aflpp_qemu_driver.c b/utils/aflpp_driver/aflpp_qemu_driver.c index cb3b86d0..a0c02833 100644 --- a/utils/aflpp_driver/aflpp_qemu_driver.c +++ b/utils/aflpp_driver/aflpp_qemu_driver.c @@ -7,7 +7,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); __attribute__((weak)) int LLVMFuzzerInitialize(int *argc, char ***argv); #define kMaxAflInputSize (1 * 1024 * 1024); -static uint8_t AflInputBuf[kMaxAflInputSize]; +static uint8_t AflInputBuf[kMaxAflInputSize]; void __attribute__((noinline)) afl_qemu_driver_stdin_input(void) { -- cgit 1.4.1 From c6e038fe25789caa8da777f53154de1bd7b4e178 Mon Sep 17 00:00:00 2001 From: hexcoder- Date: Mon, 4 Jan 2021 20:40:53 +0100 Subject: code cleanups (shadowed vars, (un)signed type mismatches, format types, etc.) --- include/afl-fuzz.h | 4 +-- include/config.h | 46 +++++++++++++++--------------- src/afl-analyze.c | 4 +-- src/afl-cc.c | 74 ++++++++++++++++++++++++++++--------------------- src/afl-common.c | 20 ++++++------- src/afl-forkserver.c | 4 +-- src/afl-fuzz-bitmap.c | 2 +- src/afl-fuzz-extras.c | 6 ++-- src/afl-fuzz-mutators.c | 28 ++++++++++++------- src/afl-fuzz-one.c | 24 ++++++++-------- src/afl-fuzz-queue.c | 5 ++-- src/afl-fuzz-redqueen.c | 9 ++++-- src/afl-fuzz-run.c | 2 +- src/afl-fuzz-stats.c | 8 +++--- src/afl-fuzz.c | 8 +++--- src/afl-ld-lto.c | 6 ++-- src/afl-showmap.c | 12 ++++---- src/afl-tmin.c | 4 +-- 18 files changed, 144 insertions(+), 122 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index d6a322cc..ede54f0e 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -573,7 +573,7 @@ typedef struct afl_state { u8 stage_name_buf[STAGE_BUF_SIZE]; /* reused stagename buf with len 64 */ - s32 stage_cur, stage_max; /* Stage progression */ + u32 stage_cur, stage_max; /* Stage progression */ s32 splicing_with; /* Splicing with which test case? */ u32 main_node_id, main_node_max; /* Main instance job splitting */ @@ -648,7 +648,7 @@ typedef struct afl_state { double last_avg_execs_saved; /* foreign sync */ -#define FOREIGN_SYNCS_MAX 32 +#define FOREIGN_SYNCS_MAX 32U u8 foreign_sync_cnt; struct foreign_sync foreign_syncs[FOREIGN_SYNCS_MAX]; diff --git a/include/config.h b/include/config.h index e8a49270..7c75e9c9 100644 --- a/include/config.h +++ b/include/config.h @@ -80,11 +80,11 @@ /* Default timeout for fuzzed code (milliseconds). 
This is the upper bound, also used for detecting hangs; the actual value is auto-scaled: */ -#define EXEC_TIMEOUT 1000 +#define EXEC_TIMEOUT 1000U /* Timeout rounding factor when auto-scaling (milliseconds): */ -#define EXEC_TM_ROUND 20 +#define EXEC_TM_ROUND 20U /* 64bit arch MACRO */ #if (defined(__x86_64__) || defined(__arm64__) || defined(__aarch64__)) @@ -93,48 +93,48 @@ /* Default memory limit for child process (MB) 0 = disabled : */ -#define MEM_LIMIT 0 +#define MEM_LIMIT 0U /* Default memory limit when running in QEMU mode (MB) 0 = disabled : */ -#define MEM_LIMIT_QEMU 0 +#define MEM_LIMIT_QEMU 0U /* Default memory limit when running in Unicorn mode (MB) 0 = disabled : */ -#define MEM_LIMIT_UNICORN 0 +#define MEM_LIMIT_UNICORN 0U /* Number of calibration cycles per every new test case (and for test cases that show variable behavior): */ -#define CAL_CYCLES 8 -#define CAL_CYCLES_LONG 40 +#define CAL_CYCLES 8U +#define CAL_CYCLES_LONG 40U /* Number of subsequent timeouts before abandoning an input file: */ -#define TMOUT_LIMIT 250 +#define TMOUT_LIMIT 250U /* Maximum number of unique hangs or crashes to record: */ -#define KEEP_UNIQUE_HANG 500 -#define KEEP_UNIQUE_CRASH 5000 +#define KEEP_UNIQUE_HANG 500U +#define KEEP_UNIQUE_CRASH 5000U /* Baseline number of random tweaks during a single 'havoc' stage: */ -#define HAVOC_CYCLES 256 -#define HAVOC_CYCLES_INIT 1024 +#define HAVOC_CYCLES 256U +#define HAVOC_CYCLES_INIT 1024U /* Maximum multiplier for the above (should be a power of two, beware of 32-bit int overflows): */ -#define HAVOC_MAX_MULT 64 -#define HAVOC_MAX_MULT_MOPT 64 +#define HAVOC_MAX_MULT 64U +#define HAVOC_MAX_MULT_MOPT 64U /* Absolute minimum number of havoc cycles (after all adjustments): */ -#define HAVOC_MIN 12 +#define HAVOC_MIN 12U /* Power Schedule Divisor */ -#define POWER_BETA 1 +#define POWER_BETA 1U #define MAX_FACTOR (POWER_BETA * 32) /* Maximum stacking for havoc-stage tweaks. The actual value is calculated @@ -146,19 +146,19 @@ In other words, the default (n = 4) produces 2, 4, 8, 16 stacked tweaks: */ -#define HAVOC_STACK_POW2 4 +#define HAVOC_STACK_POW2 4U /* Caps on block sizes for cloning and deletion operations. 
Each of these ranges has a 33% probability of getting picked, except for the first two cycles where smaller blocks are favored: */ -#define HAVOC_BLK_SMALL 32 -#define HAVOC_BLK_MEDIUM 128 -#define HAVOC_BLK_LARGE 1500 +#define HAVOC_BLK_SMALL 32U +#define HAVOC_BLK_MEDIUM 128U +#define HAVOC_BLK_LARGE 1500U /* Extra-large blocks, selected very rarely (<5% of the time): */ -#define HAVOC_BLK_XL 32768 +#define HAVOC_BLK_XL 32768U /* Probabilities of skipping non-favored entries in the queue, expressed as percentages: */ @@ -188,11 +188,11 @@ /* Maximum size of input file, in bytes (keep under 100MB): */ -#define MAX_FILE (1 * 1024 * 1024) +#define MAX_FILE (1 * 1024 * 1024U) /* The same, for the test case minimizer: */ -#define TMIN_MAX_FILE (10 * 1024 * 1024) +#define TMIN_MAX_FILE (10 * 1024 * 1024U) /* Block normalization steps for afl-tmin: */ diff --git a/src/afl-analyze.c b/src/afl-analyze.c index 6dac415b..8fc4434a 100644 --- a/src/afl-analyze.c +++ b/src/afl-analyze.c @@ -903,8 +903,8 @@ static void usage(u8 *argv0) { "Execution control settings:\n" " -f file - input file read by the tested program (stdin)\n" - " -t msec - timeout for each run (%d ms)\n" - " -m megs - memory limit for child process (%d MB)\n" + " -t msec - timeout for each run (%u ms)\n" + " -m megs - memory limit for child process (%u MB)\n" " -Q - use binary-only instrumentation (QEMU mode)\n" " -U - use unicorn-based instrumentation (Unicorn mode)\n" " -W - use qemu-based instrumentation with Wine (Wine " diff --git a/src/afl-cc.c b/src/afl-cc.c index e6a6718e..180ab3c4 100644 --- a/src/afl-cc.c +++ b/src/afl-cc.c @@ -120,8 +120,10 @@ char compiler_mode_string[7][12] = { u8 *getthecwd() { - static u8 fail[] = ""; - if (getcwd(cwd, sizeof(cwd)) == NULL) return fail; + if (getcwd(cwd, sizeof(cwd)) == NULL) { + static u8 fail[] = ""; + return fail; + } return cwd; } @@ -654,9 +656,9 @@ static void edit_params(u32 argc, char **argv, char **envp) { } - u32 idx; if (lto_mode && argc > 1) { + u32 idx; for (idx = 1; idx < argc; idx++) { if (!strncasecmp(argv[idx], "-fpic", 5)) have_pic = 1; @@ -1208,12 +1210,12 @@ int main(int argc, char **argv, char **envp) { if (getenv("AFL_LLVM_INSTRUMENT")) { - u8 *ptr = strtok(getenv("AFL_LLVM_INSTRUMENT"), ":,;"); + u8 *ptr2 = strtok(getenv("AFL_LLVM_INSTRUMENT"), ":,;"); - while (ptr) { + while (ptr2) { - if (strncasecmp(ptr, "afl", strlen("afl")) == 0 || - strncasecmp(ptr, "classic", strlen("classic")) == 0) { + if (strncasecmp(ptr2, "afl", strlen("afl")) == 0 || + strncasecmp(ptr2, "classic", strlen("classic")) == 0) { if (instrument_mode == INSTRUMENT_LTO) { @@ -1229,8 +1231,8 @@ int main(int argc, char **argv, char **envp) { } - if (strncasecmp(ptr, "pc-guard", strlen("pc-guard")) == 0 || - strncasecmp(ptr, "pcguard", strlen("pcguard")) == 0) { + if (strncasecmp(ptr2, "pc-guard", strlen("pc-guard")) == 0 || + strncasecmp(ptr2, "pcguard", strlen("pcguard")) == 0) { if (!instrument_mode || instrument_mode == INSTRUMENT_PCGUARD) instrument_mode = INSTRUMENT_PCGUARD; @@ -1241,8 +1243,8 @@ int main(int argc, char **argv, char **envp) { } // this is a hidden option - if (strncasecmp(ptr, "llvmnative", strlen("llvmnative")) == 0 || - strncasecmp(ptr, "llvm-native", strlen("llvm-native")) == 0) { + if (strncasecmp(ptr2, "llvmnative", strlen("llvmnative")) == 0 || + strncasecmp(ptr2, "llvm-native", strlen("llvm-native")) == 0) { if (!instrument_mode || instrument_mode == INSTRUMENT_LLVMNATIVE) instrument_mode = INSTRUMENT_LLVMNATIVE; @@ -1252,8 +1254,8 @@ int main(int argc, char 
**argv, char **envp) { } - if (strncasecmp(ptr, "cfg", strlen("cfg")) == 0 || - strncasecmp(ptr, "instrim", strlen("instrim")) == 0) { + if (strncasecmp(ptr2, "cfg", strlen("cfg")) == 0 || + strncasecmp(ptr2, "instrim", strlen("instrim")) == 0) { if (instrument_mode == INSTRUMENT_LTO) { @@ -1269,7 +1271,7 @@ int main(int argc, char **argv, char **envp) { } - if (strncasecmp(ptr, "lto", strlen("lto")) == 0) { + if (strncasecmp(ptr2, "lto", strlen("lto")) == 0) { lto_mode = 1; if (!instrument_mode || instrument_mode == INSTRUMENT_LTO) @@ -1280,7 +1282,7 @@ int main(int argc, char **argv, char **envp) { } - if (strcasecmp(ptr, "gcc") == 0) { + if (strcasecmp(ptr2, "gcc") == 0) { if (!instrument_mode || instrument_mode == INSTRUMENT_GCC) instrument_mode = INSTRUMENT_GCC; @@ -1291,7 +1293,7 @@ int main(int argc, char **argv, char **envp) { } - if (strcasecmp(ptr, "clang") == 0) { + if (strcasecmp(ptr2, "clang") == 0) { if (!instrument_mode || instrument_mode == INSTRUMENT_CLANG) instrument_mode = INSTRUMENT_CLANG; @@ -1302,29 +1304,29 @@ int main(int argc, char **argv, char **envp) { } - if (strncasecmp(ptr, "ctx", strlen("ctx")) == 0) { + if (strncasecmp(ptr2, "ctx", strlen("ctx")) == 0) { instrument_opt_mode |= INSTRUMENT_OPT_CTX; setenv("AFL_LLVM_CTX", "1", 1); } - if (strncasecmp(ptr, "ngram", strlen("ngram")) == 0) { + if (strncasecmp(ptr2, "ngram", strlen("ngram")) == 0) { - ptr += strlen("ngram"); - while (*ptr && (*ptr < '0' || *ptr > '9')) - ptr++; + ptr2 += strlen("ngram"); + while (*ptr2 && (*ptr2 < '0' || *ptr2 > '9')) + ptr2++; - if (!*ptr) { + if (!*ptr2) { - if ((ptr = getenv("AFL_LLVM_NGRAM_SIZE")) == NULL) + if ((ptr2 = getenv("AFL_LLVM_NGRAM_SIZE")) == NULL) FATAL( "you must set the NGRAM size with (e.g. for value 2) " "AFL_LLVM_INSTRUMENT=ngram-2"); } - ngram_size = atoi(ptr); + ngram_size = atoi(ptr2); if (ngram_size < 2 || ngram_size > NGRAM_SIZE_MAX) FATAL( "NGRAM instrumentation option must be between 2 and " @@ -1332,12 +1334,12 @@ int main(int argc, char **argv, char **envp) { "(%u)", NGRAM_SIZE_MAX); instrument_opt_mode |= (INSTRUMENT_OPT_NGRAM); - ptr = alloc_printf("%u", ngram_size); - setenv("AFL_LLVM_NGRAM_SIZE", ptr, 1); + ptr2 = alloc_printf("%u", ngram_size); + setenv("AFL_LLVM_NGRAM_SIZE", ptr2, 1); } - ptr = strtok(NULL, ":,;"); + ptr2 = strtok(NULL, ":,;"); } @@ -1448,20 +1450,28 @@ int main(int argc, char **argv, char **envp) { " The best is LTO but it often needs RANLIB and AR settings outside " "of afl-cc.\n\n"); +#if LLVM_MAJOR > 10 || (LLVM_MAJOR == 10 && LLVM_MINOR > 0) +#define NATIVE_MSG \ + " NATIVE: use llvm's native PCGUARD instrumentation (less " \ + "performant)\n" +#else +#define NATIVE_MSG "" +#endif + SAYF( "Sub-Modes: (set via env AFL_LLVM_INSTRUMENT, afl-cc selects the best " "available)\n" " PCGUARD: Dominator tree instrumentation (best!) 
(README.llvm.md)\n" -#if LLVM_MAJOR > 10 || (LLVM_MAJOR == 10 && LLVM_MINOR > 0) - " NATIVE: use llvm's native PCGUARD instrumentation (less " - "performant)\n" -#endif + + NATIVE_MSG + " CLASSIC: decision target instrumentation (README.llvm.md)\n" " CTX: CLASSIC + callee context (instrumentation/README.ctx.md)\n" " NGRAM-x: CLASSIC + previous path " "((instrumentation/README.ngram.md)\n" " INSTRIM: Dominator tree (for LLVM <= 6.0) " "(instrumentation/README.instrim.md)\n\n"); +#undef NATIVE_MSG SAYF( "Features: (see documentation links)\n" @@ -1625,7 +1635,7 @@ int main(int argc, char **argv, char **envp) { if (!instrument_mode) { instrument_mode = INSTRUMENT_CFG; - ptr = instrument_mode_string[instrument_mode]; + //ptr = instrument_mode_string[instrument_mode]; } diff --git a/src/afl-common.c b/src/afl-common.c index 1928663d..21cb6ab4 100644 --- a/src/afl-common.c +++ b/src/afl-common.c @@ -696,16 +696,16 @@ u8 *stringify_mem_size(u8 *buf, size_t len, u64 val) { u8 *stringify_time_diff(u8 *buf, size_t len, u64 cur_ms, u64 event_ms) { - u64 delta; - s32 t_d, t_h, t_m, t_s; - u8 val_buf[STRINGIFY_VAL_SIZE_MAX]; - if (!event_ms) { snprintf(buf, len, "none seen yet"); } else { + u64 delta; + s32 t_d, t_h, t_m, t_s; + u8 val_buf[STRINGIFY_VAL_SIZE_MAX]; + delta = cur_ms - event_ms; t_d = delta / 1000 / 60 / 60 / 24; @@ -858,16 +858,16 @@ u8 *u_stringify_mem_size(u8 *buf, u64 val) { u8 *u_stringify_time_diff(u8 *buf, u64 cur_ms, u64 event_ms) { - u64 delta; - s32 t_d, t_h, t_m, t_s; - u8 val_buf[STRINGIFY_VAL_SIZE_MAX]; - if (!event_ms) { sprintf(buf, "none seen yet"); } else { + u64 delta; + s32 t_d, t_h, t_m, t_s; + u8 val_buf[STRINGIFY_VAL_SIZE_MAX]; + delta = cur_ms - event_ms; t_d = delta / 1000 / 60 / 60 / 24; @@ -895,8 +895,8 @@ u32 get_map_size(void) { map_size = atoi(ptr); if (map_size < 8 || map_size > (1 << 29)) { - FATAL("illegal AFL_MAP_SIZE %u, must be between %u and %u", map_size, 8, - 1 << 29); + FATAL("illegal AFL_MAP_SIZE %u, must be between %u and %u", map_size, 8U, + 1U << 29); } diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c index 90fa55e9..d6195cb5 100644 --- a/src/afl-forkserver.c +++ b/src/afl-forkserver.c @@ -213,7 +213,7 @@ restart_select: static void afl_fauxsrv_execv(afl_forkserver_t *fsrv, char **argv) { unsigned char tmp[4] = {0, 0, 0, 0}; - pid_t child_pid = -1; + pid_t child_pid; if (!be_quiet) { ACTF("Using Fauxserver:"); } @@ -1104,7 +1104,7 @@ fsrv_run_result_t afl_fsrv_run_target(afl_forkserver_t *fsrv, u32 timeout, "Unable to communicate with fork server. Some possible reasons:\n\n" " - You've run out of memory. 
Use -m to increase the the memory " "limit\n" - " to something higher than %lld.\n" + " to something higher than %llu.\n" " - The binary or one of the libraries it uses manages to " "create\n" " threads before the forkserver initializes.\n" diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c index 62a8211c..85a01f98 100644 --- a/src/afl-fuzz-bitmap.c +++ b/src/afl-fuzz-bitmap.c @@ -703,7 +703,7 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) { if (!classified) { classify_counts(&afl->fsrv); - classified = 1; +// classified = 1; } diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c index 171cce96..04f0878c 100644 --- a/src/afl-fuzz-extras.c +++ b/src/afl-fuzz-extras.c @@ -266,7 +266,7 @@ static void extras_check_and_sort(afl_state_t *afl, u32 min_len, u32 max_len, if (afl->extras_cnt > afl->max_det_extras) { - WARNF("More than %d tokens - will use them probabilistically.", + WARNF("More than %u tokens - will use them probabilistically.", afl->max_det_extras); } @@ -431,7 +431,6 @@ void dedup_extras(afl_state_t *afl) { /* Adds a new extra / dict entry. */ void add_extra(afl_state_t *afl, u8 *mem, u32 len) { - u8 val_bufs[2][STRINGIFY_VAL_SIZE_MAX]; u32 i, found = 0; for (i = 0; i < afl->extras_cnt; i++) { @@ -451,6 +450,7 @@ void add_extra(afl_state_t *afl, u8 *mem, u32 len) { if (len > MAX_DICT_FILE) { + u8 val_bufs[2][STRINGIFY_VAL_SIZE_MAX]; WARNF("Extra '%.*s' is too big (%s, limit is %s), skipping file!", (int)len, mem, stringify_mem_size(val_bufs[0], sizeof(val_bufs[0]), len), stringify_mem_size(val_bufs[1], sizeof(val_bufs[1]), MAX_DICT_FILE)); @@ -481,7 +481,7 @@ void add_extra(afl_state_t *afl, u8 *mem, u32 len) { if (afl->extras_cnt == afl->max_det_extras + 1) { - WARNF("More than %d tokens - will use them probabilistically.", + WARNF("More than %u tokens - will use them probabilistically.", afl->max_det_extras); } diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c index 0c85458e..5da692d3 100644 --- a/src/afl-fuzz-mutators.c +++ b/src/afl-fuzz-mutators.c @@ -316,16 +316,20 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf, /* Initialize trimming in the custom mutator */ afl->stage_cur = 0; - afl->stage_max = mutator->afl_custom_init_trim(mutator->data, in_buf, q->len); - if (unlikely(afl->stage_max) < 0) { + s32 retval = mutator->afl_custom_init_trim(mutator->data, in_buf, q->len); + if (unlikely(retval) < 0) { - FATAL("custom_init_trim error ret: %d", afl->stage_max); + FATAL("custom_init_trim error ret: %d", retval); + } else { + + afl->stage_max = retval; + } if (afl->not_on_tty && afl->debug) { - SAYF("[Custom Trimming] START: Max %d iterations, %u bytes", afl->stage_max, + SAYF("[Custom Trimming] START: Max %u iterations, %u bytes", afl->stage_max, q->len); } @@ -343,7 +347,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf, if (unlikely(!retbuf)) { - FATAL("custom_trim failed (ret %zd)", retlen); + FATAL("custom_trim failed (ret %zu)", retlen); } else if (unlikely(retlen > orig_len)) { @@ -409,7 +413,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf, if (afl->not_on_tty && afl->debug) { - SAYF("[Custom Trimming] SUCCESS: %d/%d iterations (now at %u bytes)", + SAYF("[Custom Trimming] SUCCESS: %u/%u iterations (now at %u bytes)", afl->stage_cur, afl->stage_max, q->len); } @@ -417,16 +421,20 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf, } else { /* Tell the custom mutator that the trimming was unsuccessful */ - 
afl->stage_cur = mutator->afl_custom_post_trim(mutator->data, 0); - if (unlikely(afl->stage_cur < 0)) { + s32 retval2 = mutator->afl_custom_post_trim(mutator->data, 0); + if (unlikely(retval2 < 0)) { + + FATAL("Error ret in custom_post_trim: %d", retval2); + + } else { - FATAL("Error ret in custom_post_trim: %d", afl->stage_cur); + afl->stage_cur = retval2; } if (afl->not_on_tty && afl->debug) { - SAYF("[Custom Trimming] FAILURE: %d/%d iterations", afl->stage_cur, + SAYF("[Custom Trimming] FAILURE: %u/%u iterations", afl->stage_cur, afl->stage_max); } diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index e6fa6064..f9509e86 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -368,7 +368,7 @@ static void locate_diffs(u8 *ptr1, u8 *ptr2, u32 len, s32 *first, s32 *last) { u8 fuzz_one_original(afl_state_t *afl) { - s32 len, temp_len; + u32 len, temp_len; u32 j; u32 i; u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; @@ -545,7 +545,7 @@ u8 fuzz_one_original(afl_state_t *afl) { else orig_perf = perf_score = calculate_score(afl, afl->queue_cur); - if (unlikely(perf_score <= 0)) { goto abandon_entry; } + if (unlikely(perf_score == 0)) { goto abandon_entry; } if (unlikely(afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized)) { @@ -902,7 +902,7 @@ u8 fuzz_one_original(afl_state_t *afl) { orig_hit_cnt = new_hit_cnt; - for (i = 0; (s32)i < len - 1; ++i) { + for (i = 0; i < len - 1; ++i) { /* Let's consult the effector map... */ @@ -945,7 +945,7 @@ u8 fuzz_one_original(afl_state_t *afl) { orig_hit_cnt = new_hit_cnt; - for (i = 0; (s32)i < len - 3; ++i) { + for (i = 0; i < len - 3; ++i) { /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && @@ -1405,7 +1405,7 @@ skip_arith: orig_hit_cnt = new_hit_cnt; - for (i = 0; (s32)i < len - 1; ++i) { + for (i = 0; i < len - 1; ++i) { u16 orig = *(u16 *)(out_buf + i); @@ -1493,7 +1493,7 @@ skip_arith: orig_hit_cnt = new_hit_cnt; - for (i = 0; (s32)i < len - 3; i++) { + for (i = 0; i < len - 3; i++) { u32 orig = *(u32 *)(out_buf + i); @@ -1850,7 +1850,7 @@ custom_mutator_stage: if (unlikely(!mutated_buf)) { - FATAL("Error in custom_fuzz. Size returned: %zd", mutated_size); + FATAL("Error in custom_fuzz. 
Size returned: %zu", mutated_size); } @@ -2026,7 +2026,7 @@ havoc_stage: el->data, out_buf, temp_len, &custom_havoc_buf, MAX_FILE); if (unlikely(!custom_havoc_buf)) { - FATAL("Error in custom_havoc (return %zd)", new_len); + FATAL("Error in custom_havoc (return %zu)", new_len); } @@ -2458,7 +2458,7 @@ havoc_stage: u32 use_extra = rand_below(afl, afl->a_extras_cnt); u32 extra_len = afl->a_extras[use_extra].len; - if ((s32)extra_len > temp_len) { break; } + if (extra_len > temp_len) { break; } u32 insert_at = rand_below(afl, temp_len - extra_len + 1); #ifdef INTROSPECTION @@ -2476,7 +2476,7 @@ havoc_stage: u32 use_extra = rand_below(afl, afl->extras_cnt); u32 extra_len = afl->extras[use_extra].len; - if ((s32)extra_len > temp_len) { break; } + if (extra_len > temp_len) { break; } u32 insert_at = rand_below(afl, temp_len - extra_len + 1); #ifdef INTROSPECTION @@ -2577,7 +2577,7 @@ havoc_stage: u32 copy_from, copy_to, copy_len; copy_len = choose_block_len(afl, new_len - 1); - if ((s32)copy_len > temp_len) copy_len = temp_len; + if (copy_len > temp_len) copy_len = temp_len; copy_from = rand_below(afl, new_len - copy_len + 1); copy_to = rand_below(afl, temp_len - copy_len + 1); @@ -2952,7 +2952,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { else orig_perf = perf_score = calculate_score(afl, afl->queue_cur); - if (unlikely(perf_score <= 0)) { goto abandon_entry; } + if (unlikely(perf_score == 0)) { goto abandon_entry; } if (unlikely(afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized)) { diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 9a0d199e..cd41bafc 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -489,11 +489,12 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { void destroy_queue(afl_state_t *afl) { - struct queue_entry *q; u32 i; for (i = 0; i < afl->queued_paths; i++) { + struct queue_entry *q; + q = afl->queue_buf[i]; ck_free(q->fname); ck_free(q->trace_mini); @@ -996,7 +997,7 @@ inline void queue_testcase_retake(afl_state_t *afl, struct queue_entry *q, if (unlikely(!q->testcase_buf)) { - PFATAL("Unable to malloc '%s' with len %d", q->fname, len); + PFATAL("Unable to malloc '%s' with len %u", q->fname, len); } diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index c5db8fa1..7dba1caa 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -445,6 +445,9 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) { u32 k; u8 cons_ff = 0, cons_0 = 0; + + if (shape > sizeof(v)) FATAL("shape is greater than %zu, please report!", sizeof(v)); + for (k = 0; k < shape; ++k) { if (b[k] == 0) { @@ -453,7 +456,7 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) { } else if (b[k] == 0xff) { - ++cons_0; + ++cons_ff; } else { @@ -667,12 +670,12 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) { u8 status = 0; // opt not in the paper - u32 fails = 0; +// u32 fails = 0; u8 found_one = 0; for (i = 0; i < loggeds; ++i) { - fails = 0; + u32 fails = 0; struct cmpfn_operands *o = &((struct cmpfn_operands *)afl->shm.cmp_map->log[key])[i]; diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index 339fb9c3..11d8204b 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -682,7 +682,7 @@ void sync_fuzzers(afl_state_t *afl) { // same time. 
If so, the first temporary main node running again will demote // themselves so this is not an issue - u8 path[PATH_MAX]; +// u8 path2[PATH_MAX]; afl->is_main_node = 1; sprintf(path, "%s/is_main_node", afl->out_dir); int fd = open(path, O_CREAT | O_RDWR, 0644); diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index 1c211da6..c8366174 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -31,7 +31,6 @@ void write_setup_file(afl_state_t *afl, u32 argc, char **argv) { - char *val; u8 fn[PATH_MAX]; snprintf(fn, PATH_MAX, "%s/fuzzer_setup", afl->out_dir); FILE *f = create_ffile(fn); @@ -44,6 +43,7 @@ void write_setup_file(afl_state_t *afl, u32 argc, char **argv) { for (i = 0; i < s_afl_env; ++i) { + char *val; if ((val = getenv(afl_environment_variables[i])) != NULL) { fprintf(f, "%s=%s\n", afl_environment_variables[i], val); @@ -228,7 +228,7 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability, if (afl->virgin_bits[i] != 0xff) { - fprintf(f, " %d[%02x]", i, afl->virgin_bits[i]); + fprintf(f, " %u[%02x]", i, afl->virgin_bits[i]); } @@ -238,7 +238,7 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability, fprintf(f, "var_bytes :"); for (i = 0; i < afl->fsrv.map_size; i++) { - if (afl->var_bytes[i]) { fprintf(f, " %d", i); } + if (afl->var_bytes[i]) { fprintf(f, " %u", i); } } @@ -1163,7 +1163,7 @@ void show_init_stats(afl_state_t *afl) { } else { - ACTF("-t option specified. We'll use an exec timeout of %d ms.", + ACTF("-t option specified. We'll use an exec timeout of %u ms.", afl->fsrv.exec_tmout); } diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 897c2f1e..e239b47f 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -99,8 +99,8 @@ static void usage(u8 *argv0, int more_help) { " lin, quad> -- see docs/power_schedules.md\n" " -f file - location read by the fuzzed program (default: stdin " "or @@)\n" - " -t msec - timeout for each run (auto-scaled, 50-%d ms)\n" - " -m megs - memory limit for child process (%d MB, 0 = no limit)\n" + " -t msec - timeout for each run (auto-scaled, 50-%u ms)\n" + " -m megs - memory limit for child process (%u MB, 0 = no limit)\n" " -Q - use binary-only instrumentation (QEMU mode)\n" " -U - use unicorn-based instrumentation (Unicorn mode)\n" " -W - use qemu-based instrumentation with Wine (Wine " @@ -299,7 +299,7 @@ int main(int argc, char **argv_orig, char **envp) { s32 opt, i, auto_sync = 0 /*, user_set_cache = 0*/; u64 prev_queued = 0; - u32 sync_interval_cnt = 0, seek_to = 0, show_help = 0, map_size = MAP_SIZE; + u32 sync_interval_cnt = 0, seek_to = 0, show_help = 0, map_size = get_map_size(); u8 *extras_dir[4]; u8 mem_limit_given = 0, exit_1 = 0, debug = 0, extras_dir_cnt = 0 /*, have_p = 0*/; @@ -326,7 +326,7 @@ int main(int argc, char **argv_orig, char **envp) { if (get_afl_env("AFL_DEBUG")) { debug = afl->debug = 1; } - map_size = get_map_size(); +// map_size = get_map_size(); afl_state_init(afl, map_size); afl->debug = debug; afl_fsrv_init(&afl->fsrv); diff --git a/src/afl-ld-lto.c b/src/afl-ld-lto.c index fccdb1a5..1d54fda0 100644 --- a/src/afl-ld-lto.c +++ b/src/afl-ld-lto.c @@ -187,7 +187,7 @@ static void edit_params(int argc, char **argv) { if (debug) DEBUGF( - "passthrough=%s instrim=%d, gold_pos=%d, gold_present=%s " + "passthrough=%s instrim=%u, gold_pos=%u, gold_present=%s " "inst_present=%s rt_present=%s rt_lto_present=%s\n", passthrough ? "true" : "false", instrim, gold_pos, gold_present ? "true" : "false", inst_present ? 
"true" : "false", @@ -253,10 +253,10 @@ static void edit_params(int argc, char **argv) { int main(int argc, char **argv) { s32 pid, i, status; - u8 * ptr; +// u8 * ptr; char thecwd[PATH_MAX]; - if ((ptr = getenv("AFL_LD_CALLER")) != NULL) { + if (getenv("AFL_LD_CALLER") != NULL) { FATAL("ld loop detected! Set AFL_REAL_LD!\n"); diff --git a/src/afl-showmap.c b/src/afl-showmap.c index 355b2dc3..c0223a07 100644 --- a/src/afl-showmap.c +++ b/src/afl-showmap.c @@ -662,7 +662,7 @@ static void usage(u8 *argv0) { "Execution control settings:\n" " -t msec - timeout for each run (none)\n" - " -m megs - memory limit for child process (%d MB)\n" + " -m megs - memory limit for child process (%u MB)\n" " -Q - use binary-only instrumentation (QEMU mode)\n" " -U - use Unicorn-based instrumentation (Unicorn mode)\n" " -W - use qemu-based instrumentation with Wine (Wine mode)\n" @@ -1014,7 +1014,7 @@ int main(int argc, char **argv_orig, char **envp) { DIR * dir_in, *dir_out = NULL; struct dirent *dir_ent; - int done = 0; +// int done = 0; u8 infile[PATH_MAX], outfile[PATH_MAX]; u8 wait_for_gdb = 0; #if !defined(DT_REG) @@ -1090,11 +1090,11 @@ int main(int argc, char **argv_orig, char **envp) { if (get_afl_env("AFL_DEBUG")) { - int i = optind; + int j = optind; DEBUGF("%s:", fsrv->target_path); - while (argv[i] != NULL) { + while (argv[j] != NULL) { - SAYF(" \"%s\"", argv[i++]); + SAYF(" \"%s\"", argv[j++]); } @@ -1143,7 +1143,7 @@ int main(int argc, char **argv_orig, char **envp) { if (fsrv->support_shmem_fuzz && !fsrv->use_shmem_fuzz) shm_fuzz = deinit_shmem(fsrv, shm_fuzz); - while (done == 0 && (dir_ent = readdir(dir_in))) { + while ((dir_ent = readdir(dir_in))) { if (dir_ent->d_name[0] == '.') { diff --git a/src/afl-tmin.c b/src/afl-tmin.c index ed928c7c..09d97f58 100644 --- a/src/afl-tmin.c +++ b/src/afl-tmin.c @@ -835,8 +835,8 @@ static void usage(u8 *argv0) { "Execution control settings:\n" " -f file - input file read by the tested program (stdin)\n" - " -t msec - timeout for each run (%d ms)\n" - " -m megs - memory limit for child process (%d MB)\n" + " -t msec - timeout for each run (%u ms)\n" + " -m megs - memory limit for child process (%u MB)\n" " -Q - use binary-only instrumentation (QEMU mode)\n" " -U - use unicorn-based instrumentation (Unicorn mode)\n" " -W - use qemu-based instrumentation with Wine (Wine " -- cgit 1.4.1 From a0e884cf8bffe1a0394d106375f6a23edd2b60e6 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Fri, 15 Jan 2021 16:56:40 +0100 Subject: merge cmplog --- include/afl-fuzz.h | 21 +- include/cmplog.h | 17 +- include/debug.h | 24 +- include/types.h | 43 +- instrumentation/afl-compiler-rt.o.c | 89 +- instrumentation/cmplog-instructions-pass.cc | 586 +++++++++-- src/afl-cc.c | 49 +- src/afl-fuzz-init.c | 38 + src/afl-fuzz-one.c | 81 +- src/afl-fuzz-queue.c | 1 + src/afl-fuzz-redqueen.c | 1437 +++++++++++++++++++++++---- src/afl-fuzz-state.c | 1 + src/afl-fuzz.c | 60 +- 13 files changed, 2055 insertions(+), 392 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 988a907d..8a2122dc 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -145,12 +145,22 @@ extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN]; extern s32 interesting_32[INTERESTING_8_LEN + INTERESTING_16_LEN + INTERESTING_32_LEN]; +struct tainted { + + u32 pos; + u32 len; + struct tainted *next; + struct tainted *prev; + +}; + struct queue_entry { u8 *fname; /* File name for the test case */ u32 len; /* Input length */ - u8 cal_failed; /* Calibration 
failed? */ + u8 colorized, /* Do not run redqueen stage again */ + cal_failed; /* Calibration failed? */ bool trim_done, /* Trimmed? */ was_fuzzed, /* historical, but needed for MOpt */ passed_det, /* Deterministic stages passed? */ @@ -158,7 +168,6 @@ struct queue_entry { var_behavior, /* Variable behavior? */ favored, /* Currently favored? */ fs_redundant, /* Marked as redundant in the fs? */ - fully_colorized, /* Do not run redqueen stage again */ is_ascii, /* Is the input just ascii text? */ disabled; /* Is disabled from fuzz selection */ @@ -183,7 +192,11 @@ struct queue_entry { u8 *testcase_buf; /* The testcase buffer, if loaded. */ - struct queue_entry *next; /* Next element, if any */ + u8 * cmplog_colorinput; /* the result buf of colorization */ + struct tainted *taint; /* Taint information from CmpLog */ + + struct queue_entry *mother, /* queue entry this based on */ + *next; /* Next element, if any */ }; @@ -636,6 +649,8 @@ typedef struct afl_state { /* cmplog forkserver ids */ s32 cmplog_fsrv_ctl_fd, cmplog_fsrv_st_fd; u32 cmplog_prev_timed_out; + u32 cmplog_max_filesize; + u32 cmplog_lvl; struct afl_pass_stat *pass_stats; struct cmp_map * orig_cmp_map; diff --git a/include/cmplog.h b/include/cmplog.h index bf557785..6392c503 100644 --- a/include/cmplog.h +++ b/include/cmplog.h @@ -30,8 +30,10 @@ #include "config.h" +#define CMPLOG_LVL_MAX 3 + #define CMP_MAP_W 65536 -#define CMP_MAP_H 256 +#define CMP_MAP_H 32 #define CMP_MAP_RTN_H (CMP_MAP_H / 4) #define SHAPE_BYTES(x) (x + 1) @@ -41,13 +43,12 @@ struct cmp_header { - unsigned hits : 20; - - unsigned cnt : 20; - unsigned id : 16; - - unsigned shape : 5; // from 0 to 31 + unsigned hits : 24; + unsigned id : 24; + unsigned shape : 5; unsigned type : 1; + unsigned attribute : 4; + unsigned reserved : 6; } __attribute__((packed)); @@ -55,6 +56,8 @@ struct cmp_operands { u64 v0; u64 v1; + u64 v0_128; + u64 v1_128; }; diff --git a/include/debug.h b/include/debug.h index ef5b195b..fc1f39cb 100644 --- a/include/debug.h +++ b/include/debug.h @@ -295,8 +295,8 @@ static inline const char *colorfilter(const char *x) { \ SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \ "\n[-] PROGRAM ABORT : " cRST x); \ - SAYF(cLRD "\n Location : " cRST "%s(), %s:%d\n\n", __func__, \ - __FILE__, __LINE__); \ + SAYF(cLRD "\n Location : " cRST "%s(), %s:%u\n\n", __func__, \ + __FILE__, (u32)__LINE__); \ exit(1); \ \ } while (0) @@ -308,8 +308,8 @@ static inline const char *colorfilter(const char *x) { \ SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \ "\n[-] PROGRAM ABORT : " cRST x); \ - SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%d\n\n", __func__, \ - __FILE__, __LINE__); \ + SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n\n", __func__, \ + __FILE__, (u32)__LINE__); \ abort(); \ \ } while (0) @@ -322,8 +322,8 @@ static inline const char *colorfilter(const char *x) { fflush(stdout); \ SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \ "\n[-] SYSTEM ERROR : " cRST x); \ - SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%d\n", __func__, \ - __FILE__, __LINE__); \ + SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n", __func__, \ + __FILE__, (u32)__LINE__); \ SAYF(cLRD " OS message : " cRST "%s\n", strerror(errno)); \ exit(1); \ \ @@ -344,12 +344,12 @@ static inline const char *colorfilter(const char *x) { /* Show a prefixed debug output. */ -#define DEBUGF(x...) \ - do { \ - \ - SAYF(cMGN "[D] " cBRI "DEBUG: " cRST x); \ - SAYF(cRST ""); \ - \ +#define DEBUGF(x...) 
\ + do { \ + \ + fprintf(stderr, cMGN "[D] " cBRI "DEBUG: " cRST x); \ + fprintf(stderr, cRST ""); \ + \ } while (0) /* Error-checking versions of read() and write() that call RPFATAL() as diff --git a/include/types.h b/include/types.h index 3e3bc953..d5c31597 100644 --- a/include/types.h +++ b/include/types.h @@ -26,9 +26,11 @@ #include #include -typedef uint8_t u8; -typedef uint16_t u16; -typedef uint32_t u32; +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef unsigned __int128 uint128_t; +typedef uint128_t u128; /* Extended forkserver option values */ @@ -57,10 +59,12 @@ typedef uint32_t u32; typedef unsigned long long u64; -typedef int8_t s8; -typedef int16_t s16; -typedef int32_t s32; -typedef int64_t s64; +typedef int8_t s8; +typedef int16_t s16; +typedef int32_t s32; +typedef int64_t s64; +typedef __int128 int128_t; +typedef int128_t s128; #ifndef MIN #define MIN(a, b) \ @@ -114,6 +118,31 @@ typedef int64_t s64; \ }) +// It is impossible to define 128 bit constants, so ... +#define SWAPN(_x, _l) \ + ({ \ + \ + u128 _res = (_x), _ret; \ + char *d = (char *)&_ret, *s = (char *)&_res; \ + int i; \ + for (i = 0; i < 16; i++) \ + d[15 - i] = s[i]; \ + u32 sr = 128U - ((_l) << 3U); \ + (_ret >>= sr); \ + (u128) _ret; \ + \ + }) + +#define SWAPNN(_x, _y, _l) \ + ({ \ + \ + char *d = (char *)(_x), *s = (char *)(_y); \ + u32 i, l = (_l)-1; \ + for (i = 0; i <= l; i++) \ + d[l - i] = s[i]; \ + \ + }) + #ifdef AFL_LLVM_PASS #if defined(__linux__) || !defined(__ANDROID__) #define AFL_SR(s) (srandom(s)) diff --git a/instrumentation/afl-compiler-rt.o.c b/instrumentation/afl-compiler-rt.o.c index b735d8df..5d75af78 100644 --- a/instrumentation/afl-compiler-rt.o.c +++ b/instrumentation/afl-compiler-rt.o.c @@ -161,7 +161,7 @@ void send_forkserver_error(int error) { u32 status; if (!error || error > 0xffff) return; status = (FS_OPT_ERROR | FS_OPT_SET_ERROR(error)); - if (write(FORKSRV_FD + 1, (char *)&status, 4) != 4) return; + if (write(FORKSRV_FD + 1, (char *)&status, 4) != 4) { return; } } @@ -544,11 +544,11 @@ static void __afl_start_snapshots(void) { if (__afl_dictionary_len && __afl_dictionary) status |= FS_OPT_AUTODICT; memcpy(tmp, &status, 4); - if (write(FORKSRV_FD + 1, tmp, 4) != 4) return; + if (write(FORKSRV_FD + 1, tmp, 4) != 4) { return; } if (__afl_sharedmem_fuzzing || (__afl_dictionary_len && __afl_dictionary)) { - if (read(FORKSRV_FD, &was_killed, 4) != 4) _exit(1); + if (read(FORKSRV_FD, &was_killed, 4) != 4) { _exit(1); } if (getenv("AFL_DEBUG")) { @@ -1207,7 +1207,9 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) { ///// CmpLog instrumentation -void __cmplog_ins_hook1(uint8_t arg1, uint8_t arg2) { +void __cmplog_ins_hook1(uint8_t arg1, uint8_t arg2, uint8_t attr) { + + // fprintf(stderr, "hook1 arg0=%02x arg1=%02x attr=%u\n", arg1, arg2, attr); if (unlikely(!__afl_cmp_map)) return; @@ -1216,6 +1218,7 @@ void __cmplog_ins_hook1(uint8_t arg1, uint8_t arg2) { k &= CMP_MAP_W - 1; __afl_cmp_map->headers[k].type = CMP_TYPE_INS; + __afl_cmp_map->headers[k].attribute = attr; u32 hits = __afl_cmp_map->headers[k].hits; __afl_cmp_map->headers[k].hits = hits + 1; @@ -1230,7 +1233,7 @@ void __cmplog_ins_hook1(uint8_t arg1, uint8_t arg2) { } -void __cmplog_ins_hook2(uint16_t arg1, uint16_t arg2) { +void __cmplog_ins_hook2(uint16_t arg1, uint16_t arg2, uint8_t attr) { if (unlikely(!__afl_cmp_map)) return; @@ -1239,6 +1242,7 @@ void __cmplog_ins_hook2(uint16_t arg1, uint16_t arg2) { k &= CMP_MAP_W - 1; __afl_cmp_map->headers[k].type = 
CMP_TYPE_INS; + __afl_cmp_map->headers[k].attribute = attr; u32 hits = __afl_cmp_map->headers[k].hits; __afl_cmp_map->headers[k].hits = hits + 1; @@ -1251,7 +1255,9 @@ void __cmplog_ins_hook2(uint16_t arg1, uint16_t arg2) { } -void __cmplog_ins_hook4(uint32_t arg1, uint32_t arg2) { +void __cmplog_ins_hook4(uint32_t arg1, uint32_t arg2, uint8_t attr) { + + // fprintf(stderr, "hook4 arg0=%x arg1=%x attr=%u\n", arg1, arg2, attr); if (unlikely(!__afl_cmp_map)) return; @@ -1260,6 +1266,7 @@ void __cmplog_ins_hook4(uint32_t arg1, uint32_t arg2) { k &= CMP_MAP_W - 1; __afl_cmp_map->headers[k].type = CMP_TYPE_INS; + __afl_cmp_map->headers[k].attribute = attr; u32 hits = __afl_cmp_map->headers[k].hits; __afl_cmp_map->headers[k].hits = hits + 1; @@ -1272,7 +1279,9 @@ void __cmplog_ins_hook4(uint32_t arg1, uint32_t arg2) { } -void __cmplog_ins_hook8(uint64_t arg1, uint64_t arg2) { +void __cmplog_ins_hook8(uint64_t arg1, uint64_t arg2, uint8_t attr) { + + // fprintf(stderr, "hook8 arg0=%lx arg1=%lx attr=%u\n", arg1, arg2, attr); if (unlikely(!__afl_cmp_map)) return; @@ -1281,6 +1290,7 @@ void __cmplog_ins_hook8(uint64_t arg1, uint64_t arg2) { k &= CMP_MAP_W - 1; __afl_cmp_map->headers[k].type = CMP_TYPE_INS; + __afl_cmp_map->headers[k].attribute = attr; u32 hits = __afl_cmp_map->headers[k].hits; __afl_cmp_map->headers[k].hits = hits + 1; @@ -1293,16 +1303,77 @@ void __cmplog_ins_hook8(uint64_t arg1, uint64_t arg2) { } +// support for u24 to u120 via llvm _ExitInt(). size is in bytes minus 1 +void __cmplog_ins_hookN(uint128_t arg1, uint128_t arg2, uint8_t attr, + uint8_t size) { + + // fprintf(stderr, "hookN arg0=%llx:%llx arg1=%llx:%llx bytes=%u attr=%u\n", + // (u64)(arg1 >> 64), (u64)arg1, (u64)(arg2 >> 64), (u64)arg2, size + 1, + // attr); + + if (unlikely(!__afl_cmp_map)) return; + + uintptr_t k = (uintptr_t)__builtin_return_address(0); + k = (k >> 4) ^ (k << 8); + k &= CMP_MAP_W - 1; + + __afl_cmp_map->headers[k].type = CMP_TYPE_INS; + __afl_cmp_map->headers[k].attribute = attr; + + u32 hits = __afl_cmp_map->headers[k].hits; + __afl_cmp_map->headers[k].hits = hits + 1; + + __afl_cmp_map->headers[k].shape = size; + + hits &= CMP_MAP_H - 1; + __afl_cmp_map->log[k][hits].v0 = (u64)arg1; + __afl_cmp_map->log[k][hits].v1 = (u64)arg2; + + if (size > 7) { + + __afl_cmp_map->log[k][hits].v0_128 = (u64)(arg1 >> 64); + __afl_cmp_map->log[k][hits].v1_128 = (u64)(arg2 >> 64); + + } + +} + +void __cmplog_ins_hook16(uint128_t arg1, uint128_t arg2, uint8_t attr) { + + if (unlikely(!__afl_cmp_map)) return; + + uintptr_t k = (uintptr_t)__builtin_return_address(0); + k = (k >> 4) ^ (k << 8); + k &= CMP_MAP_W - 1; + + __afl_cmp_map->headers[k].type = CMP_TYPE_INS; + __afl_cmp_map->headers[k].attribute = attr; + + u32 hits = __afl_cmp_map->headers[k].hits; + __afl_cmp_map->headers[k].hits = hits + 1; + + __afl_cmp_map->headers[k].shape = 15; + + hits &= CMP_MAP_H - 1; + __afl_cmp_map->log[k][hits].v0 = (u64)arg1; + __afl_cmp_map->log[k][hits].v1 = (u64)arg2; + __afl_cmp_map->log[k][hits].v0_128 = (u64)(arg1 >> 64); + __afl_cmp_map->log[k][hits].v1_128 = (u64)(arg2 >> 64); + +} + #if defined(__APPLE__) #pragma weak __sanitizer_cov_trace_const_cmp1 = __cmplog_ins_hook1 #pragma weak __sanitizer_cov_trace_const_cmp2 = __cmplog_ins_hook2 #pragma weak __sanitizer_cov_trace_const_cmp4 = __cmplog_ins_hook4 #pragma weak __sanitizer_cov_trace_const_cmp8 = __cmplog_ins_hook8 + #pragma weak __sanitizer_cov_trace_const_cmp16 = __cmplog_ins_hook16 #pragma weak __sanitizer_cov_trace_cmp1 = __cmplog_ins_hook1 #pragma weak 
__sanitizer_cov_trace_cmp2 = __cmplog_ins_hook2 #pragma weak __sanitizer_cov_trace_cmp4 = __cmplog_ins_hook4 #pragma weak __sanitizer_cov_trace_cmp8 = __cmplog_ins_hook8 + #pragma weak __sanitizer_cov_trace_cmp16 = __cmplog_ins_hook16 #else void __sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2) __attribute__((alias("__cmplog_ins_hook1"))); @@ -1312,6 +1383,8 @@ void __sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2) __attribute__((alias("__cmplog_ins_hook4"))); void __sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2) __attribute__((alias("__cmplog_ins_hook8"))); +void __sanitizer_cov_trace_const_cmp16(uint128_t arg1, uint128_t arg2) + __attribute__((alias("__cmplog_ins_hook16"))); void __sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2) __attribute__((alias("__cmplog_ins_hook1"))); @@ -1321,6 +1394,8 @@ void __sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2) __attribute__((alias("__cmplog_ins_hook4"))); void __sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2) __attribute__((alias("__cmplog_ins_hook8"))); +void __sanitizer_cov_trace_cmp16(uint128_t arg1, uint128_t arg2) + __attribute__((alias("__cmplog_ins_hook16"))); #endif /* defined(__APPLE__) */ void __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases) { diff --git a/instrumentation/cmplog-instructions-pass.cc b/instrumentation/cmplog-instructions-pass.cc index 3499ccf0..a74fb6c8 100644 --- a/instrumentation/cmplog-instructions-pass.cc +++ b/instrumentation/cmplog-instructions-pass.cc @@ -85,9 +85,25 @@ class CmpLogInstructions : public ModulePass { char CmpLogInstructions::ID = 0; +template +Iterator Unique(Iterator first, Iterator last) { + + while (first != last) { + + Iterator next(first); + last = std::remove(++next, last, *first); + first = next; + + } + + return last; + +} + bool CmpLogInstructions::hookInstrs(Module &M) { std::vector icomps; + std::vector switches; LLVMContext & C = M.getContext(); Type * VoidTy = Type::getVoidTy(C); @@ -95,13 +111,15 @@ bool CmpLogInstructions::hookInstrs(Module &M) { IntegerType *Int16Ty = IntegerType::getInt16Ty(C); IntegerType *Int32Ty = IntegerType::getInt32Ty(C); IntegerType *Int64Ty = IntegerType::getInt64Ty(C); + IntegerType *Int128Ty = IntegerType::getInt128Ty(C); #if LLVM_VERSION_MAJOR < 9 Constant * #else FunctionCallee #endif - c1 = M.getOrInsertFunction("__cmplog_ins_hook1", VoidTy, Int8Ty, Int8Ty + c1 = M.getOrInsertFunction("__cmplog_ins_hook1", VoidTy, Int8Ty, Int8Ty, + Int8Ty #if LLVM_VERSION_MAJOR < 5 , NULL @@ -118,7 +136,8 @@ bool CmpLogInstructions::hookInstrs(Module &M) { #else FunctionCallee #endif - c2 = M.getOrInsertFunction("__cmplog_ins_hook2", VoidTy, Int16Ty, Int16Ty + c2 = M.getOrInsertFunction("__cmplog_ins_hook2", VoidTy, Int16Ty, Int16Ty, + Int8Ty #if LLVM_VERSION_MAJOR < 5 , NULL @@ -135,7 +154,8 @@ bool CmpLogInstructions::hookInstrs(Module &M) { #else FunctionCallee #endif - c4 = M.getOrInsertFunction("__cmplog_ins_hook4", VoidTy, Int32Ty, Int32Ty + c4 = M.getOrInsertFunction("__cmplog_ins_hook4", VoidTy, Int32Ty, Int32Ty, + Int8Ty #if LLVM_VERSION_MAJOR < 5 , NULL @@ -152,7 +172,8 @@ bool CmpLogInstructions::hookInstrs(Module &M) { #else FunctionCallee #endif - c8 = M.getOrInsertFunction("__cmplog_ins_hook8", VoidTy, Int64Ty, Int64Ty + c8 = M.getOrInsertFunction("__cmplog_ins_hook8", VoidTy, Int64Ty, Int64Ty, + Int8Ty #if LLVM_VERSION_MAJOR < 5 , NULL @@ -164,6 +185,42 @@ bool CmpLogInstructions::hookInstrs(Module &M) { FunctionCallee cmplogHookIns8 = c8; #endif +#if LLVM_VERSION_MAJOR < 9 + Constant * 
+#else + FunctionCallee +#endif + c16 = M.getOrInsertFunction("__cmplog_ins_hook16", VoidTy, Int128Ty, + Int128Ty, Int8Ty +#if LLVM_VERSION_MAJOR < 5 + , + NULL +#endif + ); +#if LLVM_VERSION_MAJOR < 9 + Function *cmplogHookIns16 = cast(c16); +#else + FunctionCallee cmplogHookIns16 = c16; +#endif + +#if LLVM_VERSION_MAJOR < 9 + Constant * +#else + FunctionCallee +#endif + cN = M.getOrInsertFunction("__cmplog_ins_hookN", VoidTy, Int128Ty, + Int128Ty, Int8Ty, Int8Ty +#if LLVM_VERSION_MAJOR < 5 + , + NULL +#endif + ); +#if LLVM_VERSION_MAJOR < 9 + Function *cmplogHookInsN = cast(cN); +#else + FunctionCallee cmplogHookInsN = cN; +#endif + /* iterate over all functions, bbs and instruction and add suitable calls */ for (auto &F : M) { @@ -174,35 +231,16 @@ bool CmpLogInstructions::hookInstrs(Module &M) { for (auto &IN : BB) { CmpInst *selectcmpInst = nullptr; - if ((selectcmpInst = dyn_cast(&IN))) { - if (selectcmpInst->getPredicate() == CmpInst::ICMP_EQ || - selectcmpInst->getPredicate() == CmpInst::ICMP_NE || - selectcmpInst->getPredicate() == CmpInst::ICMP_UGT || - selectcmpInst->getPredicate() == CmpInst::ICMP_SGT || - selectcmpInst->getPredicate() == CmpInst::ICMP_ULT || - selectcmpInst->getPredicate() == CmpInst::ICMP_SLT || - selectcmpInst->getPredicate() == CmpInst::ICMP_UGE || - selectcmpInst->getPredicate() == CmpInst::ICMP_SGE || - selectcmpInst->getPredicate() == CmpInst::ICMP_ULE || - selectcmpInst->getPredicate() == CmpInst::ICMP_SLE || - selectcmpInst->getPredicate() == CmpInst::FCMP_OGE || - selectcmpInst->getPredicate() == CmpInst::FCMP_UGE || - selectcmpInst->getPredicate() == CmpInst::FCMP_OLE || - selectcmpInst->getPredicate() == CmpInst::FCMP_ULE || - selectcmpInst->getPredicate() == CmpInst::FCMP_OGT || - selectcmpInst->getPredicate() == CmpInst::FCMP_UGT || - selectcmpInst->getPredicate() == CmpInst::FCMP_OLT || - selectcmpInst->getPredicate() == CmpInst::FCMP_ULT || - selectcmpInst->getPredicate() == CmpInst::FCMP_UEQ || - selectcmpInst->getPredicate() == CmpInst::FCMP_OEQ || - selectcmpInst->getPredicate() == CmpInst::FCMP_UNE || - selectcmpInst->getPredicate() == CmpInst::FCMP_ONE) { - - icomps.push_back(selectcmpInst); + icomps.push_back(selectcmpInst); - } + } + + SwitchInst *switchInst = nullptr; + if ((switchInst = dyn_cast(BB.getTerminator()))) { + + if (switchInst->getNumCases() > 1) { switches.push_back(switchInst); } } @@ -212,101 +250,473 @@ bool CmpLogInstructions::hookInstrs(Module &M) { } - if (!icomps.size()) return false; - // if (!be_quiet) errs() << "Hooking " << icomps.size() << " cmp - // instructions\n"; + // unique the collected switches + switches.erase(Unique(switches.begin(), switches.end()), switches.end()); + + // Instrument switch values for cmplog + if (switches.size()) { + + if (!be_quiet) + errs() << "Hooking " << switches.size() << " switch instructions\n"; - for (auto &selectcmpInst : icomps) { + for (auto &SI : switches) { - IRBuilder<> IRB(selectcmpInst->getParent()); - IRB.SetInsertPoint(selectcmpInst); + Value * Val = SI->getCondition(); + unsigned int max_size = Val->getType()->getIntegerBitWidth(), cast_size; + unsigned char do_cast = 0; - auto op0 = selectcmpInst->getOperand(0); - auto op1 = selectcmpInst->getOperand(1); + if (!SI->getNumCases() || max_size <= 8) { - IntegerType * intTyOp0 = NULL; - IntegerType * intTyOp1 = NULL; - unsigned max_size = 0; - std::vector args; + // if (!be_quiet) errs() << "skip trivial switch..\n"; + continue; - if (selectcmpInst->getOpcode() == Instruction::FCmp) { + } + + IRBuilder<> 
IRB(SI->getParent()); + IRB.SetInsertPoint(SI); + + if (max_size % 8) { + + max_size = (((max_size / 8) + 1) * 8); + do_cast = 1; + + } + + if (max_size > 128) { + + if (!be_quiet) { + + fprintf(stderr, + "Cannot handle this switch bit size: %u (truncating)\n", + max_size); + + } + + max_size = 128; + do_cast = 1; + + } + + // do we need to cast? + switch (max_size) { + + case 8: + case 16: + case 32: + case 64: + case 128: + cast_size = max_size; + break; + default: + cast_size = 128; + do_cast = 1; + + } + + Value *CompareTo = Val; + + if (do_cast) { + + ConstantInt *cint = dyn_cast(Val); + if (cint) { + + uint64_t val = cint->getZExtValue(); + // fprintf(stderr, "ConstantInt: %lu\n", val); + switch (cast_size) { + + case 8: + CompareTo = ConstantInt::get(Int8Ty, val); + break; + case 16: + CompareTo = ConstantInt::get(Int16Ty, val); + break; + case 32: + CompareTo = ConstantInt::get(Int32Ty, val); + break; + case 64: + CompareTo = ConstantInt::get(Int64Ty, val); + break; + case 128: + CompareTo = ConstantInt::get(Int128Ty, val); + break; + + } - auto ty0 = op0->getType(); - if (ty0->isHalfTy() + } else { + + CompareTo = IRB.CreateBitCast(Val, IntegerType::get(C, cast_size)); + + } + + } + + for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; + ++i) { + +#if LLVM_VERSION_MAJOR < 5 + ConstantInt *cint = i.getCaseValue(); +#else + ConstantInt *cint = i->getCaseValue(); +#endif + + if (cint) { + + std::vector args; + args.push_back(CompareTo); + + Value *new_param = cint; + + if (do_cast) { + + uint64_t val = cint->getZExtValue(); + // fprintf(stderr, "ConstantInt: %lu\n", val); + switch (cast_size) { + + case 8: + new_param = ConstantInt::get(Int8Ty, val); + break; + case 16: + new_param = ConstantInt::get(Int16Ty, val); + break; + case 32: + new_param = ConstantInt::get(Int32Ty, val); + break; + case 64: + new_param = ConstantInt::get(Int64Ty, val); + break; + case 128: + new_param = ConstantInt::get(Int128Ty, val); + break; + + } + + } + + if (new_param) { + + args.push_back(new_param); + ConstantInt *attribute = ConstantInt::get(Int8Ty, 1); + args.push_back(attribute); + if (cast_size != max_size) { + + ConstantInt *bitsize = + ConstantInt::get(Int8Ty, (max_size / 8) - 1); + args.push_back(bitsize); + + } + + switch (cast_size) { + + case 8: + IRB.CreateCall(cmplogHookIns1, args); + break; + case 16: + IRB.CreateCall(cmplogHookIns2, args); + break; + case 32: + IRB.CreateCall(cmplogHookIns4, args); + break; + case 64: + IRB.CreateCall(cmplogHookIns8, args); + break; + case 128: + if (max_size == 128) { + + IRB.CreateCall(cmplogHookIns16, args); + + } else { + + IRB.CreateCall(cmplogHookInsN, args); + + } + + break; + + } + + } + + } + + } + + } + + } + + if (icomps.size()) { + + // if (!be_quiet) errs() << "Hooking " << icomps.size() << + // " cmp instructions\n"; + + for (auto &selectcmpInst : icomps) { + + IRBuilder<> IRB(selectcmpInst->getParent()); + IRB.SetInsertPoint(selectcmpInst); + + Value *op0 = selectcmpInst->getOperand(0); + Value *op1 = selectcmpInst->getOperand(1); + + IntegerType * intTyOp0 = NULL; + IntegerType * intTyOp1 = NULL; + unsigned max_size = 0, cast_size = 0; + unsigned char attr = 0, do_cast = 0; + std::vector args; + + CmpInst *cmpInst = dyn_cast(selectcmpInst); + + if (!cmpInst) { continue; } + + switch (cmpInst->getPredicate()) { + + case CmpInst::ICMP_NE: + case CmpInst::FCMP_UNE: + case CmpInst::FCMP_ONE: + break; + case CmpInst::ICMP_EQ: + case CmpInst::FCMP_UEQ: + case CmpInst::FCMP_OEQ: + attr += 1; + break; + case 
CmpInst::ICMP_UGT: + case CmpInst::ICMP_SGT: + case CmpInst::FCMP_OGT: + case CmpInst::FCMP_UGT: + attr += 2; + break; + case CmpInst::ICMP_UGE: + case CmpInst::ICMP_SGE: + case CmpInst::FCMP_OGE: + case CmpInst::FCMP_UGE: + attr += 3; + break; + case CmpInst::ICMP_ULT: + case CmpInst::ICMP_SLT: + case CmpInst::FCMP_OLT: + case CmpInst::FCMP_ULT: + attr += 4; + break; + case CmpInst::ICMP_ULE: + case CmpInst::ICMP_SLE: + case CmpInst::FCMP_OLE: + case CmpInst::FCMP_ULE: + attr += 5; + break; + default: + break; + + } + + if (selectcmpInst->getOpcode() == Instruction::FCmp) { + + auto ty0 = op0->getType(); + if (ty0->isHalfTy() #if LLVM_VERSION_MAJOR >= 11 - || ty0->isBFloatTy() + || ty0->isBFloatTy() #endif - ) - max_size = 16; - else if (ty0->isFloatTy()) - max_size = 32; - else if (ty0->isDoubleTy()) - max_size = 64; + ) + max_size = 16; + else if (ty0->isFloatTy()) + max_size = 32; + else if (ty0->isDoubleTy()) + max_size = 64; + else if (ty0->isX86_FP80Ty()) + max_size = 80; + else if (ty0->isFP128Ty() || ty0->isPPC_FP128Ty()) + max_size = 128; + + attr += 8; + do_cast = 1; - if (max_size) { + } else { - Value *V0 = IRB.CreateBitCast(op0, IntegerType::get(C, max_size)); - intTyOp0 = dyn_cast(V0->getType()); - Value *V1 = IRB.CreateBitCast(op1, IntegerType::get(C, max_size)); - intTyOp1 = dyn_cast(V1->getType()); + intTyOp0 = dyn_cast(op0->getType()); + intTyOp1 = dyn_cast(op1->getType()); if (intTyOp0 && intTyOp1) { max_size = intTyOp0->getBitWidth() > intTyOp1->getBitWidth() ? intTyOp0->getBitWidth() : intTyOp1->getBitWidth(); - args.push_back(V0); - args.push_back(V1); - } else { + } + + } + + if (!max_size) { continue; } + + // _ExtInt() with non-8th values + if (max_size % 8) { + + max_size = (((max_size / 8) + 1) * 8); + do_cast = 1; + + } + + if (max_size > 128) { + + if (!be_quiet) { - max_size = 0; + fprintf(stderr, + "Cannot handle this compare bit size: %u (truncating)\n", + max_size); } + max_size = 128; + do_cast = 1; + + } + + // do we need to cast? + switch (max_size) { + + case 8: + case 16: + case 32: + case 64: + case 128: + cast_size = max_size; + break; + default: + cast_size = 128; + do_cast = 1; + } - } else { + if (do_cast) { + + // F*cking LLVM optimized out any kind of bitcasts of ConstantInt values + // creating illegal calls. WTF. So we have to work around this. 
+ + ConstantInt *cint = dyn_cast(op0); + if (cint) { + + uint64_t val = cint->getZExtValue(); + // fprintf(stderr, "ConstantInt: %lu\n", val); + ConstantInt *new_param = NULL; + switch (cast_size) { + + case 8: + new_param = ConstantInt::get(Int8Ty, val); + break; + case 16: + new_param = ConstantInt::get(Int16Ty, val); + break; + case 32: + new_param = ConstantInt::get(Int32Ty, val); + break; + case 64: + new_param = ConstantInt::get(Int64Ty, val); + break; + case 128: + new_param = ConstantInt::get(Int128Ty, val); + break; + + } + + if (!new_param) { continue; } + args.push_back(new_param); + + } else { + + Value *V0 = IRB.CreateBitCast(op0, IntegerType::get(C, cast_size)); + args.push_back(V0); + + } + + cint = dyn_cast(op1); + if (cint) { + + uint64_t val = cint->getZExtValue(); + ConstantInt *new_param = NULL; + switch (cast_size) { + + case 8: + new_param = ConstantInt::get(Int8Ty, val); + break; + case 16: + new_param = ConstantInt::get(Int16Ty, val); + break; + case 32: + new_param = ConstantInt::get(Int32Ty, val); + break; + case 64: + new_param = ConstantInt::get(Int64Ty, val); + break; + case 128: + new_param = ConstantInt::get(Int128Ty, val); + break; + + } + + if (!new_param) { continue; } + args.push_back(new_param); + + } else { + + Value *V1 = IRB.CreateBitCast(op1, IntegerType::get(C, cast_size)); + args.push_back(V1); - intTyOp0 = dyn_cast(op0->getType()); - intTyOp1 = dyn_cast(op1->getType()); + } - if (intTyOp0 && intTyOp1) { + } else { - max_size = intTyOp0->getBitWidth() > intTyOp1->getBitWidth() - ? intTyOp0->getBitWidth() - : intTyOp1->getBitWidth(); args.push_back(op0); args.push_back(op1); } - } + ConstantInt *attribute = ConstantInt::get(Int8Ty, attr); + args.push_back(attribute); + + if (cast_size != max_size) { + + ConstantInt *bitsize = ConstantInt::get(Int8Ty, (max_size / 8) - 1); + args.push_back(bitsize); + + } + + // fprintf(stderr, "_ExtInt(%u) castTo %u with attr %u didcast %u\n", + // max_size, cast_size, attr, do_cast); + + switch (cast_size) { - if (max_size < 8 || max_size > 64 || !intTyOp0 || !intTyOp1) continue; - - switch (max_size) { - - case 8: - IRB.CreateCall(cmplogHookIns1, args); - break; - case 16: - IRB.CreateCall(cmplogHookIns2, args); - break; - case 32: - IRB.CreateCall(cmplogHookIns4, args); - break; - case 64: - IRB.CreateCall(cmplogHookIns8, args); - break; - default: - break; + case 8: + IRB.CreateCall(cmplogHookIns1, args); + break; + case 16: + IRB.CreateCall(cmplogHookIns2, args); + break; + case 32: + IRB.CreateCall(cmplogHookIns4, args); + break; + case 64: + IRB.CreateCall(cmplogHookIns8, args); + break; + case 128: + if (max_size == 128) { + + IRB.CreateCall(cmplogHookIns16, args); + + } else { + + IRB.CreateCall(cmplogHookInsN, args); + + } + + break; + + } } } - return true; + if (switches.size() || icomps.size()) + return true; + else + return false; } diff --git a/src/afl-cc.c b/src/afl-cc.c index 8fb42718..02c9c7c5 100644 --- a/src/afl-cc.c +++ b/src/afl-cc.c @@ -528,10 +528,10 @@ static void edit_params(u32 argc, char **argv, char **envp) { cc_params[cc_par_cnt++] = alloc_printf( "-Wl,-mllvm=-load=%s/cmplog-routines-pass.so", obj_path); - cc_params[cc_par_cnt++] = alloc_printf( - "-Wl,-mllvm=-load=%s/split-switches-pass.so", obj_path); cc_params[cc_par_cnt++] = alloc_printf( "-Wl,-mllvm=-load=%s/cmplog-instructions-pass.so", obj_path); + cc_params[cc_par_cnt++] = alloc_printf( + "-Wl,-mllvm=-load=%s/split-switches-pass.so", obj_path); } else { @@ -541,18 +541,18 @@ static void edit_params(u32 argc, char **argv, char 
**envp) { cc_params[cc_par_cnt++] = alloc_printf("%s/cmplog-routines-pass.so", obj_path); - // reuse split switches from laf cc_params[cc_par_cnt++] = "-Xclang"; cc_params[cc_par_cnt++] = "-load"; cc_params[cc_par_cnt++] = "-Xclang"; cc_params[cc_par_cnt++] = - alloc_printf("%s/split-switches-pass.so", obj_path); + alloc_printf("%s/cmplog-instructions-pass.so", obj_path); + // reuse split switches from laf cc_params[cc_par_cnt++] = "-Xclang"; cc_params[cc_par_cnt++] = "-load"; cc_params[cc_par_cnt++] = "-Xclang"; cc_params[cc_par_cnt++] = - alloc_printf("%s/cmplog-instructions-pass.so", obj_path); + alloc_printf("%s/split-switches-pass.so", obj_path); } @@ -792,10 +792,8 @@ static void edit_params(u32 argc, char **argv, char **envp) { } -#if defined(USEMMAP) - #if !defined(__HAIKU__) +#if defined(USEMMAP) && !defined(__HAIKU__) cc_params[cc_par_cnt++] = "-lrt"; - #endif #endif cc_params[cc_par_cnt++] = "-D__AFL_HAVE_MANUAL_CONTROL=1"; @@ -858,6 +856,7 @@ static void edit_params(u32 argc, char **argv, char **envp) { cc_params[cc_par_cnt++] = "-D__AFL_COVERAGE_DISCARD()=__afl_coverage_discard()"; cc_params[cc_par_cnt++] = "-D__AFL_COVERAGE_ABORT()=__afl_coverage_abort()"; + cc_params[cc_par_cnt++] = "-D__AFL_FUZZ_TESTCASE_BUF=(__afl_fuzz_ptr ? __afl_fuzz_ptr : " "__afl_fuzz_alt_ptr)"; @@ -967,10 +966,8 @@ static void edit_params(u32 argc, char **argv, char **envp) { alloc_printf("-Wl,--dynamic-list=%s/dynamic_list.txt", obj_path); #endif - #if defined(USEMMAP) - #if !defined(__HAIKU__) + #if defined(USEMMAP) && !defined(__HAIKU__) cc_params[cc_par_cnt++] = "-lrt"; - #endif #endif } @@ -1278,7 +1275,6 @@ int main(int argc, char **argv, char **envp) { } - // this is a hidden option if (strncasecmp(ptr2, "llvmnative", strlen("llvmnative")) == 0 || strncasecmp(ptr2, "llvm-native", strlen("llvm-native")) == 0) { @@ -1349,29 +1345,28 @@ int main(int argc, char **argv, char **envp) { if (strncasecmp(ptr2, "ngram", strlen("ngram")) == 0) { - ptr2 += strlen("ngram"); - while (*ptr2 && (*ptr2 < '0' || *ptr2 > '9')) - ptr2++; + u8 *ptr3 = ptr2 + strlen("ngram"); + while (*ptr3 && (*ptr3 < '0' || *ptr3 > '9')) + ptr3++; - if (!*ptr2) { + if (!*ptr3) { - if ((ptr2 = getenv("AFL_LLVM_NGRAM_SIZE")) == NULL) + if ((ptr3 = getenv("AFL_LLVM_NGRAM_SIZE")) == NULL) FATAL( "you must set the NGRAM size with (e.g. 
for value 2) " "AFL_LLVM_INSTRUMENT=ngram-2"); } - ngram_size = atoi(ptr2); + ngram_size = atoi(ptr3); if (ngram_size < 2 || ngram_size > NGRAM_SIZE_MAX) FATAL( "NGRAM instrumentation option must be between 2 and " - "NGRAM_SIZE_MAX " - "(%u)", + "NGRAM_SIZE_MAX (%u)", NGRAM_SIZE_MAX); instrument_opt_mode |= (INSTRUMENT_OPT_NGRAM); - ptr2 = alloc_printf("%u", ngram_size); - setenv("AFL_LLVM_NGRAM_SIZE", ptr2, 1); + u8 *ptr4 = alloc_printf("%u", ngram_size); + setenv("AFL_LLVM_NGRAM_SIZE", ptr4, 1); } @@ -1507,6 +1502,7 @@ int main(int argc, char **argv, char **envp) { "((instrumentation/README.ngram.md)\n" " INSTRIM: Dominator tree (for LLVM <= 6.0) " "(instrumentation/README.instrim.md)\n\n"); + #undef NATIVE_MSG SAYF( @@ -1641,16 +1637,15 @@ int main(int argc, char **argv, char **envp) { if (have_lto) SAYF("afl-cc LTO with ld=%s %s\n", AFL_REAL_LD, AFL_CLANG_FLTO); if (have_llvm) - SAYF("afl-cc LLVM version %d using binary path \"%s\".\n", LLVM_MAJOR, + SAYF("afl-cc LLVM version %d using the binary path \"%s\".\n", LLVM_MAJOR, LLVM_BINDIR); #endif -#if defined(USEMMAP) +#ifdef USEMMAP #if !defined(__HAIKU__) - cc_params[cc_par_cnt++] = "-lrt"; - SAYF("Compiled with shm_open support (adds -lrt when linking).\n"); - #else SAYF("Compiled with shm_open support.\n"); + #else + SAYF("Compiled with shm_open support (adds -lrt when linking).\n"); #endif #else SAYF("Compiled with shmat support.\n"); diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index dbffa4f9..cbff6d7e 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -729,6 +729,30 @@ void read_testcases(afl_state_t *afl, u8 *directory) { add_to_queue(afl, fn2, st.st_size >= MAX_FILE ? MAX_FILE : st.st_size, passed_det); + if (unlikely(afl->shm.cmplog_mode)) { + + if (afl->cmplog_lvl == 1) { + + if (!afl->cmplog_max_filesize || + afl->cmplog_max_filesize < st.st_size) { + + afl->cmplog_max_filesize = st.st_size; + + } + + } else if (afl->cmplog_lvl == 2) { + + if (!afl->cmplog_max_filesize || + afl->cmplog_max_filesize > st.st_size) { + + afl->cmplog_max_filesize = st.st_size; + + } + + } + + } + if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE)) { u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST); @@ -756,6 +780,20 @@ void read_testcases(afl_state_t *afl, u8 *directory) { } + if (unlikely(afl->shm.cmplog_mode)) { + + if (afl->cmplog_max_filesize < 1024) { + + afl->cmplog_max_filesize = 1024; + + } else { + + afl->cmplog_max_filesize = (((afl->cmplog_max_filesize >> 10) + 1) << 10); + + } + + } + afl->last_path_time = 0; afl->queued_at_start = afl->queued_paths; diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index f9509e86..596bae22 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -165,7 +165,7 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { /* See if one-byte adjustments to any byte could produce this result. */ - for (i = 0; i < blen; ++i) { + for (i = 0; (u8)i < blen; ++i) { u8 a = old_val >> (8 * i), b = new_val >> (8 * i); @@ -193,7 +193,7 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { diffs = 0; - for (i = 0; i < blen / 2; ++i) { + for (i = 0; (u8)i < blen / 2; ++i) { u16 a = old_val >> (16 * i), b = new_val >> (16 * i); @@ -290,7 +290,7 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) { /* See if two-byte insertions over old_val could give us new_val. 
*/ - for (i = 0; (s32)i < blen - 1; ++i) { + for (i = 0; (u8)i < blen - 1; ++i) { for (j = 0; j < sizeof(interesting_16) / 2; ++j) { @@ -545,14 +545,31 @@ u8 fuzz_one_original(afl_state_t *afl) { else orig_perf = perf_score = calculate_score(afl, afl->queue_cur); - if (unlikely(perf_score == 0)) { goto abandon_entry; } + if (unlikely(perf_score <= 0)) { goto abandon_entry; } - if (unlikely(afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized)) { + if (unlikely(afl->shm.cmplog_mode && + afl->queue_cur->colorized < afl->cmplog_lvl && + (u32)len <= afl->cmplog_max_filesize)) { - if (input_to_state_stage(afl, in_buf, out_buf, len, - afl->queue_cur->exec_cksum)) { + if (unlikely(len < 4)) { - goto abandon_entry; + afl->queue_cur->colorized = 0xff; + + } else { + + if (afl->cmplog_lvl == 3 || + (afl->cmplog_lvl == 2 && afl->queue_cur->tc_ref) || + !(afl->fsrv.total_execs % afl->queued_paths) || + get_cur_time() - afl->last_path_time > 15000) { + + if (input_to_state_stage(afl, in_buf, out_buf, len, + afl->queue_cur->exec_cksum)) { + + goto abandon_entry; + + } + + } } @@ -2796,7 +2813,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { } - s32 len, temp_len; + u32 len, temp_len; u32 i; u32 j; u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; @@ -2952,14 +2969,31 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { else orig_perf = perf_score = calculate_score(afl, afl->queue_cur); - if (unlikely(perf_score == 0)) { goto abandon_entry; } + if (unlikely(perf_score <= 0)) { goto abandon_entry; } - if (unlikely(afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized)) { + if (unlikely(afl->shm.cmplog_mode && + afl->queue_cur->colorized < afl->cmplog_lvl && + (u32)len <= afl->cmplog_max_filesize)) { - if (input_to_state_stage(afl, in_buf, out_buf, len, - afl->queue_cur->exec_cksum)) { + if (unlikely(len < 4)) { - goto abandon_entry; + afl->queue_cur->colorized = 0xff; + + } else { + + if (afl->cmplog_lvl == 3 || + (afl->cmplog_lvl == 2 && afl->queue_cur->tc_ref) || + !(afl->fsrv.total_execs % afl->queued_paths) || + get_cur_time() - afl->last_path_time > 15000) { + + if (input_to_state_stage(afl, in_buf, out_buf, len, + afl->queue_cur->exec_cksum)) { + + goto abandon_entry; + + } + + } } @@ -3315,7 +3349,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { orig_hit_cnt = new_hit_cnt; - for (i = 0; (s32)i < len - 1; ++i) { + for (i = 0; i < len - 1; ++i) { /* Let's consult the effector map... */ @@ -3357,7 +3391,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { orig_hit_cnt = new_hit_cnt; - for (i = 0; (s32)i < len - 3; ++i) { + for (i = 0; i < len - 3; ++i) { /* Let's consult the effector map... 
*/ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && @@ -3489,7 +3523,7 @@ skip_bitflip: orig_hit_cnt = new_hit_cnt; - for (i = 0; (s32)i < len - 1; ++i) { + for (i = 0; i < len - 1; ++i) { u16 orig = *(u16 *)(out_buf + i); @@ -3615,7 +3649,7 @@ skip_bitflip: orig_hit_cnt = new_hit_cnt; - for (i = 0; (s32)i < len - 3; ++i) { + for (i = 0; i < len - 3; ++i) { u32 orig = *(u32 *)(out_buf + i); @@ -3805,7 +3839,7 @@ skip_arith: orig_hit_cnt = new_hit_cnt; - for (i = 0; (s32)i < len - 1; ++i) { + for (i = 0; i < len - 1; ++i) { u16 orig = *(u16 *)(out_buf + i); @@ -3891,7 +3925,7 @@ skip_arith: orig_hit_cnt = new_hit_cnt; - for (i = 0; (s32)i < len - 3; ++i) { + for (i = 0; i < len - 3; ++i) { u32 orig = *(u32 *)(out_buf + i); @@ -4120,7 +4154,7 @@ skip_user_extras: /* See the comment in the earlier code; extras are sorted by size. */ - if ((s32)(afl->a_extras[j].len) > (s32)(len - i) || + if ((afl->a_extras[j].len) > (len - i) || !memcmp(afl->a_extras[j].data, out_buf + i, afl->a_extras[j].len) || !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, afl->a_extras[j].len))) { @@ -4837,7 +4871,7 @@ pacemaker_fuzzing: u32 copy_from, copy_to, copy_len; copy_len = choose_block_len(afl, new_len - 1); - if ((s32)copy_len > temp_len) copy_len = temp_len; + if (copy_len > temp_len) copy_len = temp_len; copy_from = rand_below(afl, new_len - copy_len + 1); copy_to = rand_below(afl, temp_len - copy_len + 1); @@ -5033,8 +5067,7 @@ pacemaker_fuzzing: the last differing byte. Bail out if the difference is just a single byte or so. */ - locate_diffs(in_buf, new_buf, MIN(len, (s32)target->len), &f_diff, - &l_diff); + locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 66938635..aec57a6e 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -433,6 +433,7 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { q->passed_det = passed_det; q->trace_mini = NULL; q->testcase_buf = NULL; + q->mother = afl->queue_cur; #ifdef INTROSPECTION q->bitsmap_size = afl->bitsmap_size; diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index 28585afe..955a9232 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -28,6 +28,8 @@ #include "afl-fuzz.h" #include "cmplog.h" +//#define _DEBUG + ///// Colorization struct range { @@ -35,6 +37,8 @@ struct range { u32 start; u32 end; struct range *next; + struct range *prev; + u8 ok; }; @@ -44,6 +48,8 @@ static struct range *add_range(struct range *ranges, u32 start, u32 end) { r->start = start; r->end = end; r->next = ranges; + r->ok = 0; + if (likely(ranges)) ranges->prev = r; return r; } @@ -51,45 +57,61 @@ static struct range *add_range(struct range *ranges, u32 start, u32 end) { static struct range *pop_biggest_range(struct range **ranges) { struct range *r = *ranges; - struct range *prev = NULL; struct range *rmax = NULL; - struct range *prev_rmax = NULL; u32 max_size = 0; while (r) { - u32 s = r->end - r->start; - if (s >= max_size) { + if (!r->ok) { + + u32 s = 1 + r->end - r->start; + + if (s >= max_size) { + + max_size = s; + rmax = r; - max_size = s; - prev_rmax = prev; - rmax = r; + } } - prev = r; r = r->next; } - if (rmax) { + return rmax; - if (prev_rmax) { +} - prev_rmax->next = rmax->next; +#ifdef _DEBUG +// static int logging = 0; +static void dump(char *txt, u8 *buf, u32 len) { - } else { + u32 i; + fprintf(stderr, "DUMP %s %llx ", txt, hash64(buf, len, 0)); + for (i = 0; i 
< len; i++) + fprintf(stderr, "%02x", buf[i]); + fprintf(stderr, "\n"); - *ranges = rmax->next; +} - } +static void dump_file(char *path, char *name, u32 counter, u8 *buf, u32 len) { - } + char fn[4096]; + if (!path) path = "."; + snprintf(fn, sizeof(fn), "%s/%s%d", path, name, counter); + int fd = open(fn, O_RDWR | O_CREAT | O_TRUNC, 0644); + if (fd >= 0) { - return rmax; + write(fd, buf, len); + close(fd); + + } } +#endif + static u8 get_exec_checksum(afl_state_t *afl, u8 *buf, u32 len, u64 *cksum) { if (unlikely(common_fuzz_stuff(afl, buf, len))) { return 1; } @@ -99,107 +121,270 @@ static u8 get_exec_checksum(afl_state_t *afl, u8 *buf, u32 len, u64 *cksum) { } -static void xor_replace(u8 *buf, u32 len) { +/* replace everything with different values but stay in the same type */ +static void type_replace(afl_state_t *afl, u8 *buf, u32 len) { u32 i; + u8 c; for (i = 0; i < len; ++i) { - buf[i] ^= 0xff; + // wont help for UTF or non-latin charsets + do { + + switch (buf[i]) { + + case 'A' ... 'F': + c = 'A' + rand_below(afl, 1 + 'F' - 'A'); + break; + case 'a' ... 'f': + c = 'a' + rand_below(afl, 1 + 'f' - 'a'); + break; + case '0': + c = '1'; + break; + case '1': + c = '0'; + break; + case '2' ... '9': + c = '2' + rand_below(afl, 1 + '9' - '2'); + break; + case 'G' ... 'Z': + c = 'G' + rand_below(afl, 1 + 'Z' - 'G'); + break; + case 'g' ... 'z': + c = 'g' + rand_below(afl, 1 + 'z' - 'g'); + break; + case '!' ... '*': + c = '!' + rand_below(afl, 1 + '*' - '!'); + break; + case ',' ... '.': + c = ',' + rand_below(afl, 1 + '.' - ','); + break; + case ':' ... '@': + c = ':' + rand_below(afl, 1 + '@' - ':'); + break; + case '[' ... '`': + c = '[' + rand_below(afl, 1 + '`' - '['); + break; + case '{' ... '~': + c = '{' + rand_below(afl, 1 + '~' - '{'); + break; + case '+': + c = '/'; + break; + case '/': + c = '+'; + break; + case ' ': + c = '\t'; + break; + case '\t': + c = ' '; + break; + /* + case '\r': + case '\n': + // nothing ... 
+ break; + */ + default: + c = (buf[i] ^ 0xff); + + } + + } while (c == buf[i]); + + buf[i] = c; } } -static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum) { +static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum, + struct tainted **taints) { - struct range *ranges = add_range(NULL, 0, len); - u8 * backup = ck_alloc_nozero(len); + struct range * ranges = add_range(NULL, 0, len - 1), *rng; + struct tainted *taint = NULL; + u8 * backup = ck_alloc_nozero(len); + u8 * changed = ck_alloc_nozero(len); u64 orig_hit_cnt, new_hit_cnt; orig_hit_cnt = afl->queued_paths + afl->unique_crashes; afl->stage_name = "colorization"; afl->stage_short = "colorization"; - afl->stage_max = 1000; + afl->stage_max = (len << 1); - struct range *rng = NULL; afl->stage_cur = 0; + memcpy(backup, buf, len); + memcpy(changed, buf, len); + type_replace(afl, changed, len); + while ((rng = pop_biggest_range(&ranges)) != NULL && afl->stage_cur < afl->stage_max) { - u32 s = rng->end - rng->start; + u32 s = 1 + rng->end - rng->start; + + memcpy(buf + rng->start, changed + rng->start, s); - if (s != 0) { + u64 cksum; + u64 start_us = get_cur_time_us(); + if (unlikely(get_exec_checksum(afl, buf, len, &cksum))) { - /* Range not empty */ + goto checksum_fail; - memcpy(backup, buf + rng->start, s); - xor_replace(buf + rng->start, s); + } + + u64 stop_us = get_cur_time_us(); + + /* Discard if the mutations change the path or if it is too decremental + in speed - how could the same path have a much different speed + though ...*/ + if (cksum != exec_cksum || + (unlikely(stop_us - start_us > 3 * afl->queue_cur->exec_us) && + likely(!afl->fixed_seed))) { + + memcpy(buf + rng->start, backup + rng->start, s); - u64 cksum; - u64 start_us = get_cur_time_us(); - if (unlikely(get_exec_checksum(afl, buf, len, &cksum))) { + if (s > 1) { // to not add 0 size ranges - goto checksum_fail; + ranges = add_range(ranges, rng->start, rng->start - 1 + s / 2); + ranges = add_range(ranges, rng->start + s / 2, rng->end); } - u64 stop_us = get_cur_time_us(); + if (ranges == rng) { + + ranges = rng->next; + if (ranges) { ranges->prev = NULL; } + + } else if (rng->next) { + + rng->prev->next = rng->next; + rng->next->prev = rng->prev; - /* Discard if the mutations change the paths or if it is too decremental - in speed */ - if (cksum != exec_cksum || - ((stop_us - start_us > 2 * afl->queue_cur->exec_us) && - likely(!afl->fixed_seed))) { + } else { - ranges = add_range(ranges, rng->start, rng->start + s / 2); - ranges = add_range(ranges, rng->start + s / 2 + 1, rng->end); - memcpy(buf + rng->start, backup, s); + if (rng->prev) { rng->prev->next = NULL; } } + free(rng); + + } else { + + rng->ok = 1; + } - ck_free(rng); - rng = NULL; ++afl->stage_cur; } - if (afl->stage_cur < afl->stage_max) { afl->queue_cur->fully_colorized = 1; } + rng = ranges; + while (rng) { - new_hit_cnt = afl->queued_paths + afl->unique_crashes; - afl->stage_finds[STAGE_COLORIZATION] += new_hit_cnt - orig_hit_cnt; - afl->stage_cycles[STAGE_COLORIZATION] += afl->stage_cur; - ck_free(backup); + rng = rng->next; - ck_free(rng); - rng = NULL; + } - while (ranges) { + u32 i = 1; + u32 positions = 0; + while (i) { + restart: + i = 0; + struct range *r = NULL; + u32 pos = (u32)-1; rng = ranges; - ranges = rng->next; - ck_free(rng); - rng = NULL; - } + while (rng) { - return 0; + if (rng->ok == 1 && rng->start < pos) { -checksum_fail: - if (rng) { ck_free(rng); } - ck_free(backup); + if (taint && taint->pos + taint->len == rng->start) { + + taint->len += 
(1 + rng->end - rng->start); + positions += (1 + rng->end - rng->start); + rng->ok = 2; + goto restart; + + } else { + + r = rng; + pos = rng->start; + + } + + } + + rng = rng->next; + + } + + if (r) { + + struct tainted *t = ck_alloc_nozero(sizeof(struct tainted)); + t->pos = r->start; + t->len = 1 + r->end - r->start; + positions += (1 + r->end - r->start); + if (likely(taint)) { taint->prev = t; } + t->next = taint; + t->prev = NULL; + taint = t; + r->ok = 2; + i = 1; + } + + } + + *taints = taint; + + /* temporary: clean ranges */ while (ranges) { rng = ranges; ranges = rng->next; ck_free(rng); - rng = NULL; } + new_hit_cnt = afl->queued_paths + afl->unique_crashes; + +#ifdef _DEBUG + /* + char fn[4096]; + snprintf(fn, sizeof(fn), "%s/introspection_color.txt", afl->out_dir); + FILE *f = fopen(fn, "a"); + if (f) { + + */ + FILE *f = stderr; + fprintf(f, + "Colorization: fname=%s len=%u result=%u execs=%u found=%llu " + "taint=%u\n", + afl->queue_cur->fname, len, afl->queue_cur->colorized, afl->stage_cur, + new_hit_cnt - orig_hit_cnt, positions); +/* + fclose(f); + + } + +*/ +#endif + + afl->stage_finds[STAGE_COLORIZATION] += new_hit_cnt - orig_hit_cnt; + afl->stage_cycles[STAGE_COLORIZATION] += afl->stage_cur; + ck_free(backup); + ck_free(changed); + + return 0; + +checksum_fail: + ck_free(backup); + ck_free(changed); + return 1; } @@ -212,12 +397,19 @@ static u8 its_fuzz(afl_state_t *afl, u8 *buf, u32 len, u8 *status) { orig_hit_cnt = afl->queued_paths + afl->unique_crashes; +#ifdef _DEBUG + dump("DATA", buf, len); +#endif + if (unlikely(common_fuzz_stuff(afl, buf, len))) { return 1; } new_hit_cnt = afl->queued_paths + afl->unique_crashes; if (unlikely(new_hit_cnt != orig_hit_cnt)) { +#ifdef _DEBUG + fprintf(stderr, "NEW FIND\n"); +#endif *status = 1; } else { @@ -278,11 +470,33 @@ static int strntoull(const char *str, size_t sz, char **end, int base, } static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, - u64 pattern, u64 repl, u64 o_pattern, u32 idx, - u8 *orig_buf, u8 *buf, u32 len, u8 do_reverse, - u8 *status) { - - if (!buf) { FATAL("BUG: buf was NULL. Please report this.\n"); } + u64 pattern, u64 repl, u64 o_pattern, + u64 changed_val, u8 attr, u32 idx, u32 taint_len, + u8 *orig_buf, u8 *buf, u8 *cbuf, u32 len, + u8 do_reverse, u8 lvl, u8 *status) { + + // (void)(changed_val); // TODO + // we can use the information in changed_val to see if there is a + // computable i2s transformation. 
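  // A hypothetical illustration of that idea (not implemented below): if the
  // input text "abcd" only ever reaches the comparison as "ABCD", the observed
  // operands differ from the input bytes by the 0x20 case bit, and writing the
  // lower-cased form of repl back into the input would satisfy such a
  // toupper() wrapper.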
+ // if (pattern != o_pattern && repl != changed_val) { + + // u64 in_diff = pattern - o_pattern, out_diff = repl - changed_val; + // if (in_diff != out_diff) { + + // switch(in_diff) { + + // detect uppercase <-> lowercase, base64, hex encoding, etc.: + // repl = reverse_transform(TYPE, pattern); + // } + // } + // } + // not 100% but would have a chance to be detected + + // fprintf(stderr, + // "Encode: %llx->%llx into %llx(<-%llx) at pos=%u " + // "taint_len=%u shape=%u attr=%u\n", + // o_pattern, pattern, repl, changed_val, idx, taint_len, + // h->shape + 1, attr); u64 *buf_64 = (u64 *)&buf[idx]; u32 *buf_32 = (u32 *)&buf[idx]; @@ -293,76 +507,215 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, u16 *o_buf_16 = (u16 *)&orig_buf[idx]; u8 * o_buf_8 = &orig_buf[idx]; - u32 its_len = len - idx; - // *status = 0; + u32 its_len = MIN(len - idx, taint_len); u8 * endptr; u8 use_num = 0, use_unum = 0; unsigned long long unum; long long num; - if (afl->queue_cur->is_ascii) { + // reverse atoi()/strnu?toll() is expensive, so we only to it in lvl == 3 + if (lvl & 4) { - endptr = buf_8; - if (strntoll(buf_8, len - idx, (char **)&endptr, 0, &num)) { + if (afl->queue_cur->is_ascii) { - if (!strntoull(buf_8, len - idx, (char **)&endptr, 0, &unum)) - use_unum = 1; + endptr = buf_8; + if (strntoll(buf_8, len - idx, (char **)&endptr, 0, &num)) { - } else + if (!strntoull(buf_8, len - idx, (char **)&endptr, 0, &unum)) + use_unum = 1; - use_num = 1; + } else - } + use_num = 1; - if (use_num && (u64)num == pattern) { + } - size_t old_len = endptr - buf_8; - size_t num_len = snprintf(NULL, 0, "%lld", num); +#ifdef _DEBUG + if (idx == 0) + fprintf(stderr, "ASCII is=%u use_num=%u use_unum=%u idx=%u %llx==%llx\n", + afl->queue_cur->is_ascii, use_num, use_unum, idx, num, pattern); +#endif - u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len); - if (unlikely(!new_buf)) { PFATAL("alloc"); } - memcpy(new_buf, buf, idx); + // num is likely not pattern as atoi("AAA") will be zero... 
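  // Hypothetical example of the replacement done below: if the input holds the
  // text "123" and CmpLog recorded the comparison 123 == 57005, the digits are
  // overwritten with "57005", the tail of the buffer is shifted accordingly,
  // and a digit that would directly follow the new number is turned into a
  // space so the two numbers do not merge.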
+ if (use_num && ((u64)num == pattern || !num)) { - snprintf(new_buf + idx, num_len, "%lld", num); - memcpy(new_buf + idx + num_len, buf_8 + old_len, len - idx - old_len); + u8 tmp_buf[32]; + size_t num_len = snprintf(tmp_buf, sizeof(tmp_buf), "%lld", repl); + size_t old_len = endptr - buf_8; - if (unlikely(its_fuzz(afl, new_buf, len, status))) { return 1; } + u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } - } else if (use_unum && unum == pattern) { + memcpy(new_buf, buf, idx); + memcpy(new_buf + idx, tmp_buf, num_len); + memcpy(new_buf + idx + num_len, buf_8 + old_len, len - idx - old_len); - size_t old_len = endptr - buf_8; - size_t num_len = snprintf(NULL, 0, "%llu", unum); + if (new_buf[idx + num_len] >= '0' && new_buf[idx + num_len] <= '9') { + + new_buf[idx + num_len] = ' '; + + } - u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len); - if (unlikely(!new_buf)) { PFATAL("alloc"); } - memcpy(new_buf, buf, idx); + if (unlikely(its_fuzz(afl, new_buf, len, status))) { return 1; } - snprintf(new_buf + idx, num_len, "%llu", unum); - memcpy(new_buf + idx + num_len, buf_8 + old_len, len - idx - old_len); + } else if (use_unum && (unum == pattern || !unum)) { - if (unlikely(its_fuzz(afl, new_buf, len, status))) { return 1; } + u8 tmp_buf[32]; + size_t num_len = snprintf(tmp_buf, sizeof(tmp_buf), "%llu", repl); + size_t old_len = endptr - buf_8; + + u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } + + memcpy(new_buf, buf, idx); + memcpy(new_buf + idx, tmp_buf, num_len); + memcpy(new_buf + idx + num_len, buf_8 + old_len, len - idx - old_len); + + if (new_buf[idx + num_len] >= '0' && new_buf[idx + num_len] <= '9') { + + new_buf[idx + num_len] = ' '; + + } + + if (unlikely(its_fuzz(afl, new_buf, len, status))) { return 1; } + + } } - if (SHAPE_BYTES(h->shape) >= 8 && *status != 1) { + // we only allow this for ascii2integer (above) + if (unlikely(pattern == o_pattern)) { return 0; } - if (its_len >= 8 && *buf_64 == pattern && *o_buf_64 == o_pattern) { + if ((lvl & 1) || ((lvl & 2) && (attr >= 8 && attr <= 15)) || attr >= 16) { - *buf_64 = repl; - if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } - *buf_64 = pattern; + if (SHAPE_BYTES(h->shape) >= 8 && *status != 1) { + + // if (its_len >= 8 && (attr == 0 || attr >= 8)) + // fprintf(stderr, + // "TestU64: %u>=4 %x==%llx" + // " %x==%llx (idx=%u attr=%u) <= %llx<-%llx\n", + // its_len, *buf_32, pattern, *o_buf_32, o_pattern, idx, attr, + // repl, changed_val); + + // if this is an fcmp (attr & 8 == 8) then do not compare the patterns - + // due to a bug in llvm dynamic float bitcasts do not work :( + // the value 16 means this is a +- 1.0 test case + if (its_len >= 8 && + ((*buf_64 == pattern && *o_buf_64 == o_pattern) || attr >= 16)) { + + u64 tmp_64 = *buf_64; + *buf_64 = repl; + if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } + if (*status == 1) { memcpy(cbuf + idx, buf_64, 8); } + *buf_64 = tmp_64; + + // fprintf(stderr, "Status=%u\n", *status); + + } + + // reverse encoding + if (do_reverse && *status != 1) { + + if (unlikely(cmp_extend_encoding(afl, h, SWAP64(pattern), SWAP64(repl), + SWAP64(o_pattern), SWAP64(changed_val), + attr, idx, taint_len, orig_buf, buf, + cbuf, len, 0, lvl, status))) { + + return 1; + + } + + } } - // reverse encoding - if (do_reverse && *status != 1) { + if (SHAPE_BYTES(h->shape) >= 4 && *status != 1) { - if 
(unlikely(cmp_extend_encoding(afl, h, SWAP64(pattern), SWAP64(repl), - SWAP64(o_pattern), idx, orig_buf, buf, - len, 0, status))) { + // if (its_len >= 4 && (attr <= 1 || attr >= 8)) + // fprintf(stderr, + // "TestU32: %u>=4 %x==%llx" + // " %x==%llx (idx=%u attr=%u) <= %llx<-%llx\n", + // its_len, *buf_32, pattern, *o_buf_32, o_pattern, idx, attr, + // repl, changed_val); - return 1; + if (its_len >= 4 && + ((*buf_32 == (u32)pattern && *o_buf_32 == (u32)o_pattern) || + attr >= 16)) { + + u32 tmp_32 = *buf_32; + *buf_32 = (u32)repl; + if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } + if (*status == 1) { memcpy(cbuf + idx, buf_32, 4); } + *buf_32 = tmp_32; + + // fprintf(stderr, "Status=%u\n", *status); + + } + + // reverse encoding + if (do_reverse && *status != 1) { + + if (unlikely(cmp_extend_encoding(afl, h, SWAP32(pattern), SWAP32(repl), + SWAP32(o_pattern), SWAP32(changed_val), + attr, idx, taint_len, orig_buf, buf, + cbuf, len, 0, lvl, status))) { + + return 1; + + } + + } + + } + + if (SHAPE_BYTES(h->shape) >= 2 && *status != 1) { + + if (its_len >= 2 && + ((*buf_16 == (u16)pattern && *o_buf_16 == (u16)o_pattern) || + attr >= 16)) { + + u16 tmp_16 = *buf_16; + *buf_16 = (u16)repl; + if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } + if (*status == 1) { memcpy(cbuf + idx, buf_16, 2); } + *buf_16 = tmp_16; + + } + + // reverse encoding + if (do_reverse && *status != 1) { + + if (unlikely(cmp_extend_encoding(afl, h, SWAP16(pattern), SWAP16(repl), + SWAP16(o_pattern), SWAP16(changed_val), + attr, idx, taint_len, orig_buf, buf, + cbuf, len, 0, lvl, status))) { + + return 1; + + } + + } + + } + + if (*status != 1) { // u8 + + // if (its_len >= 1 && (attr <= 1 || attr >= 8)) + // fprintf(stderr, + // "TestU8: %u>=1 %x==%x %x==%x (idx=%u attr=%u) <= %x<-%x\n", + // its_len, *buf_8, pattern, *o_buf_8, o_pattern, idx, attr, + // repl, changed_val); + + if (its_len >= 1 && + ((*buf_8 == (u8)pattern && *o_buf_8 == (u8)o_pattern) || + attr >= 16)) { + + u8 tmp_8 = *buf_8; + *buf_8 = (u8)repl; + if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } + if (*status == 1) { cbuf[idx] = *buf_8; } + *buf_8 = tmp_8; } @@ -370,49 +723,205 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, } - if (SHAPE_BYTES(h->shape) >= 4 && *status != 1) { + // here we add and subract 1 from the value, but only if it is not an + // == or != comparison + // Bits: 1 = Equal, 2 = Greater, 3 = Lesser, 4 = Float - if (its_len >= 4 && *buf_32 == (u32)pattern && - *o_buf_32 == (u32)o_pattern) { + if (lvl < 4) { return 0; } - *buf_32 = (u32)repl; - if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } - *buf_32 = pattern; + if (attr >= 8 && attr < 16) { // lesser/greater integer comparison + + u64 repl_new; + if (SHAPE_BYTES(h->shape) == 4 && its_len >= 4) { + + float *f = (float *)&repl; + float g = *f; + g += 1.0; + u32 *r = (u32 *)&g; + repl_new = (u32)*r; + + } else if (SHAPE_BYTES(h->shape) == 8 && its_len >= 8) { + + double *f = (double *)&repl; + double g = *f; + g += 1.0; + + u64 *r = (u64 *)&g; + repl_new = *r; + + } else { + + return 0; } - // reverse encoding - if (do_reverse && *status != 1) { + changed_val = repl_new; + + if (unlikely(cmp_extend_encoding(afl, h, pattern, repl_new, o_pattern, + changed_val, 16, idx, taint_len, orig_buf, + buf, cbuf, len, 1, lvl, status))) { + + return 1; + + } + + if (SHAPE_BYTES(h->shape) == 4) { + + float *f = (float *)&repl; + float g = *f; + g -= 1.0; + u32 *r = (u32 *)&g; + repl_new = (u32)*r; + + } else if 
(SHAPE_BYTES(h->shape) == 8) { + + double *f = (double *)&repl; + double g = *f; + g -= 1.0; + u64 *r = (u64 *)&g; + repl_new = *r; + + } else { + + return 0; + + } + + changed_val = repl_new; + + if (unlikely(cmp_extend_encoding(afl, h, pattern, repl_new, o_pattern, + changed_val, 16, idx, taint_len, orig_buf, + buf, cbuf, len, 1, lvl, status))) { + + return 1; + + } + + // transform double to float, llvm likes to do that internally ... + if (SHAPE_BYTES(h->shape) == 8 && its_len >= 4) { + + double *f = (double *)&repl; + float g = (float)*f; + repl_new = 0; +#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) + memcpy((char *)&repl_new, (char *)&g, 4); +#else + memcpy(((char *)&repl_new) + 4, (char *)&g, 4); +#endif + changed_val = repl_new; + h->shape = 3; // modify shape - if (unlikely(cmp_extend_encoding(afl, h, SWAP32(pattern), SWAP32(repl), - SWAP32(o_pattern), idx, orig_buf, buf, - len, 0, status))) { + // fprintf(stderr, "DOUBLE2FLOAT %llx\n", repl_new); + if (unlikely(cmp_extend_encoding( + afl, h, pattern, repl_new, o_pattern, changed_val, 16, idx, + taint_len, orig_buf, buf, cbuf, len, 1, lvl, status))) { + + h->shape = 7; return 1; } + h->shape = 7; // recover shape + + } + + } else if (attr > 1 && attr < 8) { // lesser/greater integer comparison + + u64 repl_new; + + repl_new = repl + 1; + changed_val = repl_new; + if (unlikely(cmp_extend_encoding(afl, h, pattern, repl_new, o_pattern, + changed_val, 32, idx, taint_len, orig_buf, + buf, cbuf, len, 1, lvl, status))) { + + return 1; + + } + + repl_new = repl - 1; + changed_val = repl_new; + if (unlikely(cmp_extend_encoding(afl, h, pattern, repl_new, o_pattern, + changed_val, 32, idx, taint_len, orig_buf, + buf, cbuf, len, 1, lvl, status))) { + + return 1; + } } - if (SHAPE_BYTES(h->shape) >= 2 && *status != 1) { + return 0; - if (its_len >= 2 && *buf_16 == (u16)pattern && - *o_buf_16 == (u16)o_pattern) { +} - *buf_16 = (u16)repl; +static u8 cmp_extend_encoding128(afl_state_t *afl, struct cmp_header *h, + u128 pattern, u128 repl, u128 o_pattern, + u128 changed_val, u8 attr, u32 idx, + u32 taint_len, u8 *orig_buf, u8 *buf, u8 *cbuf, + u32 len, u8 do_reverse, u8 lvl, u8 *status) { + + u128 *buf_128 = (u128 *)&buf[idx]; + u64 * buf0 = (u64 *)&buf[idx]; + u64 * buf1 = (u64 *)(buf + idx + 8); + u128 *o_buf_128 = (u128 *)&orig_buf[idx]; + u32 its_len = MIN(len - idx, taint_len); + u64 v10 = (u64)repl; + u64 v11 = (u64)(repl >> 64); + + // if this is an fcmp (attr & 8 == 8) then do not compare the patterns - + // due to a bug in llvm dynamic float bitcasts do not work :( + // the value 16 means this is a +- 1.0 test case + if (its_len >= 16) { + +#ifdef _DEBUG + fprintf(stderr, "TestU128: %u>=16 (idx=%u attr=%u) (%u)\n", its_len, idx, + attr, do_reverse); + u64 v00 = (u64)pattern; + u64 v01 = pattern >> 64; + u64 ov00 = (u64)o_pattern; + u64 ov01 = o_pattern >> 64; + u64 ov10 = (u64)changed_val; + u64 ov11 = changed_val >> 64; + u64 b00 = (u64)*buf_128; + u64 b01 = *buf_128 >> 64; + u64 ob00 = (u64)*o_buf_128; + u64 ob01 = *o_buf_128 >> 64; + fprintf(stderr, + "TestU128: %llx:%llx==%llx:%llx" + " %llx:%llx==%llx:%llx <= %llx:%llx<-%llx:%llx\n", + b01, b00, v01, v00, ob01, ob00, ov01, ov00, v11, v10, ov11, ov10); +#endif + + if (*buf_128 == pattern && *o_buf_128 == o_pattern) { + + u128 tmp_128 = *buf_128; + // *buf_128 = repl; <- this crashes +#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) + *buf0 = v10; + *buf1 = v11; +#else + *buf1 = v10; + *buf0 = v11; +#endif if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } - *buf_16 = 
(u16)pattern; + if (*status == 1) { memcpy(cbuf + idx, buf_128, 16); } + *buf_128 = tmp_128; + +#ifdef _DEBUG + fprintf(stderr, "Status=%u\n", *status); +#endif } // reverse encoding if (do_reverse && *status != 1) { - if (unlikely(cmp_extend_encoding(afl, h, SWAP16(pattern), SWAP16(repl), - SWAP16(o_pattern), idx, orig_buf, buf, - len, 0, status))) { + if (unlikely(cmp_extend_encoding128( + afl, h, SWAPN(pattern, 128), SWAPN(repl, 128), + SWAPN(o_pattern, 128), SWAPN(changed_val, 128), attr, idx, + taint_len, orig_buf, buf, cbuf, len, 0, lvl, status))) { return 1; @@ -422,14 +931,82 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, } - /* avoid CodeQL warning on unsigned overflow */ - if (/* SHAPE_BYTES(h->shape) >= 1 && */ *status != 1) { + return 0; - if (its_len >= 1 && *buf_8 == (u8)pattern && *o_buf_8 == (u8)o_pattern) { +} - *buf_8 = (u8)repl; +// uh a pointer read from (long double*) reads 12 bytes, not 10 ... +// so lets make this complicated. +static u8 cmp_extend_encoding_ld(afl_state_t *afl, struct cmp_header *h, + u8 *pattern, u8 *repl, u8 *o_pattern, + u8 *changed_val, u8 attr, u32 idx, + u32 taint_len, u8 *orig_buf, u8 *buf, u8 *cbuf, + u32 len, u8 do_reverse, u8 lvl, u8 *status) { + + u8 *buf_ld = &buf[idx], *o_buf_ld = &orig_buf[idx], backup[10]; + u32 its_len = MIN(len - idx, taint_len); + + if (its_len >= 10) { + +#ifdef _DEBUG + fprintf(stderr, "TestUld: %u>=10 (len=%u idx=%u attr=%u) (%u)\n", its_len, + len, idx, attr, do_reverse); + fprintf(stderr, "TestUld: "); + u32 i; + for (i = 0; i < 10; i++) + fprintf(stderr, "%02x", pattern[i]); + fprintf(stderr, "=="); + for (i = 0; i < 10; i++) + fprintf(stderr, "%02x", buf_ld[i]); + fprintf(stderr, " "); + for (i = 0; i < 10; i++) + fprintf(stderr, "%02x", o_pattern[i]); + fprintf(stderr, "=="); + for (i = 0; i < 10; i++) + fprintf(stderr, "%02x", o_buf_ld[i]); + fprintf(stderr, " <= "); + for (i = 0; i < 10; i++) + fprintf(stderr, "%02x", repl[i]); + fprintf(stderr, "=="); + for (i = 0; i < 10; i++) + fprintf(stderr, "%02x", changed_val[i]); + fprintf(stderr, "\n"); +#endif + + if (!memcmp(pattern, buf_ld, 10) && !memcmp(o_pattern, o_buf_ld, 10)) { + + // if this is an fcmp (attr & 8 == 8) then do not compare the patterns - + // due to a bug in llvm dynamic float bitcasts do not work :( + // the value 16 means this is a +- 1.0 test case + + memcpy(backup, buf_ld, 10); + memcpy(buf_ld, repl, 10); if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } - *buf_8 = (u8)pattern; + if (*status == 1) { memcpy(cbuf + idx, repl, 10); } + memcpy(buf_ld, backup, 10); + +#ifdef _DEBUG + fprintf(stderr, "Status=%u\n", *status); +#endif + + } + + } + + // reverse encoding + if (do_reverse && *status != 1) { + + u8 sp[10], sr[10], osp[10], osr[10]; + SWAPNN(sp, pattern, 10); + SWAPNN(sr, repl, 10); + SWAPNN(osp, o_pattern, 10); + SWAPNN(osr, changed_val, 10); + + if (unlikely(cmp_extend_encoding_ld(afl, h, sp, sr, osp, osr, attr, idx, + taint_len, orig_buf, buf, cbuf, len, 0, + lvl, status))) { + + return 1; } @@ -445,10 +1022,6 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) { u32 k; u8 cons_ff = 0, cons_0 = 0; - - if (shape > sizeof(v)) - FATAL("shape is greater than %zu, please report!", sizeof(v)); - for (k = 0; k < shape; ++k) { if (b[k] == 0) { @@ -457,7 +1030,7 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) { } else if (b[k] == 0xff) { - ++cons_ff; + ++cons_0; } else { @@ -493,28 +1066,126 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) { } 
-static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) { +static void try_to_add_to_dict128(afl_state_t *afl, u128 v) { - struct cmp_header *h = &afl->shm.cmp_map->headers[key]; - u32 i, j, idx; + u8 *b = (u8 *)&v; - u32 loggeds = h->hits; + u32 k; + u8 cons_ff = 0, cons_0 = 0; + for (k = 0; k < 16; ++k) { + + if (b[k] == 0) { + + ++cons_0; + + } else if (b[k] == 0xff) { + + ++cons_0; + + } else { + + cons_0 = cons_ff = 0; + + } + + // too many uninteresting values? try adding 2 64-bit values + if (cons_0 > 6 || cons_ff > 6) { + + u64 v64 = (u64)v; + try_to_add_to_dict(afl, v64, 8); + v64 = (u64)(v >> 64); + try_to_add_to_dict(afl, v64, 8); + + return; + + } + + } + + maybe_add_auto(afl, (u8 *)&v, 16); + u128 rev = SWAPN(v, 128); + maybe_add_auto(afl, (u8 *)&rev, 16); + +} + +static void try_to_add_to_dictN(afl_state_t *afl, u128 v, u8 size) { + + u8 *b = (u8 *)&v; + + u32 k; + u8 cons_ff = 0, cons_0 = 0; +#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) + for (k = 0; k < size; ++k) { + +#else + for (k = 16 - size; k < 16; ++k) { + +#endif + if (b[k] == 0) { + + ++cons_0; + + } else if (b[k] == 0xff) { + + ++cons_0; + + } else { + + cons_0 = cons_ff = 0; + + } + + } + + maybe_add_auto(afl, (u8 *)&v, size); + u128 rev = SWAPN(v, size); + maybe_add_auto(afl, (u8 *)&rev, size); + +} + +static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf, + u32 len, u32 lvl, struct tainted *taint) { + + struct cmp_header *h = &afl->shm.cmp_map->headers[key]; + struct tainted * t; + u32 i, j, idx, taint_len; + u32 have_taint = 1, is_128 = 0, is_n = 0, is_ld = 0; + u32 loggeds = h->hits; if (h->hits > CMP_MAP_H) { loggeds = CMP_MAP_H; } u8 status = 0; - // opt not in the paper - u32 fails; - u8 found_one = 0; + u8 found_one = 0; /* loop cmps are useless, detect and ignore them */ - u64 s_v0, s_v1; - u8 s_v0_fixed = 1, s_v1_fixed = 1; - u8 s_v0_inc = 1, s_v1_inc = 1; - u8 s_v0_dec = 1, s_v1_dec = 1; + u128 s128_v0 = 0, s128_v1 = 0, orig_s128_v0 = 0, orig_s128_v1 = 0; + long double ld0, ld1, o_ld0, o_ld1; + u64 s_v0, s_v1; + u8 s_v0_fixed = 1, s_v1_fixed = 1; + u8 s_v0_inc = 1, s_v1_inc = 1; + u8 s_v0_dec = 1, s_v1_dec = 1; - for (i = 0; i < loggeds; ++i) { + switch (SHAPE_BYTES(h->shape)) { + + case 1: + case 2: + case 4: + case 8: + break; + case 16: + is_128 = 1; + break; + case 10: + if (h->attribute & 8) { is_ld = 1; } + // fall through + default: + is_n = 1; - fails = 0; + } + + // FCmp not in if level 1 only + if ((h->attribute & 8) && lvl < 2) return 0; + + for (i = 0; i < loggeds; ++i) { struct cmp_operands *o = &afl->shm.cmp_map->log[key][i]; @@ -551,55 +1222,242 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) { } - for (idx = 0; idx < len && fails < 8; ++idx) { +#ifdef _DEBUG + fprintf(stderr, "Handling: %llx->%llx vs %llx->%llx attr=%u shape=%u\n", + orig_o->v0, o->v0, orig_o->v1, o->v1, h->attribute, + SHAPE_BYTES(h->shape)); +#endif + + if (taint) { + + t = taint; + + while (t->next) { + + t = t->next; + + } + + } else { + + have_taint = 0; + t = NULL; + + } + + if (unlikely(is_128 || is_n)) { + + s128_v0 = ((u128)o->v0) + (((u128)o->v0_128) << 64); + s128_v1 = ((u128)o->v1) + (((u128)o->v1_128) << 64); + orig_s128_v0 = ((u128)orig_o->v0) + (((u128)orig_o->v0_128) << 64); + orig_s128_v1 = ((u128)orig_o->v1) + (((u128)orig_o->v1_128) << 64); + + if (is_ld) { + +#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) + memcpy((char *)&ld0, (char *)&s128_v0, sizeof(long double)); + memcpy((char *)&ld1, (char *)&s128_v1, 
sizeof(long double)); + memcpy((char *)&o_ld0, (char *)&orig_s128_v0, sizeof(long double)); + memcpy((char *)&o_ld1, (char *)&orig_s128_v1, sizeof(long double)); +#else + memcpy((char *)&ld0, (char *)(&s128_v0) + 6, sizeof(long double)); + memcpy((char *)&ld1, (char *)(&s128_v1) + 6, sizeof(long double)); + memcpy((char *)&o_ld0, (char *)(&orig_s128_v0) + 6, + sizeof(long double)); + memcpy((char *)&o_ld1, (char *)(&orig_s128_v1) + 6, + sizeof(long double)); +#endif + + } + + } + + for (idx = 0; idx < len; ++idx) { + + if (have_taint) { + + if (!t || idx < t->pos) { + + continue; + + } else { + + taint_len = t->pos + t->len - idx; + + if (idx == t->pos + t->len - 1) { t = t->prev; } + + } + + } else { + + taint_len = len - idx; + + } status = 0; - if (unlikely(cmp_extend_encoding(afl, h, o->v0, o->v1, orig_o->v0, idx, - orig_buf, buf, len, 1, &status))) { - return 1; + if (is_ld) { // long double special case + + if (ld0 != o_ld0 && o_ld1 != o_ld0) { + + if (unlikely(cmp_extend_encoding_ld( + afl, h, (u8 *)&ld0, (u8 *)&ld1, (u8 *)&o_ld0, (u8 *)&o_ld1, + h->attribute, idx, taint_len, orig_buf, buf, cbuf, len, 1, + lvl, &status))) { + + return 1; + + } + + } + + if (status == 1) { + + found_one = 1; + break; + + } + + if (ld1 != o_ld1 && o_ld0 != o_ld1) { + + if (unlikely(cmp_extend_encoding_ld( + afl, h, (u8 *)&ld1, (u8 *)&ld0, (u8 *)&o_ld1, (u8 *)&o_ld0, + h->attribute, idx, taint_len, orig_buf, buf, cbuf, len, 1, + lvl, &status))) { + + return 1; + + } + + } + + if (status == 1) { + + found_one = 1; + break; + + } + + } + + if (is_128) { // u128 special case + + if (s128_v0 != orig_s128_v0 && orig_s128_v0 != orig_s128_v1) { + + if (unlikely(cmp_extend_encoding128( + afl, h, s128_v0, s128_v1, orig_s128_v0, orig_s128_v1, + h->attribute, idx, taint_len, orig_buf, buf, cbuf, len, 1, + lvl, &status))) { + + return 1; + + } + + } + + if (status == 1) { + + found_one = 1; + break; + + } + + if (s128_v1 != orig_s128_v1 && orig_s128_v1 != orig_s128_v0) { + + if (unlikely(cmp_extend_encoding128( + afl, h, s128_v1, s128_v0, orig_s128_v1, orig_s128_v0, + h->attribute, idx, taint_len, orig_buf, buf, cbuf, len, 1, + lvl, &status))) { + + return 1; + + } + + } + + if (status == 1) { + + found_one = 1; + break; + + } } - if (status == 2) { + // even for u128 and long double do cmp_extend_encoding() because + // if we got here their own special trials failed and it might just be + // a cast from e.g. u64 to u128 from the input data. 
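  // Illustration: a target may read a 4- or 8-byte field from the input and
  // zero-extend it to u128 before comparing, so only the low bytes exist
  // verbatim in the input; the narrower encodings tried below can still match
  // and replace those bytes.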
- ++fails; + if ((o->v0 != orig_o->v0 || lvl >= 4) && orig_o->v0 != orig_o->v1) { - } else if (status == 1) { + if (unlikely(cmp_extend_encoding( + afl, h, o->v0, o->v1, orig_o->v0, orig_o->v1, h->attribute, idx, + taint_len, orig_buf, buf, cbuf, len, 1, lvl, &status))) { + return 1; + + } + + } + + if (status == 1) { + + found_one = 1; break; } status = 0; - if (unlikely(cmp_extend_encoding(afl, h, o->v1, o->v0, orig_o->v1, idx, - orig_buf, buf, len, 1, &status))) { + if ((o->v1 != orig_o->v1 || lvl >= 4) && orig_o->v0 != orig_o->v1) { - return 1; + if (unlikely(cmp_extend_encoding( + afl, h, o->v1, o->v0, orig_o->v1, orig_o->v0, h->attribute, idx, + taint_len, orig_buf, buf, cbuf, len, 1, lvl, &status))) { - } + return 1; - if (status == 2) { + } - ++fails; + } - } else if (status == 1) { + if (status == 1) { + found_one = 1; break; } } - if (status == 1) { found_one = 1; } +#ifdef _DEBUG + fprintf(stderr, + "END: %llx->%llx vs %llx->%llx attr=%u i=%u found=%u is128=%u " + "isN=%u size=%u\n", + orig_o->v0, o->v0, orig_o->v1, o->v1, h->attribute, i, found_one, + is_128, is_n, SHAPE_BYTES(h->shape)); +#endif // If failed, add to dictionary - if (fails == 8) { + if (!found_one) { if (afl->pass_stats[key].total == 0) { - try_to_add_to_dict(afl, o->v0, SHAPE_BYTES(h->shape)); - try_to_add_to_dict(afl, o->v1, SHAPE_BYTES(h->shape)); + if (unlikely(is_128)) { + + try_to_add_to_dict128(afl, s128_v0); + try_to_add_to_dict128(afl, s128_v1); + + } else if (unlikely(is_n)) { + + try_to_add_to_dictN(afl, s128_v0, SHAPE_BYTES(h->shape)); + try_to_add_to_dictN(afl, s128_v1, SHAPE_BYTES(h->shape)); + + } else { + + try_to_add_to_dict(afl, o->v0, SHAPE_BYTES(h->shape)); + try_to_add_to_dict(afl, o->v1, SHAPE_BYTES(h->shape)); + + } } @@ -630,20 +1488,19 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) { } static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl, - u8 *o_pattern, u32 idx, u8 *orig_buf, u8 *buf, - u32 len, u8 *status) { + u8 *o_pattern, u32 idx, u32 taint_len, + u8 *orig_buf, u8 *buf, u8 *cbuf, u32 len, + u8 *status) { u32 i; u32 its_len = MIN((u32)32, len - idx); - + its_len = MIN(its_len, taint_len); u8 save[32]; memcpy(save, &buf[idx], its_len); - *status = 0; - for (i = 0; i < its_len; ++i) { - if (pattern[i] != buf[idx + i] || o_pattern[i] != orig_buf[idx + i] || + if ((pattern[i] != buf[idx + i] && o_pattern[i] != orig_buf[idx + i]) || *status == 1) { break; @@ -654,6 +1511,8 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl, if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } + if (*status == 1) { memcpy(cbuf + idx, &buf[idx], i); } + } memcpy(&buf[idx], save, i); @@ -661,23 +1520,21 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl, } -static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) { +static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf, + u32 len, struct tainted *taint) { + struct tainted * t; struct cmp_header *h = &afl->shm.cmp_map->headers[key]; - u32 i, j, idx; + u32 i, j, idx, have_taint = 1, taint_len; u32 loggeds = h->hits; if (h->hits > CMP_MAP_RTN_H) { loggeds = CMP_MAP_RTN_H; } u8 status = 0; - // opt not in the paper - // u32 fails = 0; u8 found_one = 0; for (i = 0; i < loggeds; ++i) { - u32 fails = 0; - struct cmpfn_operands *o = &((struct cmpfn_operands *)afl->shm.cmp_map->log[key])[i]; @@ -696,50 +1553,84 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) { } - for (idx = 0; idx < len 
&& fails < 8; ++idx) { + if (taint) { - if (unlikely(rtn_extend_encoding(afl, o->v0, o->v1, orig_o->v0, idx, - orig_buf, buf, len, &status))) { + t = taint; + while (t->next) { - return 1; + t = t->next; } - if (status == 2) { + } else { - ++fails; + have_taint = 0; + t = NULL; - } else if (status == 1) { + } - break; + for (idx = 0; idx < len; ++idx) { + + if (have_taint) { + + if (!t || idx < t->pos) { + + continue; + + } else { + + taint_len = t->pos + t->len - idx; + + if (idx == t->pos + t->len - 1) { t = t->prev; } + + } + + } else { + + taint_len = len - idx; } - if (unlikely(rtn_extend_encoding(afl, o->v1, o->v0, orig_o->v1, idx, - orig_buf, buf, len, &status))) { + status = 0; + + if (unlikely(rtn_extend_encoding(afl, o->v0, o->v1, orig_o->v0, idx, + taint_len, orig_buf, buf, cbuf, len, + &status))) { return 1; } - if (status == 2) { + if (status == 1) { + + found_one = 1; + break; + + } + + status = 0; + + if (unlikely(rtn_extend_encoding(afl, o->v1, o->v0, orig_o->v1, idx, + taint_len, orig_buf, buf, cbuf, len, + &status))) { - ++fails; + return 1; - } else if (status == 1) { + } + if (status == 1) { + + found_one = 1; break; } } - if (status == 1) { found_one = 1; } - // If failed, add to dictionary - if (fails == 8) { + if (!found_one) { - if (afl->pass_stats[key].total == 0) { + if (unlikely(!afl->pass_stats[key].total)) { maybe_add_auto(afl, o->v0, SHAPE_BYTES(h->shape)); maybe_add_auto(afl, o->v1, SHAPE_BYTES(h->shape)); @@ -791,7 +1682,44 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len, memcpy(afl->orig_cmp_map, afl->shm.cmp_map, sizeof(struct cmp_map)); - if (unlikely(colorization(afl, buf, len, exec_cksum))) { return 1; } + struct tainted *taint = NULL; + + if (!afl->queue_cur->taint || !afl->queue_cur->cmplog_colorinput) { + + if (unlikely(colorization(afl, buf, len, exec_cksum, &taint))) { return 1; } + + // no taint? 
still try, create a dummy to prevent again colorization + if (!taint) { + + taint = ck_alloc(sizeof(struct tainted)); + taint->len = len; + + } + + } else { + + buf = afl->queue_cur->cmplog_colorinput; + taint = afl->queue_cur->taint; + // reget the cmplog information + if (unlikely(common_fuzz_cmplog_stuff(afl, buf, len))) { return 1; } + + } + +#ifdef _DEBUG + dump("ORIG", orig_buf, len); + dump("NEW ", buf, len); +#endif + + struct tainted *t = taint; + + while (t) { + +#ifdef _DEBUG + fprintf(stderr, "T: pos=%u len=%u\n", t->pos, t->len); +#endif + t = t->next; + + } // do it manually, forkserver clear only afl->fsrv.trace_bits memset(afl->shm.cmp_map->headers, 0, sizeof(afl->shm.cmp_map->headers)); @@ -807,15 +1735,38 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len, afl->stage_max = 0; afl->stage_cur = 0; + u32 lvl; + u32 cmplog_done = afl->queue_cur->colorized; + u32 cmplog_lvl = afl->cmplog_lvl; + if (!cmplog_done) { + + lvl = 1; + + } else { + + lvl = 0; + + } + + if (cmplog_lvl >= 2 && cmplog_done < 2) { lvl += 2; } + if (cmplog_lvl >= 3 && cmplog_done < 3) { lvl += 4; } + + u8 *cbuf = afl_realloc((void **)&afl->in_scratch_buf, len + 128); + memcpy(cbuf, orig_buf, len); + u8 *virgin_backup = afl_realloc((void **)&afl->ex_buf, afl->shm.map_size); + memcpy(virgin_backup, afl->virgin_bits, afl->shm.map_size); + u32 k; for (k = 0; k < CMP_MAP_W; ++k) { if (!afl->shm.cmp_map->headers[k].hits) { continue; } - if (afl->pass_stats[k].total && - (rand_below(afl, afl->pass_stats[k].total) >= - afl->pass_stats[k].faileds || - afl->pass_stats[k].total == 0xff)) { + if (afl->pass_stats[k].faileds == 0xff || + afl->pass_stats[k].total == 0xff) { + +#ifdef _DEBUG + fprintf(stderr, "DISABLED %u\n", k); +#endif afl->shm.cmp_map->headers[k].hits = 0; // ignore this cmp @@ -841,11 +1792,19 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len, if (afl->shm.cmp_map->headers[k].type == CMP_TYPE_INS) { - if (unlikely(cmp_fuzz(afl, k, orig_buf, buf, len))) { goto exit_its; } + if (unlikely(cmp_fuzz(afl, k, orig_buf, buf, cbuf, len, lvl, taint))) { + + goto exit_its; + + } } else { - if (unlikely(rtn_fuzz(afl, k, orig_buf, buf, len))) { goto exit_its; } + if (unlikely(rtn_fuzz(afl, k, orig_buf, buf, cbuf, len, taint))) { + + goto exit_its; + + } } @@ -854,12 +1813,86 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len, r = 0; exit_its: + + afl->queue_cur->colorized = afl->cmplog_lvl; + if (afl->cmplog_lvl == CMPLOG_LVL_MAX) { + + ck_free(afl->queue_cur->cmplog_colorinput); + t = taint; + while (taint) { + + t = taint->next; + ck_free(taint); + taint = t; + + } + + afl->queue_cur->taint = NULL; + + } else { + + if (!afl->queue_cur->taint) { afl->queue_cur->taint = taint; } + + if (!afl->queue_cur->cmplog_colorinput) { + + afl->queue_cur->cmplog_colorinput = ck_alloc_nozero(len); + memcpy(afl->queue_cur->cmplog_colorinput, buf, len); + memcpy(buf, orig_buf, len); + + } + + } + + // copy the current virgin bits so we can recover the information + u8 *virgin_save = afl_realloc((void **)&afl->eff_buf, afl->shm.map_size); + memcpy(virgin_save, afl->virgin_bits, afl->shm.map_size); + // reset virgin bits to the backup previous to redqueen + memcpy(afl->virgin_bits, virgin_backup, afl->shm.map_size); + + u8 status = 0; + its_fuzz(afl, cbuf, len, &status); + + // now combine with the saved virgin bits +#ifdef WORD_SIZE_64 + u64 *v = (u64 *)afl->virgin_bits; + u64 *s = (u64 *)virgin_save; + u32 i; + for (i = 0; i < (afl->shm.map_size >> 3); 
i++) { + + v[i] &= s[i]; + + } + +#else + u32 *v = (u64 *)afl->virgin_bits; + u32 *s = (u64 *)virgin_save; + u32 i; + for (i = 0; i < (afl->shm.map_size >> 2); i++) { + + v[i] &= s[i]; + + } + +#endif + +#ifdef _DEBUG + dump("COMB", cbuf, len); + if (status == 1) { + + fprintf(stderr, "NEW COMBINED\n"); + + } else { + + fprintf(stderr, "NO new combined\n"); + + } + +#endif + new_hit_cnt = afl->queued_paths + afl->unique_crashes; afl->stage_finds[STAGE_ITS] += new_hit_cnt - orig_hit_cnt; afl->stage_cycles[STAGE_ITS] += afl->fsrv.total_execs - orig_execs; - memcpy(buf, orig_buf, len); - return r; } diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index 60c9684c..8423a3d1 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -102,6 +102,7 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) { afl->stats_update_freq = 1; afl->stats_avg_exec = 0; afl->skip_deterministic = 1; + afl->cmplog_lvl = 1; #ifndef NO_SPLICING afl->use_splicing = 1; #endif diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index bb2674f0..1e914ca6 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -77,13 +77,8 @@ static void at_exit() { } int kill_signal = SIGKILL; - /* AFL_KILL_SIGNAL should already be a valid int at this point */ - if (getenv("AFL_KILL_SIGNAL")) { - - kill_signal = atoi(getenv("AFL_KILL_SIGNAL")); - - } + if ((ptr = getenv("AFL_KILL_SIGNAL"))) { kill_signal = atoi(ptr); } if (pid1 > 0) { kill(pid1, kill_signal); } if (pid2 > 0) { kill(pid2, kill_signal); } @@ -103,13 +98,14 @@ static void usage(u8 *argv0, int more_help) { "Execution control settings:\n" " -p schedule - power schedules compute a seed's performance score:\n" - " -- see docs/power_schedules.md\n" + " fast(default), explore, exploit, seek, rare, mmopt, " + "coe, lin\n" + " quad -- see docs/power_schedules.md\n" " -f file - location read by the fuzzed program (default: stdin " "or @@)\n" " -t msec - timeout for each run (auto-scaled, 50-%u ms)\n" - " -m megs - memory limit for child process (%u MB, 0 = no limit)\n" + " -m megs - memory limit for child process (%u MB, 0 = no limit " + "[default])\n" " -Q - use binary-only instrumentation (QEMU mode)\n" " -U - use unicorn-based instrumentation (Unicorn mode)\n" " -W - use qemu-based instrumentation with Wine (Wine " @@ -125,7 +121,9 @@ static void usage(u8 *argv0, int more_help) { " See docs/README.MOpt.md\n" " -c program - enable CmpLog by specifying a binary compiled for " "it.\n" - " if using QEMU, just use -c 0.\n\n" + " if using QEMU, just use -c 0.\n" + " -l cmplog_level - set the complexity/intensivity of CmpLog.\n" + " Values: 1 (default), 2 (intensive) and 3 (heavy)\n\n" "Fuzzing behavior settings:\n" " -Z - sequential queue selection instead of weighted " @@ -337,7 +335,6 @@ int main(int argc, char **argv_orig, char **envp) { if (get_afl_env("AFL_DEBUG")) { debug = afl->debug = 1; } - // map_size = get_map_size(); afl_state_init(afl, map_size); afl->debug = debug; afl_fsrv_init(&afl->fsrv); @@ -358,7 +355,8 @@ int main(int argc, char **argv_orig, char **envp) { while ((opt = getopt( argc, argv, - "+b:c:i:I:o:f:F:m:t:T:dDnCB:S:M:x:QNUWe:p:s:V:E:L:hRP:Z")) > 0) { + "+b:B:c:CdDe:E:hi:I:f:F:l:L:m:M:nNo:p:P:RQs:S:t:T:UV:Wx:Z")) > + 0) { switch (opt) { @@ -787,6 +785,26 @@ int main(int argc, char **argv_orig, char **envp) { } break; + case 'l': { + + afl->cmplog_lvl = atoi(optarg); + if (afl->cmplog_lvl < 1 || afl->cmplog_lvl > CMPLOG_LVL_MAX) { + + FATAL( + "Bad complog level value, accepted values are 1 (default), 2 and " + "%u.", + CMPLOG_LVL_MAX); + + } + + if 
(afl->cmplog_lvl == CMPLOG_LVL_MAX) { + + afl->cmplog_max_filesize = MAX_FILE; + + } + + } break; + case 'L': { /* MOpt mode */ if (afl->limit_time_sig) { FATAL("Multiple -L options not supported"); } @@ -1635,6 +1653,14 @@ int main(int argc, char **argv_orig, char **envp) { if (afl->use_splicing) { ++afl->cycles_wo_finds; + + if (unlikely(afl->shm.cmplog_mode && + afl->cmplog_max_filesize < MAX_FILE)) { + + afl->cmplog_max_filesize <<= 4; + + } + switch (afl->expand_havoc) { case 0: @@ -1652,6 +1678,7 @@ int main(int argc, char **argv_orig, char **envp) { } afl->expand_havoc = 2; + if (afl->cmplog_lvl < 2) afl->cmplog_lvl = 2; break; case 2: // if (!have_p) afl->schedule = EXPLOIT; @@ -1665,11 +1692,14 @@ int main(int argc, char **argv_orig, char **envp) { afl->expand_havoc = 4; break; case 4: - // if not in sync mode, enable deterministic mode? - // if (!afl->sync_id) afl->skip_deterministic = 0; afl->expand_havoc = 5; + if (afl->cmplog_lvl < 3) afl->cmplog_lvl = 3; break; case 5: + // if not in sync mode, enable deterministic mode? + if (!afl->sync_id) afl->skip_deterministic = 0; + afl->expand_havoc = 6; + case 6: // nothing else currently break; -- cgit 1.4.1 From c71ce79963ffd3e1203d1078b8a60f91c4ecebf1 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Sun, 17 Jan 2021 15:18:20 +0100 Subject: fix colorization --- GNUmakefile | 4 +-- include/afl-fuzz.h | 4 +-- instrumentation/afl-compiler-rt.o.c | 3 +- src/afl-fuzz-one.c | 6 ++-- src/afl-fuzz-redqueen.c | 56 ++++++++++++++++++++++--------------- src/afl-fuzz.c | 2 ++ src/afl-showmap.c | 25 ++++++++++++++--- 7 files changed, 65 insertions(+), 35 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/GNUmakefile b/GNUmakefile index 7b05a1d5..c71a7d47 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -428,8 +428,8 @@ src/afl-sharedmem.o : $(COMM_HDR) src/afl-sharedmem.c include/sharedmem.h afl-fuzz: $(COMM_HDR) include/afl-fuzz.h $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o | test_x86 $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o $@ $(PYFLAGS) $(LDFLAGS) -lm -afl-showmap: src/afl-showmap.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o -o $@ $(LDFLAGS) +afl-showmap: src/afl-showmap.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o $(COMM_HDR) | test_x86 + $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o $@ $(LDFLAGS) afl-tmin: src/afl-tmin.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o $(COMM_HDR) | test_x86 $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o $@ $(LDFLAGS) diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 8a2122dc..621e8745 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -1136,9 +1136,9 @@ void read_foreign_testcases(afl_state_t *, int); u8 common_fuzz_cmplog_stuff(afl_state_t *afl, u8 *out_buf, u32 len); /* RedQueen */ -u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len, - u64 exec_cksum); +u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len); +/* our RNG wrapper */ AFL_RAND_RETURN rand_next(afl_state_t *afl); 
/* probability between 0.0 and 1.0 */ diff --git a/instrumentation/afl-compiler-rt.o.c b/instrumentation/afl-compiler-rt.o.c index 5d75af78..bbec52f9 100644 --- a/instrumentation/afl-compiler-rt.o.c +++ b/instrumentation/afl-compiler-rt.o.c @@ -1209,7 +1209,8 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) { void __cmplog_ins_hook1(uint8_t arg1, uint8_t arg2, uint8_t attr) { - // fprintf(stderr, "hook1 arg0=%02x arg1=%02x attr=%u\n", arg1, arg2, attr); + // fprintf(stderr, "hook1 arg0=%02x arg1=%02x attr=%u\n", + // (u8) arg1, (u8) arg2, attr); if (unlikely(!__afl_cmp_map)) return; diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 596bae22..4ce22c08 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -562,8 +562,7 @@ u8 fuzz_one_original(afl_state_t *afl) { !(afl->fsrv.total_execs % afl->queued_paths) || get_cur_time() - afl->last_path_time > 15000) { - if (input_to_state_stage(afl, in_buf, out_buf, len, - afl->queue_cur->exec_cksum)) { + if (input_to_state_stage(afl, in_buf, out_buf, len)) { goto abandon_entry; @@ -2986,8 +2985,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { !(afl->fsrv.total_execs % afl->queued_paths) || get_cur_time() - afl->last_path_time > 15000) { - if (input_to_state_stage(afl, in_buf, out_buf, len, - afl->queue_cur->exec_cksum)) { + if (input_to_state_stage(afl, in_buf, out_buf, len)) { goto abandon_entry; diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index 955a9232..052f59f1 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -88,7 +88,7 @@ static struct range *pop_biggest_range(struct range **ranges) { static void dump(char *txt, u8 *buf, u32 len) { u32 i; - fprintf(stderr, "DUMP %s %llx ", txt, hash64(buf, len, 0)); + fprintf(stderr, "DUMP %s %llx ", txt, hash64(buf, len, HASH_CONST)); for (i = 0; i < len; i++) fprintf(stderr, "%02x", buf[i]); fprintf(stderr, "\n"); @@ -117,6 +117,7 @@ static u8 get_exec_checksum(afl_state_t *afl, u8 *buf, u32 len, u64 *cksum) { if (unlikely(common_fuzz_stuff(afl, buf, len))) { return 1; } *cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST); + return 0; } @@ -200,7 +201,7 @@ static void type_replace(afl_state_t *afl, u8 *buf, u32 len) { } -static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum, +static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, struct tainted **taints) { struct range * ranges = add_range(NULL, 0, len - 1), *rng; @@ -208,18 +209,31 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum, u8 * backup = ck_alloc_nozero(len); u8 * changed = ck_alloc_nozero(len); - u64 orig_hit_cnt, new_hit_cnt; + u64 orig_hit_cnt, new_hit_cnt, exec_cksum; orig_hit_cnt = afl->queued_paths + afl->unique_crashes; afl->stage_name = "colorization"; afl->stage_short = "colorization"; afl->stage_max = (len << 1); - afl->stage_cur = 0; + + // in colorization we do not classify counts, hence we have to calculate + // the original checksum! 
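  // That is, the checksum stored with the queue entry was computed over a
  // hit-count-classified trace map, while the runs below hash the raw map, so
  // a fresh baseline is taken here before any bytes are replaced.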
+ if (unlikely(get_exec_checksum(afl, buf, len, &exec_cksum))) { + + goto checksum_fail; + + } + memcpy(backup, buf, len); memcpy(changed, buf, len); type_replace(afl, changed, len); +#ifdef _DEBUG + dump("ORIG", buf, len); + dump("CHAN", changed, len); +#endif + while ((rng = pop_biggest_range(&ranges)) != NULL && afl->stage_cur < afl->stage_max) { @@ -227,7 +241,7 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum, memcpy(buf + rng->start, changed + rng->start, s); - u64 cksum; + u64 cksum = 0; u64 start_us = get_cur_time_us(); if (unlikely(get_exec_checksum(afl, buf, len, &cksum))) { @@ -633,11 +647,11 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, if (SHAPE_BYTES(h->shape) >= 4 && *status != 1) { // if (its_len >= 4 && (attr <= 1 || attr >= 8)) - // fprintf(stderr, - // "TestU32: %u>=4 %x==%llx" - // " %x==%llx (idx=%u attr=%u) <= %llx<-%llx\n", - // its_len, *buf_32, pattern, *o_buf_32, o_pattern, idx, attr, - // repl, changed_val); + // fprintf(stderr, + // "TestU32: %u>=4 %x==%llx" + // " %x==%llx (idx=%u attr=%u) <= %llx<-%llx\n", + // its_len, *buf_32, pattern, *o_buf_32, o_pattern, idx, attr, + // repl, changed_val); if (its_len >= 4 && ((*buf_32 == (u32)pattern && *o_buf_32 == (u32)o_pattern) || @@ -702,10 +716,10 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, if (*status != 1) { // u8 // if (its_len >= 1 && (attr <= 1 || attr >= 8)) - // fprintf(stderr, - // "TestU8: %u>=1 %x==%x %x==%x (idx=%u attr=%u) <= %x<-%x\n", - // its_len, *buf_8, pattern, *o_buf_8, o_pattern, idx, attr, - // repl, changed_val); + // fprintf(stderr, + // "TestU8: %u>=1 %x==%x %x==%x (idx=%u attr=%u) <= %x<-%x\n", + // its_len, *buf_8, (u8)pattern, *o_buf_8, (u8)o_pattern, idx, + // attr, (u8)repl, (u8)changed_val); if (its_len >= 1 && ((*buf_8 == (u8)pattern && *o_buf_8 == (u8)o_pattern) || @@ -1659,8 +1673,7 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf, ///// Input to State stage // afl->queue_cur->exec_cksum -u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len, - u64 exec_cksum) { +u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) { u8 r = 1; if (unlikely(!afl->orig_cmp_map)) { @@ -1686,7 +1699,7 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len, if (!afl->queue_cur->taint || !afl->queue_cur->cmplog_colorinput) { - if (unlikely(colorization(afl, buf, len, exec_cksum, &taint))) { return 1; } + if (unlikely(colorization(afl, buf, len, &taint))) { return 1; } // no taint? 
still try, create a dummy to prevent again colorization if (!taint) { @@ -1696,6 +1709,10 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len, } +#ifdef _DEBUG + dump("NEW ", buf, len); +#endif + } else { buf = afl->queue_cur->cmplog_colorinput; @@ -1705,11 +1722,6 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len, } -#ifdef _DEBUG - dump("ORIG", orig_buf, len); - dump("NEW ", buf, len); -#endif - struct tainted *t = taint; while (t) { diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 1e914ca6..88c40ee8 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -1094,6 +1094,8 @@ int main(int argc, char **argv_orig, char **envp) { } + if (afl->shm.cmplog_mode) { OKF("CmpLog level: %u", afl->cmplog_lvl); } + /* Dynamically allocate memory for AFLFast schedules */ if (afl->schedule >= FAST && afl->schedule <= RARE) { diff --git a/src/afl-showmap.c b/src/afl-showmap.c index 5c9d38e0..5d98d646 100644 --- a/src/afl-showmap.c +++ b/src/afl-showmap.c @@ -42,6 +42,7 @@ #include "sharedmem.h" #include "forkserver.h" #include "common.h" +#include "hash.h" #include #include @@ -86,7 +87,8 @@ static u8 quiet_mode, /* Hide non-essential messages? */ binary_mode, /* Write output as a binary map */ keep_cores, /* Allow coredumps? */ remove_shm = 1, /* remove shmem? */ - collect_coverage; /* collect coverage */ + collect_coverage, /* collect coverage */ + no_classify; /* do not classify counts */ static volatile u8 stop_soon, /* Ctrl-C pressed? */ child_crashed; /* Child crashed? */ @@ -317,7 +319,9 @@ static void showmap_run_target_forkserver(afl_forkserver_t *fsrv, u8 *mem, } - classify_counts(fsrv); + if (fsrv->trace_bits[0] == 1) { fsrv->trace_bits[0] = 0; } + + if (!no_classify) { classify_counts(fsrv); } if (!quiet_mode) { SAYF(cRST "-- Program output ends --\n"); } @@ -490,7 +494,9 @@ static void showmap_run_target(afl_forkserver_t *fsrv, char **argv) { } - classify_counts(fsrv); + if (fsrv->trace_bits[0] == 1) { fsrv->trace_bits[0] = 0; } + + if (!no_classify) { classify_counts(fsrv); } if (!quiet_mode) { SAYF(cRST "-- Program output ends --\n"); } @@ -680,6 +686,7 @@ static void usage(u8 *argv0) { " -q - sink program's output and don't show messages\n" " -e - show edge coverage only, ignore hit counts\n" " -r - show real tuple values instead of AFL filter values\n" + " -s - do not classify the map\n" " -c - allow core dumps\n\n" "This tool displays raw tuple data captured by AFL instrumentation.\n" @@ -729,10 +736,14 @@ int main(int argc, char **argv_orig, char **envp) { if (getenv("AFL_QUIET") != NULL) { be_quiet = 1; } - while ((opt = getopt(argc, argv, "+i:o:f:m:t:A:eqCZQUWbcrh")) > 0) { + while ((opt = getopt(argc, argv, "+i:o:f:m:t:A:eqCZQUWbcrsh")) > 0) { switch (opt) { + case 's': + no_classify = 1; + break; + case 'C': collect_coverage = 1; quiet_mode = 1; @@ -1213,6 +1224,12 @@ int main(int argc, char **argv_orig, char **envp) { showmap_run_target(fsrv, use_argv); tcnt = write_results_to_file(fsrv, out_file); + if (!quiet_mode) { + + OKF("Hash of coverage map: %llx", + hash64(fsrv->trace_bits, fsrv->map_size, HASH_CONST)); + + } } -- cgit 1.4.1 From cd8668ad3ace7d5e369d492197a7da787183c056 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Mon, 25 Jan 2021 13:55:09 +0100 Subject: mopt fix --- src/afl-fuzz-one.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 4ce22c08..a7262eec 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -4781,8 
+4781,7 @@ pacemaker_fuzzing: } - afl->stage_cycles_puppet_v2[afl->swarm_now] - [STAGE_OverWriteExtra]++; + MOpt_globals.cycles_v2[STAGE_OverWriteExtra]++; break; @@ -4837,8 +4836,7 @@ pacemaker_fuzzing: memcpy(out_buf + insert_at, ptr, extra_len); temp_len += extra_len; - afl->stage_cycles_puppet_v2[afl->swarm_now][STAGE_InsertExtra] += - 1; + MOpt_globals.cycles_v2[STAGE_InsertExtra]++; break; } @@ -4920,7 +4918,7 @@ pacemaker_fuzzing: } - afl->stage_cycles_puppet_v2[afl->swarm_now][STAGE_Splice]++; + MOpt_globals.cycles_v2[STAGE_Splice]++; break; } // end of default: -- cgit 1.4.1 From e0663c91b9cbf1bdc46593dec4ba11224e6847d7 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Tue, 26 Jan 2021 12:15:13 +0100 Subject: wip fix --- src/afl-fuzz-init.c | 23 ++++++++++++++++++----- src/afl-fuzz-one.c | 13 +++++++++---- 2 files changed, 27 insertions(+), 9 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index fed58eb6..2cb152a9 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -1026,6 +1026,14 @@ void perform_dry_run(afl_state_t *afl) { /* Remove from fuzzing queue but keep for splicing */ struct queue_entry *p = afl->queue; + + if (!p->disabled && !p->was_fuzzed) { + + --afl->pending_not_fuzzed; + --afl->active_paths; + + } + p->disabled = 1; p->perf_score = 0; while (p && p->next != q) @@ -1036,9 +1044,6 @@ void perform_dry_run(afl_state_t *afl) { else afl->queue = q->next; - --afl->pending_not_fuzzed; - --afl->active_paths; - afl->max_depth = 0; p = afl->queue; while (p) { @@ -1123,8 +1128,16 @@ restart_outer_cull_loop: if (!p->cal_failed && p->exec_cksum == q->exec_cksum) { duplicates = 1; - --afl->pending_not_fuzzed; - afl->active_paths--; + if (!p->disabled && !q->disabled && !p->was_fuzzed && !q->was_fuzzed) { + + --afl->pending_not_fuzzed; + afl->active_paths--; + + } else { + + FATAL("disabled entry? this should not happen, please report!"); + + } // We do not remove any of the memory allocated because for // splicing the data might still be interesting. diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index a7262eec..af768183 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -2782,11 +2782,16 @@ abandon_entry: cycle and have not seen this entry before. */ if (!afl->stop_soon && !afl->queue_cur->cal_failed && - (afl->queue_cur->was_fuzzed == 0 || afl->queue_cur->fuzz_level == 0)) { + (afl->queue_cur->was_fuzzed == 0 || afl->queue_cur->fuzz_level == 0) && + !afl->queue_cur->disabled) { - --afl->pending_not_fuzzed; - afl->queue_cur->was_fuzzed = 1; - if (afl->queue_cur->favored) { --afl->pending_favored; } + if (!afl->queue_cur->was_fuzzed) { + + --afl->pending_not_fuzzed; + afl->queue_cur->was_fuzzed = 1; + if (afl->queue_cur->favored) { --afl->pending_favored; } + + } } -- cgit 1.4.1 From a61a30dee03aced16d117150c4dbfd7079de7e68 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Thu, 28 Jan 2021 14:11:33 +0100 Subject: fix another pending_not_fuzzed location --- src/afl-fuzz-extras.c | 2 +- src/afl-fuzz-init.c | 5 +++-- src/afl-fuzz-one.c | 4 ++-- src/afl-fuzz.c | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c index a3583651..7ecad233 100644 --- a/src/afl-fuzz-extras.c +++ b/src/afl-fuzz-extras.c @@ -413,7 +413,7 @@ void dedup_extras(afl_state_t *afl) { if (j + 1 < afl->extras_cnt) // not at the end of the list? 
memmove((char *)&afl->extras[j], (char *)&afl->extras[j + 1], (afl->extras_cnt - j - 1) * sizeof(struct extra_data)); - afl->extras_cnt--; + --afl->extras_cnt; goto restart_dedup; // restart if several duplicates are in a row } diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 5f5e65cd..84f81112 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -1041,6 +1041,7 @@ void perform_dry_run(afl_state_t *afl) { if (!p->was_fuzzed) { + p->was_fuzzed = 1; --afl->pending_not_fuzzed; --afl->active_paths; @@ -1153,7 +1154,7 @@ restart_outer_cull_loop: p->was_fuzzed = 1; --afl->pending_not_fuzzed; - afl->active_paths--; + --afl->active_paths; } @@ -1168,7 +1169,7 @@ restart_outer_cull_loop: q->was_fuzzed = 1; --afl->pending_not_fuzzed; - afl->active_paths--; + --afl->active_paths; } diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index af768183..ff766158 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -530,7 +530,7 @@ u8 fuzz_one_original(afl_state_t *afl) { len = afl->queue_cur->len; /* maybe current entry is not ready for splicing anymore */ - if (unlikely(len <= 4 && old_len > 4)) afl->ready_for_splicing_count--; + if (unlikely(len <= 4 && old_len > 4)) --afl->ready_for_splicing_count; } @@ -2958,7 +2958,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { len = afl->queue_cur->len; /* maybe current entry is not ready for splicing anymore */ - if (unlikely(len <= 4 && old_len > 4)) afl->ready_for_splicing_count--; + if (unlikely(len <= 4 && old_len > 4)) --afl->ready_for_splicing_count; } diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index a1f749b5..e856730e 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -1636,7 +1636,7 @@ int main(int argc, char **argv_orig, char **envp) { (afl->old_seed_selection && !afl->queue_cur))) { ++afl->queue_cycle; - runs_in_current_cycle = 0; + runs_in_current_cycle = (u32)-1; afl->cur_skipped_paths = 0; if (unlikely(afl->old_seed_selection)) { -- cgit 1.4.1 From 9d08f0d098c91e69b5fe41674e4c5d05363af604 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Sat, 30 Jan 2021 15:39:47 +0100 Subject: added AFL_CMPLOG_ONLY_NEW feature --- docs/Changelog.md | 2 + docs/env_variables.md | 5 ++ include/afl-fuzz.h | 2 +- include/common.h | 2 +- include/envs.h | 1 + src/afl-analyze.c | 22 ++++---- src/afl-common.c | 6 +-- src/afl-fuzz-init.c | 3 ++ src/afl-fuzz-one.c | 5 +- src/afl-fuzz-redqueen.c | 135 +++++++++++++++++++++++++----------------------- src/afl-fuzz-state.c | 7 +++ src/afl-fuzz.c | 35 +++++++------ src/afl-showmap.c | 24 ++++----- src/afl-tmin.c | 24 ++++----- 14 files changed, 147 insertions(+), 126 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/docs/Changelog.md b/docs/Changelog.md index 329b7520..6e59961b 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -20,6 +20,8 @@ sending a mail to . transformations (e.g. toupper, tolower, to/from hex, xor, arithmetics, etc.). this is costly hence new command line option -l that sets the intensity (values 1 to 3). recommended is 1 or 2. 
+ - added `AFL_CMPLOG_ONLY_NEW` to not use cmplog on initial testcases from + `-i` or resumes (as these have most likely already been done) - fix crash for very, very fast targets+systems (thanks to mhlakhani for reporting) - if determinstic mode is active (-D, or -M without -d) then we sync diff --git a/docs/env_variables.md b/docs/env_variables.md index 66d85749..4c3b1cfb 100644 --- a/docs/env_variables.md +++ b/docs/env_variables.md @@ -287,6 +287,11 @@ checks or alter some of the more exotic semantics of the tool: the target. This must be equal or larger than the size the target was compiled with. + - `AFL_CMPLOG_ONLY_NEW` will only perform the expensive cmplog feature for + newly found testcases and not for testcases that are loaded on startup + (`-i in`). This is an important feature to set when resuming a fuzzing + session. + - `AFL_TESTCACHE_SIZE` allows you to override the size of `#define TESTCASE_CACHE` in config.h. Recommended values are 50-250MB - or more if your fuzzing finds a huge amount of paths for large inputs. diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 12db9e4d..e8a21cb5 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -384,7 +384,7 @@ typedef struct afl_env_vars { afl_dumb_forksrv, afl_import_first, afl_custom_mutator_only, afl_no_ui, afl_force_ui, afl_i_dont_care_about_missing_crashes, afl_bench_just_one, afl_bench_until_crash, afl_debug_child, afl_autoresume, afl_cal_fast, - afl_cycle_schedules, afl_expand_havoc, afl_statsd; + afl_cycle_schedules, afl_expand_havoc, afl_statsd, afl_cmplog_only_new; u8 *afl_tmpdir, *afl_custom_mutator_library, *afl_python_module, *afl_path, *afl_hang_tmout, *afl_forksrv_init_tmout, *afl_skip_crashes, *afl_preload, diff --git a/include/common.h b/include/common.h index bdaa1735..bb8831f2 100644 --- a/include/common.h +++ b/include/common.h @@ -47,7 +47,7 @@ void argv_cpy_free(char **argv); char **get_qemu_argv(u8 *own_loc, u8 **target_path_p, int argc, char **argv); char **get_wine_argv(u8 *own_loc, u8 **target_path_p, int argc, char **argv); char * get_afl_env(char *env); -u8 *get_libqasan_path(u8 *own_loc); +u8 * get_libqasan_path(u8 *own_loc); extern u8 be_quiet; extern u8 *doc_path; /* path to documentation dir */ diff --git a/include/envs.h b/include/envs.h index 926c9e27..210b34a6 100644 --- a/include/envs.h +++ b/include/envs.h @@ -28,6 +28,7 @@ static char *afl_environment_variables[] = { "AFL_CC", "AFL_CMIN_ALLOW_ANY", "AFL_CMIN_CRASHES_ONLY", + "AFL_CMPLOG_ONLY_NEW", "AFL_CODE_END", "AFL_CODE_START", "AFL_COMPCOV_BINNAME", diff --git a/src/afl-analyze.c b/src/afl-analyze.c index 28598ba0..20aef2da 100644 --- a/src/afl-analyze.c +++ b/src/afl-analyze.c @@ -1079,28 +1079,28 @@ int main(int argc, char **argv_orig, char **envp) { if (optind == argc || !in_file) { usage(argv[0]); } if (qemu_mode && getenv("AFL_USE_QASAN")) { - - u8* preload = getenv("AFL_PRELOAD"); - u8* libqasan = get_libqasan_path(argv_orig[0]); - + + u8 *preload = getenv("AFL_PRELOAD"); + u8 *libqasan = get_libqasan_path(argv_orig[0]); + if (!preload) { - + setenv("AFL_PRELOAD", libqasan, 0); - + } else { - + u8 *result = ck_alloc(strlen(libqasan) + strlen(preload) + 2); strcpy(result, libqasan); strcat(result, " "); strcat(result, preload); - + setenv("AFL_PRELOAD", result, 1); ck_free(result); - + } - + ck_free(libqasan); - + } map_size = get_map_size(); diff --git a/src/afl-common.c b/src/afl-common.c index a69f2e97..235c4c05 100644 --- a/src/afl-common.c +++ b/src/afl-common.c @@ -364,11 +364,7 @@ u8 *get_libqasan_path(u8 *own_loc) 
{ cp = alloc_printf("%s/libqasan.so", own_copy); ck_free(own_copy); - if (!access(cp, X_OK)) { - - return cp; - - } + if (!access(cp, X_OK)) { return cp; } } else { diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 1808f0a1..2a7864f9 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -25,6 +25,7 @@ #include "afl-fuzz.h" #include +#include "cmplog.h" #ifdef HAVE_AFFINITY @@ -833,6 +834,8 @@ void perform_dry_run(afl_state_t *afl) { } + if (afl->afl_env.afl_cmplog_only_new) { q->colorized = CMPLOG_LVL_MAX; } + u8 *fn = strrchr(q->fname, '/') + 1; ACTF("Attempting dry run with '%s'...", fn); diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index ff766158..0cf889a8 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -26,6 +26,7 @@ #include "afl-fuzz.h" #include #include +#include "cmplog.h" /* MOpt */ @@ -553,7 +554,7 @@ u8 fuzz_one_original(afl_state_t *afl) { if (unlikely(len < 4)) { - afl->queue_cur->colorized = 0xff; + afl->queue_cur->colorized = CMPLOG_LVL_MAX; } else { @@ -2981,7 +2982,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { if (unlikely(len < 4)) { - afl->queue_cur->colorized = 0xff; + afl->queue_cur->colorized = CMPLOG_LVL_MAX; } else { diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index fc620781..d7657c1d 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -1118,7 +1118,11 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, #ifdef ARITHMETIC_LESSER_GREATER if (lvl < LVL3 || attr == IS_TRANSFORM) { return 0; } - if (!(attr & (IS_GREATER | IS_LESSER)) || SHAPE_BYTES(h->shape) < 4) { return 0; } + if (!(attr & (IS_GREATER | IS_LESSER)) || SHAPE_BYTES(h->shape) < 4) { + + return 0; + + } // transform >= to < and <= to > if ((attr & IS_EQUAL) && (attr & (IS_GREATER | IS_LESSER))) { @@ -1138,110 +1142,110 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, // lesser/greater FP comparison if (attr >= IS_FP && attr < IS_FP_MOD) { - u64 repl_new; - - if (attr & IS_GREATER) { + u64 repl_new; - if (SHAPE_BYTES(h->shape) == 4 && its_len >= 4) { + if (attr & IS_GREATER) { - float *f = (float *)&repl; - float g = *f; - g += 1.0; - u32 *r = (u32 *)&g; - repl_new = (u32)*r; + if (SHAPE_BYTES(h->shape) == 4 && its_len >= 4) { - } else if (SHAPE_BYTES(h->shape) == 8 && its_len >= 8) { + float *f = (float *)&repl; + float g = *f; + g += 1.0; + u32 *r = (u32 *)&g; + repl_new = (u32)*r; - double *f = (double *)&repl; - double g = *f; - g += 1.0; + } else if (SHAPE_BYTES(h->shape) == 8 && its_len >= 8) { - u64 *r = (u64 *)&g; - repl_new = *r; + double *f = (double *)&repl; + double g = *f; + g += 1.0; - } else { + u64 *r = (u64 *)&g; + repl_new = *r; - return 0; + } else { - } + return 0; - changed_val = repl_new; + } - if (unlikely(cmp_extend_encoding( - afl, h, pattern, repl_new, o_pattern, changed_val, 16, idx, - taint_len, orig_buf, buf, cbuf, len, 1, lvl, status))) { + changed_val = repl_new; - return 1; + if (unlikely(cmp_extend_encoding( + afl, h, pattern, repl_new, o_pattern, changed_val, 16, idx, + taint_len, orig_buf, buf, cbuf, len, 1, lvl, status))) { - } + return 1; - } else { + } - if (SHAPE_BYTES(h->shape) == 4) { + } else { - float *f = (float *)&repl; - float g = *f; - g -= 1.0; - u32 *r = (u32 *)&g; - repl_new = (u32)*r; + if (SHAPE_BYTES(h->shape) == 4) { - } else if (SHAPE_BYTES(h->shape) == 8) { + float *f = (float *)&repl; + float g = *f; + g -= 1.0; + u32 *r = (u32 *)&g; + repl_new = (u32)*r; - double *f = (double *)&repl; - double 
g = *f; - g -= 1.0; - u64 *r = (u64 *)&g; - repl_new = *r; + } else if (SHAPE_BYTES(h->shape) == 8) { - } else { + double *f = (double *)&repl; + double g = *f; + g -= 1.0; + u64 *r = (u64 *)&g; + repl_new = *r; - return 0; + } else { - } + return 0; - changed_val = repl_new; + } - if (unlikely(cmp_extend_encoding( - afl, h, pattern, repl_new, o_pattern, changed_val, 16, idx, - taint_len, orig_buf, buf, cbuf, len, 1, lvl, status))) { + changed_val = repl_new; - return 1; + if (unlikely(cmp_extend_encoding( + afl, h, pattern, repl_new, o_pattern, changed_val, 16, idx, + taint_len, orig_buf, buf, cbuf, len, 1, lvl, status))) { - } + return 1; } - // transform double to float, llvm likes to do that internally ... - if (SHAPE_BYTES(h->shape) == 8 && its_len >= 4) { + } - double *f = (double *)&repl; - float g = (float)*f; - repl_new = 0; + // transform double to float, llvm likes to do that internally ... + if (SHAPE_BYTES(h->shape) == 8 && its_len >= 4) { + + double *f = (double *)&repl; + float g = (float)*f; + repl_new = 0; #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) - memcpy((char *)&repl_new, (char *)&g, 4); + memcpy((char *)&repl_new, (char *)&g, 4); #else - memcpy(((char *)&repl_new) + 4, (char *)&g, 4); + memcpy(((char *)&repl_new) + 4, (char *)&g, 4); #endif - changed_val = repl_new; - h->shape = 3; // modify shape - - // fprintf(stderr, "DOUBLE2FLOAT %llx\n", repl_new); + changed_val = repl_new; + h->shape = 3; // modify shape - if (unlikely(cmp_extend_encoding( - afl, h, pattern, repl_new, o_pattern, changed_val, 16, idx, - taint_len, orig_buf, buf, cbuf, len, 1, lvl, status))) { + // fprintf(stderr, "DOUBLE2FLOAT %llx\n", repl_new); - h->shape = 7; // recover shape - return 1; - - } + if (unlikely(cmp_extend_encoding( + afl, h, pattern, repl_new, o_pattern, changed_val, 16, idx, + taint_len, orig_buf, buf, cbuf, len, 1, lvl, status))) { h->shape = 7; // recover shape + return 1; } + h->shape = 7; // recover shape + } + } + else if (attr < IS_FP) { // lesser/greater integer comparison @@ -1707,6 +1711,7 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf, try_to_add_to_dictN(afl, s128_v1, SHAPE_BYTES(h->shape)); } else + #endif { diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index 8423a3d1..5040e3ef 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -236,6 +236,13 @@ void read_afl_environment(afl_state_t *afl, char **envp) { afl->afl_env.afl_custom_mutator_only = get_afl_env(afl_environment_variables[i]) ? 1 : 0; + } else if (!strncmp(env, "AFL_CMPLOG_ONLY_NEW", + + afl_environment_variable_len)) { + + afl->afl_env.afl_cmplog_only_new = + get_afl_env(afl_environment_variables[i]) ? 
1 : 0; + } else if (!strncmp(env, "AFL_NO_UI", afl_environment_variable_len)) { afl->afl_env.afl_no_ui = diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 312d9424..9d9b0434 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -181,6 +181,7 @@ static void usage(u8 *argv0, int more_help) { "AFL_AUTORESUME: resume fuzzing if directory specified by -o already exists\n" "AFL_BENCH_JUST_ONE: run the target just once\n" "AFL_BENCH_UNTIL_CRASH: exit soon when the first crashing input has been found\n" + "AFL_CMPLOG_ONLY_NEW: do not run cmplog on initial testcases (good for resumes!)\n" "AFL_CRASH_EXITCODE: optional child exit code to be interpreted as crash\n" "AFL_CUSTOM_MUTATOR_LIBRARY: lib with afl_custom_fuzz() to mutate inputs\n" "AFL_CUSTOM_MUTATOR_ONLY: avoid AFL++'s internal mutators\n" @@ -326,8 +327,9 @@ int main(int argc, char **argv_orig, char **envp) { "compile time)"); } + #endif - + char **argv = argv_cpy_dup(argc, argv_orig); afl_state_t *afl = calloc(1, sizeof(afl_state_t)); @@ -356,8 +358,7 @@ int main(int argc, char **argv_orig, char **envp) { while ((opt = getopt( argc, argv, - "+b:B:c:CdDe:E:hi:I:f:F:l:L:m:M:nNo:p:RQs:S:t:T:UV:Wx:Z")) > - 0) { + "+b:B:c:CdDe:E:hi:I:f:F:l:L:m:M:nNo:p:RQs:S:t:T:UV:Wx:Z")) > 0) { switch (opt) { @@ -984,31 +985,31 @@ int main(int argc, char **argv_orig, char **envp) { usage(argv[0], show_help); } - + if (afl->fsrv.qemu_mode && getenv("AFL_USE_QASAN")) { - - u8* preload = getenv("AFL_PRELOAD"); - u8* libqasan = get_libqasan_path(argv_orig[0]); - + + u8 *preload = getenv("AFL_PRELOAD"); + u8 *libqasan = get_libqasan_path(argv_orig[0]); + if (!preload) { - + setenv("AFL_PRELOAD", libqasan, 0); - + } else { - + u8 *result = ck_alloc(strlen(libqasan) + strlen(preload) + 2); strcpy(result, libqasan); strcat(result, " "); strcat(result, preload); - + setenv("AFL_PRELOAD", result, 1); ck_free(result); - + } - + afl->afl_env.afl_preload = (u8 *)getenv("AFL_PRELOAD"); ck_free(libqasan); - + } if (afl->fsrv.mem_limit && afl->shm.cmplog_mode) afl->fsrv.mem_limit += 260; @@ -1270,7 +1271,7 @@ int main(int argc, char **argv_orig, char **envp) { "instead of using AFL_PRELOAD?"); } - + if (afl->afl_env.afl_preload) { if (afl->fsrv.qemu_mode) { @@ -1322,7 +1323,7 @@ int main(int argc, char **argv_orig, char **envp) { FATAL("Use AFL_PRELOAD instead of AFL_LD_PRELOAD"); } - + save_cmdline(afl, argc, argv); fix_up_banner(afl, argv[optind]); diff --git a/src/afl-showmap.c b/src/afl-showmap.c index f3cd5a90..62bf1021 100644 --- a/src/afl-showmap.c +++ b/src/afl-showmap.c @@ -942,30 +942,30 @@ int main(int argc, char **argv_orig, char **envp) { } if (optind == argc || !out_file) { usage(argv[0]); } - + if (fsrv->qemu_mode && getenv("AFL_USE_QASAN")) { - - u8* preload = getenv("AFL_PRELOAD"); - u8* libqasan = get_libqasan_path(argv_orig[0]); - + + u8 *preload = getenv("AFL_PRELOAD"); + u8 *libqasan = get_libqasan_path(argv_orig[0]); + if (!preload) { - + setenv("AFL_PRELOAD", libqasan, 0); - + } else { - + u8 *result = ck_alloc(strlen(libqasan) + strlen(preload) + 2); strcpy(result, libqasan); strcat(result, " "); strcat(result, preload); - + setenv("AFL_PRELOAD", result, 1); ck_free(result); - + } - + ck_free(libqasan); - + } if (in_dir) { diff --git a/src/afl-tmin.c b/src/afl-tmin.c index 9e9e2d63..09b5211d 100644 --- a/src/afl-tmin.c +++ b/src/afl-tmin.c @@ -1074,30 +1074,30 @@ int main(int argc, char **argv_orig, char **envp) { if (optind == argc || !in_file || !output_file) { usage(argv[0]); } check_environment_vars(envp); - + if (fsrv->qemu_mode && 
getenv("AFL_USE_QASAN")) { - - u8* preload = getenv("AFL_PRELOAD"); - u8* libqasan = get_libqasan_path(argv_orig[0]); - + + u8 *preload = getenv("AFL_PRELOAD"); + u8 *libqasan = get_libqasan_path(argv_orig[0]); + if (!preload) { - + setenv("AFL_PRELOAD", libqasan, 0); - + } else { - + u8 *result = ck_alloc(strlen(libqasan) + strlen(preload) + 2); strcpy(result, libqasan); strcat(result, " "); strcat(result, preload); - + setenv("AFL_PRELOAD", result, 1); ck_free(result); - + } - + ck_free(libqasan); - + } /* initialize cmplog_mode */ -- cgit 1.4.1 From e5116c6d55185177413104cad1232ca64e04b844 Mon Sep 17 00:00:00 2001 From: aflpp Date: Sun, 31 Jan 2021 17:29:37 +0100 Subject: fix -Z, remove q->next --- include/afl-fuzz.h | 4 +- include/xxhash.h | 2 +- instrumentation/compare-transform-pass.so.cc | 2 +- src/afl-fuzz-init.c | 102 +++++++++++---------------- src/afl-fuzz-one.c | 3 +- src/afl-fuzz-queue.c | 39 +++++----- src/afl-fuzz-stats.c | 10 +-- src/afl-fuzz.c | 65 +++++------------ utils/afl_untracer/afl-untracer.c | 2 +- utils/libtokencap/libtokencap.so.c | 10 +-- utils/persistent_mode/persistent_demo_new.c | 2 +- utils/persistent_mode/test-instr.c | 2 +- 12 files changed, 100 insertions(+), 143 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index e8a21cb5..9b27606c 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -154,6 +154,7 @@ struct queue_entry { u8 *fname; /* File name for the test case */ u32 len; /* Input length */ + u32 id; /* entry number in queue_buf */ u8 colorized, /* Do not run redqueen stage again */ cal_failed; /* Calibration failed? */ @@ -191,8 +192,7 @@ struct queue_entry { u8 * cmplog_colorinput; /* the result buf of colorization */ struct tainted *taint; /* Taint information from CmpLog */ - struct queue_entry *mother, /* queue entry this based on */ - *next; /* Next element, if any */ + struct queue_entry *mother; /* queue entry this based on */ }; diff --git a/include/xxhash.h b/include/xxhash.h index 006d3f3d..3bd56d13 100644 --- a/include/xxhash.h +++ b/include/xxhash.h @@ -287,7 +287,7 @@ typedef uint32_t XXH32_hash_t; #else #include #if UINT_MAX == 0xFFFFFFFFUL -typedef unsigned int XXH32_hash_t; +typedef unsigned int XXH32_hash_t; #else #if ULONG_MAX == 0xFFFFFFFFUL typedef unsigned long XXH32_hash_t; diff --git a/instrumentation/compare-transform-pass.so.cc b/instrumentation/compare-transform-pass.so.cc index da5cf7e9..932540a7 100644 --- a/instrumentation/compare-transform-pass.so.cc +++ b/instrumentation/compare-transform-pass.so.cc @@ -68,7 +68,7 @@ class CompareTransform : public ModulePass { const char *getPassName() const override { #else - StringRef getPassName() const override { + StringRef getPassName() const override { #endif return "transforms compare functions"; diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 2a7864f9..56dae48c 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -817,12 +817,15 @@ void read_testcases(afl_state_t *afl, u8 *directory) { void perform_dry_run(afl_state_t *afl) { - struct queue_entry *q = afl->queue; - u32 cal_failures = 0; + struct queue_entry *q; + u32 cal_failures = 0, idx; u8 * skip_crashes = afl->afl_env.afl_skip_crashes; u8 * use_mem; - while (q) { + for (idx = 0; idx < afl->queued_paths; idx++) { + + q = afl->queue_buf[idx]; + if (unlikely(q->disabled)) { continue; } u8 res; s32 fd; @@ -1052,20 +1055,22 @@ void perform_dry_run(afl_state_t *afl) { p->disabled = 1; p->perf_score = 0; - while (p && p->next != q) - p = p->next; - 
if (p) - p->next = q->next; - else - afl->queue = q->next; + u32 i = 0; + while (unlikely(afl->queue_buf[i]->disabled)) { + + ++i; + + } + + afl->queue = afl->queue_buf[i]; afl->max_depth = 0; - p = afl->queue; - while (p) { + for (i = 0; i < afl->queued_paths; i++) { - if (p->depth > afl->max_depth) afl->max_depth = p->depth; - p = p->next; + if (!afl->queue_buf[i]->disabled && + afl->queue_buf[i]->depth > afl->max_depth) + afl->max_depth = afl->queue_buf[i]->depth; } @@ -1098,8 +1103,6 @@ void perform_dry_run(afl_state_t *afl) { } - q = q->next; - } if (cal_failures) { @@ -1125,31 +1128,23 @@ void perform_dry_run(afl_state_t *afl) { /* Now we remove all entries from the queue that have a duplicate trace map */ - q = afl->queue; - struct queue_entry *p, *prev = NULL; - int duplicates = 0; - -restart_outer_cull_loop: + u32 duplicates = 0, i; - while (q) { + for (idx = 0; idx < afl->queued_paths; idx++) { - if (q->cal_failed || !q->exec_cksum) { goto next_entry; } + q = afl->queue_buf[idx]; + if (q->disabled || q->cal_failed || !q->exec_cksum) { continue; } - restart_inner_cull_loop: + u32 done = 0; + for (i = idx + 1; i < afl->queued_paths && !done; i++) { - p = q->next; + struct queue_entry *p = afl->queue_buf[i]; + if (p->disabled || p->cal_failed || !p->exec_cksum) { continue; } - while (p) { - - if (!p->cal_failed && p->exec_cksum == q->exec_cksum) { + if (p->exec_cksum == q->exec_cksum) { duplicates = 1; - // We do not remove any of the memory allocated because for - // splicing the data might still be interesting. - // We only decouple them from the linked list. - // This will result in some leaks at exit, but who cares. - // we keep the shorter file if (p->len >= q->len) { @@ -1163,8 +1158,6 @@ restart_outer_cull_loop: p->disabled = 1; p->perf_score = 0; - q->next = p->next; - goto restart_inner_cull_loop; } else { @@ -1178,35 +1171,26 @@ restart_outer_cull_loop: q->disabled = 1; q->perf_score = 0; - if (prev) - prev->next = q = p; - else - afl->queue = q = p; - goto restart_outer_cull_loop; + + done = 1; } } - p = p->next; - } - next_entry: - - prev = q; - q = q->next; - } if (duplicates) { afl->max_depth = 0; - q = afl->queue; - while (q) { - if (q->depth > afl->max_depth) afl->max_depth = q->depth; - q = q->next; + for (idx = 0; idx < afl->queued_paths; idx++) { + + if (!afl->queue_buf[idx]->disabled && + afl->queue_buf[idx]->depth > afl->max_depth) + afl->max_depth = afl->queue_buf[idx]->depth; } @@ -1256,11 +1240,15 @@ static void link_or_copy(u8 *old_path, u8 *new_path) { void pivot_inputs(afl_state_t *afl) { struct queue_entry *q = afl->queue; - u32 id = 0; + u32 id = 0, i; ACTF("Creating hard links for all input files..."); - while (q) { + for (i = 0; i < afl->queued_paths; i++) { + + q = afl->queue_buf[i]; + + if (unlikely(q->disabled)) { continue; } u8 *nfn, *rsl = strrchr(q->fname, '/'); u32 orig_id; @@ -1288,19 +1276,14 @@ void pivot_inputs(afl_state_t *afl) { afl->resuming_fuzz = 1; nfn = alloc_printf("%s/queue/%s", afl->out_dir, rsl); - /* Since we're at it, let's also try to find parent and figure out the + /* Since we're at it, let's also get the parent and figure out the appropriate depth for this entry. 
*/ src_str = strchr(rsl + 3, ':'); if (src_str && sscanf(src_str + 1, "%06u", &src_id) == 1) { - struct queue_entry *s = afl->queue; - while (src_id-- && s) { - - s = s->next; - - } + struct queue_entry *s = afl->queue_buf[src_id]; if (s) { q->depth = s->depth + 1; } @@ -1348,7 +1331,6 @@ void pivot_inputs(afl_state_t *afl) { if (q->passed_det) { mark_as_det_done(afl, q); } - q = q->next; ++id; } diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 0cf889a8..18291fb7 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -544,7 +544,8 @@ u8 fuzz_one_original(afl_state_t *afl) { if (likely(!afl->old_seed_selection)) orig_perf = perf_score = afl->queue_cur->perf_score; else - orig_perf = perf_score = calculate_score(afl, afl->queue_cur); + afl->queue_cur->perf_score = orig_perf = perf_score = + calculate_score(afl, afl->queue_cur); if (unlikely(perf_score <= 0)) { goto abandon_entry; } diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 4442b400..ad3e3b8e 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -143,7 +143,7 @@ void create_alias_table(afl_state_t *afl) { struct queue_entry *q = afl->queue_buf[i]; - if (!q->disabled) { q->perf_score = calculate_score(afl, q); } + if (likely(!q->disabled)) { q->perf_score = calculate_score(afl, q); } sum += q->perf_score; @@ -444,7 +444,6 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { if (afl->queue_top) { - afl->queue_top->next = q; afl->queue_top = q; } else { @@ -465,6 +464,7 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { AFL_BUF_PARAM(queue), afl->queued_paths * sizeof(struct queue_entry *)); if (unlikely(!queue_buf)) { PFATAL("alloc"); } queue_buf[afl->queued_paths - 1] = q; + q->id = afl->queued_paths - 1; afl->last_path_time = get_cur_time(); @@ -641,10 +641,9 @@ void cull_queue(afl_state_t *afl) { if (likely(!afl->score_changed || afl->non_instrumented_mode)) { return; } - struct queue_entry *q; - u32 len = (afl->fsrv.map_size >> 3); - u32 i; - u8 * temp_v = afl->map_tmp_buf; + u32 len = (afl->fsrv.map_size >> 3); + u32 i; + u8 *temp_v = afl->map_tmp_buf; afl->score_changed = 0; @@ -653,12 +652,9 @@ void cull_queue(afl_state_t *afl) { afl->queued_favored = 0; afl->pending_favored = 0; - q = afl->queue; - - while (q) { + for (i = 0; i < afl->queued_paths; i++) { - q->favored = 0; - q = q->next; + afl->queue_buf[i]->favored = 0; } @@ -697,12 +693,13 @@ void cull_queue(afl_state_t *afl) { } - q = afl->queue; + for (i = 0; i < afl->queued_paths; i++) { + + if (likely(!afl->queue_buf[i]->disabled)) { - while (q) { + mark_as_redundant(afl, afl->queue_buf[i], !afl->queue_buf[i]->favored); - mark_as_redundant(afl, q, !q->favored); - q = q->next; + } } @@ -852,13 +849,15 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) { // Don't modify perf_score for unfuzzed seeds if (q->fuzz_level == 0) break; - struct queue_entry *queue_it = afl->queue; - while (queue_it) { + u32 i; + for (i = 0; i < afl->queued_paths; i++) { - fuzz_mu += log2(afl->n_fuzz[q->n_fuzz_entry]); - n_paths++; + if (likely(!afl->queue_buf[i]->disabled)) { - queue_it = queue_it->next; + fuzz_mu += log2(afl->n_fuzz[afl->queue_buf[i]->n_fuzz_entry]); + n_paths++; + + } } diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index 82da8176..7e99bf8f 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -1014,8 +1014,8 @@ void show_stats(afl_state_t *afl) { void show_init_stats(afl_state_t *afl) { - struct queue_entry *q = afl->queue; - u32 min_bits = 0, max_bits = 0, 
max_len = 0, count = 0; + struct queue_entry *q; + u32 min_bits = 0, max_bits = 0, max_len = 0, count = 0, i; u64 min_us = 0, max_us = 0; u64 avg_us = 0; @@ -1028,7 +1028,10 @@ void show_init_stats(afl_state_t *afl) { } - while (q) { + for (i = 0; i < afl->queued_paths; i++) { + + q = afl->queue_buf[i]; + if (unlikely(q->disabled)) { continue; } if (!min_us || q->exec_us < min_us) { min_us = q->exec_us; } if (q->exec_us > max_us) { max_us = q->exec_us; } @@ -1039,7 +1042,6 @@ void show_init_stats(afl_state_t *afl) { if (q->len > max_len) { max_len = q->len; } ++count; - q = q->next; } diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 9d9b0434..40d42c11 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -1558,45 +1558,6 @@ int main(int argc, char **argv_orig, char **envp) { perform_dry_run(afl); - /* - if (!user_set_cache && afl->q_testcase_max_cache_size) { - - / * The user defined not a fixed number of entries for the cache. - Hence we autodetect a good value. After the dry run inputs are - trimmed and we know the average and max size of the input seeds. - We use this information to set a fitting size to max entries - based on the cache size. * / - - struct queue_entry *q = afl->queue; - u64 size = 0, count = 0, avg = 0, max = 0; - - while (q) { - - ++count; - size += q->len; - if (max < q->len) { max = q->len; } - q = q->next; - - } - - if (count) { - - avg = size / count; - avg = ((avg + max) / 2) + 1; - - } - - if (avg < 10240) { avg = 10240; } - - afl->q_testcase_max_cache_entries = afl->q_testcase_max_cache_size / avg; - - if (afl->q_testcase_max_cache_entries > 32768) - afl->q_testcase_max_cache_entries = 32768; - - } - - */ - if (afl->q_testcase_max_cache_entries) { afl->q_testcase_cache = @@ -1668,7 +1629,10 @@ int main(int argc, char **argv_orig, char **envp) { if (unlikely(afl->old_seed_selection)) { afl->current_entry = 0; - afl->queue_cur = afl->queue; + while (unlikely(afl->queue_buf[afl->current_entry]->disabled)) { + ++afl->current_entry; + } + afl->queue_cur = afl->queue_buf[afl->current_entry]; if (unlikely(seek_to)) { @@ -1800,12 +1764,14 @@ int main(int argc, char **argv_orig, char **envp) { } - struct queue_entry *q = afl->queue; // we must recalculate the scores of all queue entries - while (q) { + for (i = 0; i < (s32)afl->queued_paths; i++) { + + if (likely(!afl->queue_buf[i]->disabled)) { - update_bitmap_score(afl, q); - q = q->next; + update_bitmap_score(afl, afl->queue_buf[i]); + + } } @@ -1847,8 +1813,15 @@ int main(int argc, char **argv_orig, char **envp) { if (unlikely(afl->old_seed_selection)) { - afl->queue_cur = afl->queue_cur->next; - ++afl->current_entry; + while (++afl->current_entry < afl->queued_paths && + afl->queue_buf[afl->current_entry]->disabled) + ; + if (unlikely(afl->current_entry >= afl->queued_paths || + afl->queue_buf[afl->current_entry] == NULL || + afl->queue_buf[afl->current_entry]->disabled)) + afl->queue_cur = NULL; + else + afl->queue_cur = afl->queue_buf[afl->current_entry]; } diff --git a/utils/afl_untracer/afl-untracer.c b/utils/afl_untracer/afl-untracer.c index f3894a06..d2bad0b9 100644 --- a/utils/afl_untracer/afl-untracer.c +++ b/utils/afl_untracer/afl-untracer.c @@ -284,7 +284,7 @@ library_list_t *find_library(char *name) { // this seems to work for clang too. 
nice :) requires gcc 4.4+ #pragma GCC push_options #pragma GCC optimize("O0") -void breakpoint(void) { +void breakpoint(void) { if (debug) fprintf(stderr, "Breakpoint function \"breakpoint\" reached.\n"); diff --git a/utils/libtokencap/libtokencap.so.c b/utils/libtokencap/libtokencap.so.c index 3629e804..26033b46 100644 --- a/utils/libtokencap/libtokencap.so.c +++ b/utils/libtokencap/libtokencap.so.c @@ -161,8 +161,8 @@ static void __tokencap_load_mappings(void) { #elif defined __FreeBSD__ || defined __OpenBSD__ || defined __NetBSD__ - #if defined __FreeBSD__ - int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, __tokencap_pid}; + #if defined __FreeBSD__ + int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, __tokencap_pid}; #elif defined __OpenBSD__ int mib[] = {CTL_KERN, KERN_PROC_VMMAP, __tokencap_pid}; #elif defined __NetBSD__ @@ -177,7 +177,7 @@ static void __tokencap_load_mappings(void) { #if defined __FreeBSD__ || defined __NetBSD__ len = len * 4 / 3; - #elif defined __OpenBSD__ + #elif defined __OpenBSD__ len -= len % sizeof(struct kinfo_vmentry); #endif @@ -202,8 +202,8 @@ static void __tokencap_load_mappings(void) { #if defined __FreeBSD__ || defined __NetBSD__ - #if defined __FreeBSD__ - size_t size = region->kve_structsize; + #if defined __FreeBSD__ + size_t size = region->kve_structsize; if (size == 0) break; #elif defined __NetBSD__ diff --git a/utils/persistent_mode/persistent_demo_new.c b/utils/persistent_mode/persistent_demo_new.c index 7e694696..ca616236 100644 --- a/utils/persistent_mode/persistent_demo_new.c +++ b/utils/persistent_mode/persistent_demo_new.c @@ -51,7 +51,7 @@ __AFL_FUZZ_INIT(); /* To ensure checks are not optimized out it is recommended to disable code optimization for the fuzzer harness main() */ #pragma clang optimize off -#pragma GCC optimize("O0") +#pragma GCC optimize("O0") int main(int argc, char **argv) { diff --git a/utils/persistent_mode/test-instr.c b/utils/persistent_mode/test-instr.c index 6da511de..2c6b6d77 100644 --- a/utils/persistent_mode/test-instr.c +++ b/utils/persistent_mode/test-instr.c @@ -24,7 +24,7 @@ __AFL_FUZZ_INIT(); /* To ensure checks are not optimized out it is recommended to disable code optimization for the fuzzer harness main() */ #pragma clang optimize off -#pragma GCC optimize("O0") +#pragma GCC optimize("O0") int main(int argc, char **argv) { -- cgit 1.4.1 From d808a8401e1acbcde3352d86e9e2da3f7bac97e8 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Mon, 1 Feb 2021 12:16:55 +0100 Subject: import cmplog opts --- include/afl-fuzz.h | 1 + src/afl-fuzz-one.c | 4 ++-- src/afl-fuzz-redqueen.c | 31 +++++++++++++++++++++++++++++-- 3 files changed, 32 insertions(+), 4 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 9b27606c..c3a8c2ee 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -647,6 +647,7 @@ typedef struct afl_state { u32 cmplog_prev_timed_out; u32 cmplog_max_filesize; u32 cmplog_lvl; + u32 colorize_success; struct afl_pass_stat *pass_stats; struct cmp_map * orig_cmp_map; diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 18291fb7..c73e394a 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -562,7 +562,7 @@ u8 fuzz_one_original(afl_state_t *afl) { if (afl->cmplog_lvl == 3 || (afl->cmplog_lvl == 2 && afl->queue_cur->tc_ref) || !(afl->fsrv.total_execs % afl->queued_paths) || - get_cur_time() - afl->last_path_time > 15000) { + get_cur_time() - afl->last_path_time > 300000) { if (input_to_state_stage(afl, in_buf, out_buf, len)) { @@ -2990,7 
+2990,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { if (afl->cmplog_lvl == 3 || (afl->cmplog_lvl == 2 && afl->queue_cur->tc_ref) || !(afl->fsrv.total_execs % afl->queued_paths) || - get_cur_time() - afl->last_path_time > 15000) { + get_cur_time() - afl->last_path_time > 300000) { if (input_to_state_stage(afl, in_buf, out_buf, len)) { diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index 74c9db38..997b7528 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -379,8 +379,6 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, } - *taints = taint; - /* temporary: clean ranges */ while (ranges) { @@ -423,6 +421,35 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, #endif + if (taint) { + + if (len / positions == 1 && positions > 16384 && + afl->active_paths / afl->colorize_success > 20) { + +#ifdef _DEBUG + fprintf(stderr, "Colorization unsatisfactory\n"); +#endif + + *taints = NULL; + + struct tainted *t; + while (taint) { + + t = taint->next; + ck_free(taint); + taint = t; + + } + + } else { + + *taints = taint; + ++afl->colorize_success; + + } + + } + afl->stage_finds[STAGE_COLORIZATION] += new_hit_cnt - orig_hit_cnt; afl->stage_cycles[STAGE_COLORIZATION] += afl->stage_cur; ck_free(backup); -- cgit 1.4.1
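The queue rework in the "fix -Z, remove q->next" commit above replaces every linked-list walk with an index scan over the flat queue_buf array that skips disabled entries. A minimal illustrative sketch of that iteration pattern follows; it is not part of the patches above, the helper name count_active_entries is invented for illustration, and it assumes the afl_state_t / struct queue_entry fields (queue_buf, queued_paths, disabled) exactly as they appear in the include/afl-fuzz.h and src/afl-fuzz-init.c hunks. For resumed sessions, exporting the new AFL_CMPLOG_ONLY_NEW=1 variable before starting afl-fuzz additionally skips the expensive cmplog stage for seeds loaded at startup, per the docs/env_variables.md hunk.

  /* Illustrative sketch only (not from the patches): walk the flat queue_buf
     array and skip entries flagged as disabled, instead of following the
     removed ->next pointers. Field names are assumed to match the
     include/afl-fuzz.h hunk above; likely()/unlikely() come from the same
     header, as in the patched code. */

  #include "afl-fuzz.h"

  static u32 count_active_entries(afl_state_t *afl) {

    u32 i, active = 0;

    for (i = 0; i < afl->queued_paths; i++) {

      struct queue_entry *q = afl->queue_buf[i];

      /* disabled entries stay in the array; they are skipped, not unlinked */
      if (unlikely(q->disabled)) { continue; }

      ++active;

    }

    return active;

  }

The same scan-and-skip shape appears in the hunks above for perform_dry_run() and pivot_inputs() in src/afl-fuzz-init.c, cull_queue() and calculate_score() in src/afl-fuzz-queue.c, and show_init_stats() in src/afl-fuzz-stats.c.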