author    | van Hauser <vh@thc.org> | 2021-01-15 16:56:40 +0100
committer | van Hauser <vh@thc.org> | 2021-01-15 16:56:40 +0100
commit    | a0e884cf8bffe1a0394d106375f6a23edd2b60e6 (patch)
tree      | ff1b4a01977ef9c133ec8dd8cbdd1b9db5a0367f /src
parent    | fba8790e322ab933df9851f0cf6ce9ca941f3814 (diff)
download  | afl++-a0e884cf8bffe1a0394d106375f6a23edd2b60e6.tar.gz
merge cmplog
Diffstat (limited to 'src')
-rw-r--r-- | src/afl-cc.c            |   49 |
-rw-r--r-- | src/afl-fuzz-init.c     |   38 |
-rw-r--r-- | src/afl-fuzz-one.c      |   81 |
-rw-r--r-- | src/afl-fuzz-queue.c    |    1 |
-rw-r--r-- | src/afl-fuzz-redqueen.c | 1437 |
-rw-r--r-- | src/afl-fuzz-state.c    |    1 |
-rw-r--r-- | src/afl-fuzz.c          |   60 |
7 files changed, 1399 insertions, 268 deletions
diff --git a/src/afl-cc.c b/src/afl-cc.c index 8fb42718..02c9c7c5 100644 --- a/src/afl-cc.c +++ b/src/afl-cc.c @@ -529,9 +529,9 @@ static void edit_params(u32 argc, char **argv, char **envp) { cc_params[cc_par_cnt++] = alloc_printf( "-Wl,-mllvm=-load=%s/cmplog-routines-pass.so", obj_path); cc_params[cc_par_cnt++] = alloc_printf( - "-Wl,-mllvm=-load=%s/split-switches-pass.so", obj_path); - cc_params[cc_par_cnt++] = alloc_printf( "-Wl,-mllvm=-load=%s/cmplog-instructions-pass.so", obj_path); + cc_params[cc_par_cnt++] = alloc_printf( + "-Wl,-mllvm=-load=%s/split-switches-pass.so", obj_path); } else { @@ -541,18 +541,18 @@ static void edit_params(u32 argc, char **argv, char **envp) { cc_params[cc_par_cnt++] = alloc_printf("%s/cmplog-routines-pass.so", obj_path); - // reuse split switches from laf cc_params[cc_par_cnt++] = "-Xclang"; cc_params[cc_par_cnt++] = "-load"; cc_params[cc_par_cnt++] = "-Xclang"; cc_params[cc_par_cnt++] = - alloc_printf("%s/split-switches-pass.so", obj_path); + alloc_printf("%s/cmplog-instructions-pass.so", obj_path); + // reuse split switches from laf cc_params[cc_par_cnt++] = "-Xclang"; cc_params[cc_par_cnt++] = "-load"; cc_params[cc_par_cnt++] = "-Xclang"; cc_params[cc_par_cnt++] = - alloc_printf("%s/cmplog-instructions-pass.so", obj_path); + alloc_printf("%s/split-switches-pass.so", obj_path); } @@ -792,10 +792,8 @@ static void edit_params(u32 argc, char **argv, char **envp) { } -#if defined(USEMMAP) - #if !defined(__HAIKU__) +#if defined(USEMMAP) && !defined(__HAIKU__) cc_params[cc_par_cnt++] = "-lrt"; - #endif #endif cc_params[cc_par_cnt++] = "-D__AFL_HAVE_MANUAL_CONTROL=1"; @@ -858,6 +856,7 @@ static void edit_params(u32 argc, char **argv, char **envp) { cc_params[cc_par_cnt++] = "-D__AFL_COVERAGE_DISCARD()=__afl_coverage_discard()"; cc_params[cc_par_cnt++] = "-D__AFL_COVERAGE_ABORT()=__afl_coverage_abort()"; + cc_params[cc_par_cnt++] = "-D__AFL_FUZZ_TESTCASE_BUF=(__afl_fuzz_ptr ? __afl_fuzz_ptr : " "__afl_fuzz_alt_ptr)"; @@ -967,10 +966,8 @@ static void edit_params(u32 argc, char **argv, char **envp) { alloc_printf("-Wl,--dynamic-list=%s/dynamic_list.txt", obj_path); #endif - #if defined(USEMMAP) - #if !defined(__HAIKU__) + #if defined(USEMMAP) && !defined(__HAIKU__) cc_params[cc_par_cnt++] = "-lrt"; - #endif #endif } @@ -1278,7 +1275,6 @@ int main(int argc, char **argv, char **envp) { } - // this is a hidden option if (strncasecmp(ptr2, "llvmnative", strlen("llvmnative")) == 0 || strncasecmp(ptr2, "llvm-native", strlen("llvm-native")) == 0) { @@ -1349,29 +1345,28 @@ int main(int argc, char **argv, char **envp) { if (strncasecmp(ptr2, "ngram", strlen("ngram")) == 0) { - ptr2 += strlen("ngram"); - while (*ptr2 && (*ptr2 < '0' || *ptr2 > '9')) - ptr2++; + u8 *ptr3 = ptr2 + strlen("ngram"); + while (*ptr3 && (*ptr3 < '0' || *ptr3 > '9')) + ptr3++; - if (!*ptr2) { + if (!*ptr3) { - if ((ptr2 = getenv("AFL_LLVM_NGRAM_SIZE")) == NULL) + if ((ptr3 = getenv("AFL_LLVM_NGRAM_SIZE")) == NULL) FATAL( "you must set the NGRAM size with (e.g. 
for value 2) " "AFL_LLVM_INSTRUMENT=ngram-2"); } - ngram_size = atoi(ptr2); + ngram_size = atoi(ptr3); if (ngram_size < 2 || ngram_size > NGRAM_SIZE_MAX) FATAL( "NGRAM instrumentation option must be between 2 and " - "NGRAM_SIZE_MAX " - "(%u)", + "NGRAM_SIZE_MAX (%u)", NGRAM_SIZE_MAX); instrument_opt_mode |= (INSTRUMENT_OPT_NGRAM); - ptr2 = alloc_printf("%u", ngram_size); - setenv("AFL_LLVM_NGRAM_SIZE", ptr2, 1); + u8 *ptr4 = alloc_printf("%u", ngram_size); + setenv("AFL_LLVM_NGRAM_SIZE", ptr4, 1); } @@ -1507,6 +1502,7 @@ int main(int argc, char **argv, char **envp) { "((instrumentation/README.ngram.md)\n" " INSTRIM: Dominator tree (for LLVM <= 6.0) " "(instrumentation/README.instrim.md)\n\n"); + #undef NATIVE_MSG SAYF( @@ -1641,16 +1637,15 @@ int main(int argc, char **argv, char **envp) { if (have_lto) SAYF("afl-cc LTO with ld=%s %s\n", AFL_REAL_LD, AFL_CLANG_FLTO); if (have_llvm) - SAYF("afl-cc LLVM version %d using binary path \"%s\".\n", LLVM_MAJOR, + SAYF("afl-cc LLVM version %d using the binary path \"%s\".\n", LLVM_MAJOR, LLVM_BINDIR); #endif -#if defined(USEMMAP) +#ifdef USEMMAP #if !defined(__HAIKU__) - cc_params[cc_par_cnt++] = "-lrt"; - SAYF("Compiled with shm_open support (adds -lrt when linking).\n"); - #else SAYF("Compiled with shm_open support.\n"); + #else + SAYF("Compiled with shm_open support (adds -lrt when linking).\n"); #endif #else SAYF("Compiled with shmat support.\n"); diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index dbffa4f9..cbff6d7e 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -729,6 +729,30 @@ void read_testcases(afl_state_t *afl, u8 *directory) { add_to_queue(afl, fn2, st.st_size >= MAX_FILE ? MAX_FILE : st.st_size, passed_det); + if (unlikely(afl->shm.cmplog_mode)) { + + if (afl->cmplog_lvl == 1) { + + if (!afl->cmplog_max_filesize || + afl->cmplog_max_filesize < st.st_size) { + + afl->cmplog_max_filesize = st.st_size; + + } + + } else if (afl->cmplog_lvl == 2) { + + if (!afl->cmplog_max_filesize || + afl->cmplog_max_filesize > st.st_size) { + + afl->cmplog_max_filesize = st.st_size; + + } + + } + + } + if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE)) { u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST); @@ -756,6 +780,20 @@ void read_testcases(afl_state_t *afl, u8 *directory) { } + if (unlikely(afl->shm.cmplog_mode)) { + + if (afl->cmplog_max_filesize < 1024) { + + afl->cmplog_max_filesize = 1024; + + } else { + + afl->cmplog_max_filesize = (((afl->cmplog_max_filesize >> 10) + 1) << 10); + + } + + } + afl->last_path_time = 0; afl->queued_at_start = afl->queued_paths; diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index f9509e86..596bae22 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -165,7 +165,7 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { /* See if one-byte adjustments to any byte could produce this result. */ - for (i = 0; i < blen; ++i) { + for (i = 0; (u8)i < blen; ++i) { u8 a = old_val >> (8 * i), b = new_val >> (8 * i); @@ -193,7 +193,7 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { diffs = 0; - for (i = 0; i < blen / 2; ++i) { + for (i = 0; (u8)i < blen / 2; ++i) { u16 a = old_val >> (16 * i), b = new_val >> (16 * i); @@ -290,7 +290,7 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) { /* See if two-byte insertions over old_val could give us new_val. 
*/ - for (i = 0; (s32)i < blen - 1; ++i) { + for (i = 0; (u8)i < blen - 1; ++i) { for (j = 0; j < sizeof(interesting_16) / 2; ++j) { @@ -545,14 +545,31 @@ u8 fuzz_one_original(afl_state_t *afl) { else orig_perf = perf_score = calculate_score(afl, afl->queue_cur); - if (unlikely(perf_score == 0)) { goto abandon_entry; } + if (unlikely(perf_score <= 0)) { goto abandon_entry; } - if (unlikely(afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized)) { + if (unlikely(afl->shm.cmplog_mode && + afl->queue_cur->colorized < afl->cmplog_lvl && + (u32)len <= afl->cmplog_max_filesize)) { - if (input_to_state_stage(afl, in_buf, out_buf, len, - afl->queue_cur->exec_cksum)) { + if (unlikely(len < 4)) { - goto abandon_entry; + afl->queue_cur->colorized = 0xff; + + } else { + + if (afl->cmplog_lvl == 3 || + (afl->cmplog_lvl == 2 && afl->queue_cur->tc_ref) || + !(afl->fsrv.total_execs % afl->queued_paths) || + get_cur_time() - afl->last_path_time > 15000) { + + if (input_to_state_stage(afl, in_buf, out_buf, len, + afl->queue_cur->exec_cksum)) { + + goto abandon_entry; + + } + + } } @@ -2796,7 +2813,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { } - s32 len, temp_len; + u32 len, temp_len; u32 i; u32 j; u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; @@ -2952,14 +2969,31 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { else orig_perf = perf_score = calculate_score(afl, afl->queue_cur); - if (unlikely(perf_score == 0)) { goto abandon_entry; } + if (unlikely(perf_score <= 0)) { goto abandon_entry; } - if (unlikely(afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized)) { + if (unlikely(afl->shm.cmplog_mode && + afl->queue_cur->colorized < afl->cmplog_lvl && + (u32)len <= afl->cmplog_max_filesize)) { - if (input_to_state_stage(afl, in_buf, out_buf, len, - afl->queue_cur->exec_cksum)) { + if (unlikely(len < 4)) { - goto abandon_entry; + afl->queue_cur->colorized = 0xff; + + } else { + + if (afl->cmplog_lvl == 3 || + (afl->cmplog_lvl == 2 && afl->queue_cur->tc_ref) || + !(afl->fsrv.total_execs % afl->queued_paths) || + get_cur_time() - afl->last_path_time > 15000) { + + if (input_to_state_stage(afl, in_buf, out_buf, len, + afl->queue_cur->exec_cksum)) { + + goto abandon_entry; + + } + + } } @@ -3315,7 +3349,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { orig_hit_cnt = new_hit_cnt; - for (i = 0; (s32)i < len - 1; ++i) { + for (i = 0; i < len - 1; ++i) { /* Let's consult the effector map... */ @@ -3357,7 +3391,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { orig_hit_cnt = new_hit_cnt; - for (i = 0; (s32)i < len - 3; ++i) { + for (i = 0; i < len - 3; ++i) { /* Let's consult the effector map... 
*/ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && @@ -3489,7 +3523,7 @@ skip_bitflip: orig_hit_cnt = new_hit_cnt; - for (i = 0; (s32)i < len - 1; ++i) { + for (i = 0; i < len - 1; ++i) { u16 orig = *(u16 *)(out_buf + i); @@ -3615,7 +3649,7 @@ skip_bitflip: orig_hit_cnt = new_hit_cnt; - for (i = 0; (s32)i < len - 3; ++i) { + for (i = 0; i < len - 3; ++i) { u32 orig = *(u32 *)(out_buf + i); @@ -3805,7 +3839,7 @@ skip_arith: orig_hit_cnt = new_hit_cnt; - for (i = 0; (s32)i < len - 1; ++i) { + for (i = 0; i < len - 1; ++i) { u16 orig = *(u16 *)(out_buf + i); @@ -3891,7 +3925,7 @@ skip_arith: orig_hit_cnt = new_hit_cnt; - for (i = 0; (s32)i < len - 3; ++i) { + for (i = 0; i < len - 3; ++i) { u32 orig = *(u32 *)(out_buf + i); @@ -4120,7 +4154,7 @@ skip_user_extras: /* See the comment in the earlier code; extras are sorted by size. */ - if ((s32)(afl->a_extras[j].len) > (s32)(len - i) || + if ((afl->a_extras[j].len) > (len - i) || !memcmp(afl->a_extras[j].data, out_buf + i, afl->a_extras[j].len) || !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, afl->a_extras[j].len))) { @@ -4837,7 +4871,7 @@ pacemaker_fuzzing: u32 copy_from, copy_to, copy_len; copy_len = choose_block_len(afl, new_len - 1); - if ((s32)copy_len > temp_len) copy_len = temp_len; + if (copy_len > temp_len) copy_len = temp_len; copy_from = rand_below(afl, new_len - copy_len + 1); copy_to = rand_below(afl, temp_len - copy_len + 1); @@ -5033,8 +5067,7 @@ pacemaker_fuzzing: the last differing byte. Bail out if the difference is just a single byte or so. */ - locate_diffs(in_buf, new_buf, MIN(len, (s32)target->len), &f_diff, - &l_diff); + locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 66938635..aec57a6e 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -433,6 +433,7 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { q->passed_det = passed_det; q->trace_mini = NULL; q->testcase_buf = NULL; + q->mother = afl->queue_cur; #ifdef INTROSPECTION q->bitsmap_size = afl->bitsmap_size; diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index 28585afe..955a9232 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -28,6 +28,8 @@ #include "afl-fuzz.h" #include "cmplog.h" +//#define _DEBUG + ///// Colorization struct range { @@ -35,6 +37,8 @@ struct range { u32 start; u32 end; struct range *next; + struct range *prev; + u8 ok; }; @@ -44,6 +48,8 @@ static struct range *add_range(struct range *ranges, u32 start, u32 end) { r->start = start; r->end = end; r->next = ranges; + r->ok = 0; + if (likely(ranges)) ranges->prev = r; return r; } @@ -51,45 +57,61 @@ static struct range *add_range(struct range *ranges, u32 start, u32 end) { static struct range *pop_biggest_range(struct range **ranges) { struct range *r = *ranges; - struct range *prev = NULL; struct range *rmax = NULL; - struct range *prev_rmax = NULL; u32 max_size = 0; while (r) { - u32 s = r->end - r->start; - if (s >= max_size) { + if (!r->ok) { + + u32 s = 1 + r->end - r->start; + + if (s >= max_size) { + + max_size = s; + rmax = r; - max_size = s; - prev_rmax = prev; - rmax = r; + } } - prev = r; r = r->next; } - if (rmax) { + return rmax; - if (prev_rmax) { +} - prev_rmax->next = rmax->next; +#ifdef _DEBUG +// static int logging = 0; +static void dump(char *txt, u8 *buf, u32 len) { - } else { + u32 i; + fprintf(stderr, "DUMP %s %llx ", txt, hash64(buf, len, 0)); + for (i = 0; i 
< len; i++) + fprintf(stderr, "%02x", buf[i]); + fprintf(stderr, "\n"); - *ranges = rmax->next; +} - } +static void dump_file(char *path, char *name, u32 counter, u8 *buf, u32 len) { - } + char fn[4096]; + if (!path) path = "."; + snprintf(fn, sizeof(fn), "%s/%s%d", path, name, counter); + int fd = open(fn, O_RDWR | O_CREAT | O_TRUNC, 0644); + if (fd >= 0) { - return rmax; + write(fd, buf, len); + close(fd); + + } } +#endif + static u8 get_exec_checksum(afl_state_t *afl, u8 *buf, u32 len, u64 *cksum) { if (unlikely(common_fuzz_stuff(afl, buf, len))) { return 1; } @@ -99,107 +121,270 @@ static u8 get_exec_checksum(afl_state_t *afl, u8 *buf, u32 len, u64 *cksum) { } -static void xor_replace(u8 *buf, u32 len) { +/* replace everything with different values but stay in the same type */ +static void type_replace(afl_state_t *afl, u8 *buf, u32 len) { u32 i; + u8 c; for (i = 0; i < len; ++i) { - buf[i] ^= 0xff; + // wont help for UTF or non-latin charsets + do { + + switch (buf[i]) { + + case 'A' ... 'F': + c = 'A' + rand_below(afl, 1 + 'F' - 'A'); + break; + case 'a' ... 'f': + c = 'a' + rand_below(afl, 1 + 'f' - 'a'); + break; + case '0': + c = '1'; + break; + case '1': + c = '0'; + break; + case '2' ... '9': + c = '2' + rand_below(afl, 1 + '9' - '2'); + break; + case 'G' ... 'Z': + c = 'G' + rand_below(afl, 1 + 'Z' - 'G'); + break; + case 'g' ... 'z': + c = 'g' + rand_below(afl, 1 + 'z' - 'g'); + break; + case '!' ... '*': + c = '!' + rand_below(afl, 1 + '*' - '!'); + break; + case ',' ... '.': + c = ',' + rand_below(afl, 1 + '.' - ','); + break; + case ':' ... '@': + c = ':' + rand_below(afl, 1 + '@' - ':'); + break; + case '[' ... '`': + c = '[' + rand_below(afl, 1 + '`' - '['); + break; + case '{' ... '~': + c = '{' + rand_below(afl, 1 + '~' - '{'); + break; + case '+': + c = '/'; + break; + case '/': + c = '+'; + break; + case ' ': + c = '\t'; + break; + case '\t': + c = ' '; + break; + /* + case '\r': + case '\n': + // nothing ... 
+ break; + */ + default: + c = (buf[i] ^ 0xff); + + } + + } while (c == buf[i]); + + buf[i] = c; } } -static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum) { +static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum, + struct tainted **taints) { - struct range *ranges = add_range(NULL, 0, len); - u8 * backup = ck_alloc_nozero(len); + struct range * ranges = add_range(NULL, 0, len - 1), *rng; + struct tainted *taint = NULL; + u8 * backup = ck_alloc_nozero(len); + u8 * changed = ck_alloc_nozero(len); u64 orig_hit_cnt, new_hit_cnt; orig_hit_cnt = afl->queued_paths + afl->unique_crashes; afl->stage_name = "colorization"; afl->stage_short = "colorization"; - afl->stage_max = 1000; + afl->stage_max = (len << 1); - struct range *rng = NULL; afl->stage_cur = 0; + memcpy(backup, buf, len); + memcpy(changed, buf, len); + type_replace(afl, changed, len); + while ((rng = pop_biggest_range(&ranges)) != NULL && afl->stage_cur < afl->stage_max) { - u32 s = rng->end - rng->start; + u32 s = 1 + rng->end - rng->start; + + memcpy(buf + rng->start, changed + rng->start, s); - if (s != 0) { + u64 cksum; + u64 start_us = get_cur_time_us(); + if (unlikely(get_exec_checksum(afl, buf, len, &cksum))) { - /* Range not empty */ + goto checksum_fail; - memcpy(backup, buf + rng->start, s); - xor_replace(buf + rng->start, s); + } + + u64 stop_us = get_cur_time_us(); + + /* Discard if the mutations change the path or if it is too decremental + in speed - how could the same path have a much different speed + though ...*/ + if (cksum != exec_cksum || + (unlikely(stop_us - start_us > 3 * afl->queue_cur->exec_us) && + likely(!afl->fixed_seed))) { + + memcpy(buf + rng->start, backup + rng->start, s); - u64 cksum; - u64 start_us = get_cur_time_us(); - if (unlikely(get_exec_checksum(afl, buf, len, &cksum))) { + if (s > 1) { // to not add 0 size ranges - goto checksum_fail; + ranges = add_range(ranges, rng->start, rng->start - 1 + s / 2); + ranges = add_range(ranges, rng->start + s / 2, rng->end); } - u64 stop_us = get_cur_time_us(); + if (ranges == rng) { + + ranges = rng->next; + if (ranges) { ranges->prev = NULL; } + + } else if (rng->next) { + + rng->prev->next = rng->next; + rng->next->prev = rng->prev; - /* Discard if the mutations change the paths or if it is too decremental - in speed */ - if (cksum != exec_cksum || - ((stop_us - start_us > 2 * afl->queue_cur->exec_us) && - likely(!afl->fixed_seed))) { + } else { - ranges = add_range(ranges, rng->start, rng->start + s / 2); - ranges = add_range(ranges, rng->start + s / 2 + 1, rng->end); - memcpy(buf + rng->start, backup, s); + if (rng->prev) { rng->prev->next = NULL; } } + free(rng); + + } else { + + rng->ok = 1; + } - ck_free(rng); - rng = NULL; ++afl->stage_cur; } - if (afl->stage_cur < afl->stage_max) { afl->queue_cur->fully_colorized = 1; } + rng = ranges; + while (rng) { - new_hit_cnt = afl->queued_paths + afl->unique_crashes; - afl->stage_finds[STAGE_COLORIZATION] += new_hit_cnt - orig_hit_cnt; - afl->stage_cycles[STAGE_COLORIZATION] += afl->stage_cur; - ck_free(backup); + rng = rng->next; - ck_free(rng); - rng = NULL; + } - while (ranges) { + u32 i = 1; + u32 positions = 0; + while (i) { + restart: + i = 0; + struct range *r = NULL; + u32 pos = (u32)-1; rng = ranges; - ranges = rng->next; - ck_free(rng); - rng = NULL; - } + while (rng) { - return 0; + if (rng->ok == 1 && rng->start < pos) { -checksum_fail: - if (rng) { ck_free(rng); } - ck_free(backup); + if (taint && taint->pos + taint->len == rng->start) { + + taint->len += 
(1 + rng->end - rng->start); + positions += (1 + rng->end - rng->start); + rng->ok = 2; + goto restart; + + } else { + + r = rng; + pos = rng->start; + + } + + } + + rng = rng->next; + + } + + if (r) { + + struct tainted *t = ck_alloc_nozero(sizeof(struct tainted)); + t->pos = r->start; + t->len = 1 + r->end - r->start; + positions += (1 + r->end - r->start); + if (likely(taint)) { taint->prev = t; } + t->next = taint; + t->prev = NULL; + taint = t; + r->ok = 2; + i = 1; + } + + } + + *taints = taint; + + /* temporary: clean ranges */ while (ranges) { rng = ranges; ranges = rng->next; ck_free(rng); - rng = NULL; } + new_hit_cnt = afl->queued_paths + afl->unique_crashes; + +#ifdef _DEBUG + /* + char fn[4096]; + snprintf(fn, sizeof(fn), "%s/introspection_color.txt", afl->out_dir); + FILE *f = fopen(fn, "a"); + if (f) { + + */ + FILE *f = stderr; + fprintf(f, + "Colorization: fname=%s len=%u result=%u execs=%u found=%llu " + "taint=%u\n", + afl->queue_cur->fname, len, afl->queue_cur->colorized, afl->stage_cur, + new_hit_cnt - orig_hit_cnt, positions); +/* + fclose(f); + + } + +*/ +#endif + + afl->stage_finds[STAGE_COLORIZATION] += new_hit_cnt - orig_hit_cnt; + afl->stage_cycles[STAGE_COLORIZATION] += afl->stage_cur; + ck_free(backup); + ck_free(changed); + + return 0; + +checksum_fail: + ck_free(backup); + ck_free(changed); + return 1; } @@ -212,12 +397,19 @@ static u8 its_fuzz(afl_state_t *afl, u8 *buf, u32 len, u8 *status) { orig_hit_cnt = afl->queued_paths + afl->unique_crashes; +#ifdef _DEBUG + dump("DATA", buf, len); +#endif + if (unlikely(common_fuzz_stuff(afl, buf, len))) { return 1; } new_hit_cnt = afl->queued_paths + afl->unique_crashes; if (unlikely(new_hit_cnt != orig_hit_cnt)) { +#ifdef _DEBUG + fprintf(stderr, "NEW FIND\n"); +#endif *status = 1; } else { @@ -278,11 +470,33 @@ static int strntoull(const char *str, size_t sz, char **end, int base, } static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, - u64 pattern, u64 repl, u64 o_pattern, u32 idx, - u8 *orig_buf, u8 *buf, u32 len, u8 do_reverse, - u8 *status) { - - if (!buf) { FATAL("BUG: buf was NULL. Please report this.\n"); } + u64 pattern, u64 repl, u64 o_pattern, + u64 changed_val, u8 attr, u32 idx, u32 taint_len, + u8 *orig_buf, u8 *buf, u8 *cbuf, u32 len, + u8 do_reverse, u8 lvl, u8 *status) { + + // (void)(changed_val); // TODO + // we can use the information in changed_val to see if there is a + // computable i2s transformation. 
+ // if (pattern != o_pattern && repl != changed_val) { + + // u64 in_diff = pattern - o_pattern, out_diff = repl - changed_val; + // if (in_diff != out_diff) { + + // switch(in_diff) { + + // detect uppercase <-> lowercase, base64, hex encoding, etc.: + // repl = reverse_transform(TYPE, pattern); + // } + // } + // } + // not 100% but would have a chance to be detected + + // fprintf(stderr, + // "Encode: %llx->%llx into %llx(<-%llx) at pos=%u " + // "taint_len=%u shape=%u attr=%u\n", + // o_pattern, pattern, repl, changed_val, idx, taint_len, + // h->shape + 1, attr); u64 *buf_64 = (u64 *)&buf[idx]; u32 *buf_32 = (u32 *)&buf[idx]; @@ -293,76 +507,215 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, u16 *o_buf_16 = (u16 *)&orig_buf[idx]; u8 * o_buf_8 = &orig_buf[idx]; - u32 its_len = len - idx; - // *status = 0; + u32 its_len = MIN(len - idx, taint_len); u8 * endptr; u8 use_num = 0, use_unum = 0; unsigned long long unum; long long num; - if (afl->queue_cur->is_ascii) { + // reverse atoi()/strnu?toll() is expensive, so we only to it in lvl == 3 + if (lvl & 4) { - endptr = buf_8; - if (strntoll(buf_8, len - idx, (char **)&endptr, 0, &num)) { + if (afl->queue_cur->is_ascii) { - if (!strntoull(buf_8, len - idx, (char **)&endptr, 0, &unum)) - use_unum = 1; + endptr = buf_8; + if (strntoll(buf_8, len - idx, (char **)&endptr, 0, &num)) { - } else + if (!strntoull(buf_8, len - idx, (char **)&endptr, 0, &unum)) + use_unum = 1; - use_num = 1; + } else - } + use_num = 1; - if (use_num && (u64)num == pattern) { + } - size_t old_len = endptr - buf_8; - size_t num_len = snprintf(NULL, 0, "%lld", num); +#ifdef _DEBUG + if (idx == 0) + fprintf(stderr, "ASCII is=%u use_num=%u use_unum=%u idx=%u %llx==%llx\n", + afl->queue_cur->is_ascii, use_num, use_unum, idx, num, pattern); +#endif - u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len); - if (unlikely(!new_buf)) { PFATAL("alloc"); } - memcpy(new_buf, buf, idx); + // num is likely not pattern as atoi("AAA") will be zero... 
+ if (use_num && ((u64)num == pattern || !num)) { - snprintf(new_buf + idx, num_len, "%lld", num); - memcpy(new_buf + idx + num_len, buf_8 + old_len, len - idx - old_len); + u8 tmp_buf[32]; + size_t num_len = snprintf(tmp_buf, sizeof(tmp_buf), "%lld", repl); + size_t old_len = endptr - buf_8; - if (unlikely(its_fuzz(afl, new_buf, len, status))) { return 1; } + u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } - } else if (use_unum && unum == pattern) { + memcpy(new_buf, buf, idx); + memcpy(new_buf + idx, tmp_buf, num_len); + memcpy(new_buf + idx + num_len, buf_8 + old_len, len - idx - old_len); - size_t old_len = endptr - buf_8; - size_t num_len = snprintf(NULL, 0, "%llu", unum); + if (new_buf[idx + num_len] >= '0' && new_buf[idx + num_len] <= '9') { + + new_buf[idx + num_len] = ' '; + + } - u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len); - if (unlikely(!new_buf)) { PFATAL("alloc"); } - memcpy(new_buf, buf, idx); + if (unlikely(its_fuzz(afl, new_buf, len, status))) { return 1; } - snprintf(new_buf + idx, num_len, "%llu", unum); - memcpy(new_buf + idx + num_len, buf_8 + old_len, len - idx - old_len); + } else if (use_unum && (unum == pattern || !unum)) { - if (unlikely(its_fuzz(afl, new_buf, len, status))) { return 1; } + u8 tmp_buf[32]; + size_t num_len = snprintf(tmp_buf, sizeof(tmp_buf), "%llu", repl); + size_t old_len = endptr - buf_8; + + u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } + + memcpy(new_buf, buf, idx); + memcpy(new_buf + idx, tmp_buf, num_len); + memcpy(new_buf + idx + num_len, buf_8 + old_len, len - idx - old_len); + + if (new_buf[idx + num_len] >= '0' && new_buf[idx + num_len] <= '9') { + + new_buf[idx + num_len] = ' '; + + } + + if (unlikely(its_fuzz(afl, new_buf, len, status))) { return 1; } + + } } - if (SHAPE_BYTES(h->shape) >= 8 && *status != 1) { + // we only allow this for ascii2integer (above) + if (unlikely(pattern == o_pattern)) { return 0; } - if (its_len >= 8 && *buf_64 == pattern && *o_buf_64 == o_pattern) { + if ((lvl & 1) || ((lvl & 2) && (attr >= 8 && attr <= 15)) || attr >= 16) { - *buf_64 = repl; - if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } - *buf_64 = pattern; + if (SHAPE_BYTES(h->shape) >= 8 && *status != 1) { + + // if (its_len >= 8 && (attr == 0 || attr >= 8)) + // fprintf(stderr, + // "TestU64: %u>=4 %x==%llx" + // " %x==%llx (idx=%u attr=%u) <= %llx<-%llx\n", + // its_len, *buf_32, pattern, *o_buf_32, o_pattern, idx, attr, + // repl, changed_val); + + // if this is an fcmp (attr & 8 == 8) then do not compare the patterns - + // due to a bug in llvm dynamic float bitcasts do not work :( + // the value 16 means this is a +- 1.0 test case + if (its_len >= 8 && + ((*buf_64 == pattern && *o_buf_64 == o_pattern) || attr >= 16)) { + + u64 tmp_64 = *buf_64; + *buf_64 = repl; + if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } + if (*status == 1) { memcpy(cbuf + idx, buf_64, 8); } + *buf_64 = tmp_64; + + // fprintf(stderr, "Status=%u\n", *status); + + } + + // reverse encoding + if (do_reverse && *status != 1) { + + if (unlikely(cmp_extend_encoding(afl, h, SWAP64(pattern), SWAP64(repl), + SWAP64(o_pattern), SWAP64(changed_val), + attr, idx, taint_len, orig_buf, buf, + cbuf, len, 0, lvl, status))) { + + return 1; + + } + + } } - // reverse encoding - if (do_reverse && *status != 1) { + if (SHAPE_BYTES(h->shape) >= 4 && *status != 1) { - if 
(unlikely(cmp_extend_encoding(afl, h, SWAP64(pattern), SWAP64(repl), - SWAP64(o_pattern), idx, orig_buf, buf, - len, 0, status))) { + // if (its_len >= 4 && (attr <= 1 || attr >= 8)) + // fprintf(stderr, + // "TestU32: %u>=4 %x==%llx" + // " %x==%llx (idx=%u attr=%u) <= %llx<-%llx\n", + // its_len, *buf_32, pattern, *o_buf_32, o_pattern, idx, attr, + // repl, changed_val); - return 1; + if (its_len >= 4 && + ((*buf_32 == (u32)pattern && *o_buf_32 == (u32)o_pattern) || + attr >= 16)) { + + u32 tmp_32 = *buf_32; + *buf_32 = (u32)repl; + if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } + if (*status == 1) { memcpy(cbuf + idx, buf_32, 4); } + *buf_32 = tmp_32; + + // fprintf(stderr, "Status=%u\n", *status); + + } + + // reverse encoding + if (do_reverse && *status != 1) { + + if (unlikely(cmp_extend_encoding(afl, h, SWAP32(pattern), SWAP32(repl), + SWAP32(o_pattern), SWAP32(changed_val), + attr, idx, taint_len, orig_buf, buf, + cbuf, len, 0, lvl, status))) { + + return 1; + + } + + } + + } + + if (SHAPE_BYTES(h->shape) >= 2 && *status != 1) { + + if (its_len >= 2 && + ((*buf_16 == (u16)pattern && *o_buf_16 == (u16)o_pattern) || + attr >= 16)) { + + u16 tmp_16 = *buf_16; + *buf_16 = (u16)repl; + if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } + if (*status == 1) { memcpy(cbuf + idx, buf_16, 2); } + *buf_16 = tmp_16; + + } + + // reverse encoding + if (do_reverse && *status != 1) { + + if (unlikely(cmp_extend_encoding(afl, h, SWAP16(pattern), SWAP16(repl), + SWAP16(o_pattern), SWAP16(changed_val), + attr, idx, taint_len, orig_buf, buf, + cbuf, len, 0, lvl, status))) { + + return 1; + + } + + } + + } + + if (*status != 1) { // u8 + + // if (its_len >= 1 && (attr <= 1 || attr >= 8)) + // fprintf(stderr, + // "TestU8: %u>=1 %x==%x %x==%x (idx=%u attr=%u) <= %x<-%x\n", + // its_len, *buf_8, pattern, *o_buf_8, o_pattern, idx, attr, + // repl, changed_val); + + if (its_len >= 1 && + ((*buf_8 == (u8)pattern && *o_buf_8 == (u8)o_pattern) || + attr >= 16)) { + + u8 tmp_8 = *buf_8; + *buf_8 = (u8)repl; + if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } + if (*status == 1) { cbuf[idx] = *buf_8; } + *buf_8 = tmp_8; } @@ -370,49 +723,205 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, } - if (SHAPE_BYTES(h->shape) >= 4 && *status != 1) { + // here we add and subract 1 from the value, but only if it is not an + // == or != comparison + // Bits: 1 = Equal, 2 = Greater, 3 = Lesser, 4 = Float - if (its_len >= 4 && *buf_32 == (u32)pattern && - *o_buf_32 == (u32)o_pattern) { + if (lvl < 4) { return 0; } - *buf_32 = (u32)repl; - if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } - *buf_32 = pattern; + if (attr >= 8 && attr < 16) { // lesser/greater integer comparison + + u64 repl_new; + if (SHAPE_BYTES(h->shape) == 4 && its_len >= 4) { + + float *f = (float *)&repl; + float g = *f; + g += 1.0; + u32 *r = (u32 *)&g; + repl_new = (u32)*r; + + } else if (SHAPE_BYTES(h->shape) == 8 && its_len >= 8) { + + double *f = (double *)&repl; + double g = *f; + g += 1.0; + + u64 *r = (u64 *)&g; + repl_new = *r; + + } else { + + return 0; } - // reverse encoding - if (do_reverse && *status != 1) { + changed_val = repl_new; + + if (unlikely(cmp_extend_encoding(afl, h, pattern, repl_new, o_pattern, + changed_val, 16, idx, taint_len, orig_buf, + buf, cbuf, len, 1, lvl, status))) { + + return 1; + + } + + if (SHAPE_BYTES(h->shape) == 4) { + + float *f = (float *)&repl; + float g = *f; + g -= 1.0; + u32 *r = (u32 *)&g; + repl_new = (u32)*r; + + } else if 
(SHAPE_BYTES(h->shape) == 8) { + + double *f = (double *)&repl; + double g = *f; + g -= 1.0; + u64 *r = (u64 *)&g; + repl_new = *r; + + } else { + + return 0; + + } + + changed_val = repl_new; + + if (unlikely(cmp_extend_encoding(afl, h, pattern, repl_new, o_pattern, + changed_val, 16, idx, taint_len, orig_buf, + buf, cbuf, len, 1, lvl, status))) { + + return 1; + + } + + // transform double to float, llvm likes to do that internally ... + if (SHAPE_BYTES(h->shape) == 8 && its_len >= 4) { + + double *f = (double *)&repl; + float g = (float)*f; + repl_new = 0; +#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) + memcpy((char *)&repl_new, (char *)&g, 4); +#else + memcpy(((char *)&repl_new) + 4, (char *)&g, 4); +#endif + changed_val = repl_new; + h->shape = 3; // modify shape - if (unlikely(cmp_extend_encoding(afl, h, SWAP32(pattern), SWAP32(repl), - SWAP32(o_pattern), idx, orig_buf, buf, - len, 0, status))) { + // fprintf(stderr, "DOUBLE2FLOAT %llx\n", repl_new); + if (unlikely(cmp_extend_encoding( + afl, h, pattern, repl_new, o_pattern, changed_val, 16, idx, + taint_len, orig_buf, buf, cbuf, len, 1, lvl, status))) { + + h->shape = 7; return 1; } + h->shape = 7; // recover shape + + } + + } else if (attr > 1 && attr < 8) { // lesser/greater integer comparison + + u64 repl_new; + + repl_new = repl + 1; + changed_val = repl_new; + if (unlikely(cmp_extend_encoding(afl, h, pattern, repl_new, o_pattern, + changed_val, 32, idx, taint_len, orig_buf, + buf, cbuf, len, 1, lvl, status))) { + + return 1; + + } + + repl_new = repl - 1; + changed_val = repl_new; + if (unlikely(cmp_extend_encoding(afl, h, pattern, repl_new, o_pattern, + changed_val, 32, idx, taint_len, orig_buf, + buf, cbuf, len, 1, lvl, status))) { + + return 1; + } } - if (SHAPE_BYTES(h->shape) >= 2 && *status != 1) { + return 0; - if (its_len >= 2 && *buf_16 == (u16)pattern && - *o_buf_16 == (u16)o_pattern) { +} - *buf_16 = (u16)repl; +static u8 cmp_extend_encoding128(afl_state_t *afl, struct cmp_header *h, + u128 pattern, u128 repl, u128 o_pattern, + u128 changed_val, u8 attr, u32 idx, + u32 taint_len, u8 *orig_buf, u8 *buf, u8 *cbuf, + u32 len, u8 do_reverse, u8 lvl, u8 *status) { + + u128 *buf_128 = (u128 *)&buf[idx]; + u64 * buf0 = (u64 *)&buf[idx]; + u64 * buf1 = (u64 *)(buf + idx + 8); + u128 *o_buf_128 = (u128 *)&orig_buf[idx]; + u32 its_len = MIN(len - idx, taint_len); + u64 v10 = (u64)repl; + u64 v11 = (u64)(repl >> 64); + + // if this is an fcmp (attr & 8 == 8) then do not compare the patterns - + // due to a bug in llvm dynamic float bitcasts do not work :( + // the value 16 means this is a +- 1.0 test case + if (its_len >= 16) { + +#ifdef _DEBUG + fprintf(stderr, "TestU128: %u>=16 (idx=%u attr=%u) (%u)\n", its_len, idx, + attr, do_reverse); + u64 v00 = (u64)pattern; + u64 v01 = pattern >> 64; + u64 ov00 = (u64)o_pattern; + u64 ov01 = o_pattern >> 64; + u64 ov10 = (u64)changed_val; + u64 ov11 = changed_val >> 64; + u64 b00 = (u64)*buf_128; + u64 b01 = *buf_128 >> 64; + u64 ob00 = (u64)*o_buf_128; + u64 ob01 = *o_buf_128 >> 64; + fprintf(stderr, + "TestU128: %llx:%llx==%llx:%llx" + " %llx:%llx==%llx:%llx <= %llx:%llx<-%llx:%llx\n", + b01, b00, v01, v00, ob01, ob00, ov01, ov00, v11, v10, ov11, ov10); +#endif + + if (*buf_128 == pattern && *o_buf_128 == o_pattern) { + + u128 tmp_128 = *buf_128; + // *buf_128 = repl; <- this crashes +#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) + *buf0 = v10; + *buf1 = v11; +#else + *buf1 = v10; + *buf0 = v11; +#endif if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } - *buf_16 = 
(u16)pattern; + if (*status == 1) { memcpy(cbuf + idx, buf_128, 16); } + *buf_128 = tmp_128; + +#ifdef _DEBUG + fprintf(stderr, "Status=%u\n", *status); +#endif } // reverse encoding if (do_reverse && *status != 1) { - if (unlikely(cmp_extend_encoding(afl, h, SWAP16(pattern), SWAP16(repl), - SWAP16(o_pattern), idx, orig_buf, buf, - len, 0, status))) { + if (unlikely(cmp_extend_encoding128( + afl, h, SWAPN(pattern, 128), SWAPN(repl, 128), + SWAPN(o_pattern, 128), SWAPN(changed_val, 128), attr, idx, + taint_len, orig_buf, buf, cbuf, len, 0, lvl, status))) { return 1; @@ -422,14 +931,82 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, } - /* avoid CodeQL warning on unsigned overflow */ - if (/* SHAPE_BYTES(h->shape) >= 1 && */ *status != 1) { + return 0; - if (its_len >= 1 && *buf_8 == (u8)pattern && *o_buf_8 == (u8)o_pattern) { +} - *buf_8 = (u8)repl; +// uh a pointer read from (long double*) reads 12 bytes, not 10 ... +// so lets make this complicated. +static u8 cmp_extend_encoding_ld(afl_state_t *afl, struct cmp_header *h, + u8 *pattern, u8 *repl, u8 *o_pattern, + u8 *changed_val, u8 attr, u32 idx, + u32 taint_len, u8 *orig_buf, u8 *buf, u8 *cbuf, + u32 len, u8 do_reverse, u8 lvl, u8 *status) { + + u8 *buf_ld = &buf[idx], *o_buf_ld = &orig_buf[idx], backup[10]; + u32 its_len = MIN(len - idx, taint_len); + + if (its_len >= 10) { + +#ifdef _DEBUG + fprintf(stderr, "TestUld: %u>=10 (len=%u idx=%u attr=%u) (%u)\n", its_len, + len, idx, attr, do_reverse); + fprintf(stderr, "TestUld: "); + u32 i; + for (i = 0; i < 10; i++) + fprintf(stderr, "%02x", pattern[i]); + fprintf(stderr, "=="); + for (i = 0; i < 10; i++) + fprintf(stderr, "%02x", buf_ld[i]); + fprintf(stderr, " "); + for (i = 0; i < 10; i++) + fprintf(stderr, "%02x", o_pattern[i]); + fprintf(stderr, "=="); + for (i = 0; i < 10; i++) + fprintf(stderr, "%02x", o_buf_ld[i]); + fprintf(stderr, " <= "); + for (i = 0; i < 10; i++) + fprintf(stderr, "%02x", repl[i]); + fprintf(stderr, "=="); + for (i = 0; i < 10; i++) + fprintf(stderr, "%02x", changed_val[i]); + fprintf(stderr, "\n"); +#endif + + if (!memcmp(pattern, buf_ld, 10) && !memcmp(o_pattern, o_buf_ld, 10)) { + + // if this is an fcmp (attr & 8 == 8) then do not compare the patterns - + // due to a bug in llvm dynamic float bitcasts do not work :( + // the value 16 means this is a +- 1.0 test case + + memcpy(backup, buf_ld, 10); + memcpy(buf_ld, repl, 10); if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } - *buf_8 = (u8)pattern; + if (*status == 1) { memcpy(cbuf + idx, repl, 10); } + memcpy(buf_ld, backup, 10); + +#ifdef _DEBUG + fprintf(stderr, "Status=%u\n", *status); +#endif + + } + + } + + // reverse encoding + if (do_reverse && *status != 1) { + + u8 sp[10], sr[10], osp[10], osr[10]; + SWAPNN(sp, pattern, 10); + SWAPNN(sr, repl, 10); + SWAPNN(osp, o_pattern, 10); + SWAPNN(osr, changed_val, 10); + + if (unlikely(cmp_extend_encoding_ld(afl, h, sp, sr, osp, osr, attr, idx, + taint_len, orig_buf, buf, cbuf, len, 0, + lvl, status))) { + + return 1; } @@ -445,10 +1022,6 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) { u32 k; u8 cons_ff = 0, cons_0 = 0; - - if (shape > sizeof(v)) - FATAL("shape is greater than %zu, please report!", sizeof(v)); - for (k = 0; k < shape; ++k) { if (b[k] == 0) { @@ -457,7 +1030,7 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) { } else if (b[k] == 0xff) { - ++cons_ff; + ++cons_0; } else { @@ -493,28 +1066,126 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) { } 
-static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) { +static void try_to_add_to_dict128(afl_state_t *afl, u128 v) { - struct cmp_header *h = &afl->shm.cmp_map->headers[key]; - u32 i, j, idx; + u8 *b = (u8 *)&v; - u32 loggeds = h->hits; + u32 k; + u8 cons_ff = 0, cons_0 = 0; + for (k = 0; k < 16; ++k) { + + if (b[k] == 0) { + + ++cons_0; + + } else if (b[k] == 0xff) { + + ++cons_0; + + } else { + + cons_0 = cons_ff = 0; + + } + + // too many uninteresting values? try adding 2 64-bit values + if (cons_0 > 6 || cons_ff > 6) { + + u64 v64 = (u64)v; + try_to_add_to_dict(afl, v64, 8); + v64 = (u64)(v >> 64); + try_to_add_to_dict(afl, v64, 8); + + return; + + } + + } + + maybe_add_auto(afl, (u8 *)&v, 16); + u128 rev = SWAPN(v, 128); + maybe_add_auto(afl, (u8 *)&rev, 16); + +} + +static void try_to_add_to_dictN(afl_state_t *afl, u128 v, u8 size) { + + u8 *b = (u8 *)&v; + + u32 k; + u8 cons_ff = 0, cons_0 = 0; +#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) + for (k = 0; k < size; ++k) { + +#else + for (k = 16 - size; k < 16; ++k) { + +#endif + if (b[k] == 0) { + + ++cons_0; + + } else if (b[k] == 0xff) { + + ++cons_0; + + } else { + + cons_0 = cons_ff = 0; + + } + + } + + maybe_add_auto(afl, (u8 *)&v, size); + u128 rev = SWAPN(v, size); + maybe_add_auto(afl, (u8 *)&rev, size); + +} + +static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf, + u32 len, u32 lvl, struct tainted *taint) { + + struct cmp_header *h = &afl->shm.cmp_map->headers[key]; + struct tainted * t; + u32 i, j, idx, taint_len; + u32 have_taint = 1, is_128 = 0, is_n = 0, is_ld = 0; + u32 loggeds = h->hits; if (h->hits > CMP_MAP_H) { loggeds = CMP_MAP_H; } u8 status = 0; - // opt not in the paper - u32 fails; - u8 found_one = 0; + u8 found_one = 0; /* loop cmps are useless, detect and ignore them */ - u64 s_v0, s_v1; - u8 s_v0_fixed = 1, s_v1_fixed = 1; - u8 s_v0_inc = 1, s_v1_inc = 1; - u8 s_v0_dec = 1, s_v1_dec = 1; + u128 s128_v0 = 0, s128_v1 = 0, orig_s128_v0 = 0, orig_s128_v1 = 0; + long double ld0, ld1, o_ld0, o_ld1; + u64 s_v0, s_v1; + u8 s_v0_fixed = 1, s_v1_fixed = 1; + u8 s_v0_inc = 1, s_v1_inc = 1; + u8 s_v0_dec = 1, s_v1_dec = 1; - for (i = 0; i < loggeds; ++i) { + switch (SHAPE_BYTES(h->shape)) { + + case 1: + case 2: + case 4: + case 8: + break; + case 16: + is_128 = 1; + break; + case 10: + if (h->attribute & 8) { is_ld = 1; } + // fall through + default: + is_n = 1; - fails = 0; + } + + // FCmp not in if level 1 only + if ((h->attribute & 8) && lvl < 2) return 0; + + for (i = 0; i < loggeds; ++i) { struct cmp_operands *o = &afl->shm.cmp_map->log[key][i]; @@ -551,55 +1222,242 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) { } - for (idx = 0; idx < len && fails < 8; ++idx) { +#ifdef _DEBUG + fprintf(stderr, "Handling: %llx->%llx vs %llx->%llx attr=%u shape=%u\n", + orig_o->v0, o->v0, orig_o->v1, o->v1, h->attribute, + SHAPE_BYTES(h->shape)); +#endif + + if (taint) { + + t = taint; + + while (t->next) { + + t = t->next; + + } + + } else { + + have_taint = 0; + t = NULL; + + } + + if (unlikely(is_128 || is_n)) { + + s128_v0 = ((u128)o->v0) + (((u128)o->v0_128) << 64); + s128_v1 = ((u128)o->v1) + (((u128)o->v1_128) << 64); + orig_s128_v0 = ((u128)orig_o->v0) + (((u128)orig_o->v0_128) << 64); + orig_s128_v1 = ((u128)orig_o->v1) + (((u128)orig_o->v1_128) << 64); + + if (is_ld) { + +#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) + memcpy((char *)&ld0, (char *)&s128_v0, sizeof(long double)); + memcpy((char *)&ld1, (char *)&s128_v1, 
sizeof(long double)); + memcpy((char *)&o_ld0, (char *)&orig_s128_v0, sizeof(long double)); + memcpy((char *)&o_ld1, (char *)&orig_s128_v1, sizeof(long double)); +#else + memcpy((char *)&ld0, (char *)(&s128_v0) + 6, sizeof(long double)); + memcpy((char *)&ld1, (char *)(&s128_v1) + 6, sizeof(long double)); + memcpy((char *)&o_ld0, (char *)(&orig_s128_v0) + 6, + sizeof(long double)); + memcpy((char *)&o_ld1, (char *)(&orig_s128_v1) + 6, + sizeof(long double)); +#endif + + } + + } + + for (idx = 0; idx < len; ++idx) { + + if (have_taint) { + + if (!t || idx < t->pos) { + + continue; + + } else { + + taint_len = t->pos + t->len - idx; + + if (idx == t->pos + t->len - 1) { t = t->prev; } + + } + + } else { + + taint_len = len - idx; + + } status = 0; - if (unlikely(cmp_extend_encoding(afl, h, o->v0, o->v1, orig_o->v0, idx, - orig_buf, buf, len, 1, &status))) { - return 1; + if (is_ld) { // long double special case + + if (ld0 != o_ld0 && o_ld1 != o_ld0) { + + if (unlikely(cmp_extend_encoding_ld( + afl, h, (u8 *)&ld0, (u8 *)&ld1, (u8 *)&o_ld0, (u8 *)&o_ld1, + h->attribute, idx, taint_len, orig_buf, buf, cbuf, len, 1, + lvl, &status))) { + + return 1; + + } + + } + + if (status == 1) { + + found_one = 1; + break; + + } + + if (ld1 != o_ld1 && o_ld0 != o_ld1) { + + if (unlikely(cmp_extend_encoding_ld( + afl, h, (u8 *)&ld1, (u8 *)&ld0, (u8 *)&o_ld1, (u8 *)&o_ld0, + h->attribute, idx, taint_len, orig_buf, buf, cbuf, len, 1, + lvl, &status))) { + + return 1; + + } + + } + + if (status == 1) { + + found_one = 1; + break; + + } + + } + + if (is_128) { // u128 special case + + if (s128_v0 != orig_s128_v0 && orig_s128_v0 != orig_s128_v1) { + + if (unlikely(cmp_extend_encoding128( + afl, h, s128_v0, s128_v1, orig_s128_v0, orig_s128_v1, + h->attribute, idx, taint_len, orig_buf, buf, cbuf, len, 1, + lvl, &status))) { + + return 1; + + } + + } + + if (status == 1) { + + found_one = 1; + break; + + } + + if (s128_v1 != orig_s128_v1 && orig_s128_v1 != orig_s128_v0) { + + if (unlikely(cmp_extend_encoding128( + afl, h, s128_v1, s128_v0, orig_s128_v1, orig_s128_v0, + h->attribute, idx, taint_len, orig_buf, buf, cbuf, len, 1, + lvl, &status))) { + + return 1; + + } + + } + + if (status == 1) { + + found_one = 1; + break; + + } } - if (status == 2) { + // even for u128 and long double do cmp_extend_encoding() because + // if we got here their own special trials failed and it might just be + // a cast from e.g. u64 to u128 from the input data. 
- ++fails; + if ((o->v0 != orig_o->v0 || lvl >= 4) && orig_o->v0 != orig_o->v1) { - } else if (status == 1) { + if (unlikely(cmp_extend_encoding( + afl, h, o->v0, o->v1, orig_o->v0, orig_o->v1, h->attribute, idx, + taint_len, orig_buf, buf, cbuf, len, 1, lvl, &status))) { + return 1; + + } + + } + + if (status == 1) { + + found_one = 1; break; } status = 0; - if (unlikely(cmp_extend_encoding(afl, h, o->v1, o->v0, orig_o->v1, idx, - orig_buf, buf, len, 1, &status))) { + if ((o->v1 != orig_o->v1 || lvl >= 4) && orig_o->v0 != orig_o->v1) { - return 1; + if (unlikely(cmp_extend_encoding( + afl, h, o->v1, o->v0, orig_o->v1, orig_o->v0, h->attribute, idx, + taint_len, orig_buf, buf, cbuf, len, 1, lvl, &status))) { - } + return 1; - if (status == 2) { + } - ++fails; + } - } else if (status == 1) { + if (status == 1) { + found_one = 1; break; } } - if (status == 1) { found_one = 1; } +#ifdef _DEBUG + fprintf(stderr, + "END: %llx->%llx vs %llx->%llx attr=%u i=%u found=%u is128=%u " + "isN=%u size=%u\n", + orig_o->v0, o->v0, orig_o->v1, o->v1, h->attribute, i, found_one, + is_128, is_n, SHAPE_BYTES(h->shape)); +#endif // If failed, add to dictionary - if (fails == 8) { + if (!found_one) { if (afl->pass_stats[key].total == 0) { - try_to_add_to_dict(afl, o->v0, SHAPE_BYTES(h->shape)); - try_to_add_to_dict(afl, o->v1, SHAPE_BYTES(h->shape)); + if (unlikely(is_128)) { + + try_to_add_to_dict128(afl, s128_v0); + try_to_add_to_dict128(afl, s128_v1); + + } else if (unlikely(is_n)) { + + try_to_add_to_dictN(afl, s128_v0, SHAPE_BYTES(h->shape)); + try_to_add_to_dictN(afl, s128_v1, SHAPE_BYTES(h->shape)); + + } else { + + try_to_add_to_dict(afl, o->v0, SHAPE_BYTES(h->shape)); + try_to_add_to_dict(afl, o->v1, SHAPE_BYTES(h->shape)); + + } } @@ -630,20 +1488,19 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) { } static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl, - u8 *o_pattern, u32 idx, u8 *orig_buf, u8 *buf, - u32 len, u8 *status) { + u8 *o_pattern, u32 idx, u32 taint_len, + u8 *orig_buf, u8 *buf, u8 *cbuf, u32 len, + u8 *status) { u32 i; u32 its_len = MIN((u32)32, len - idx); - + its_len = MIN(its_len, taint_len); u8 save[32]; memcpy(save, &buf[idx], its_len); - *status = 0; - for (i = 0; i < its_len; ++i) { - if (pattern[i] != buf[idx + i] || o_pattern[i] != orig_buf[idx + i] || + if ((pattern[i] != buf[idx + i] && o_pattern[i] != orig_buf[idx + i]) || *status == 1) { break; @@ -654,6 +1511,8 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl, if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } + if (*status == 1) { memcpy(cbuf + idx, &buf[idx], i); } + } memcpy(&buf[idx], save, i); @@ -661,23 +1520,21 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl, } -static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) { +static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf, + u32 len, struct tainted *taint) { + struct tainted * t; struct cmp_header *h = &afl->shm.cmp_map->headers[key]; - u32 i, j, idx; + u32 i, j, idx, have_taint = 1, taint_len; u32 loggeds = h->hits; if (h->hits > CMP_MAP_RTN_H) { loggeds = CMP_MAP_RTN_H; } u8 status = 0; - // opt not in the paper - // u32 fails = 0; u8 found_one = 0; for (i = 0; i < loggeds; ++i) { - u32 fails = 0; - struct cmpfn_operands *o = &((struct cmpfn_operands *)afl->shm.cmp_map->log[key])[i]; @@ -696,50 +1553,84 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) { } - for (idx = 0; idx < len 
&& fails < 8; ++idx) { + if (taint) { - if (unlikely(rtn_extend_encoding(afl, o->v0, o->v1, orig_o->v0, idx, - orig_buf, buf, len, &status))) { + t = taint; + while (t->next) { - return 1; + t = t->next; } - if (status == 2) { + } else { - ++fails; + have_taint = 0; + t = NULL; - } else if (status == 1) { + } - break; + for (idx = 0; idx < len; ++idx) { + + if (have_taint) { + + if (!t || idx < t->pos) { + + continue; + + } else { + + taint_len = t->pos + t->len - idx; + + if (idx == t->pos + t->len - 1) { t = t->prev; } + + } + + } else { + + taint_len = len - idx; } - if (unlikely(rtn_extend_encoding(afl, o->v1, o->v0, orig_o->v1, idx, - orig_buf, buf, len, &status))) { + status = 0; + + if (unlikely(rtn_extend_encoding(afl, o->v0, o->v1, orig_o->v0, idx, + taint_len, orig_buf, buf, cbuf, len, + &status))) { return 1; } - if (status == 2) { + if (status == 1) { + + found_one = 1; + break; + + } + + status = 0; + + if (unlikely(rtn_extend_encoding(afl, o->v1, o->v0, orig_o->v1, idx, + taint_len, orig_buf, buf, cbuf, len, + &status))) { - ++fails; + return 1; - } else if (status == 1) { + } + if (status == 1) { + + found_one = 1; break; } } - if (status == 1) { found_one = 1; } - // If failed, add to dictionary - if (fails == 8) { + if (!found_one) { - if (afl->pass_stats[key].total == 0) { + if (unlikely(!afl->pass_stats[key].total)) { maybe_add_auto(afl, o->v0, SHAPE_BYTES(h->shape)); maybe_add_auto(afl, o->v1, SHAPE_BYTES(h->shape)); @@ -791,7 +1682,44 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len, memcpy(afl->orig_cmp_map, afl->shm.cmp_map, sizeof(struct cmp_map)); - if (unlikely(colorization(afl, buf, len, exec_cksum))) { return 1; } + struct tainted *taint = NULL; + + if (!afl->queue_cur->taint || !afl->queue_cur->cmplog_colorinput) { + + if (unlikely(colorization(afl, buf, len, exec_cksum, &taint))) { return 1; } + + // no taint? 
still try, create a dummy to prevent again colorization + if (!taint) { + + taint = ck_alloc(sizeof(struct tainted)); + taint->len = len; + + } + + } else { + + buf = afl->queue_cur->cmplog_colorinput; + taint = afl->queue_cur->taint; + // reget the cmplog information + if (unlikely(common_fuzz_cmplog_stuff(afl, buf, len))) { return 1; } + + } + +#ifdef _DEBUG + dump("ORIG", orig_buf, len); + dump("NEW ", buf, len); +#endif + + struct tainted *t = taint; + + while (t) { + +#ifdef _DEBUG + fprintf(stderr, "T: pos=%u len=%u\n", t->pos, t->len); +#endif + t = t->next; + + } // do it manually, forkserver clear only afl->fsrv.trace_bits memset(afl->shm.cmp_map->headers, 0, sizeof(afl->shm.cmp_map->headers)); @@ -807,15 +1735,38 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len, afl->stage_max = 0; afl->stage_cur = 0; + u32 lvl; + u32 cmplog_done = afl->queue_cur->colorized; + u32 cmplog_lvl = afl->cmplog_lvl; + if (!cmplog_done) { + + lvl = 1; + + } else { + + lvl = 0; + + } + + if (cmplog_lvl >= 2 && cmplog_done < 2) { lvl += 2; } + if (cmplog_lvl >= 3 && cmplog_done < 3) { lvl += 4; } + + u8 *cbuf = afl_realloc((void **)&afl->in_scratch_buf, len + 128); + memcpy(cbuf, orig_buf, len); + u8 *virgin_backup = afl_realloc((void **)&afl->ex_buf, afl->shm.map_size); + memcpy(virgin_backup, afl->virgin_bits, afl->shm.map_size); + u32 k; for (k = 0; k < CMP_MAP_W; ++k) { if (!afl->shm.cmp_map->headers[k].hits) { continue; } - if (afl->pass_stats[k].total && - (rand_below(afl, afl->pass_stats[k].total) >= - afl->pass_stats[k].faileds || - afl->pass_stats[k].total == 0xff)) { + if (afl->pass_stats[k].faileds == 0xff || + afl->pass_stats[k].total == 0xff) { + +#ifdef _DEBUG + fprintf(stderr, "DISABLED %u\n", k); +#endif afl->shm.cmp_map->headers[k].hits = 0; // ignore this cmp @@ -841,11 +1792,19 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len, if (afl->shm.cmp_map->headers[k].type == CMP_TYPE_INS) { - if (unlikely(cmp_fuzz(afl, k, orig_buf, buf, len))) { goto exit_its; } + if (unlikely(cmp_fuzz(afl, k, orig_buf, buf, cbuf, len, lvl, taint))) { + + goto exit_its; + + } } else { - if (unlikely(rtn_fuzz(afl, k, orig_buf, buf, len))) { goto exit_its; } + if (unlikely(rtn_fuzz(afl, k, orig_buf, buf, cbuf, len, taint))) { + + goto exit_its; + + } } @@ -854,12 +1813,86 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len, r = 0; exit_its: + + afl->queue_cur->colorized = afl->cmplog_lvl; + if (afl->cmplog_lvl == CMPLOG_LVL_MAX) { + + ck_free(afl->queue_cur->cmplog_colorinput); + t = taint; + while (taint) { + + t = taint->next; + ck_free(taint); + taint = t; + + } + + afl->queue_cur->taint = NULL; + + } else { + + if (!afl->queue_cur->taint) { afl->queue_cur->taint = taint; } + + if (!afl->queue_cur->cmplog_colorinput) { + + afl->queue_cur->cmplog_colorinput = ck_alloc_nozero(len); + memcpy(afl->queue_cur->cmplog_colorinput, buf, len); + memcpy(buf, orig_buf, len); + + } + + } + + // copy the current virgin bits so we can recover the information + u8 *virgin_save = afl_realloc((void **)&afl->eff_buf, afl->shm.map_size); + memcpy(virgin_save, afl->virgin_bits, afl->shm.map_size); + // reset virgin bits to the backup previous to redqueen + memcpy(afl->virgin_bits, virgin_backup, afl->shm.map_size); + + u8 status = 0; + its_fuzz(afl, cbuf, len, &status); + + // now combine with the saved virgin bits +#ifdef WORD_SIZE_64 + u64 *v = (u64 *)afl->virgin_bits; + u64 *s = (u64 *)virgin_save; + u32 i; + for (i = 0; i < (afl->shm.map_size >> 3); 
i++) { + + v[i] &= s[i]; + + } + +#else + u32 *v = (u64 *)afl->virgin_bits; + u32 *s = (u64 *)virgin_save; + u32 i; + for (i = 0; i < (afl->shm.map_size >> 2); i++) { + + v[i] &= s[i]; + + } + +#endif + +#ifdef _DEBUG + dump("COMB", cbuf, len); + if (status == 1) { + + fprintf(stderr, "NEW COMBINED\n"); + + } else { + + fprintf(stderr, "NO new combined\n"); + + } + +#endif + new_hit_cnt = afl->queued_paths + afl->unique_crashes; afl->stage_finds[STAGE_ITS] += new_hit_cnt - orig_hit_cnt; afl->stage_cycles[STAGE_ITS] += afl->fsrv.total_execs - orig_execs; - memcpy(buf, orig_buf, len); - return r; } diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index 60c9684c..8423a3d1 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -102,6 +102,7 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) { afl->stats_update_freq = 1; afl->stats_avg_exec = 0; afl->skip_deterministic = 1; + afl->cmplog_lvl = 1; #ifndef NO_SPLICING afl->use_splicing = 1; #endif diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index bb2674f0..1e914ca6 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -77,13 +77,8 @@ static void at_exit() { } int kill_signal = SIGKILL; - /* AFL_KILL_SIGNAL should already be a valid int at this point */ - if (getenv("AFL_KILL_SIGNAL")) { - - kill_signal = atoi(getenv("AFL_KILL_SIGNAL")); - - } + if ((ptr = getenv("AFL_KILL_SIGNAL"))) { kill_signal = atoi(ptr); } if (pid1 > 0) { kill(pid1, kill_signal); } if (pid2 > 0) { kill(pid2, kill_signal); } @@ -103,13 +98,14 @@ static void usage(u8 *argv0, int more_help) { "Execution control settings:\n" " -p schedule - power schedules compute a seed's performance score:\n" - " <fast(default), rare, exploit, seek, mmopt, coe, " - "explore,\n" - " lin, quad> -- see docs/power_schedules.md\n" + " fast(default), explore, exploit, seek, rare, mmopt, " + "coe, lin\n" + " quad -- see docs/power_schedules.md\n" " -f file - location read by the fuzzed program (default: stdin " "or @@)\n" " -t msec - timeout for each run (auto-scaled, 50-%u ms)\n" - " -m megs - memory limit for child process (%u MB, 0 = no limit)\n" + " -m megs - memory limit for child process (%u MB, 0 = no limit " + "[default])\n" " -Q - use binary-only instrumentation (QEMU mode)\n" " -U - use unicorn-based instrumentation (Unicorn mode)\n" " -W - use qemu-based instrumentation with Wine (Wine " @@ -125,7 +121,9 @@ static void usage(u8 *argv0, int more_help) { " See docs/README.MOpt.md\n" " -c program - enable CmpLog by specifying a binary compiled for " "it.\n" - " if using QEMU, just use -c 0.\n\n" + " if using QEMU, just use -c 0.\n" + " -l cmplog_level - set the complexity/intensivity of CmpLog.\n" + " Values: 1 (default), 2 (intensive) and 3 (heavy)\n\n" "Fuzzing behavior settings:\n" " -Z - sequential queue selection instead of weighted " @@ -337,7 +335,6 @@ int main(int argc, char **argv_orig, char **envp) { if (get_afl_env("AFL_DEBUG")) { debug = afl->debug = 1; } - // map_size = get_map_size(); afl_state_init(afl, map_size); afl->debug = debug; afl_fsrv_init(&afl->fsrv); @@ -358,7 +355,8 @@ int main(int argc, char **argv_orig, char **envp) { while ((opt = getopt( argc, argv, - "+b:c:i:I:o:f:F:m:t:T:dDnCB:S:M:x:QNUWe:p:s:V:E:L:hRP:Z")) > 0) { + "+b:B:c:CdDe:E:hi:I:f:F:l:L:m:M:nNo:p:P:RQs:S:t:T:UV:Wx:Z")) > + 0) { switch (opt) { @@ -787,6 +785,26 @@ int main(int argc, char **argv_orig, char **envp) { } break; + case 'l': { + + afl->cmplog_lvl = atoi(optarg); + if (afl->cmplog_lvl < 1 || afl->cmplog_lvl > CMPLOG_LVL_MAX) { + + FATAL( + "Bad complog level value, 
accepted values are 1 (default), 2 and " + "%u.", + CMPLOG_LVL_MAX); + + } + + if (afl->cmplog_lvl == CMPLOG_LVL_MAX) { + + afl->cmplog_max_filesize = MAX_FILE; + + } + + } break; + case 'L': { /* MOpt mode */ if (afl->limit_time_sig) { FATAL("Multiple -L options not supported"); } @@ -1635,6 +1653,14 @@ int main(int argc, char **argv_orig, char **envp) { if (afl->use_splicing) { ++afl->cycles_wo_finds; + + if (unlikely(afl->shm.cmplog_mode && + afl->cmplog_max_filesize < MAX_FILE)) { + + afl->cmplog_max_filesize <<= 4; + + } + switch (afl->expand_havoc) { case 0: @@ -1652,6 +1678,7 @@ int main(int argc, char **argv_orig, char **envp) { } afl->expand_havoc = 2; + if (afl->cmplog_lvl < 2) afl->cmplog_lvl = 2; break; case 2: // if (!have_p) afl->schedule = EXPLOIT; @@ -1665,11 +1692,14 @@ int main(int argc, char **argv_orig, char **envp) { afl->expand_havoc = 4; break; case 4: - // if not in sync mode, enable deterministic mode? - // if (!afl->sync_id) afl->skip_deterministic = 0; afl->expand_havoc = 5; + if (afl->cmplog_lvl < 3) afl->cmplog_lvl = 3; break; case 5: + // if not in sync mode, enable deterministic mode? + if (!afl->sync_id) afl->skip_deterministic = 0; + afl->expand_havoc = 6; + case 6: // nothing else currently break; |