From b24639d0113e15933e749ea0f96abe3f25a134a0 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Mon, 2 Sep 2019 18:49:43 +0200 Subject: run code formatter --- src/afl-fuzz-one.c | 5565 +++++++++++++++++++++++++++------------------------- 1 file changed, 2935 insertions(+), 2630 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 59370c3d..1b7abedd 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -28,22 +28,31 @@ int select_algorithm(void) { int i_puppet, j_puppet; - double sele = ((double)(UR(10000))*0.0001); + double sele = ((double)(UR(10000)) * 0.0001); j_puppet = 0; for (i_puppet = 0; i_puppet < operator_num; ++i_puppet) { - if (unlikely(i_puppet == 0)) { - if (sele < probability_now[swarm_now][i_puppet]) - break; - } else { - if (sele < probability_now[swarm_now][i_puppet]) { - j_puppet =1; - break; - } + + if (unlikely(i_puppet == 0)) { + + if (sele < probability_now[swarm_now][i_puppet]) break; + + } else { + + if (sele < probability_now[swarm_now][i_puppet]) { + + j_puppet = 1; + break; + } + + } + } - if (j_puppet ==1 && sele < probability_now[swarm_now][i_puppet-1]) + + if (j_puppet == 1 && sele < probability_now[swarm_now][i_puppet - 1]) FATAL("error select_algorithm"); return i_puppet; + } /* Helper to choose random block len for block operations in fuzz_one(). @@ -58,27 +67,29 @@ static u32 choose_block_len(u32 limit) { switch (UR(rlim)) { - case 0: min_value = 1; - max_value = HAVOC_BLK_SMALL; - break; + case 0: + min_value = 1; + max_value = HAVOC_BLK_SMALL; + break; - case 1: min_value = HAVOC_BLK_SMALL; - max_value = HAVOC_BLK_MEDIUM; - break; + case 1: + min_value = HAVOC_BLK_SMALL; + max_value = HAVOC_BLK_MEDIUM; + break; - default: + default: - if (UR(10)) { + if (UR(10)) { - min_value = HAVOC_BLK_MEDIUM; - max_value = HAVOC_BLK_LARGE; + min_value = HAVOC_BLK_MEDIUM; + max_value = HAVOC_BLK_LARGE; - } else { + } else { - min_value = HAVOC_BLK_LARGE; - max_value = HAVOC_BLK_XL; + min_value = HAVOC_BLK_LARGE; + max_value = HAVOC_BLK_XL; - } + } } @@ -88,7 +99,6 @@ static u32 choose_block_len(u32 limit) { } - /* Helper function to see if a particular change (xor_val = old ^ new) could be a product of deterministic bit flips with the lengths and stepovers attempted by afl-fuzz. This is used to avoid dupes in some of the @@ -104,7 +114,12 @@ static u8 could_be_bitflip(u32 xor_val) { /* Shift left until first bit set. */ - while (!(xor_val & 1)) { ++sh; xor_val >>= 1; } + while (!(xor_val & 1)) { + + ++sh; + xor_val >>= 1; + + } /* 1-, 2-, and 4-bit patterns are OK anywhere. */ @@ -115,14 +130,12 @@ static u8 could_be_bitflip(u32 xor_val) { if (sh & 7) return 0; - if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff) - return 1; + if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff) return 1; return 0; } - /* Helper function to see if a particular value is reachable through arithmetic operations. Used for similar purposes. 
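     As a quick illustration (assuming the stock config.h value of
     ARITH_MAX == 35): a single-byte change from 0x10 to 0x2f is reachable
     by +31 and is therefore treated as redundant, while 0x10 -> 0x90 is
     not (that change is a plain single-bit flip, caught by
     could_be_bitflip() instead).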
*/ @@ -136,10 +149,15 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { for (i = 0; i < blen; ++i) { - u8 a = old_val >> (8 * i), - b = new_val >> (8 * i); + u8 a = old_val >> (8 * i), b = new_val >> (8 * i); + + if (a != b) { + + ++diffs; + ov = a; + nv = b; - if (a != b) { ++diffs; ov = a; nv = b; } + } } @@ -147,8 +165,7 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { if (diffs == 1) { - if ((u8)(ov - nv) <= ARITH_MAX || - (u8)(nv - ov) <= ARITH_MAX) return 1; + if ((u8)(ov - nv) <= ARITH_MAX || (u8)(nv - ov) <= ARITH_MAX) return 1; } @@ -160,10 +177,15 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { for (i = 0; i < blen / 2; ++i) { - u16 a = old_val >> (16 * i), - b = new_val >> (16 * i); + u16 a = old_val >> (16 * i), b = new_val >> (16 * i); + + if (a != b) { - if (a != b) { ++diffs; ov = a; nv = b; } + ++diffs; + ov = a; + nv = b; + + } } @@ -171,13 +193,12 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { if (diffs == 1) { - if ((u16)(ov - nv) <= ARITH_MAX || - (u16)(nv - ov) <= ARITH_MAX) return 1; + if ((u16)(ov - nv) <= ARITH_MAX || (u16)(nv - ov) <= ARITH_MAX) return 1; - ov = SWAP16(ov); nv = SWAP16(nv); + ov = SWAP16(ov); + nv = SWAP16(nv); - if ((u16)(ov - nv) <= ARITH_MAX || - (u16)(nv - ov) <= ARITH_MAX) return 1; + if ((u16)(ov - nv) <= ARITH_MAX || (u16)(nv - ov) <= ARITH_MAX) return 1; } @@ -186,13 +207,15 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { if (blen == 4) { if ((u32)(old_val - new_val) <= ARITH_MAX || - (u32)(new_val - old_val) <= ARITH_MAX) return 1; + (u32)(new_val - old_val) <= ARITH_MAX) + return 1; new_val = SWAP32(new_val); old_val = SWAP32(old_val); if ((u32)(old_val - new_val) <= ARITH_MAX || - (u32)(new_val - old_val) <= ARITH_MAX) return 1; + (u32)(new_val - old_val) <= ARITH_MAX) + return 1; } @@ -200,8 +223,7 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { } - -/* Last but not least, a similar helper to see if insertion of an +/* Last but not least, a similar helper to see if insertion of an interesting integer is redundant given the insertions done for shorter blen. The last param (check_le) is set if the caller already executed LE insertion for current blen and wants to see @@ -220,8 +242,8 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) { for (j = 0; j < sizeof(interesting_8); ++j) { - u32 tval = (old_val & ~(0xff << (i * 8))) | - (((u8)interesting_8[j]) << (i * 8)); + u32 tval = + (old_val & ~(0xff << (i * 8))) | (((u8)interesting_8[j]) << (i * 8)); if (new_val == tval) return 1; @@ -274,11 +296,10 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) { } - #ifndef IGNORE_FINDS -/* Helper function to compare buffers; returns first and last differing offset. We - use this to find reasonable locations for splicing two files. */ +/* Helper function to compare buffers; returns first and last differing offset. + We use this to find reasonable locations for splicing two files. 
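     As a quick illustration: for "ABCDEF" vs. "AXCDYF" the function reports
     *first == 1 and *last == 4; the splicing code then picks a crossover
     point somewhere between those two offsets.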
*/ static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) { @@ -313,11 +334,11 @@ static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) { u8 fuzz_one_original(char** argv) { s32 len, fd, temp_len, i, j; - u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; - u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt; + u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; + u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt; u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; - u8 ret_val = 1, doing_det = 0; + u8 ret_val = 1, doing_det = 0; u8 a_collect[MAX_AUTO_EXTRA]; u32 a_len = 0; @@ -337,8 +358,10 @@ u8 fuzz_one_original(char** argv) { possibly skip to them at the expense of already-fuzzed or non-favored cases. */ - if (((queue_cur->was_fuzzed > 0 || queue_cur->fuzz_level > 0) || !queue_cur->favored) && - UR(100) < SKIP_TO_NEW_PROB) return 1; + if (((queue_cur->was_fuzzed > 0 || queue_cur->fuzz_level > 0) || + !queue_cur->favored) && + UR(100) < SKIP_TO_NEW_PROB) + return 1; } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { @@ -346,7 +369,8 @@ u8 fuzz_one_original(char** argv) { The odds of skipping stuff are higher for already-fuzzed inputs and lower for never-fuzzed entries. */ - if (queue_cycle > 1 && (queue_cur->fuzz_level == 0 || queue_cur->was_fuzzed)) { + if (queue_cycle > 1 && + (queue_cur->fuzz_level == 0 || queue_cur->was_fuzzed)) { if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; @@ -361,9 +385,11 @@ u8 fuzz_one_original(char** argv) { #endif /* ^IGNORE_FINDS */ if (not_on_tty) { + ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", current_entry, queued_paths, unique_crashes); fflush(stdout); + } /* Map the test case into memory. */ @@ -376,7 +402,8 @@ u8 fuzz_one_original(char** argv) { orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s' with len %d", queue_cur->fname, len); + if (orig_in == MAP_FAILED) + PFATAL("Unable to mmap '%s' with len %d", queue_cur->fname, len); close(fd); @@ -402,14 +429,15 @@ u8 fuzz_one_original(char** argv) { res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); } if (stop_soon || res != crash_mode) { + ++cur_skipped_paths; goto abandon_entry; + } } @@ -422,12 +450,13 @@ u8 fuzz_one_original(char** argv) { u8 res = trim_case(argv, queue_cur, in_buf); - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); if (stop_soon) { + ++cur_skipped_paths; goto abandon_entry; + } /* Don't retry trimming, even if it failed. 
*/ @@ -449,49 +478,56 @@ u8 fuzz_one_original(char** argv) { if (perf_score == 0) goto abandon_entry; if (custom_mutator) { + stage_short = "custom"; stage_name = "custom mutator"; stage_max = len << 3; stage_val_type = STAGE_VAL_NONE; - const u32 max_seed_size = 4096*4096; - u8* mutated_buf = ck_alloc(max_seed_size); + const u32 max_seed_size = 4096 * 4096; + u8* mutated_buf = ck_alloc(max_seed_size); orig_hit_cnt = queued_paths + unique_crashes; - for (stage_cur = 0 ; stage_cur < stage_max ; ++stage_cur) { - size_t orig_size = (size_t) len; - size_t mutated_size = custom_mutator(out_buf, orig_size, mutated_buf, max_seed_size, UR(UINT32_MAX)); + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + size_t orig_size = (size_t)len; + size_t mutated_size = custom_mutator(out_buf, orig_size, mutated_buf, + max_seed_size, UR(UINT32_MAX)); if (mutated_size > 0) { + out_buf = ck_realloc(out_buf, mutated_size); memcpy(out_buf, mutated_buf, mutated_size); - if (common_fuzz_stuff(argv, out_buf, (u32) mutated_size)) { + if (common_fuzz_stuff(argv, out_buf, (u32)mutated_size)) { + goto abandon_entry; + } + } + } ck_free(mutated_buf); new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_CUSTOM_MUTATOR] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_CUSTOM_MUTATOR] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_CUSTOM_MUTATOR] += stage_max; goto abandon_entry; - } + } /* Skip right away if -d is given, if it has not been chosen sufficiently often to warrant the expensive deterministic stage (fuzz_level), or if it has gone through deterministic testing in earlier, resumed runs (passed_det). */ - if (skip_deterministic - || ((!queue_cur->passed_det) - && perf_score < ( - queue_cur->depth * 30 <= havoc_max_mult * 100 - ? queue_cur->depth * 30 - : havoc_max_mult * 100)) - || queue_cur->passed_det) + if (skip_deterministic || + ((!queue_cur->passed_det) && + perf_score < (queue_cur->depth * 30 <= havoc_max_mult * 100 + ? queue_cur->depth * 30 + : havoc_max_mult * 100)) || + queue_cur->passed_det) #ifdef USE_PYTHON goto python_stage; #else @@ -514,17 +550,20 @@ u8 fuzz_one_original(char** argv) { * SIMPLE BITFLIP (+dictionary construction) * *********************************************/ -#define FLIP_BIT(_ar, _b) do { \ - u8* _arf = (u8*)(_ar); \ - u32 _bf = (_b); \ - _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ +#define FLIP_BIT(_ar, _b) \ + do { \ + \ + u8* _arf = (u8*)(_ar); \ + u32 _bf = (_b); \ + _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \ + \ } while (0) /* Single walking bit. */ stage_short = "flip1"; - stage_max = len << 3; - stage_name = "bitflip 1/1"; + stage_max = len << 3; + stage_name = "bitflip 1/1"; stage_val_type = STAGE_VAL_NONE; @@ -556,7 +595,7 @@ u8 fuzz_one_original(char** argv) { We do this here, rather than as a separate stage, because it's a nice way to keep the operation approximately "free" (i.e., no extra execs). - + Empirically, performing the check when flipping the least significant bit is advantageous, compared to doing it at the time of more disruptive changes, where the program flow may be affected in more violent ways. 
@@ -602,7 +641,7 @@ u8 fuzz_one_original(char** argv) { if (cksum != queue_cur->exec_cksum) { - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; ++a_len; } @@ -613,14 +652,14 @@ u8 fuzz_one_original(char** argv) { new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP1] += stage_max; /* Two walking bits. */ - stage_name = "bitflip 2/1"; + stage_name = "bitflip 2/1"; stage_short = "flip2"; - stage_max = (len << 3) - 1; + stage_max = (len << 3) - 1; orig_hit_cnt = new_hit_cnt; @@ -640,14 +679,14 @@ u8 fuzz_one_original(char** argv) { new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP2] += stage_max; /* Four walking bits. */ - stage_name = "bitflip 4/1"; + stage_name = "bitflip 4/1"; stage_short = "flip4"; - stage_max = (len << 3) - 3; + stage_max = (len << 3) - 3; orig_hit_cnt = new_hit_cnt; @@ -671,7 +710,7 @@ u8 fuzz_one_original(char** argv) { new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP4] += stage_max; /* Effector map setup. These macros calculate: @@ -682,27 +721,29 @@ u8 fuzz_one_original(char** argv) { */ -#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) -#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) -#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) -#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) +#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) +#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) +#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) +#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1) /* Initialize effector map for the next step (see comments below). Always flag first and last byte as doing something. */ - eff_map = ck_alloc(EFF_ALEN(len)); + eff_map = ck_alloc(EFF_ALEN(len)); eff_map[0] = 1; if (EFF_APOS(len - 1) != 0) { + eff_map[EFF_APOS(len - 1)] = 1; ++eff_cnt; + } /* Walking byte. */ - stage_name = "bitflip 8/8"; + stage_name = "bitflip 8/8"; stage_short = "flip8"; - stage_max = len; + stage_max = len; orig_hit_cnt = new_hit_cnt; @@ -732,8 +773,10 @@ u8 fuzz_one_original(char** argv) { cksum = ~queue_cur->exec_cksum; if (cksum != queue_cur->exec_cksum) { + eff_map[EFF_APOS(stage_cur)] = 1; ++eff_cnt; + } } @@ -763,17 +806,17 @@ u8 fuzz_one_original(char** argv) { new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP8] += stage_max; /* Two walking bytes. */ if (len < 2) goto skip_bitflip; - stage_name = "bitflip 16/8"; + stage_name = "bitflip 16/8"; stage_short = "flip16"; - stage_cur = 0; - stage_max = len - 1; + stage_cur = 0; + stage_max = len - 1; orig_hit_cnt = new_hit_cnt; @@ -782,8 +825,10 @@ u8 fuzz_one_original(char** argv) { /* Let's consult the effector map... 
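     (eff_map[] holds one flag per 2^EFF_MAP_SCALE2 input bytes, i.e. eight
     bytes with the stock config.h; a zero flag means that none of the bytes
     in that block changed the execution path when flipped during the 8/8
     pass, so the pair is skipped here.)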
*/ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + --stage_max; continue; + } stage_cur_byte = i; @@ -795,22 +840,21 @@ u8 fuzz_one_original(char** argv) { *(u16*)(out_buf + i) ^= 0xFFFF; - } new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP16] += stage_max; if (len < 4) goto skip_bitflip; /* Four walking bytes. */ - stage_name = "bitflip 32/8"; + stage_name = "bitflip 32/8"; stage_short = "flip32"; - stage_cur = 0; - stage_max = len - 3; + stage_cur = 0; + stage_max = len - 3; orig_hit_cnt = new_hit_cnt; @@ -819,8 +863,10 @@ u8 fuzz_one_original(char** argv) { /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + --stage_max; continue; + } stage_cur_byte = i; @@ -836,7 +882,7 @@ u8 fuzz_one_original(char** argv) { new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP32] += stage_max; skip_bitflip: @@ -849,10 +895,10 @@ skip_bitflip: /* 8-bit arithmetics. */ - stage_name = "arith 8/8"; + stage_name = "arith 8/8"; stage_short = "arith8"; - stage_cur = 0; - stage_max = 2 * len * ARITH_MAX; + stage_cur = 0; + stage_max = 2 * len * ARITH_MAX; stage_val_type = STAGE_VAL_LE; @@ -865,8 +911,10 @@ skip_bitflip: /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)]) { + stage_max -= 2 * ARITH_MAX; continue; + } stage_cur_byte = i; @@ -886,9 +934,11 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; - r = orig ^ (orig - j); + r = orig ^ (orig - j); if (!could_be_bitflip(r)) { @@ -898,7 +948,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; out_buf[i] = orig; @@ -908,17 +960,17 @@ skip_bitflip: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_ARITH8] += stage_max; /* 16-bit arithmetics, both endians. */ if (len < 2) goto skip_arith; - stage_name = "arith 16/8"; + stage_name = "arith 16/8"; stage_short = "arith16"; - stage_cur = 0; - stage_max = 4 * (len - 1) * ARITH_MAX; + stage_cur = 0; + stage_max = 4 * (len - 1) * ARITH_MAX; orig_hit_cnt = new_hit_cnt; @@ -929,25 +981,26 @@ skip_bitflip: /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + stage_max -= 4 * ARITH_MAX; continue; + } stage_cur_byte = i; for (j = 1; j <= ARITH_MAX; ++j) { - u16 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), + u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), r3 = orig ^ SWAP16(SWAP16(orig) + j), r4 = orig ^ SWAP16(SWAP16(orig) - j); /* Try little endian addition and subtraction first. Do it only - if the operation would affect more than one byte (hence the + if the operation would affect more than one byte (hence the & 0xff overflow checks) and if it couldn't be a product of a bitflip. 
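     As an example: with orig == 0x01ff and j == 1, (orig & 0xff) + j
     overflows the low byte, so the 16-bit addition (0x01ff -> 0x0200) is
     attempted; with orig == 0x0010 the same +1 only touches the low byte
     and was already covered by the 8-bit pass.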
*/ - stage_val_type = STAGE_VAL_LE; + stage_val_type = STAGE_VAL_LE; if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { @@ -956,8 +1009,10 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - - } else --stage_max; + + } else + + --stage_max; if ((orig & 0xff) < j && !could_be_bitflip(r2)) { @@ -967,13 +1022,14 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; /* Big endian comes next. Same deal. */ stage_val_type = STAGE_VAL_BE; - if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { stage_cur_val = j; @@ -982,7 +1038,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; if ((orig >> 8) < j && !could_be_bitflip(r4)) { @@ -992,7 +1050,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; *(u16*)(out_buf + i) = orig; @@ -1002,17 +1062,17 @@ skip_bitflip: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_ARITH16] += stage_max; /* 32-bit arithmetics, both endians. */ if (len < 4) goto skip_arith; - stage_name = "arith 32/8"; + stage_name = "arith 32/8"; stage_short = "arith32"; - stage_cur = 0; - stage_max = 4 * (len - 3) * ARITH_MAX; + stage_cur = 0; + stage_max = 4 * (len - 3) * ARITH_MAX; orig_hit_cnt = new_hit_cnt; @@ -1024,16 +1084,17 @@ skip_bitflip: if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + stage_max -= 4 * ARITH_MAX; continue; + } stage_cur_byte = i; for (j = 1; j <= ARITH_MAX; ++j) { - u32 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), + u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), r3 = orig ^ SWAP32(SWAP32(orig) + j), r4 = orig ^ SWAP32(SWAP32(orig) - j); @@ -1050,7 +1111,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { @@ -1060,7 +1123,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; /* Big endian next. */ @@ -1074,7 +1139,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { @@ -1084,7 +1151,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; *(u32*)(out_buf + i) = orig; @@ -1094,7 +1163,7 @@ skip_bitflip: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_ARITH32] += stage_max; skip_arith: @@ -1103,10 +1172,10 @@ skip_arith: * INTERESTING VALUES * **********************/ - stage_name = "interest 8/8"; + stage_name = "interest 8/8"; stage_short = "int8"; - stage_cur = 0; - stage_max = len * sizeof(interesting_8); + stage_cur = 0; + stage_max = len * sizeof(interesting_8); stage_val_type = STAGE_VAL_LE; @@ -1121,8 +1190,10 @@ skip_arith: /* Let's consult the effector map... 
*/ if (!eff_map[EFF_APOS(i)]) { + stage_max -= sizeof(interesting_8); continue; + } stage_cur_byte = i; @@ -1133,8 +1204,10 @@ skip_arith: if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || could_be_arith(orig, (u8)interesting_8[j], 1)) { + --stage_max; continue; + } stage_cur_val = interesting_8[j]; @@ -1151,17 +1224,17 @@ skip_arith: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_INTEREST8] += stage_max; /* Setting 16-bit integers, both endians. */ if (no_arith || len < 2) goto skip_interest; - stage_name = "interest 16/8"; + stage_name = "interest 16/8"; stage_short = "int16"; - stage_cur = 0; - stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); + stage_cur = 0; + stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); orig_hit_cnt = new_hit_cnt; @@ -1172,8 +1245,10 @@ skip_arith: /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + stage_max -= sizeof(interesting_16); continue; + } stage_cur_byte = i; @@ -1196,7 +1271,9 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && @@ -1209,7 +1286,9 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; } @@ -1219,17 +1298,17 @@ skip_arith: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_INTEREST16] += stage_max; if (len < 4) goto skip_interest; /* Setting 32-bit integers, both endians. */ - stage_name = "interest 32/8"; + stage_name = "interest 32/8"; stage_short = "int32"; - stage_cur = 0; - stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); + stage_cur = 0; + stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); orig_hit_cnt = new_hit_cnt; @@ -1241,8 +1320,10 @@ skip_arith: if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + stage_max -= sizeof(interesting_32) >> 1; continue; + } stage_cur_byte = i; @@ -1265,7 +1346,9 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && @@ -1278,7 +1361,9 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; } @@ -1288,7 +1373,7 @@ skip_arith: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_INTEREST32] += stage_max; skip_interest: @@ -1301,10 +1386,10 @@ skip_interest: /* Overwrite with user-supplied extras. 
*/ - stage_name = "user extras (over)"; + stage_name = "user extras (over)"; stage_short = "ext_UO"; - stage_cur = 0; - stage_max = extras_cnt * len; + stage_cur = 0; + stage_max = extras_cnt * len; stage_val_type = STAGE_VAL_NONE; @@ -1354,15 +1439,15 @@ skip_interest: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_EXTRAS_UO] += stage_max; /* Insertion of user-supplied extras. */ - stage_name = "user extras (insert)"; + stage_name = "user extras (insert)"; stage_short = "ext_UI"; - stage_cur = 0; - stage_max = extras_cnt * len; + stage_cur = 0; + stage_max = extras_cnt * len; orig_hit_cnt = new_hit_cnt; @@ -1375,8 +1460,10 @@ skip_interest: for (j = 0; j < extras_cnt; ++j) { if (len + extras[j].len > MAX_FILE) { - --stage_max; + + --stage_max; continue; + } /* Insert token */ @@ -1386,8 +1473,10 @@ skip_interest: memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { + ck_free(ex_tmp); goto abandon_entry; + } ++stage_cur; @@ -1403,17 +1492,17 @@ skip_interest: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_EXTRAS_UI] += stage_max; skip_user_extras: if (!a_extras_cnt) goto skip_extras; - stage_name = "auto extras (over)"; + stage_name = "auto extras (over)"; stage_short = "ext_AO"; - stage_cur = 0; - stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; + stage_cur = 0; + stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; stage_val_type = STAGE_VAL_NONE; @@ -1431,7 +1520,8 @@ skip_user_extras: if (a_extras[j].len > len - i || !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { + !memchr(eff_map + EFF_APOS(i), 1, + EFF_SPAN_ALEN(i, a_extras[j].len))) { --stage_max; continue; @@ -1454,7 +1544,7 @@ skip_user_extras: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_EXTRAS_AO] += stage_max; skip_extras: @@ -1473,36 +1563,51 @@ python_stage: if (!py_module) goto havoc_stage; - stage_name = "python"; + stage_name = "python"; stage_short = "python"; - stage_max = HAVOC_CYCLES * perf_score / havoc_div / 100; + stage_max = HAVOC_CYCLES * perf_score / havoc_div / 100; if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; orig_hit_cnt = queued_paths + unique_crashes; - char* retbuf = NULL; + char* retbuf = NULL; size_t retlen = 0; for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + struct queue_entry* target; - u32 tid; - u8* new_buf; + u32 tid; + u8* new_buf; -retry_external_pick: + retry_external_pick: /* Pick a random other queue entry for passing to external API */ - do { tid = UR(queued_paths); } while (tid == current_entry && queued_paths > 1); + do { + + tid = UR(queued_paths); + + } while (tid == current_entry && queued_paths > 1); target = queue; - while (tid >= 100) { target = target->next_100; tid -= 100; } - while (tid--) target = target->next; + while (tid >= 100) { + + target = target->next_100; + tid -= 100; + + } + + while (tid--) + target = target->next; /* Make sure that the target has a reasonable length. 
*/ - while (target && (target->len < 2 || target == queue_cur) && queued_paths > 1) { + while (target && (target->len < 2 || target == queue_cur) && + queued_paths > 1) { + target = target->next; ++splicing_with; + } if (!target) goto retry_external_pick; @@ -1519,12 +1624,14 @@ retry_external_pick: ck_free(new_buf); if (retbuf) { - if (!retlen) - goto abandon_entry; + + if (!retlen) goto abandon_entry; if (common_fuzz_stuff(argv, retbuf, retlen)) { + free(retbuf); goto abandon_entry; + } /* Reset retbuf/retlen */ @@ -1536,26 +1643,35 @@ retry_external_pick: permitting. */ if (queued_paths != havoc_queued) { + if (perf_score <= havoc_max_mult * 100) { - stage_max *= 2; + + stage_max *= 2; perf_score *= 2; + } havoc_queued = queued_paths; + } + } + } new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_PYTHON] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_PYTHON] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_PYTHON] += stage_max; if (python_only) { + /* Skip other stages */ ret_val = 0; goto abandon_entry; + } + #endif /**************** @@ -1571,10 +1687,10 @@ havoc_stage: if (!splice_cycle) { - stage_name = "havoc"; + stage_name = "havoc"; stage_short = "havoc"; - stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; + stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score / + havoc_div / 100; } else { @@ -1583,9 +1699,9 @@ havoc_stage: perf_score = orig_perf; sprintf(tmp, "splice %u", splice_cycle); - stage_name = tmp; + stage_name = tmp; stage_short = "splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; + stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; } @@ -1605,7 +1721,7 @@ havoc_stage: u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); stage_cur_val = use_stacking; - + for (i = 0; i < use_stacking; ++i) { switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) { @@ -1617,7 +1733,7 @@ havoc_stage: FLIP_BIT(out_buf, UR(temp_len << 3)); break; - case 1: + case 1: /* Set byte to interesting value. 
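     (interesting_8[] comes from config.h; in the stock build it holds
     boundary values such as -128, -1, 0, 1, 16, 32, 64, 100 and 127.)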
*/ @@ -1633,12 +1749,12 @@ havoc_stage: if (UR(2)) { *(u16*)(out_buf + UR(temp_len - 1)) = - interesting_16[UR(sizeof(interesting_16) >> 1)]; + interesting_16[UR(sizeof(interesting_16) >> 1)]; } else { - *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16( - interesting_16[UR(sizeof(interesting_16) >> 1)]); + *(u16*)(out_buf + UR(temp_len - 1)) = + SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]); } @@ -1651,14 +1767,14 @@ havoc_stage: if (temp_len < 4) break; if (UR(2)) { - + *(u32*)(out_buf + UR(temp_len - 3)) = - interesting_32[UR(sizeof(interesting_32) >> 2)]; + interesting_32[UR(sizeof(interesting_32) >> 2)]; } else { - *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32( - interesting_32[UR(sizeof(interesting_32) >> 2)]); + *(u32*)(out_buf + UR(temp_len - 3)) = + SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]); } @@ -1696,7 +1812,7 @@ havoc_stage: u16 num = 1 + UR(ARITH_MAX); *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); + SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); } @@ -1720,7 +1836,7 @@ havoc_stage: u16 num = 1 + UR(ARITH_MAX); *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); + SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); } @@ -1744,7 +1860,7 @@ havoc_stage: u32 num = 1 + UR(ARITH_MAX); *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); + SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); } @@ -1768,7 +1884,7 @@ havoc_stage: u32 num = 1 + UR(ARITH_MAX); *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); + SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); } @@ -1785,28 +1901,28 @@ havoc_stage: case 11 ... 12: { - /* Delete bytes. We're making this a bit more likely - than insertion (the next option) in hopes of keeping - files reasonably small. */ + /* Delete bytes. We're making this a bit more likely + than insertion (the next option) in hopes of keeping + files reasonably small. */ - u32 del_from, del_len; + u32 del_from, del_len; - if (temp_len < 2) break; + if (temp_len < 2) break; - /* Don't delete too much. */ + /* Don't delete too much. */ - del_len = choose_block_len(temp_len - 1); + del_len = choose_block_len(temp_len - 1); - del_from = UR(temp_len - del_len + 1); + del_from = UR(temp_len - del_len + 1); - memmove(out_buf + del_from, out_buf + del_from + del_len, - temp_len - del_from - del_len); + memmove(out_buf + del_from, out_buf + del_from + del_len, + temp_len - del_from - del_len); - temp_len -= del_len; + temp_len -= del_len; - break; + break; - } + } case 13: @@ -1820,7 +1936,7 @@ havoc_stage: if (actually_clone) { - clone_len = choose_block_len(temp_len); + clone_len = choose_block_len(temp_len); clone_from = UR(temp_len - clone_len + 1); } else { @@ -1830,7 +1946,7 @@ havoc_stage: } - clone_to = UR(temp_len); + clone_to = UR(temp_len); new_buf = ck_alloc_nozero(temp_len + clone_len); @@ -1860,128 +1976,129 @@ havoc_stage: case 14: { - /* Overwrite bytes with a randomly selected chunk (75%) or fixed - bytes (25%). */ + /* Overwrite bytes with a randomly selected chunk (75%) or fixed + bytes (25%). 
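     (UR(4) below is non-zero three times out of four, which is where the
     75%/25% split comes from.)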
*/ - u32 copy_from, copy_to, copy_len; + u32 copy_from, copy_to, copy_len; - if (temp_len < 2) break; + if (temp_len < 2) break; - copy_len = choose_block_len(temp_len - 1); + copy_len = choose_block_len(temp_len - 1); - copy_from = UR(temp_len - copy_len + 1); - copy_to = UR(temp_len - copy_len + 1); + copy_from = UR(temp_len - copy_len + 1); + copy_to = UR(temp_len - copy_len + 1); - if (UR(4)) { + if (UR(4)) { - if (copy_from != copy_to) - memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + if (copy_from != copy_to) + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); - } else memset(out_buf + copy_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); + } else - break; + memset(out_buf + copy_to, UR(2) ? UR(256) : out_buf[UR(temp_len)], + copy_len); - } + break; - /* Values 15 and 16 can be selected only if there are any extras - present in the dictionaries. */ + } + + /* Values 15 and 16 can be selected only if there are any extras + present in the dictionaries. */ case 15: { - /* Overwrite bytes with an extra. */ + /* Overwrite bytes with an extra. */ - if (!extras_cnt || (a_extras_cnt && UR(2))) { + if (!extras_cnt || (a_extras_cnt && UR(2))) { - /* No user-specified extras or odds in our favor. Let's use an - auto-detected one. */ + /* No user-specified extras or odds in our favor. Let's use an + auto-detected one. */ - u32 use_extra = UR(a_extras_cnt); - u32 extra_len = a_extras[use_extra].len; - u32 insert_at; + u32 use_extra = UR(a_extras_cnt); + u32 extra_len = a_extras[use_extra].len; + u32 insert_at; - if (extra_len > temp_len) break; + if (extra_len > temp_len) break; - insert_at = UR(temp_len - extra_len + 1); - memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len); + insert_at = UR(temp_len - extra_len + 1); + memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len); - } else { + } else { - /* No auto extras or odds in our favor. Use the dictionary. */ + /* No auto extras or odds in our favor. Use the dictionary. */ - u32 use_extra = UR(extras_cnt); - u32 extra_len = extras[use_extra].len; - u32 insert_at; + u32 use_extra = UR(extras_cnt); + u32 extra_len = extras[use_extra].len; + u32 insert_at; - if (extra_len > temp_len) break; + if (extra_len > temp_len) break; - insert_at = UR(temp_len - extra_len + 1); - memcpy(out_buf + insert_at, extras[use_extra].data, extra_len); + insert_at = UR(temp_len - extra_len + 1); + memcpy(out_buf + insert_at, extras[use_extra].data, extra_len); - } + } - break; + break; - } + } case 16: { - u32 use_extra, extra_len, insert_at = UR(temp_len + 1); - u8* new_buf; + u32 use_extra, extra_len, insert_at = UR(temp_len + 1); + u8* new_buf; - /* Insert an extra. Do the same dice-rolling stuff as for the - previous case. */ + /* Insert an extra. Do the same dice-rolling stuff as for the + previous case. 
*/ - if (!extras_cnt || (a_extras_cnt && UR(2))) { + if (!extras_cnt || (a_extras_cnt && UR(2))) { - use_extra = UR(a_extras_cnt); - extra_len = a_extras[use_extra].len; + use_extra = UR(a_extras_cnt); + extra_len = a_extras[use_extra].len; - if (temp_len + extra_len >= MAX_FILE) break; + if (temp_len + extra_len >= MAX_FILE) break; - new_buf = ck_alloc_nozero(temp_len + extra_len); + new_buf = ck_alloc_nozero(temp_len + extra_len); - /* Head */ - memcpy(new_buf, out_buf, insert_at); + /* Head */ + memcpy(new_buf, out_buf, insert_at); - /* Inserted part */ - memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len); + /* Inserted part */ + memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len); - } else { + } else { - use_extra = UR(extras_cnt); - extra_len = extras[use_extra].len; + use_extra = UR(extras_cnt); + extra_len = extras[use_extra].len; - if (temp_len + extra_len >= MAX_FILE) break; + if (temp_len + extra_len >= MAX_FILE) break; - new_buf = ck_alloc_nozero(temp_len + extra_len); + new_buf = ck_alloc_nozero(temp_len + extra_len); - /* Head */ - memcpy(new_buf, out_buf, insert_at); + /* Head */ + memcpy(new_buf, out_buf, insert_at); - /* Inserted part */ - memcpy(new_buf + insert_at, extras[use_extra].data, extra_len); + /* Inserted part */ + memcpy(new_buf + insert_at, extras[use_extra].data, extra_len); - } + } - /* Tail */ - memcpy(new_buf + insert_at + extra_len, out_buf + insert_at, - temp_len - insert_at); + /* Tail */ + memcpy(new_buf + insert_at + extra_len, out_buf + insert_at, + temp_len - insert_at); - ck_free(out_buf); - out_buf = new_buf; - temp_len += extra_len; + ck_free(out_buf); + out_buf = new_buf; + temp_len += extra_len; - break; + break; - } + } } } - if (common_fuzz_stuff(argv, out_buf, temp_len)) - goto abandon_entry; + if (common_fuzz_stuff(argv, out_buf, temp_len)) goto abandon_entry; /* out_buf might have been mangled a bit, so let's restore it to its original size and shape. */ @@ -1996,8 +2113,10 @@ havoc_stage: if (queued_paths != havoc_queued) { if (perf_score <= havoc_max_mult * 100) { - stage_max *= 2; + + stage_max *= 2; perf_score *= 2; + } havoc_queued = queued_paths; @@ -2009,11 +2128,15 @@ havoc_stage: new_hit_cnt = queued_paths + unique_crashes; if (!splice_cycle) { - stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; + + stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_HAVOC] += stage_max; + } else { - stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; + + stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_SPLICE] += stage_max; + } #ifndef IGNORE_FINDS @@ -2029,38 +2152,53 @@ havoc_stage: retry_splicing: - if (use_splicing && splice_cycle++ < SPLICE_CYCLES && - queued_paths > 1 && queue_cur->len > 1) { + if (use_splicing && splice_cycle++ < SPLICE_CYCLES && queued_paths > 1 && + queue_cur->len > 1) { struct queue_entry* target; - u32 tid, split_at; - u8* new_buf; - s32 f_diff, l_diff; + u32 tid, split_at; + u8* new_buf; + s32 f_diff, l_diff; /* First of all, if we've modified in_buf for havoc, let's clean that up... */ if (in_buf != orig_in) { + ck_free(in_buf); in_buf = orig_in; len = queue_cur->len; + } /* Pick a random queue entry and seek to it. Don't splice with yourself. 
*/ - do { tid = UR(queued_paths); } while (tid == current_entry); + do { + + tid = UR(queued_paths); + + } while (tid == current_entry); splicing_with = tid; target = queue; - while (tid >= 100) { target = target->next_100; tid -= 100; } - while (tid--) target = target->next; + while (tid >= 100) { + + target = target->next_100; + tid -= 100; + + } + + while (tid--) + target = target->next; /* Make sure that the target has a reasonable length. */ while (target && (target->len < 2 || target == queue_cur)) { + target = target->next; ++splicing_with; + } if (!target) goto retry_splicing; @@ -2084,8 +2222,10 @@ retry_splicing: locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { + ck_free(new_buf); goto retry_splicing; + } /* Split somewhere between the first and last differing byte. */ @@ -2102,11 +2242,11 @@ retry_splicing: out_buf = ck_alloc_nozero(len); memcpy(out_buf, in_buf, len); -#ifdef USE_PYTHON +# ifdef USE_PYTHON goto python_stage; -#else +# else goto havoc_stage; -#endif +# endif } @@ -2121,10 +2261,13 @@ abandon_entry: /* Update pending_not_fuzzed count if we made it through the calibration cycle and have not seen this entry before. */ - if (!stop_soon && !queue_cur->cal_failed && (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) { + if (!stop_soon && !queue_cur->cal_failed && + (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) { + --pending_not_fuzzed; queue_cur->was_fuzzed = 1; if (queue_cur->favored) --pending_favored; + } ++queue_cur->fuzz_level; @@ -2144,3576 +2287,3738 @@ abandon_entry: /* MOpt mode */ u8 pilot_fuzzing(char** argv) { - s32 len, fd, temp_len, i, j; - u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; - u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv; - u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; + s32 len, fd, temp_len, i, j; + u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; + u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv; + u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; - u8 ret_val = 1, doing_det = 0; + u8 ret_val = 1, doing_det = 0; - u8 a_collect[MAX_AUTO_EXTRA]; - u32 a_len = 0; + u8 a_collect[MAX_AUTO_EXTRA]; + u32 a_len = 0; #ifdef IGNORE_FINDS - /* In IGNORE_FINDS mode, skip any entries that weren't in the - initial data set. */ + /* In IGNORE_FINDS mode, skip any entries that weren't in the + initial data set. */ - if (queue_cur->depth > 1) return 1; + if (queue_cur->depth > 1) return 1; #else - if (pending_favored) { + if (pending_favored) { - /* If we have any favored, non-fuzzed new arrivals in the queue, - possibly skip to them at the expense of already-fuzzed or non-favored - cases. */ + /* If we have any favored, non-fuzzed new arrivals in the queue, + possibly skip to them at the expense of already-fuzzed or non-favored + cases. */ - if ((queue_cur->was_fuzzed || !queue_cur->favored) && - UR(100) < SKIP_TO_NEW_PROB) return 1; + if ((queue_cur->was_fuzzed || !queue_cur->favored) && + UR(100) < SKIP_TO_NEW_PROB) + return 1; - } - else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { + } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { - /* Otherwise, still possibly skip non-favored cases, albeit less often. - The odds of skipping stuff are higher for already-fuzzed inputs and - lower for never-fuzzed entries. */ + /* Otherwise, still possibly skip non-favored cases, albeit less often. 
+ The odds of skipping stuff are higher for already-fuzzed inputs and + lower for never-fuzzed entries. */ - if (queue_cycle > 1 && !queue_cur->was_fuzzed) { + if (queue_cycle > 1 && !queue_cur->was_fuzzed) { - if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; + if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; - } - else { + } else { - if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; + if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; - } + } - } + } #endif /* ^IGNORE_FINDS */ - if (not_on_tty) { - ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", - current_entry, queued_paths, unique_crashes); - fflush(stdout); - } + if (not_on_tty) { - /* Map the test case into memory. */ + ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", + current_entry, queued_paths, unique_crashes); + fflush(stdout); - fd = open(queue_cur->fname, O_RDONLY); + } - if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); + /* Map the test case into memory. */ - len = queue_cur->len; + fd = open(queue_cur->fname, O_RDONLY); - orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); - if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); + len = queue_cur->len; - close(fd); + orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every - single byte anyway, so it wouldn't give us any performance or memory usage - benefits. */ + if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); - out_buf = ck_alloc_nozero(len); + close(fd); - subseq_tmouts = 0; + /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every + single byte anyway, so it wouldn't give us any performance or memory usage + benefits. */ - cur_depth = queue_cur->depth; + out_buf = ck_alloc_nozero(len); - /******************************************* - * CALIBRATION (only if failed earlier on) * - *******************************************/ + subseq_tmouts = 0; - if (queue_cur->cal_failed) { + cur_depth = queue_cur->depth; - u8 res = FAULT_TMOUT; + /******************************************* + * CALIBRATION (only if failed earlier on) * + *******************************************/ - if (queue_cur->cal_failed < CAL_CHANCES) { + if (queue_cur->cal_failed) { - res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); + u8 res = FAULT_TMOUT; - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + if (queue_cur->cal_failed < CAL_CHANCES) { - } + res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); - if (stop_soon || res != crash_mode) { - ++cur_skipped_paths; - goto abandon_entry; - } + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); - } + } - /************ - * TRIMMING * - ************/ + if (stop_soon || res != crash_mode) { - if (!dumb_mode && !queue_cur->trim_done) { + ++cur_skipped_paths; + goto abandon_entry; - u8 res = trim_case(argv, queue_cur, in_buf); + } - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + } - if (stop_soon) { - ++cur_skipped_paths; - goto abandon_entry; - } + /************ + * TRIMMING * + ************/ - /* Don't retry trimming, even if it failed. 
*/ + if (!dumb_mode && !queue_cur->trim_done) { - queue_cur->trim_done = 1; + u8 res = trim_case(argv, queue_cur, in_buf); - len = queue_cur->len; + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); - } + if (stop_soon) { - memcpy(out_buf, in_buf, len); + ++cur_skipped_paths; + goto abandon_entry; - /********************* - * PERFORMANCE SCORE * - *********************/ + } - orig_perf = perf_score = calculate_score(queue_cur); + /* Don't retry trimming, even if it failed. */ - /* Skip right away if -d is given, if we have done deterministic fuzzing on - this entry ourselves (was_fuzzed), or if it has gone through deterministic - testing in earlier, resumed runs (passed_det). */ + queue_cur->trim_done = 1; - if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) - goto havoc_stage; + len = queue_cur->len; - /* Skip deterministic fuzzing if exec path checksum puts this out of scope - for this master instance. */ + } - if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) - goto havoc_stage; + memcpy(out_buf, in_buf, len); + /********************* + * PERFORMANCE SCORE * + *********************/ - cur_ms_lv = get_cur_time(); - if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) || - (last_crash_time != 0 && cur_ms_lv - last_crash_time < limit_time_puppet) || last_path_time == 0))) - { - key_puppet = 1; - goto pacemaker_fuzzing; - } + orig_perf = perf_score = calculate_score(queue_cur); - doing_det = 1; + /* Skip right away if -d is given, if we have done deterministic fuzzing on + this entry ourselves (was_fuzzed), or if it has gone through deterministic + testing in earlier, resumed runs (passed_det). */ - /********************************************* - * SIMPLE BITFLIP (+dictionary construction) * - *********************************************/ + if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) + goto havoc_stage; -#define FLIP_BIT(_ar, _b) do { \ - u8* _arf = (u8*)(_ar); \ - u32 _bf = (_b); \ - _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ - } while (0) + /* Skip deterministic fuzzing if exec path checksum puts this out of scope + for this master instance. */ + + if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) + goto havoc_stage; + + cur_ms_lv = get_cur_time(); + if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) || + (last_crash_time != 0 && + cur_ms_lv - last_crash_time < limit_time_puppet) || + last_path_time == 0))) { + + key_puppet = 1; + goto pacemaker_fuzzing; + + } + + doing_det = 1; + + /********************************************* + * SIMPLE BITFLIP (+dictionary construction) * + *********************************************/ + +#define FLIP_BIT(_ar, _b) \ + do { \ + \ + u8* _arf = (u8*)(_ar); \ + u32 _bf = (_b); \ + _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \ + \ + } while (0) + + /* Single walking bit. */ + + stage_short = "flip1"; + stage_max = len << 3; + stage_name = "bitflip 1/1"; + + stage_val_type = STAGE_VAL_NONE; + + orig_hit_cnt = queued_paths + unique_crashes; + + prev_cksum = queue_cur->exec_cksum; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + stage_cur_byte = stage_cur >> 3; + + FLIP_BIT(out_buf, stage_cur); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + FLIP_BIT(out_buf, stage_cur); + + /* While flipping the least significant bit in every byte, pull of an extra + trick to detect possible syntax tokens. 
In essence, the idea is that if + you have a binary blob like this: + + xxxxxxxxIHDRxxxxxxxx + + ...and changing the leading and trailing bytes causes variable or no + changes in program flow, but touching any character in the "IHDR" string + always produces the same, distinctive path, it's highly likely that + "IHDR" is an atomically-checked magic value of special significance to + the fuzzed format. + + We do this here, rather than as a separate stage, because it's a nice + way to keep the operation approximately "free" (i.e., no extra execs). + + Empirically, performing the check when flipping the least significant bit + is advantageous, compared to doing it at the time of more disruptive + changes, where the program flow may be affected in more violent ways. + + The caveat is that we won't generate dictionaries in the -d mode or -S + mode - but that's probably a fair trade-off. + + This won't work particularly well with paths that exhibit variable + behavior, but fails gracefully, so we'll carry out the checks anyway. + + */ + + if (!dumb_mode && (stage_cur & 7) == 7) { + + u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + + if (stage_cur == stage_max - 1 && cksum == prev_cksum) { + + /* If at end of file and we are still collecting a string, grab the + final character and force output. */ + + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + ++a_len; + + if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) + maybe_add_auto(a_collect, a_len); + + } else if (cksum != prev_cksum) { + + /* Otherwise, if the checksum has changed, see if we have something + worthwhile queued up, and collect that if the answer is yes. */ + + if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) + maybe_add_auto(a_collect, a_len); + + a_len = 0; + prev_cksum = cksum; + + } + + /* Continue collecting string, but only if the bit flip actually made + any difference - we don't want no-op tokens. */ + + if (cksum != queue_cur->exec_cksum) { + + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + ++a_len; + + } + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP1] += stage_max; + + /* Two walking bits. */ + + stage_name = "bitflip 2/1"; + stage_short = "flip2"; + stage_max = (len << 3) - 1; + + orig_hit_cnt = new_hit_cnt; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + stage_cur_byte = stage_cur >> 3; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP2] += stage_max; + + /* Four walking bits. 
*/ + + stage_name = "bitflip 4/1"; + stage_short = "flip4"; + stage_max = (len << 3) - 3; + + orig_hit_cnt = new_hit_cnt; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + stage_cur_byte = stage_cur >> 3; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP4] += stage_max; + + /* Effector map setup. These macros calculate: + + EFF_APOS - position of a particular file offset in the map. + EFF_ALEN - length of a map with a particular number of bytes. + EFF_SPAN_ALEN - map span for a sequence of bytes. + + */ + +#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) +#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) +#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) +#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1) + + /* Initialize effector map for the next step (see comments below). Always + flag first and last byte as doing something. */ + + eff_map = ck_alloc(EFF_ALEN(len)); + eff_map[0] = 1; + + if (EFF_APOS(len - 1) != 0) { + + eff_map[EFF_APOS(len - 1)] = 1; + ++eff_cnt; + + } + + /* Walking byte. */ + + stage_name = "bitflip 8/8"; + stage_short = "flip8"; + stage_max = len; + + orig_hit_cnt = new_hit_cnt; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + stage_cur_byte = stage_cur; + + out_buf[stage_cur] ^= 0xFF; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + /* We also use this stage to pull off a simple trick: we identify + bytes that seem to have no effect on the current execution path + even when fully flipped - and we skip them during more expensive + deterministic stages, such as arithmetics or known ints. */ + + if (!eff_map[EFF_APOS(stage_cur)]) { + + u32 cksum; + + /* If in dumb mode or if the file is very short, just flag everything + without wasting time on checksums. */ + + if (!dumb_mode && len >= EFF_MIN_LEN) + cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + else + cksum = ~queue_cur->exec_cksum; + + if (cksum != queue_cur->exec_cksum) { + + eff_map[EFF_APOS(stage_cur)] = 1; + ++eff_cnt; + + } + + } + + out_buf[stage_cur] ^= 0xFF; + + } + + /* If the effector map is more than EFF_MAX_PERC dense, just flag the + whole thing as worth fuzzing, since we wouldn't be saving much time + anyway. */ + + if (eff_cnt != EFF_ALEN(len) && + eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { + + memset(eff_map, 1, EFF_ALEN(len)); + + blocks_eff_select += EFF_ALEN(len); + + } else { + + blocks_eff_select += eff_cnt; + + } + + blocks_eff_total += EFF_ALEN(len); + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP8] += stage_max; + + /* Two walking bytes. */ + + if (len < 2) goto skip_bitflip; + + stage_name = "bitflip 16/8"; + stage_short = "flip16"; + stage_cur = 0; + stage_max = len - 1; + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 1; ++i) { + + /* Let's consult the effector map... 
*/ + + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + + --stage_max; + continue; + + } + + stage_cur_byte = i; + + *(u16*)(out_buf + i) ^= 0xFFFF; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + *(u16*)(out_buf + i) ^= 0xFFFF; + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP16] += stage_max; + + if (len < 4) goto skip_bitflip; + + /* Four walking bytes. */ + + stage_name = "bitflip 32/8"; + stage_short = "flip32"; + stage_cur = 0; + stage_max = len - 3; + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 3; ++i) { + + /* Let's consult the effector map... */ + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + + --stage_max; + continue; + + } + + stage_cur_byte = i; + + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP32] += stage_max; + +skip_bitflip: + + if (no_arith) goto skip_arith; - /* Single walking bit. */ + /********************** + * ARITHMETIC INC/DEC * + **********************/ + + /* 8-bit arithmetics. */ + + stage_name = "arith 8/8"; + stage_short = "arith8"; + stage_cur = 0; + stage_max = 2 * len * ARITH_MAX; - stage_short = "flip1"; - stage_max = len << 3; - stage_name = "bitflip 1/1"; + stage_val_type = STAGE_VAL_LE; + orig_hit_cnt = new_hit_cnt; + for (i = 0; i < len; ++i) { + u8 orig = out_buf[i]; - stage_val_type = STAGE_VAL_NONE; + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)]) { - orig_hit_cnt = queued_paths + unique_crashes; + stage_max -= 2 * ARITH_MAX; + continue; + + } + + stage_cur_byte = i; + + for (j = 1; j <= ARITH_MAX; ++j) { + + u8 r = orig ^ (orig + j); + + /* Do arithmetic operations only if the result couldn't be a product + of a bitflip. */ - prev_cksum = queue_cur->exec_cksum; + if (!could_be_bitflip(r)) { - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + stage_cur_val = j; + out_buf[i] = orig + j; - stage_cur_byte = stage_cur >> 3; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - FLIP_BIT(out_buf, stage_cur); + } else - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + --stage_max; - FLIP_BIT(out_buf, stage_cur); + r = orig ^ (orig - j); - /* While flipping the least significant bit in every byte, pull of an extra - trick to detect possible syntax tokens. In essence, the idea is that if - you have a binary blob like this: + if (!could_be_bitflip(r)) { - xxxxxxxxIHDRxxxxxxxx + stage_cur_val = -j; + out_buf[i] = orig - j; - ...and changing the leading and trailing bytes causes variable or no - changes in program flow, but touching any character in the "IHDR" string - always produces the same, distinctive path, it's highly likely that - "IHDR" is an atomically-checked magic value of special significance to - the fuzzed format. + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - We do this here, rather than as a separate stage, because it's a nice - way to keep the operation approximately "free" (i.e., no extra execs). 
+ } else - Empirically, performing the check when flipping the least significant bit - is advantageous, compared to doing it at the time of more disruptive - changes, where the program flow may be affected in more violent ways. + --stage_max; - The caveat is that we won't generate dictionaries in the -d mode or -S - mode - but that's probably a fair trade-off. + out_buf[i] = orig; - This won't work particularly well with paths that exhibit variable - behavior, but fails gracefully, so we'll carry out the checks anyway. + } - */ + } - if (!dumb_mode && (stage_cur & 7) == 7) { + new_hit_cnt = queued_paths + unique_crashes; - u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH8] += stage_max; - if (stage_cur == stage_max - 1 && cksum == prev_cksum) { + /* 16-bit arithmetics, both endians. */ - /* If at end of file and we are still collecting a string, grab the - final character and force output. */ + if (len < 2) goto skip_arith; - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; + stage_name = "arith 16/8"; + stage_short = "arith16"; + stage_cur = 0; + stage_max = 4 * (len - 1) * ARITH_MAX; - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); + orig_hit_cnt = new_hit_cnt; - } - else if (cksum != prev_cksum) { + for (i = 0; i < len - 1; ++i) { - /* Otherwise, if the checksum has changed, see if we have something - worthwhile queued up, and collect that if the answer is yes. */ + u16 orig = *(u16*)(out_buf + i); - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); + /* Let's consult the effector map... */ - a_len = 0; - prev_cksum = cksum; + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - } + stage_max -= 4 * ARITH_MAX; + continue; - /* Continue collecting string, but only if the bit flip actually made - any difference - we don't want no-op tokens. */ + } + + stage_cur_byte = i; + + for (j = 1; j <= ARITH_MAX; ++j) { + + u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), + r3 = orig ^ SWAP16(SWAP16(orig) + j), + r4 = orig ^ SWAP16(SWAP16(orig) - j); + + /* Try little endian addition and subtraction first. Do it only + if the operation would affect more than one byte (hence the + & 0xff overflow checks) and if it couldn't be a product of + a bitflip. */ + + stage_val_type = STAGE_VAL_LE; + + if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { + + stage_cur_val = j; + *(u16*)(out_buf + i) = orig + j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else + + --stage_max; + + if ((orig & 0xff) < j && !could_be_bitflip(r2)) { + + stage_cur_val = -j; + *(u16*)(out_buf + i) = orig - j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else + + --stage_max; + + /* Big endian comes next. Same deal. 
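The 8-bit arithmetic pass above only spends an execution on orig +/- j when the resulting change could not already have been produced by the bitflip stages. A sketch of where that filter sits in the loop is below; could_be_bitflip_stub() is a placeholder for the real could_be_bitflip() defined earlier in this file (with the stub nothing is actually filtered out), and ARITH_MAX = 35 is assumed rather than taken from config.h.

#include <stdint.h>
#include <stdio.h>

#define ARITH_MAX 35   /* assumed value for illustration */

/* Placeholder for the real could_be_bitflip(), stubbed so the sketch builds on
   its own.  Returning 0 means "not reachable by a bitflip", i.e. keep the value. */
static uint8_t could_be_bitflip_stub(uint32_t xor_val) { (void)xor_val; return 0; }

/* Collect the arith 8/8 candidate values for one byte, skipping redundant ones. */
static unsigned arith8_candidates(uint8_t orig, uint8_t* out, unsigned max_out) {

  unsigned n = 0;

  for (unsigned j = 1; j <= ARITH_MAX && n < max_out; j++) {

    uint8_t plus  = (uint8_t)(orig + j);
    uint8_t minus = (uint8_t)(orig - j);

    if (!could_be_bitflip_stub((uint8_t)(orig ^ plus)))
      out[n++] = plus;                      /* orig + j is worth an execution */

    if (n < max_out && !could_be_bitflip_stub((uint8_t)(orig ^ minus)))
      out[n++] = minus;                     /* orig - j is worth an execution */

  }

  return n;

}

int main(void) {

  uint8_t  out[2 * ARITH_MAX];
  unsigned n = arith8_candidates(0x7f, out, sizeof(out));

  printf("%u of %u possible arith8 values kept for 0x7f\n", n, 2 * ARITH_MAX);
  return 0;

}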
*/ + + stage_val_type = STAGE_VAL_BE; + + if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { + + stage_cur_val = j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else + + --stage_max; + + if ((orig >> 8) < j && !could_be_bitflip(r4)) { - if (cksum != queue_cur->exec_cksum) { + stage_cur_val = -j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - } + } else - } + --stage_max; - } + *(u16*)(out_buf + i) = orig; - new_hit_cnt = queued_paths + unique_crashes; + } - stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP1] += stage_max; + } - /* Two walking bits. */ + new_hit_cnt = queued_paths + unique_crashes; - stage_name = "bitflip 2/1"; - stage_short = "flip2"; - stage_max = (len << 3) - 1; + stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH16] += stage_max; - orig_hit_cnt = new_hit_cnt; + /* 32-bit arithmetics, both endians. */ - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + if (len < 4) goto skip_arith; - stage_cur_byte = stage_cur >> 3; + stage_name = "arith 32/8"; + stage_short = "arith32"; + stage_cur = 0; + stage_max = 4 * (len - 3) * ARITH_MAX; - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); + orig_hit_cnt = new_hit_cnt; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + for (i = 0; i < len - 3; ++i) { - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); + u32 orig = *(u32*)(out_buf + i); - } + /* Let's consult the effector map... */ - new_hit_cnt = queued_paths + unique_crashes; + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP2] += stage_max; + stage_max -= 4 * ARITH_MAX; + continue; + } + stage_cur_byte = i; - /* Four walking bits. */ + for (j = 1; j <= ARITH_MAX; ++j) { - stage_name = "bitflip 4/1"; - stage_short = "flip4"; - stage_max = (len << 3) - 3; + u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), + r3 = orig ^ SWAP32(SWAP32(orig) + j), + r4 = orig ^ SWAP32(SWAP32(orig) - j); + /* Little endian first. Same deal as with 16-bit: we only want to + try if the operation would have effect on more than two bytes. 
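The & 0xff and >> 8 comparisons above are what keep the 16-bit pass from re-doing the 8-bit work: a little-endian +/- j is only executed when the carry or borrow crosses into the high byte, and the big-endian variant applies the same test to the byte that becomes "high" after swapping. A toy version of just that gate (without the additional could_be_bitflip() filter the real code also applies):

#include <stdint.h>
#include <stdio.h>

static int le_add_crosses(uint16_t orig, uint16_t j) { return (orig & 0xff) + j > 0xff; }
static int le_sub_borrows(uint16_t orig, uint16_t j) { return (orig & 0xff) < j; }
static int be_add_crosses(uint16_t orig, uint16_t j) { return (orig >> 8) + j > 0xff; }
static int be_sub_borrows(uint16_t orig, uint16_t j) { return (orig >> 8) < j; }

int main(void) {

  uint16_t orig = 0x10ff;   /* low byte is 0xff, so even +1 carries into the high byte */

  printf("LE +1 worth trying: %d\n", le_add_crosses(orig, 1));   /* 1 */
  printf("LE -1 worth trying: %d\n", le_sub_borrows(orig, 1));   /* 0 */
  printf("BE +1 worth trying: %d\n", be_add_crosses(orig, 1));   /* 0: 0x10 + 1 still fits in one byte */
  printf("BE -1 worth trying: %d\n", be_sub_borrows(orig, 1));   /* 0 */

  return 0;

}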
*/ + stage_val_type = STAGE_VAL_LE; + if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { + stage_cur_val = j; + *(u32*)(out_buf + i) = orig + j; - orig_hit_cnt = new_hit_cnt; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + } else - stage_cur_byte = stage_cur >> 3; + --stage_max; - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); + if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + stage_cur_val = -j; + *(u32*)(out_buf + i) = orig - j; - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + stage_cur++; - } + } else - new_hit_cnt = queued_paths + unique_crashes; + --stage_max; - stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP4] += stage_max; + /* Big endian next. */ + stage_val_type = STAGE_VAL_BE; + if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { + stage_cur_val = j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); - /* Effector map setup. These macros calculate: + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - EFF_APOS - position of a particular file offset in the map. - EFF_ALEN - length of a map with a particular number of bytes. - EFF_SPAN_ALEN - map span for a sequence of bytes. + } else - */ + --stage_max; -#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) -#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) -#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) -#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) + if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { - /* Initialize effector map for the next step (see comments below). Always - flag first and last byte as doing something. */ + stage_cur_val = -j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); - eff_map = ck_alloc(EFF_ALEN(len)); - eff_map[0] = 1; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - if (EFF_APOS(len - 1) != 0) { - eff_map[EFF_APOS(len - 1)] = 1; - ++eff_cnt; - } + } else - /* Walking byte. */ + --stage_max; - stage_name = "bitflip 8/8"; - stage_short = "flip8"; - stage_max = len; + *(u32*)(out_buf + i) = orig; + } + } - orig_hit_cnt = new_hit_cnt; + new_hit_cnt = queued_paths + unique_crashes; - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH32] += stage_max; - stage_cur_byte = stage_cur; +skip_arith: - out_buf[stage_cur] ^= 0xFF; + /********************** + * INTERESTING VALUES * + **********************/ - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + stage_name = "interest 8/8"; + stage_short = "int8"; + stage_cur = 0; + stage_max = len * sizeof(interesting_8); - /* We also use this stage to pull off a simple trick: we identify - bytes that seem to have no effect on the current execution path - even when fully flipped - and we skip them during more expensive - deterministic stages, such as arithmetics or known ints. */ + stage_val_type = STAGE_VAL_LE; - if (!eff_map[EFF_APOS(stage_cur)]) { + orig_hit_cnt = new_hit_cnt; - u32 cksum; + /* Setting 8-bit integers. 
*/ - /* If in dumb mode or if the file is very short, just flag everything - without wasting time on checksums. */ + for (i = 0; i < len; ++i) { - if (!dumb_mode && len >= EFF_MIN_LEN) - cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - else - cksum = ~queue_cur->exec_cksum; + u8 orig = out_buf[i]; - if (cksum != queue_cur->exec_cksum) { - eff_map[EFF_APOS(stage_cur)] = 1; - ++eff_cnt; - } + /* Let's consult the effector map... */ - } + if (!eff_map[EFF_APOS(i)]) { - out_buf[stage_cur] ^= 0xFF; + stage_max -= sizeof(interesting_8); + continue; - } + } - /* If the effector map is more than EFF_MAX_PERC dense, just flag the - whole thing as worth fuzzing, since we wouldn't be saving much time - anyway. */ + stage_cur_byte = i; - if (eff_cnt != EFF_ALEN(len) && - eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { + for (j = 0; j < sizeof(interesting_8); ++j) { - memset(eff_map, 1, EFF_ALEN(len)); + /* Skip if the value could be a product of bitflips or arithmetics. */ - blocks_eff_select += EFF_ALEN(len); + if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || + could_be_arith(orig, (u8)interesting_8[j], 1)) { - } - else { + --stage_max; + continue; - blocks_eff_select += eff_cnt; + } - } + stage_cur_val = interesting_8[j]; + out_buf[i] = interesting_8[j]; - blocks_eff_total += EFF_ALEN(len); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - new_hit_cnt = queued_paths + unique_crashes; + out_buf[i] = orig; + ++stage_cur; - stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP8] += stage_max; + } + } + new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST8] += stage_max; + /* Setting 16-bit integers, both endians. */ - /* Two walking bytes. */ + if (no_arith || len < 2) goto skip_interest; - if (len < 2) goto skip_bitflip; + stage_name = "interest 16/8"; + stage_short = "int16"; + stage_cur = 0; + stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); - stage_name = "bitflip 16/8"; - stage_short = "flip16"; - stage_cur = 0; - stage_max = len - 1; + orig_hit_cnt = new_hit_cnt; + for (i = 0; i < len - 1; ++i) { + u16 orig = *(u16*)(out_buf + i); - orig_hit_cnt = new_hit_cnt; + /* Let's consult the effector map... */ - for (i = 0; i < len - 1; ++i) { + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - /* Let's consult the effector map... */ + stage_max -= sizeof(interesting_16); + continue; - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - --stage_max; - continue; - } + } - stage_cur_byte = i; + stage_cur_byte = i; - *(u16*)(out_buf + i) ^= 0xFFFF; + for (j = 0; j < sizeof(interesting_16) / 2; ++j) { - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_cur_val = interesting_16[j]; - *(u16*)(out_buf + i) ^= 0xFFFF; + /* Skip if this could be a product of a bitflip, arithmetics, + or single-byte interesting value insertion. 
*/ + if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && + !could_be_arith(orig, (u16)interesting_16[j], 2) && + !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { - } + stage_val_type = STAGE_VAL_LE; - new_hit_cnt = queued_paths + unique_crashes; + *(u16*)(out_buf + i) = interesting_16[j]; - stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP16] += stage_max; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + } else + --stage_max; + if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && + !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && + !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && + !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { - if (len < 4) goto skip_bitflip; + stage_val_type = STAGE_VAL_BE; - /* Four walking bytes. */ + *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_name = "bitflip 32/8"; - stage_short = "flip32"; - stage_cur = 0; - stage_max = len - 3; + } else + --stage_max; + } - orig_hit_cnt = new_hit_cnt; + *(u16*)(out_buf + i) = orig; - for (i = 0; i < len - 3; ++i) { + } - /* Let's consult the effector map... */ - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - --stage_max; - continue; - } + new_hit_cnt = queued_paths + unique_crashes; - stage_cur_byte = i; + stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST16] += stage_max; - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + if (len < 4) goto skip_interest; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + /* Setting 32-bit integers, both endians. */ - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + stage_name = "interest 32/8"; + stage_short = "int32"; + stage_cur = 0; + stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); - } + orig_hit_cnt = new_hit_cnt; - new_hit_cnt = queued_paths + unique_crashes; + for (i = 0; i < len - 3; ++i) { - stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP32] += stage_max; + u32 orig = *(u32*)(out_buf + i); + /* Let's consult the effector map... */ + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + stage_max -= sizeof(interesting_32) >> 1; + continue; + } + stage_cur_byte = i; - skip_bitflip: + for (j = 0; j < sizeof(interesting_32) / 4; ++j) { - if (no_arith) goto skip_arith; + stage_cur_val = interesting_32[j]; - /********************** - * ARITHMETIC INC/DEC * - **********************/ + /* Skip if this could be a product of a bitflip, arithmetics, + or word interesting value insertion. */ - /* 8-bit arithmetics. 
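For each interesting 16-bit value, the stage above tries the little-endian form and, separately, the byte-swapped form, but only when the two actually differ and when neither is reachable through the earlier stages. The sketch below shows just the endian part; the table is a hand-picked stand-in for the real interesting_16[] from config.h, and the could_be_* redundancy predicates are omitted.

#include <stdint.h>
#include <stdio.h>

#define SWAP16(_x) ((uint16_t)((((uint16_t)(_x)) << 8) | (((uint16_t)(_x)) >> 8)))

/* Stand-in table; the fuzzer's interesting_16[] comes from config.h. */
static const int16_t interesting_16[] = { -32768, -129, 128, 255, 256, 512, 1000, 1024, 4096, 32767 };

int main(void) {

  for (unsigned j = 0; j < sizeof(interesting_16) / sizeof(interesting_16[0]); j++) {

    uint16_t le = (uint16_t)interesting_16[j];
    uint16_t be = SWAP16(le);

    printf("value %6d: LE 0x%04x", interesting_16[j], (unsigned)le);

    /* When both byte orders give the same pattern (e.g. 0xffff), the big-endian
       execution would just repeat the little-endian one, so the real code skips it. */
    if (be != le) printf("  BE 0x%04x", (unsigned)be);

    printf("\n");

  }

  return 0;

}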
*/ + if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && + !could_be_arith(orig, interesting_32[j], 4) && + !could_be_interest(orig, interesting_32[j], 4, 0)) { - stage_name = "arith 8/8"; - stage_short = "arith8"; - stage_cur = 0; - stage_max = 2 * len * ARITH_MAX; + stage_val_type = STAGE_VAL_LE; + *(u32*)(out_buf + i) = interesting_32[j]; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + } else - stage_val_type = STAGE_VAL_LE; + --stage_max; - orig_hit_cnt = new_hit_cnt; + if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && + !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && + !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && + !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { - for (i = 0; i < len; ++i) { + stage_val_type = STAGE_VAL_BE; - u8 orig = out_buf[i]; + *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - /* Let's consult the effector map... */ + } else - if (!eff_map[EFF_APOS(i)]) { - stage_max -= 2 * ARITH_MAX; - continue; - } + --stage_max; - stage_cur_byte = i; + } - for (j = 1; j <= ARITH_MAX; ++j) { + *(u32*)(out_buf + i) = orig; - u8 r = orig ^ (orig + j); + } - /* Do arithmetic operations only if the result couldn't be a product - of a bitflip. */ + new_hit_cnt = queued_paths + unique_crashes; - if (!could_be_bitflip(r)) { + stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST32] += stage_max; - stage_cur_val = j; - out_buf[i] = orig + j; +skip_interest: - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + /******************** + * DICTIONARY STUFF * + ********************/ - } else --stage_max; + if (!extras_cnt) goto skip_user_extras; - r = orig ^ (orig - j); + /* Overwrite with user-supplied extras. */ - if (!could_be_bitflip(r)) { + stage_name = "user extras (over)"; + stage_short = "ext_UO"; + stage_cur = 0; + stage_max = extras_cnt * len; - stage_cur_val = -j; - out_buf[i] = orig - j; + stage_val_type = STAGE_VAL_NONE; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + orig_hit_cnt = new_hit_cnt; - } else --stage_max; + for (i = 0; i < len; ++i) { - out_buf[i] = orig; + u32 last_len = 0; - } + stage_cur_byte = i; - } + /* Extras are sorted by size, from smallest to largest. This means + that we don't have to worry about restoring the buffer in + between writes at a particular offset determined by the outer + loop. */ - new_hit_cnt = queued_paths + unique_crashes; + for (j = 0; j < extras_cnt; ++j) { - stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH8] += stage_max; + /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also + skip them if there's no room to insert the payload, if the token + is redundant, or if its entire span has no bytes set in the effector + map. */ + if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || + extras[j].len > len - i || + !memcmp(extras[j].data, out_buf + i, extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { + --stage_max; + continue; + } + last_len = extras[j].len; + memcpy(out_buf + i, extras[j].data, last_len); - /* 16-bit arithmetics, both endians. 
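The "user extras (over)" loop above overwrites every file position with every dictionary token, runs the target, and then repairs the buffer from in_buf. A self-contained sketch of that pattern is below; the extra table layout, the random helper and the execution callback are assumptions made so it builds on its own, and the effector-map span test is simplified to a single per-offset flag (the real code checks the whole span with memchr()).

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define MAX_DET_EXTRAS 200                 /* assumed cap; the real value is in config.h */

struct extra { const uint8_t* data; uint32_t len; };   /* assumed layout, sorted smallest first */

/* Stubs standing in for UR() and common_fuzz_stuff() so the sketch compiles alone. */
static uint32_t ur_stub(uint32_t limit) { (void)limit; return 0; }
static int run_stub(uint8_t* buf, size_t len) { (void)buf; (void)len; return 0; }

static int overwrite_with_extras(uint8_t* out_buf, const uint8_t* in_buf, size_t len,
                                 const uint8_t* eff_any,          /* 1 = span worth fuzzing */
                                 const struct extra* extras, uint32_t extras_cnt) {

  for (size_t i = 0; i < len; i++) {

    uint32_t last_len = 0;

    for (uint32_t j = 0; j < extras_cnt; j++) {

      /* Skip: probabilistically when there are very many extras, when the token
         does not fit, when it is already present, or when its span is dead. */
      if ((extras_cnt > MAX_DET_EXTRAS && ur_stub(extras_cnt) >= MAX_DET_EXTRAS) ||
          extras[j].len > len - i ||
          !memcmp(extras[j].data, out_buf + i, extras[j].len) ||
          !eff_any[i])
        continue;

      last_len = extras[j].len;
      memcpy(out_buf + i, extras[j].data, last_len);    /* overwrite in place */

      if (run_stub(out_buf, len)) return 1;             /* abandon on request */

    }

    /* Extras are sorted by size, so undoing the largest (= last) write restores
       the buffer exactly. */
    memcpy(out_buf + i, in_buf + i, last_len);

  }

  return 0;

}

int main(void) {

  uint8_t in_buf[]  = "....header....";
  uint8_t out_buf[] = "....header....";
  uint8_t eff_any[sizeof(in_buf)];
  static const uint8_t tok[] = "PNG";
  struct extra extras[] = { { tok, 3 } };

  memset(eff_any, 1, sizeof(eff_any));
  return overwrite_with_extras(out_buf, in_buf, sizeof(out_buf) - 1, eff_any, extras, 1);

}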
*/ + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - if (len < 2) goto skip_arith; + ++stage_cur; - stage_name = "arith 16/8"; - stage_short = "arith16"; - stage_cur = 0; - stage_max = 4 * (len - 1) * ARITH_MAX; + } + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); + } + new_hit_cnt = queued_paths + unique_crashes; - orig_hit_cnt = new_hit_cnt; + stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UO] += stage_max; - for (i = 0; i < len - 1; ++i) { + /* Insertion of user-supplied extras. */ - u16 orig = *(u16*)(out_buf + i); + stage_name = "user extras (insert)"; + stage_short = "ext_UI"; + stage_cur = 0; + stage_max = extras_cnt * len; - /* Let's consult the effector map... */ + orig_hit_cnt = new_hit_cnt; - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } + ex_tmp = ck_alloc(len + MAX_DICT_FILE); - stage_cur_byte = i; + for (i = 0; i <= len; ++i) { - for (j = 1; j <= ARITH_MAX; ++j) { + stage_cur_byte = i; - u16 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP16(SWAP16(orig) + j), - r4 = orig ^ SWAP16(SWAP16(orig) - j); + for (j = 0; j < extras_cnt; ++j) { - /* Try little endian addition and subtraction first. Do it only - if the operation would affect more than one byte (hence the - & 0xff overflow checks) and if it couldn't be a product of - a bitflip. */ + if (len + extras[j].len > MAX_FILE) { - stage_val_type = STAGE_VAL_LE; + --stage_max; + continue; - if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { + } - stage_cur_val = j; - *(u16*)(out_buf + i) = orig + j; + /* Insert token */ + memcpy(ex_tmp + i, extras[j].data, extras[j].len); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + /* Copy tail */ + memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); - } else --stage_max; + if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { - if ((orig & 0xff) < j && !could_be_bitflip(r2)) { + ck_free(ex_tmp); + goto abandon_entry; - stage_cur_val = -j; - *(u16*)(out_buf + i) = orig - j; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + ++stage_cur; - } else --stage_max; + } - /* Big endian comes next. Same deal. */ + /* Copy head */ + ex_tmp[i] = out_buf[i]; - stage_val_type = STAGE_VAL_BE; + } + ck_free(ex_tmp); - if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { + new_hit_cnt = queued_paths + unique_crashes; - stage_cur_val = j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); + stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UI] += stage_max; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; +skip_user_extras: - } else --stage_max; + if (!a_extras_cnt) goto skip_extras; - if ((orig >> 8) < j && !could_be_bitflip(r4)) { + stage_name = "auto extras (over)"; + stage_short = "ext_AO"; + stage_cur = 0; + stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; - stage_cur_val = -j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); + stage_val_type = STAGE_VAL_NONE; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + orig_hit_cnt = new_hit_cnt; - } else --stage_max; + for (i = 0; i < len; ++i) { - *(u16*)(out_buf + i) = orig; + u32 last_len = 0; - } + stage_cur_byte = i; - } + for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { - new_hit_cnt = queued_paths + unique_crashes; + /* See the comment in the earlier code; extras are sorted by size. 
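The insertion variant above cannot work in place, so it builds each candidate in ex_tmp: the head grows by one byte per outer-loop position, while only the token and the tail are rewritten for every (position, extra) pair. A deterministic toy version follows, with a fixed token and a printf() standing in for the execution.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

int main(void) {

  const uint8_t out_buf[] = "HEADERpayload";        /* illustrative test case */
  const size_t  len       = sizeof(out_buf) - 1;

  const uint8_t token[]   = "IHDR";                 /* illustrative extra     */
  const size_t  token_len = sizeof(token) - 1;

  uint8_t* ex_tmp = malloc(len + token_len + 1);
  if (!ex_tmp) return 1;

  for (size_t i = 0; i <= len; i++) {

    memcpy(ex_tmp + i, token, token_len);                     /* insert token at position i */
    memcpy(ex_tmp + i + token_len, out_buf + i, len - i);     /* copy the tail              */

    ex_tmp[len + token_len] = 0;
    printf("insert at %2zu: %s\n", (size_t)i, (char*)ex_tmp); /* fuzzer would execute here  */

    if (i < len) ex_tmp[i] = out_buf[i];                      /* grow the head by one byte  */

  }

  free(ex_tmp);
  return 0;

}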
*/ - stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH16] += stage_max; + if (a_extras[j].len > len - i || + !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, + EFF_SPAN_ALEN(i, a_extras[j].len))) { + --stage_max; + continue; + } + last_len = a_extras[j].len; + memcpy(out_buf + i, a_extras[j].data, last_len); - /* 32-bit arithmetics, both endians. */ + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - if (len < 4) goto skip_arith; + ++stage_cur; - stage_name = "arith 32/8"; - stage_short = "arith32"; - stage_cur = 0; - stage_max = 4 * (len - 3) * ARITH_MAX; + } + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); + } - orig_hit_cnt = new_hit_cnt; + new_hit_cnt = queued_paths + unique_crashes; - for (i = 0; i < len - 3; ++i) { + stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_AO] += stage_max; - u32 orig = *(u32*)(out_buf + i); +skip_extras: - /* Let's consult the effector map... */ + /* If we made this to here without jumping to havoc_stage or abandon_entry, + we're properly done with deterministic steps and can mark it as such + in the .state/ directory. */ - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } + if (!queue_cur->passed_det) mark_as_det_done(queue_cur); - stage_cur_byte = i; + /**************** + * RANDOM HAVOC * + ****************/ - for (j = 1; j <= ARITH_MAX; ++j) { +havoc_stage: +pacemaker_fuzzing: - u32 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP32(SWAP32(orig) + j), - r4 = orig ^ SWAP32(SWAP32(orig) - j); + stage_cur_byte = -1; - /* Little endian first. Same deal as with 16-bit: we only want to - try if the operation would have effect on more than two bytes. */ + /* The havoc stage mutation code is also invoked when splicing files; if the + splice_cycle variable is set, generate different descriptions and such. */ - stage_val_type = STAGE_VAL_LE; + if (!splice_cycle) { - if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { + stage_name = "MOpt-havoc"; + stage_short = "MOpt_havoc"; + stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score / + havoc_div / 100; - stage_cur_val = j; - *(u32*)(out_buf + i) = orig + j; + } else { - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + static u8 tmp[32]; - } else --stage_max; + perf_score = orig_perf; - if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { + sprintf(tmp, "MOpt-splice %u", splice_cycle); + stage_name = tmp; + stage_short = "MOpt_splice"; + stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - stage_cur_val = -j; - *(u32*)(out_buf + i) = orig - j; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + s32 temp_len_puppet; + cur_ms_lv = get_cur_time(); - } else --stage_max; + { - /* Big endian next. 
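The havoc budget set up above is not fixed: it is scaled by the entry's performance score and the global havoc divisor, with a hard floor, and the splice variant starts from a smaller base. The sketch below shows only the shape of that formula; the four constants are assumed stand-ins for the config.h values.

#include <stdint.h>
#include <stdio.h>

#define HAVOC_CYCLES_INIT 1024   /* assumed */
#define HAVOC_CYCLES       256   /* assumed */
#define SPLICE_HAVOC        32   /* assumed */
#define HAVOC_MIN           16   /* assumed */

static uint32_t havoc_budget(int doing_det, int splicing,
                             uint32_t perf_score, uint32_t havoc_div) {

  uint32_t base = splicing ? SPLICE_HAVOC
                           : (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES);

  uint32_t stage_max = base * perf_score / havoc_div / 100;

  return stage_max < HAVOC_MIN ? HAVOC_MIN : stage_max;   /* never below the floor */

}

int main(void) {

  /* A fast, favored entry (perf_score 200) right after the deterministic stages: */
  printf("det havoc budget:    %u\n", havoc_budget(1, 0, 200, 1));

  /* A slow entry (perf_score 10) during splicing gets clamped to the minimum: */
  printf("splice havoc budget: %u\n", havoc_budget(0, 1, 10, 1));

  return 0;

}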
*/ + if (key_puppet == 1) { - stage_val_type = STAGE_VAL_BE; + if (unlikely(orig_hit_cnt_puppet == 0)) { - if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { + orig_hit_cnt_puppet = queued_paths + unique_crashes; + last_limit_time_start = get_cur_time(); + SPLICE_CYCLES_puppet = + (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + + SPLICE_CYCLES_puppet_low); - stage_cur_val = j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } - } else --stage_max; + { - if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { +#ifndef IGNORE_FINDS + havoc_stage_puppet: +#endif - stage_cur_val = -j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); + stage_cur_byte = -1; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + /* The havoc stage mutation code is also invoked when splicing files; if + the splice_cycle variable is set, generate different descriptions and + such. */ - } else --stage_max; + if (!splice_cycle) { - *(u32*)(out_buf + i) = orig; + stage_name = "MOpt avoc"; + stage_short = "MOpt_havoc"; + stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * + perf_score / havoc_div / 100; - } + } else { - } + static u8 tmp[32]; + perf_score = orig_perf; + sprintf(tmp, "MOpt splice %u", splice_cycle); + stage_name = tmp; + stage_short = "MOpt_splice"; + stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - new_hit_cnt = queued_paths + unique_crashes; + } - stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH32] += stage_max; + if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; + temp_len = len; + orig_hit_cnt = queued_paths + unique_crashes; + havoc_queued = queued_paths; - skip_arith: + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - /********************** - * INTERESTING VALUES * - **********************/ + u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); - stage_name = "interest 8/8"; - stage_short = "int8"; - stage_cur = 0; - stage_max = len * sizeof(interesting_8); + stage_cur_val = use_stacking; + for (i = 0; i < operator_num; ++i) { + stage_cycles_puppet_v3[swarm_now][i] = + stage_cycles_puppet_v2[swarm_now][i]; - stage_val_type = STAGE_VAL_LE; + } - orig_hit_cnt = new_hit_cnt; + for (i = 0; i < use_stacking; ++i) { - /* Setting 8-bit integers. */ + switch (select_algorithm()) { - for (i = 0; i < len; ++i) { + case 0: + /* Flip a single bit somewhere. Spooky! */ + FLIP_BIT(out_buf, UR(temp_len << 3)); + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP1] += 1; + break; - u8 orig = out_buf[i]; + case 1: + if (temp_len < 2) break; + temp_len_puppet = UR(temp_len << 3); + FLIP_BIT(out_buf, temp_len_puppet); + FLIP_BIT(out_buf, temp_len_puppet + 1); + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP2] += 1; + break; - /* Let's consult the effector map... 
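Each havoc iteration above stacks a random power-of-two number of mutations and credits every application to a per-operator counter, which is what MOpt later turns into operator efficiencies. The skeleton below keeps only that structure: the operator set is trimmed to three toy mutations, and the probability-weighted select_algorithm() is replaced by a uniform pick, so this does not reproduce the swarm logic itself.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>

#define HAVOC_STACK_POW2 7   /* assumed, see config.h */

enum { OP_FLIP1, OP_RANDBYTE, OP_ARITH8, OP_COUNT };

static uint64_t op_cycles[OP_COUNT];   /* per-operator use counters (cf. stage_cycles_puppet_v2) */

static uint32_t ur(uint32_t limit) { return (uint32_t)(rand() % limit); }

static void havoc_one(uint8_t* buf, size_t len) {

  uint32_t use_stacking = 1u << (1 + ur(HAVOC_STACK_POW2));   /* 2..128 stacked operations */

  for (uint32_t i = 0; i < use_stacking; i++) {

    switch (ur(OP_COUNT)) {            /* uniform stand-in for select_algorithm() */

      case OP_FLIP1:
        buf[ur(len)] ^= 128 >> ur(8);  /* flip one random bit            */
        op_cycles[OP_FLIP1]++;
        break;

      case OP_RANDBYTE:
        buf[ur(len)] ^= 1 + ur(255);   /* XOR with 1..255: never a no-op */
        op_cycles[OP_RANDBYTE]++;
        break;

      case OP_ARITH8:
        buf[ur(len)] += 1 + ur(35);    /* small arithmetic nudge         */
        op_cycles[OP_ARITH8]++;
        break;

    }

  }

}

int main(void) {

  uint8_t buf[64] = { 0 };

  srand((unsigned)time(NULL));
  havoc_one(buf, sizeof(buf));

  printf("flip1=%llu randbyte=%llu arith8=%llu\n",
         (unsigned long long)op_cycles[OP_FLIP1],
         (unsigned long long)op_cycles[OP_RANDBYTE],
         (unsigned long long)op_cycles[OP_ARITH8]);

  return 0;

}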
*/ + case 2: + if (temp_len < 2) break; + temp_len_puppet = UR(temp_len << 3); + FLIP_BIT(out_buf, temp_len_puppet); + FLIP_BIT(out_buf, temp_len_puppet + 1); + FLIP_BIT(out_buf, temp_len_puppet + 2); + FLIP_BIT(out_buf, temp_len_puppet + 3); + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP4] += 1; + break; - if (!eff_map[EFF_APOS(i)]) { - stage_max -= sizeof(interesting_8); - continue; - } + case 3: + if (temp_len < 4) break; + out_buf[UR(temp_len)] ^= 0xFF; + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP8] += 1; + break; - stage_cur_byte = i; + case 4: + if (temp_len < 8) break; + *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF; + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP16] += 1; + break; - for (j = 0; j < sizeof(interesting_8); ++j) { + case 5: + if (temp_len < 8) break; + *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF; + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP32] += 1; + break; - /* Skip if the value could be a product of bitflips or arithmetics. */ + case 6: + out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); + out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); + stage_cycles_puppet_v2[swarm_now][STAGE_ARITH8] += 1; + break; - if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || - could_be_arith(orig, (u8)interesting_8[j], 1)) { - --stage_max; - continue; - } + case 7: + /* Randomly subtract from word, random endian. */ + if (temp_len < 8) break; + if (UR(2)) { - stage_cur_val = interesting_8[j]; - out_buf[i] = interesting_8[j]; + u32 pos = UR(temp_len - 1); + *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + } else { - out_buf[i] = orig; - ++stage_cur; + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); - } + } - } + /* Randomly add to word, random endian. */ + if (UR(2)) { - new_hit_cnt = queued_paths + unique_crashes; + u32 pos = UR(temp_len - 1); + *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); - stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST8] += stage_max; + } else { + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); + } + stage_cycles_puppet_v2[swarm_now][STAGE_ARITH16] += 1; + break; - /* Setting 16-bit integers, both endians. */ + case 8: + /* Randomly subtract from dword, random endian. */ + if (temp_len < 8) break; + if (UR(2)) { - if (no_arith || len < 2) goto skip_interest; + u32 pos = UR(temp_len - 3); + *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - stage_name = "interest 16/8"; - stage_short = "int16"; - stage_cur = 0; - stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); + } else { + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); + } - orig_hit_cnt = new_hit_cnt; + /* Randomly add to dword, random endian. */ + // if (temp_len < 4) break; + if (UR(2)) { - for (i = 0; i < len - 1; ++i) { + u32 pos = UR(temp_len - 3); + *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); - u16 orig = *(u16*)(out_buf + i); + } else { - /* Let's consult the effector map... 
*/ + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= sizeof(interesting_16); - continue; - } + } - stage_cur_byte = i; + stage_cycles_puppet_v2[swarm_now][STAGE_ARITH32] += 1; + break; - for (j = 0; j < sizeof(interesting_16) / 2; ++j) { + case 9: + /* Set byte to interesting value. */ + if (temp_len < 4) break; + out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; + stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST8] += 1; + break; - stage_cur_val = interesting_16[j]; + case 10: + /* Set word to interesting value, randomly choosing endian. */ + if (temp_len < 8) break; + if (UR(2)) { - /* Skip if this could be a product of a bitflip, arithmetics, - or single-byte interesting value insertion. */ + *(u16*)(out_buf + UR(temp_len - 1)) = + interesting_16[UR(sizeof(interesting_16) >> 1)]; - if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && - !could_be_arith(orig, (u16)interesting_16[j], 2) && - !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { + } else { - stage_val_type = STAGE_VAL_LE; + *(u16*)(out_buf + UR(temp_len - 1)) = + SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]); - *(u16*)(out_buf + i) = interesting_16[j]; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST16] += 1; + break; - } else --stage_max; + case 11: + /* Set dword to interesting value, randomly choosing endian. */ - if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && - !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && - !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && - !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { + if (temp_len < 8) break; - stage_val_type = STAGE_VAL_BE; + if (UR(2)) { - *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + *(u32*)(out_buf + UR(temp_len - 3)) = + interesting_32[UR(sizeof(interesting_32) >> 2)]; - } else --stage_max; + } else { - } + *(u32*)(out_buf + UR(temp_len - 3)) = + SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]); - *(u16*)(out_buf + i) = orig; + } - } + stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST32] += 1; + break; - new_hit_cnt = queued_paths + unique_crashes; + case 12: - stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST16] += stage_max; + /* Just set a random byte to a random value. Because, + why not. We use XOR with 1-255 to eliminate the + possibility of a no-op. */ + out_buf[UR(temp_len)] ^= 1 + UR(255); + stage_cycles_puppet_v2[swarm_now][STAGE_RANDOMBYTE] += 1; + break; + case 13: { + /* Delete bytes. We're making this a bit more likely + than insertion (the next option) in hopes of keeping + files reasonably small. */ + u32 del_from, del_len; - if (len < 4) goto skip_interest; + if (temp_len < 2) break; - /* Setting 32-bit integers, both endians. */ + /* Don't delete too much. 
*/ - stage_name = "interest 32/8"; - stage_short = "int32"; - stage_cur = 0; - stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); + del_len = choose_block_len(temp_len - 1); + del_from = UR(temp_len - del_len + 1); - orig_hit_cnt = new_hit_cnt; + memmove(out_buf + del_from, out_buf + del_from + del_len, + temp_len - del_from - del_len); - for (i = 0; i < len - 3; ++i) { + temp_len -= del_len; + stage_cycles_puppet_v2[swarm_now][STAGE_DELETEBYTE] += 1; + break; - u32 orig = *(u32*)(out_buf + i); + } - /* Let's consult the effector map... */ + case 14: - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= sizeof(interesting_32) >> 1; - continue; - } + if (temp_len + HAVOC_BLK_XL < MAX_FILE) { - stage_cur_byte = i; + /* Clone bytes (75%) or insert a block of constant bytes (25%). + */ - for (j = 0; j < sizeof(interesting_32) / 4; ++j) { + u8 actually_clone = UR(4); + u32 clone_from, clone_to, clone_len; + u8* new_buf; - stage_cur_val = interesting_32[j]; + if (actually_clone) { - /* Skip if this could be a product of a bitflip, arithmetics, - or word interesting value insertion. */ + clone_len = choose_block_len(temp_len); + clone_from = UR(temp_len - clone_len + 1); - if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && - !could_be_arith(orig, interesting_32[j], 4) && - !could_be_interest(orig, interesting_32[j], 4, 0)) { + } else { - stage_val_type = STAGE_VAL_LE; + clone_len = choose_block_len(HAVOC_BLK_XL); + clone_from = 0; - *(u32*)(out_buf + i) = interesting_32[j]; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + clone_to = UR(temp_len); - } else --stage_max; + new_buf = ck_alloc_nozero(temp_len + clone_len); - if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && - !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && - !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && - !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { + /* Head */ - stage_val_type = STAGE_VAL_BE; + memcpy(new_buf, out_buf, clone_to); - *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + /* Inserted part */ - } else --stage_max; + if (actually_clone) + memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); + else + memset(new_buf + clone_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); - } + /* Tail */ + memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, + temp_len - clone_to); - *(u32*)(out_buf + i) = orig; + ck_free(out_buf); + out_buf = new_buf; + temp_len += clone_len; + stage_cycles_puppet_v2[swarm_now][STAGE_Clone75] += 1; - } + } - new_hit_cnt = queued_paths + unique_crashes; + break; - stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST32] += stage_max; + case 15: { + /* Overwrite bytes with a randomly selected chunk (75%) or fixed + bytes (25%). */ + u32 copy_from, copy_to, copy_len; + if (temp_len < 2) break; + copy_len = choose_block_len(temp_len - 1); - skip_interest: + copy_from = UR(temp_len - copy_len + 1); + copy_to = UR(temp_len - copy_len + 1); - /******************** - * DICTIONARY STUFF * - ********************/ + if (UR(4)) { - if (!extras_cnt) goto skip_user_extras; + if (copy_from != copy_to) + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); - /* Overwrite with user-supplied extras. 
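Havoc case 14 above is the only mutation that grows the test case: it splices in either a copy of an existing block (the 75% "clone" branch) or a run of a constant byte, assembling the result as head + inserted part + tail in a freshly allocated buffer. A deterministic toy version, with the random choices fixed so the output is reproducible:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

int main(void) {

  uint8_t out_buf[] = "AAAABBBBCCCC";
  size_t  temp_len  = sizeof(out_buf) - 1;

  size_t clone_from     = 4;   /* would be UR(temp_len - clone_len + 1) */
  size_t clone_len      = 4;   /* would come from choose_block_len()    */
  size_t clone_to       = 10;  /* would be UR(temp_len)                 */
  int    actually_clone = 1;   /* 75% in the real code (UR(4) != 0)     */

  uint8_t* new_buf = malloc(temp_len + clone_len + 1);
  if (!new_buf) return 1;

  memcpy(new_buf, out_buf, clone_to);                               /* head          */

  if (actually_clone)
    memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);    /* cloned block  */
  else
    memset(new_buf + clone_to, 'x', clone_len);                     /* constant fill */

  memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
         temp_len - clone_to);                                      /* tail          */

  new_buf[temp_len + clone_len] = 0;
  printf("%s\n", (char*)new_buf);   /* AAAABBBBCCBBBBCC */

  free(new_buf);
  return 0;

}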
*/ + } else - stage_name = "user extras (over)"; - stage_short = "ext_UO"; - stage_cur = 0; - stage_max = extras_cnt * len; + memset(out_buf + copy_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); + stage_cycles_puppet_v2[swarm_now][STAGE_OverWrite75] += 1; + break; + } + } + } - stage_val_type = STAGE_VAL_NONE; + tmp_pilot_time += 1; - orig_hit_cnt = new_hit_cnt; + u64 temp_total_found = queued_paths + unique_crashes; - for (i = 0; i < len; ++i) { + if (common_fuzz_stuff(argv, out_buf, temp_len)) + goto abandon_entry_puppet; - u32 last_len = 0; + /* out_buf might have been mangled a bit, so let's restore it to its + original size and shape. */ - stage_cur_byte = i; + if (temp_len < len) out_buf = ck_realloc(out_buf, len); + temp_len = len; + memcpy(out_buf, in_buf, len); - /* Extras are sorted by size, from smallest to largest. This means - that we don't have to worry about restoring the buffer in - between writes at a particular offset determined by the outer - loop. */ + /* If we're finding new stuff, let's run for a bit longer, limits + permitting. */ - for (j = 0; j < extras_cnt; ++j) { + if (queued_paths != havoc_queued) { - /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also - skip them if there's no room to insert the payload, if the token - is redundant, or if its entire span has no bytes set in the effector - map. */ + if (perf_score <= havoc_max_mult * 100) { - if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || - extras[j].len > len - i || - !memcmp(extras[j].data, out_buf + i, extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { + stage_max *= 2; + perf_score *= 2; - --stage_max; - continue; + } - } + havoc_queued = queued_paths; - last_len = extras[j].len; - memcpy(out_buf + i, extras[j].data, last_len); + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + if (unlikely(queued_paths + unique_crashes > temp_total_found)) { - ++stage_cur; + u64 temp_temp_puppet = + queued_paths + unique_crashes - temp_total_found; + total_puppet_find = total_puppet_find + temp_temp_puppet; + for (i = 0; i < 16; ++i) { - } + if (stage_cycles_puppet_v2[swarm_now][i] > + stage_cycles_puppet_v3[swarm_now][i]) + stage_finds_puppet_v2[swarm_now][i] += temp_temp_puppet; - /* Restore all the clobbered memory. */ - memcpy(out_buf + i, in_buf + i, last_len); + } - } + } - new_hit_cnt = queued_paths + unique_crashes; + } - stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UO] += stage_max; + new_hit_cnt = queued_paths + unique_crashes; - /* Insertion of user-supplied extras. 
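The "run for a bit longer" logic above is a small feedback loop: whenever the current havoc run has queued new paths since the last check, both the remaining budget and the performance score are doubled, but only while the score stays under havoc_max_mult * 100. A minimal sketch with names local to the example:

#include <stdint.h>

struct havoc_state {
  uint32_t stage_max;      /* remaining iteration budget      */
  uint32_t perf_score;     /* score of the entry being fuzzed */
  uint64_t havoc_queued;   /* queued_paths at the last check  */
};

static void maybe_extend_havoc(struct havoc_state* st, uint64_t queued_paths,
                               uint32_t havoc_max_mult) {

  if (queued_paths == st->havoc_queued) return;   /* nothing new since the last exec */

  if (st->perf_score <= havoc_max_mult * 100) {

    st->stage_max  *= 2;    /* run this stage for twice as long */
    st->perf_score *= 2;    /* and remember that we escalated   */

  }

  st->havoc_queued = queued_paths;                /* re-arm the check */

}

int main(void) {

  struct havoc_state st = { 256, 100, 100 };

  maybe_extend_havoc(&st, 103, 16);   /* three new paths found -> budget doubles to 512 */
  return st.stage_max == 512 ? 0 : 1;

}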
*/ + if (!splice_cycle) { - stage_name = "user extras (insert)"; - stage_short = "ext_UI"; - stage_cur = 0; - stage_max = extras_cnt * len; + stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_HAVOC] += stage_max; + } else { + stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_SPLICE] += stage_max; + } - orig_hit_cnt = new_hit_cnt; +#ifndef IGNORE_FINDS - ex_tmp = ck_alloc(len + MAX_DICT_FILE); + /************ + * SPLICING * + ************/ - for (i = 0; i <= len; ++i) { + retry_splicing_puppet: - stage_cur_byte = i; + if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet && + queued_paths > 1 && queue_cur->len > 1) { - for (j = 0; j < extras_cnt; ++j) { + struct queue_entry* target; + u32 tid, split_at; + u8* new_buf; + s32 f_diff, l_diff; - if (len + extras[j].len > MAX_FILE) { - --stage_max; - continue; - } + /* First of all, if we've modified in_buf for havoc, let's clean that + up... */ - /* Insert token */ - memcpy(ex_tmp + i, extras[j].data, extras[j].len); + if (in_buf != orig_in) { - /* Copy tail */ - memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); + ck_free(in_buf); + in_buf = orig_in; + len = queue_cur->len; - if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { - ck_free(ex_tmp); - goto abandon_entry; - } + } - ++stage_cur; + /* Pick a random queue entry and seek to it. Don't splice with yourself. + */ - } + do { - /* Copy head */ - ex_tmp[i] = out_buf[i]; + tid = UR(queued_paths); - } + } while (tid == current_entry); - ck_free(ex_tmp); + splicing_with = tid; + target = queue; - new_hit_cnt = queued_paths + unique_crashes; + while (tid >= 100) { - stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UI] += stage_max; + target = target->next_100; + tid -= 100; - skip_user_extras: + } - if (!a_extras_cnt) goto skip_extras; + while (tid--) + target = target->next; - stage_name = "auto extras (over)"; - stage_short = "ext_AO"; - stage_cur = 0; - stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; + /* Make sure that the target has a reasonable length. */ + while (target && (target->len < 2 || target == queue_cur)) { - stage_val_type = STAGE_VAL_NONE; + target = target->next; + ++splicing_with; - orig_hit_cnt = new_hit_cnt; + } - for (i = 0; i < len; ++i) { + if (!target) goto retry_splicing_puppet; - u32 last_len = 0; + /* Read the testcase into a new buffer. */ - stage_cur_byte = i; + fd = open(target->fname, O_RDONLY); - for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { + if (fd < 0) PFATAL("Unable to open '%s'", target->fname); - /* See the comment in the earlier code; extras are sorted by size. */ + new_buf = ck_alloc_nozero(target->len); - if (a_extras[j].len > len - i || - !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { + ck_read(fd, new_buf, target->len, target->fname); - --stage_max; - continue; + close(fd); - } + /* Find a suitable splicin g location, somewhere between the first and + the last differing byte. Bail out if the difference is just a single + byte or so. */ - last_len = a_extras[j].len; - memcpy(out_buf + i, a_extras[j].data, last_len); + locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { - ++stage_cur; + ck_free(new_buf); + goto retry_splicing_puppet; - } + } - /* Restore all the clobbered memory. 
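The splice step set up above (and finished just below) scans the two inputs for their first and last differing offsets, gives up if they are nearly identical, and otherwise keeps the current entry's head and the donor's tail, cutting at a point inside the differing window. The sketch below mirrors that flow; the diff scan is a stand-in for the fuzzer's own locate_diffs(), and the "random" split point is fixed so the output is reproducible.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void find_diffs(const uint8_t* a, const uint8_t* b, size_t len,
                       long* first, long* last) {

  *first = *last = -1;

  for (size_t i = 0; i < len; i++)
    if (a[i] != b[i]) {

      if (*first < 0) *first = (long)i;
      *last = (long)i;

    }

}

int main(void) {

  uint8_t in_buf[] = "GIF87a....AAAAAAA";   /* current entry     */
  uint8_t donor[]  = "GIF89a....BBBBBBB";   /* random other seed */
  size_t  len      = sizeof(in_buf) - 1;

  long f_diff, l_diff;
  find_diffs(in_buf, donor, len, &f_diff, &l_diff);

  /* Same bail-out as above: not enough difference to make splicing worthwhile. */
  if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) return 0;

  size_t split_at = (size_t)(f_diff + (l_diff - f_diff) / 2);   /* real code: f_diff + UR(l_diff - f_diff) */

  uint8_t spliced[sizeof(donor)];
  memcpy(spliced, donor, sizeof(donor));    /* start from the donor...            */
  memcpy(spliced, in_buf, split_at);        /* ...and put our head over its front */

  printf("split at %zu: %s\n", split_at, (char*)spliced);
  return 0;

}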
*/ - memcpy(out_buf + i, in_buf + i, last_len); + /* Split somewhere between the first and last differing byte. */ - } + split_at = f_diff + UR(l_diff - f_diff); - new_hit_cnt = queued_paths + unique_crashes; + /* Do the thing. */ - stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_AO] += stage_max; + len = target->len; + memcpy(new_buf, in_buf, split_at); + in_buf = new_buf; + ck_free(out_buf); + out_buf = ck_alloc_nozero(len); + memcpy(out_buf, in_buf, len); + goto havoc_stage_puppet; - skip_extras: + } - /* If we made this to here without jumping to havoc_stage or abandon_entry, - we're properly done with deterministic steps and can mark it as such - in the .state/ directory. */ +#endif /* !IGNORE_FINDS */ - if (!queue_cur->passed_det) mark_as_det_done(queue_cur); + ret_val = 0; - /**************** - * RANDOM HAVOC * - ****************/ + abandon_entry: + abandon_entry_puppet: - havoc_stage: - pacemaker_fuzzing: + if (splice_cycle >= SPLICE_CYCLES_puppet) + SPLICE_CYCLES_puppet = + (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + + SPLICE_CYCLES_puppet_low); + splicing_with = -1; - stage_cur_byte = -1; + /* Update pending_not_fuzzed count if we made it through the calibration + cycle and have not seen this entry before. */ - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. */ + // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) { - if (!splice_cycle) { + // queue_cur->was_fuzzed = 1; + // --pending_not_fuzzed; + // if (queue_cur->favored) --pending_favored; + // } - stage_name = "MOpt-havoc"; - stage_short = "MOpt_havoc"; - stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; + munmap(orig_in, queue_cur->len); - } - else { + if (in_buf != orig_in) ck_free(in_buf); + ck_free(out_buf); + ck_free(eff_map); - static u8 tmp[32]; + if (key_puppet == 1) { - perf_score = orig_perf; + if (unlikely(queued_paths + unique_crashes > + ((queued_paths + unique_crashes) * limit_time_bound + + orig_hit_cnt_puppet))) { - sprintf(tmp, "MOpt-splice %u", splice_cycle); - stage_name = tmp; - stage_short = "MOpt_splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; + key_puppet = 0; + cur_ms_lv = get_cur_time(); + new_hit_cnt = queued_paths + unique_crashes; + orig_hit_cnt_puppet = 0; + last_limit_time_start = 0; - } + } - s32 temp_len_puppet; - cur_ms_lv = get_cur_time(); + } - { + if (unlikely(tmp_pilot_time > period_pilot)) { + total_pacemaker_time += tmp_pilot_time; + new_hit_cnt = queued_paths + unique_crashes; + swarm_fitness[swarm_now] = + (double)(total_puppet_find - temp_puppet_find) / + ((double)(tmp_pilot_time) / period_pilot_tmp); + tmp_pilot_time = 0; + temp_puppet_find = total_puppet_find; - if (key_puppet == 1) - { - if (unlikely(orig_hit_cnt_puppet == 0)) - { - orig_hit_cnt_puppet = queued_paths + unique_crashes; - last_limit_time_start = get_cur_time(); - SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low); - } - } + u64 temp_stage_finds_puppet = 0; + for (i = 0; i < operator_num; ++i) { + double temp_eff = 0.0; - { -#ifndef IGNORE_FINDS - havoc_stage_puppet: -#endif + if (stage_cycles_puppet_v2[swarm_now][i] > + stage_cycles_puppet[swarm_now][i]) + temp_eff = (double)(stage_finds_puppet_v2[swarm_now][i] - + stage_finds_puppet[swarm_now][i]) / + (double)(stage_cycles_puppet_v2[swarm_now][i] - + 
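The MOpt bookkeeping around here boils down to two ratios: once a swarm has used up its pilot budget, its fitness is the number of new finds normalised by how much of the pilot period it consumed, and each operator's local efficiency is new finds per additional execution of that operator. A small sketch of both, with names local to the example:

#include <stdint.h>
#include <stdio.h>

static double swarm_fitness(uint64_t finds_now, uint64_t finds_before,
                            uint64_t pilot_time, uint64_t period_pilot) {

  /* cf. swarm_fitness[swarm_now] above */
  return (double)(finds_now - finds_before) /
         ((double)pilot_time / (double)period_pilot);

}

static double operator_efficiency(uint64_t finds_new, uint64_t finds_old,
                                  uint64_t cycles_new, uint64_t cycles_old) {

  if (cycles_new <= cycles_old) return 0.0;   /* operator was not used this round */

  return (double)(finds_new - finds_old) / (double)(cycles_new - cycles_old);

}

int main(void) {

  /* A swarm that found 12 new paths/crashes over exactly one pilot period: */
  printf("fitness = %.3f\n", swarm_fitness(112, 100, 50000, 50000));

  /* An operator that contributed 3 of them over 4000 extra executions: */
  printf("eff     = %.6f\n", operator_efficiency(3, 0, 4000, 0));

  return 0;

}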
stage_cycles_puppet[swarm_now][i]); - stage_cur_byte = -1; + if (eff_best[swarm_now][i] < temp_eff) { - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. */ + eff_best[swarm_now][i] = temp_eff; + L_best[swarm_now][i] = x_now[swarm_now][i]; - if (!splice_cycle) { + } - stage_name = "MOpt avoc"; - stage_short = "MOpt_havoc"; - stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; + stage_finds_puppet[swarm_now][i] = + stage_finds_puppet_v2[swarm_now][i]; + stage_cycles_puppet[swarm_now][i] = + stage_cycles_puppet_v2[swarm_now][i]; + temp_stage_finds_puppet += stage_finds_puppet[swarm_now][i]; - } - else { - static u8 tmp[32]; - perf_score = orig_perf; - sprintf(tmp, "MOpt splice %u", splice_cycle); - stage_name = tmp; - stage_short = "MOpt_splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - } + } + swarm_now = swarm_now + 1; + if (swarm_now == swarm_num) { + key_module = 1; + for (i = 0; i < operator_num; ++i) { - if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; - - temp_len = len; - - orig_hit_cnt = queued_paths + unique_crashes; - - havoc_queued = queued_paths; - - - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); - - stage_cur_val = use_stacking; - - - for (i = 0; i < operator_num; ++i) - { - stage_cycles_puppet_v3[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i]; - } - - - for (i = 0; i < use_stacking; ++i) { - - switch (select_algorithm()) { - - case 0: - /* Flip a single bit somewhere. Spooky! */ - FLIP_BIT(out_buf, UR(temp_len << 3)); - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP1] += 1; - break; - - - case 1: - if (temp_len < 2) break; - temp_len_puppet = UR(temp_len << 3); - FLIP_BIT(out_buf, temp_len_puppet); - FLIP_BIT(out_buf, temp_len_puppet + 1); - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP2] += 1; - break; - - case 2: - if (temp_len < 2) break; - temp_len_puppet = UR(temp_len << 3); - FLIP_BIT(out_buf, temp_len_puppet); - FLIP_BIT(out_buf, temp_len_puppet + 1); - FLIP_BIT(out_buf, temp_len_puppet + 2); - FLIP_BIT(out_buf, temp_len_puppet + 3); - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP4] += 1; - break; - - case 3: - if (temp_len < 4) break; - out_buf[UR(temp_len)] ^= 0xFF; - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP8] += 1; - break; - - case 4: - if (temp_len < 8) break; - *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF; - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP16] += 1; - break; - - case 5: - if (temp_len < 8) break; - *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF; - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP32] += 1; - break; - - case 6: - out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); - out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); - stage_cycles_puppet_v2[swarm_now][STAGE_ARITH8] += 1; - break; - - case 7: - /* Randomly subtract from word, random endian. */ - if (temp_len < 8) break; - if (UR(2)) { - u32 pos = UR(temp_len - 1); - *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } - else { - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); - } - /* Randomly add to word, random endian. 
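core_fuzzing() starts, like the pilot path, by deciding whether to fuzz the current queue entry at all: favored, never-fuzzed arrivals are preferred, and non-favored entries are skipped most of the time. The sketch below keeps only that decision tree; the three probabilities are assumed stand-ins for the config.h values, and the dumb_mode check is dropped for brevity.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define SKIP_TO_NEW_PROB   99   /* assumed: % chance to skip while new favorites wait  */
#define SKIP_NFAV_NEW_PROB 75   /* assumed: % chance to skip a new, non-favored entry  */
#define SKIP_NFAV_OLD_PROB 95   /* assumed: % chance to skip an old, non-favored entry */

static uint32_t ur(uint32_t limit) { return (uint32_t)(rand() % limit); }

static int should_skip(int pending_favored, int was_fuzzed, int favored,
                       int queue_cycle, uint64_t queued_paths) {

  if (pending_favored) {

    /* Favored, never-fuzzed entries are waiting: almost always jump to them. */
    if ((was_fuzzed || !favored) && ur(100) < SKIP_TO_NEW_PROB) return 1;

  } else if (!favored && queued_paths > 10) {

    /* No favorites pending: still thin out non-favored entries, more aggressively
       when they have already been fuzzed at least once. */
    if (queue_cycle > 1 && !was_fuzzed) return ur(100) < SKIP_NFAV_NEW_PROB;

    return ur(100) < SKIP_NFAV_OLD_PROB;

  }

  return 0;

}

int main(void) {

  srand(1);
  printf("skip old non-favored entry: %d\n", should_skip(0, 1, 0, 3, 200));
  return 0;

}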
*/ - if (UR(2)) { - u32 pos = UR(temp_len - 1); - *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } - else { - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); - } - stage_cycles_puppet_v2[swarm_now][STAGE_ARITH16] += 1; - break; - - - case 8: - /* Randomly subtract from dword, random endian. */ - if (temp_len < 8) break; - if (UR(2)) { - u32 pos = UR(temp_len - 3); - *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } - else { - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); - } - /* Randomly add to dword, random endian. */ - //if (temp_len < 4) break; - if (UR(2)) { - u32 pos = UR(temp_len - 3); - *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } - else { - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); - } - stage_cycles_puppet_v2[swarm_now][STAGE_ARITH32] += 1; - break; - - - case 9: - /* Set byte to interesting value. */ - if (temp_len < 4) break; - out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; - stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST8] += 1; - break; - - case 10: - /* Set word to interesting value, randomly choosing endian. */ - if (temp_len < 8) break; - if (UR(2)) { - *(u16*)(out_buf + UR(temp_len - 1)) = - interesting_16[UR(sizeof(interesting_16) >> 1)]; - } - else { - *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16( - interesting_16[UR(sizeof(interesting_16) >> 1)]); - } - stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST16] += 1; - break; - - - case 11: - /* Set dword to interesting value, randomly choosing endian. */ - - if (temp_len < 8) break; - - if (UR(2)) { - *(u32*)(out_buf + UR(temp_len - 3)) = - interesting_32[UR(sizeof(interesting_32) >> 2)]; - } - else { - *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32( - interesting_32[UR(sizeof(interesting_32) >> 2)]); - } - stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST32] += 1; - break; + core_operator_cycles_puppet_v2[i] = core_operator_cycles_puppet[i]; + core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet[i]; + core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i]; + } - case 12: + double swarm_eff = 0.0; + swarm_now = 0; + for (i = 0; i < swarm_num; ++i) { - /* Just set a random byte to a random value. Because, - why not. We use XOR with 1-255 to eliminate the - possibility of a no-op. */ + if (swarm_fitness[i] > swarm_eff) { - out_buf[UR(temp_len)] ^= 1 + UR(255); - stage_cycles_puppet_v2[swarm_now][STAGE_RANDOMBYTE] += 1; - break; + swarm_eff = swarm_fitness[i]; + swarm_now = i; + } + } - case 13: { + if (swarm_now < 0 || swarm_now > swarm_num - 1) + PFATAL("swarm_now error number %d", swarm_now); - /* Delete bytes. We're making this a bit more likely - than insertion (the next option) in hopes of keeping - files reasonably small. */ + } - u32 del_from, del_len; + } - if (temp_len < 2) break; + return ret_val; - /* Don't delete too much. 
*/ + } - del_len = choose_block_len(temp_len - 1); + } - del_from = UR(temp_len - del_len + 1); +#undef FLIP_BIT - memmove(out_buf + del_from, out_buf + del_from + del_len, - temp_len - del_from - del_len); +} - temp_len -= del_len; - stage_cycles_puppet_v2[swarm_now][STAGE_DELETEBYTE] += 1; - break; +u8 core_fuzzing(char** argv) { - } + int i; - case 14: + if (swarm_num == 1) { - if (temp_len + HAVOC_BLK_XL < MAX_FILE) { + key_module = 2; + return 0; - /* Clone bytes (75%) or insert a block of constant bytes (25%). */ + } - u8 actually_clone = UR(4); - u32 clone_from, clone_to, clone_len; - u8* new_buf; + s32 len, fd, temp_len, j; + u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; + u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv; + u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; - if (actually_clone) { + u8 ret_val = 1, doing_det = 0; - clone_len = choose_block_len(temp_len); - clone_from = UR(temp_len - clone_len + 1); + u8 a_collect[MAX_AUTO_EXTRA]; + u32 a_len = 0; - } - else { +#ifdef IGNORE_FINDS - clone_len = choose_block_len(HAVOC_BLK_XL); - clone_from = 0; + /* In IGNORE_FINDS mode, skip any entries that weren't in the + initial data set. */ - } + if (queue_cur->depth > 1) return 1; - clone_to = UR(temp_len); +#else - new_buf = ck_alloc_nozero(temp_len + clone_len); + if (pending_favored) { - /* Head */ + /* If we have any favored, non-fuzzed new arrivals in the queue, + possibly skip to them at the expense of already-fuzzed or non-favored + cases. */ - memcpy(new_buf, out_buf, clone_to); + if ((queue_cur->was_fuzzed || !queue_cur->favored) && + UR(100) < SKIP_TO_NEW_PROB) + return 1; - /* Inserted part */ + } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { - if (actually_clone) - memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); - else - memset(new_buf + clone_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); + /* Otherwise, still possibly skip non-favored cases, albeit less often. + The odds of skipping stuff are higher for already-fuzzed inputs and + lower for never-fuzzed entries. */ - /* Tail */ - memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, - temp_len - clone_to); + if (queue_cycle > 1 && !queue_cur->was_fuzzed) { - ck_free(out_buf); - out_buf = new_buf; - temp_len += clone_len; - stage_cycles_puppet_v2[swarm_now][STAGE_Clone75] += 1; - } + if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; - break; + } else { - case 15: { + if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; - /* Overwrite bytes with a randomly selected chunk (75%) or fixed - bytes (25%). */ + } - u32 copy_from, copy_to, copy_len; + } - if (temp_len < 2) break; +#endif /* ^IGNORE_FINDS */ - copy_len = choose_block_len(temp_len - 1); + if (not_on_tty) { - copy_from = UR(temp_len - copy_len + 1); - copy_to = UR(temp_len - copy_len + 1); + ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", + current_entry, queued_paths, unique_crashes); + fflush(stdout); - if (UR(4)) { + } - if (copy_from != copy_to) - memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + /* Map the test case into memory. */ - } - else memset(out_buf + copy_to, - UR(2) ? 
UR(256) : out_buf[UR(temp_len)], copy_len); - stage_cycles_puppet_v2[swarm_now][STAGE_OverWrite75] += 1; - break; + fd = open(queue_cur->fname, O_RDONLY); - } + if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); + len = queue_cur->len; - } + orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - } + if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); + close(fd); - tmp_pilot_time += 1; + /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every + single byte anyway, so it wouldn't give us any performance or memory usage + benefits. */ + out_buf = ck_alloc_nozero(len); + subseq_tmouts = 0; + cur_depth = queue_cur->depth; - u64 temp_total_found = queued_paths + unique_crashes; + /******************************************* + * CALIBRATION (only if failed earlier on) * + *******************************************/ + if (queue_cur->cal_failed) { + u8 res = FAULT_TMOUT; + if (queue_cur->cal_failed < CAL_CHANCES) { - if (common_fuzz_stuff(argv, out_buf, temp_len)) - goto abandon_entry_puppet; + res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); - /* out_buf might have been mangled a bit, so let's restore it to its - original size and shape. */ + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); - if (temp_len < len) out_buf = ck_realloc(out_buf, len); - temp_len = len; - memcpy(out_buf, in_buf, len); + } - /* If we're finding new stuff, let's run for a bit longer, limits - permitting. */ + if (stop_soon || res != crash_mode) { - if (queued_paths != havoc_queued) { + ++cur_skipped_paths; + goto abandon_entry; - if (perf_score <= havoc_max_mult * 100) { - stage_max *= 2; - perf_score *= 2; - } + } - havoc_queued = queued_paths; + } - } + /************ + * TRIMMING * + ************/ - if (unlikely(queued_paths + unique_crashes > temp_total_found)) - { - u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found; - total_puppet_find = total_puppet_find + temp_temp_puppet; - for (i = 0; i < 16; ++i) - { - if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet_v3[swarm_now][i]) - stage_finds_puppet_v2[swarm_now][i] += temp_temp_puppet; - } - } + if (!dumb_mode && !queue_cur->trim_done) { - } - new_hit_cnt = queued_paths + unique_crashes; + u8 res = trim_case(argv, queue_cur, in_buf); - if (!splice_cycle) { - stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_HAVOC] += stage_max; - } else { - stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_SPLICE] += stage_max; - } + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); -#ifndef IGNORE_FINDS + if (stop_soon) { - /************ - * SPLICING * - ************/ + ++cur_skipped_paths; + goto abandon_entry; + } - retry_splicing_puppet: + /* Don't retry trimming, even if it failed. */ - if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet && - queued_paths > 1 && queue_cur->len > 1) { + queue_cur->trim_done = 1; - struct queue_entry* target; - u32 tid, split_at; - u8* new_buf; - s32 f_diff, l_diff; + len = queue_cur->len; - /* First of all, if we've modified in_buf for havoc, let's clean that - up... */ + } - if (in_buf != orig_in) { - ck_free(in_buf); - in_buf = orig_in; - len = queue_cur->len; - } + memcpy(out_buf, in_buf, len); - /* Pick a random queue entry and seek to it. Don't splice with yourself. 
*/ + /********************* + * PERFORMANCE SCORE * + *********************/ - do { tid = UR(queued_paths); } while (tid == current_entry); + orig_perf = perf_score = calculate_score(queue_cur); - splicing_with = tid; - target = queue; + /* Skip right away if -d is given, if we have done deterministic fuzzing on + this entry ourselves (was_fuzzed), or if it has gone through deterministic + testing in earlier, resumed runs (passed_det). */ - while (tid >= 100) { target = target->next_100; tid -= 100; } - while (tid--) target = target->next; + if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) + goto havoc_stage; - /* Make sure that the target has a reasonable length. */ + /* Skip deterministic fuzzing if exec path checksum puts this out of scope + for this master instance. */ - while (target && (target->len < 2 || target == queue_cur)) { - target = target->next; - ++splicing_with; - } + if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) + goto havoc_stage; - if (!target) goto retry_splicing_puppet; + cur_ms_lv = get_cur_time(); + if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) || + (last_crash_time != 0 && + cur_ms_lv - last_crash_time < limit_time_puppet) || + last_path_time == 0))) { - /* Read the testcase into a new buffer. */ + key_puppet = 1; + goto pacemaker_fuzzing; - fd = open(target->fname, O_RDONLY); + } - if (fd < 0) PFATAL("Unable to open '%s'", target->fname); + doing_det = 1; - new_buf = ck_alloc_nozero(target->len); + /********************************************* + * SIMPLE BITFLIP (+dictionary construction) * + *********************************************/ - ck_read(fd, new_buf, target->len, target->fname); +#define FLIP_BIT(_ar, _b) \ + do { \ + \ + u8* _arf = (u8*)(_ar); \ + u32 _bf = (_b); \ + _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \ + \ + } while (0) - close(fd); + /* Single walking bit. */ - /* Find a suitable splicin g location, somewhere between the first and - the last differing byte. Bail out if the difference is just a single - byte or so. */ + stage_short = "flip1"; + stage_max = len << 3; + stage_name = "bitflip 1/1"; - locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); + stage_val_type = STAGE_VAL_NONE; - if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { - ck_free(new_buf); - goto retry_splicing_puppet; - } + orig_hit_cnt = queued_paths + unique_crashes; - /* Split somewhere between the first and last differing byte. */ + prev_cksum = queue_cur->exec_cksum; - split_at = f_diff + UR(l_diff - f_diff); + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - /* Do the thing. */ + stage_cur_byte = stage_cur >> 3; - len = target->len; - memcpy(new_buf, in_buf, split_at); - in_buf = new_buf; - ck_free(out_buf); - out_buf = ck_alloc_nozero(len); - memcpy(out_buf, in_buf, len); - goto havoc_stage_puppet; + FLIP_BIT(out_buf, stage_cur); - } + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; -#endif /* !IGNORE_FINDS */ + FLIP_BIT(out_buf, stage_cur); - ret_val = 0; + /* While flipping the least significant bit in every byte, pull of an extra + trick to detect possible syntax tokens. 
In essence, the idea is that if + you have a binary blob like this: - abandon_entry: - abandon_entry_puppet: + xxxxxxxxIHDRxxxxxxxx - if (splice_cycle >= SPLICE_CYCLES_puppet) - SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low); + ...and changing the leading and trailing bytes causes variable or no + changes in program flow, but touching any character in the "IHDR" string + always produces the same, distinctive path, it's highly likely that + "IHDR" is an atomically-checked magic value of special significance to + the fuzzed format. + We do this here, rather than as a separate stage, because it's a nice + way to keep the operation approximately "free" (i.e., no extra execs). - splicing_with = -1; + Empirically, performing the check when flipping the least significant bit + is advantageous, compared to doing it at the time of more disruptive + changes, where the program flow may be affected in more violent ways. - /* Update pending_not_fuzzed count if we made it through the calibration - cycle and have not seen this entry before. */ + The caveat is that we won't generate dictionaries in the -d mode or -S + mode - but that's probably a fair trade-off. - // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) { - // queue_cur->was_fuzzed = 1; - // --pending_not_fuzzed; - // if (queue_cur->favored) --pending_favored; - // } + This won't work particularly well with paths that exhibit variable + behavior, but fails gracefully, so we'll carry out the checks anyway. - munmap(orig_in, queue_cur->len); + */ - if (in_buf != orig_in) ck_free(in_buf); - ck_free(out_buf); - ck_free(eff_map); + if (!dumb_mode && (stage_cur & 7) == 7) { + u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - if (key_puppet == 1) { - if (unlikely(queued_paths + unique_crashes > ((queued_paths + unique_crashes)*limit_time_bound + orig_hit_cnt_puppet))) { - key_puppet = 0; - cur_ms_lv = get_cur_time(); - new_hit_cnt = queued_paths + unique_crashes; - orig_hit_cnt_puppet = 0; - last_limit_time_start = 0; - } - } + if (stage_cur == stage_max - 1 && cksum == prev_cksum) { + /* If at end of file and we are still collecting a string, grab the + final character and force output. */ - if (unlikely(tmp_pilot_time > period_pilot)) { - total_pacemaker_time += tmp_pilot_time; - new_hit_cnt = queued_paths + unique_crashes; - swarm_fitness[swarm_now] = (double)(total_puppet_find - temp_puppet_find) / ((double)(tmp_pilot_time)/ period_pilot_tmp); - tmp_pilot_time = 0; - temp_puppet_find = total_puppet_find; + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + ++a_len; - u64 temp_stage_finds_puppet = 0; - for (i = 0; i < operator_num; ++i) { - double temp_eff = 0.0; + if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) + maybe_add_auto(a_collect, a_len); - if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet[swarm_now][i]) - temp_eff = (double)(stage_finds_puppet_v2[swarm_now][i] - stage_finds_puppet[swarm_now][i]) / - (double)(stage_cycles_puppet_v2[swarm_now][i] - stage_cycles_puppet[swarm_now][i]); + } else if (cksum != prev_cksum) { - if (eff_best[swarm_now][i] < temp_eff) { - eff_best[swarm_now][i] = temp_eff; - L_best[swarm_now][i] = x_now[swarm_now][i]; - } + /* Otherwise, if the checksum has changed, see if we have something + worthwhile queued up, and collect that if the answer is yes. 
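Stated more compactly, the auto-dictionary pass watches the path checksum produced by flipping the lowest bit of each byte and treats every change in that checksum as a token boundary; bytes between two boundaries that all disturb the baseline path get collected as one candidate token. A rough standalone sketch of that bookkeeping, illustrative only (emit_token() is a hypothetical stand-in for maybe_add_auto(), and the 3/32 length bounds stand in for MIN_AUTO_EXTRA/MAX_AUTO_EXTRA):

#include <stdint.h>

#define MIN_TOKEN 3
#define MAX_TOKEN 32

static void collect_tokens(const uint8_t* buf, const uint32_t* cksum_after_flip,
                           uint32_t len, uint32_t base_cksum,
                           void (*emit_token)(const uint8_t*, uint32_t)) {

  uint8_t  tok[MAX_TOKEN];
  uint32_t tlen = 0, prev = base_cksum;

  for (uint32_t i = 0; i < len; i++) {

    if (cksum_after_flip[i] != prev) {            /* path changed: boundary */

      if (tlen >= MIN_TOKEN) emit_token(tok, tlen);
      tlen = 0;
      prev = cksum_after_flip[i];

    }

    if (cksum_after_flip[i] != base_cksum && tlen < MAX_TOKEN)
      tok[tlen++] = buf[i];                       /* keep bytes that matter */

  }

  if (tlen >= MIN_TOKEN) emit_token(tok, tlen);

}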
*/ - stage_finds_puppet[swarm_now][i] = stage_finds_puppet_v2[swarm_now][i]; - stage_cycles_puppet[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i]; - temp_stage_finds_puppet += stage_finds_puppet[swarm_now][i]; - } + if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) + maybe_add_auto(a_collect, a_len); - swarm_now = swarm_now + 1; - if (swarm_now == swarm_num) { - key_module = 1; - for (i = 0; i < operator_num; ++i) { - core_operator_cycles_puppet_v2[i] = core_operator_cycles_puppet[i]; - core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet[i]; - core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i]; - } + a_len = 0; + prev_cksum = cksum; - double swarm_eff = 0.0; - swarm_now = 0; - for (i = 0; i < swarm_num; ++i) { - if (swarm_fitness[i] > swarm_eff) { - swarm_eff = swarm_fitness[i]; - swarm_now = i; - } - } - if (swarm_now <0 || swarm_now > swarm_num - 1) - PFATAL("swarm_now error number %d", swarm_now); + } - } - } - return ret_val; - } - } + /* Continue collecting string, but only if the bit flip actually made + any difference - we don't want no-op tokens. */ + if (cksum != queue_cur->exec_cksum) { -#undef FLIP_BIT + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + ++a_len; -} + } + } -u8 core_fuzzing(char** argv) { - int i; + } - if (swarm_num == 1) { - key_module = 2; - return 0; - } + new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP1] += stage_max; - s32 len, fd, temp_len, j; - u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; - u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv; - u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; + /* Two walking bits. */ - u8 ret_val = 1, doing_det = 0; + stage_name = "bitflip 2/1"; + stage_short = "flip2"; + stage_max = (len << 3) - 1; - u8 a_collect[MAX_AUTO_EXTRA]; - u32 a_len = 0; + orig_hit_cnt = new_hit_cnt; -#ifdef IGNORE_FINDS + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - /* In IGNORE_FINDS mode, skip any entries that weren't in the - initial data set. */ + stage_cur_byte = stage_cur >> 3; - if (queue_cur->depth > 1) return 1; + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); -#else + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - if (pending_favored) { + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); - /* If we have any favored, non-fuzzed new arrivals in the queue, - possibly skip to them at the expense of already-fuzzed or non-favored - cases. */ + } - if ((queue_cur->was_fuzzed || !queue_cur->favored) && - UR(100) < SKIP_TO_NEW_PROB) return 1; + new_hit_cnt = queued_paths + unique_crashes; - } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { + stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP2] += stage_max; - /* Otherwise, still possibly skip non-favored cases, albeit less often. - The odds of skipping stuff are higher for already-fuzzed inputs and - lower for never-fuzzed entries. */ + /* Four walking bits. 
*/ - if (queue_cycle > 1 && !queue_cur->was_fuzzed) { + stage_name = "bitflip 4/1"; + stage_short = "flip4"; + stage_max = (len << 3) - 3; - if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; + orig_hit_cnt = new_hit_cnt; - } else { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; + stage_cur_byte = stage_cur >> 3; - } + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); - } + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; -#endif /* ^IGNORE_FINDS */ + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); - if (not_on_tty) { - ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", - current_entry, queued_paths, unique_crashes); - fflush(stdout); - } + } - /* Map the test case into memory. */ + new_hit_cnt = queued_paths + unique_crashes; - fd = open(queue_cur->fname, O_RDONLY); + stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP4] += stage_max; - if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); + /* Effector map setup. These macros calculate: - len = queue_cur->len; + EFF_APOS - position of a particular file offset in the map. + EFF_ALEN - length of a map with a particular number of bytes. + EFF_SPAN_ALEN - map span for a sequence of bytes. - orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + */ - if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); +#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) +#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) +#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) +#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1) - close(fd); + /* Initialize effector map for the next step (see comments below). Always + flag first and last byte as doing something. */ - /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every - single byte anyway, so it wouldn't give us any performance or memory usage - benefits. */ + eff_map = ck_alloc(EFF_ALEN(len)); + eff_map[0] = 1; - out_buf = ck_alloc_nozero(len); + if (EFF_APOS(len - 1) != 0) { - subseq_tmouts = 0; + eff_map[EFF_APOS(len - 1)] = 1; + ++eff_cnt; - cur_depth = queue_cur->depth; + } - /******************************************* - * CALIBRATION (only if failed earlier on) * - *******************************************/ + /* Walking byte. */ - if (queue_cur->cal_failed) { + stage_name = "bitflip 8/8"; + stage_short = "flip8"; + stage_max = len; - u8 res = FAULT_TMOUT; + orig_hit_cnt = new_hit_cnt; - if (queue_cur->cal_failed < CAL_CHANCES) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); + stage_cur_byte = stage_cur; - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + out_buf[stage_cur] ^= 0xFF; - } + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - if (stop_soon || res != crash_mode) { - ++cur_skipped_paths; - goto abandon_entry; - } + /* We also use this stage to pull off a simple trick: we identify + bytes that seem to have no effect on the current execution path + even when fully flipped - and we skip them during more expensive + deterministic stages, such as arithmetics or known ints. 
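In other words, the walking-byte stage doubles as a classifier: any byte whose full flip leaves the coverage checksum untouched is assumed not to matter, and later, more expensive stages skip it. A simplified sketch of just that classification, illustrative only (run_cksum is a caller-supplied stand-in for "execute the target and hash trace_bits", and the per-byte map here ignores the EFF_MAP_SCALE2 bucketing used by the real code):

#include <stdint.h>

static void build_eff_map(uint8_t* buf, uint32_t len, uint32_t base_cksum,
                          uint8_t* eff,
                          uint32_t (*run_cksum)(const uint8_t*, uint32_t)) {

  for (uint32_t i = 0; i < len; i++) {

    buf[i] ^= 0xFF;                                 /* fully flip the byte   */
    eff[i] = (run_cksum(buf, len) != base_cksum);   /* 1 = byte is effective */
    buf[i] ^= 0xFF;                                 /* restore it            */

  }

}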
*/ - } + if (!eff_map[EFF_APOS(stage_cur)]) { - /************ - * TRIMMING * - ************/ + u32 cksum; - if (!dumb_mode && !queue_cur->trim_done) { + /* If in dumb mode or if the file is very short, just flag everything + without wasting time on checksums. */ - u8 res = trim_case(argv, queue_cur, in_buf); + if (!dumb_mode && len >= EFF_MIN_LEN) + cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + else + cksum = ~queue_cur->exec_cksum; - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + if (cksum != queue_cur->exec_cksum) { - if (stop_soon) { - ++cur_skipped_paths; - goto abandon_entry; - } + eff_map[EFF_APOS(stage_cur)] = 1; + ++eff_cnt; - /* Don't retry trimming, even if it failed. */ + } - queue_cur->trim_done = 1; + } - len = queue_cur->len; + out_buf[stage_cur] ^= 0xFF; - } + } - memcpy(out_buf, in_buf, len); + /* If the effector map is more than EFF_MAX_PERC dense, just flag the + whole thing as worth fuzzing, since we wouldn't be saving much time + anyway. */ - /********************* - * PERFORMANCE SCORE * - *********************/ + if (eff_cnt != EFF_ALEN(len) && + eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { - orig_perf = perf_score = calculate_score(queue_cur); + memset(eff_map, 1, EFF_ALEN(len)); - /* Skip right away if -d is given, if we have done deterministic fuzzing on - this entry ourselves (was_fuzzed), or if it has gone through deterministic - testing in earlier, resumed runs (passed_det). */ + blocks_eff_select += EFF_ALEN(len); - if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) - goto havoc_stage; + } else { - /* Skip deterministic fuzzing if exec path checksum puts this out of scope - for this master instance. */ + blocks_eff_select += eff_cnt; - if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) - goto havoc_stage; + } + blocks_eff_total += EFF_ALEN(len); - cur_ms_lv = get_cur_time(); - if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) || - (last_crash_time != 0 && cur_ms_lv - last_crash_time < limit_time_puppet) || last_path_time == 0))) - { - key_puppet = 1; - goto pacemaker_fuzzing; - } + new_hit_cnt = queued_paths + unique_crashes; - doing_det = 1; + stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP8] += stage_max; - /********************************************* - * SIMPLE BITFLIP (+dictionary construction) * - *********************************************/ + /* Two walking bytes. */ -#define FLIP_BIT(_ar, _b) do { \ - u8* _arf = (u8*)(_ar); \ - u32 _bf = (_b); \ - _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ - } while (0) + if (len < 2) goto skip_bitflip; - /* Single walking bit. */ + stage_name = "bitflip 16/8"; + stage_short = "flip16"; + stage_cur = 0; + stage_max = len - 1; - stage_short = "flip1"; - stage_max = len << 3; - stage_name = "bitflip 1/1"; + orig_hit_cnt = new_hit_cnt; - stage_val_type = STAGE_VAL_NONE; + for (i = 0; i < len - 1; ++i) { - orig_hit_cnt = queued_paths + unique_crashes; + /* Let's consult the effector map... 
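Consulting the effector map means translating byte offsets into map slots with the EFF_* macros defined earlier. The arithmetic is easiest to see with concrete numbers; a small self-checking example (EFF_MAP_SCALE2 taken as 3, its value in config.h, so one slot covers eight input bytes):

#include <assert.h>

#define EFF_MAP_SCALE2 3
#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1)

int main(void) {

  assert(EFF_APOS(0) == 0 && EFF_APOS(7) == 0 && EFF_APOS(8) == 1);
  assert(EFF_ALEN(17) == 3);          /* 17 input bytes need 3 map slots   */
  assert(EFF_SPAN_ALEN(6, 4) == 2);   /* bytes 6..9 straddle slots 0 and 1 */
  return 0;

}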
*/ - prev_cksum = queue_cur->exec_cksum; + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + --stage_max; + continue; - stage_cur_byte = stage_cur >> 3; + } - FLIP_BIT(out_buf, stage_cur); + stage_cur_byte = i; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + *(u16*)(out_buf + i) ^= 0xFFFF; - FLIP_BIT(out_buf, stage_cur); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - /* While flipping the least significant bit in every byte, pull of an extra - trick to detect possible syntax tokens. In essence, the idea is that if - you have a binary blob like this: + *(u16*)(out_buf + i) ^= 0xFFFF; - xxxxxxxxIHDRxxxxxxxx + } - ...and changing the leading and trailing bytes causes variable or no - changes in program flow, but touching any character in the "IHDR" string - always produces the same, distinctive path, it's highly likely that - "IHDR" is an atomically-checked magic value of special significance to - the fuzzed format. + new_hit_cnt = queued_paths + unique_crashes; - We do this here, rather than as a separate stage, because it's a nice - way to keep the operation approximately "free" (i.e., no extra execs). + stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP16] += stage_max; - Empirically, performing the check when flipping the least significant bit - is advantageous, compared to doing it at the time of more disruptive - changes, where the program flow may be affected in more violent ways. + if (len < 4) goto skip_bitflip; - The caveat is that we won't generate dictionaries in the -d mode or -S - mode - but that's probably a fair trade-off. + /* Four walking bytes. */ - This won't work particularly well with paths that exhibit variable - behavior, but fails gracefully, so we'll carry out the checks anyway. + stage_name = "bitflip 32/8"; + stage_short = "flip32"; + stage_cur = 0; + stage_max = len - 3; - */ + orig_hit_cnt = new_hit_cnt; - if (!dumb_mode && (stage_cur & 7) == 7) { + for (i = 0; i < len - 3; ++i) { - u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + /* Let's consult the effector map... */ + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - if (stage_cur == stage_max - 1 && cksum == prev_cksum) { + --stage_max; + continue; - /* If at end of file and we are still collecting a string, grab the - final character and force output. */ + } - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; + stage_cur_byte = i; - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; - } - else if (cksum != prev_cksum) { + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - /* Otherwise, if the checksum has changed, see if we have something - worthwhile queued up, and collect that if the answer is yes. */ + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); + } - a_len = 0; - prev_cksum = cksum; + new_hit_cnt = queued_paths + unique_crashes; - } + stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP32] += stage_max; - /* Continue collecting string, but only if the bit flip actually made - any difference - we don't want no-op tokens. 
*/ +skip_bitflip: - if (cksum != queue_cur->exec_cksum) { + if (no_arith) goto skip_arith; - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; + /********************** + * ARITHMETIC INC/DEC * + **********************/ - } + /* 8-bit arithmetics. */ - } + stage_name = "arith 8/8"; + stage_short = "arith8"; + stage_cur = 0; + stage_max = 2 * len * ARITH_MAX; - } + stage_val_type = STAGE_VAL_LE; - new_hit_cnt = queued_paths + unique_crashes; + orig_hit_cnt = new_hit_cnt; - stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP1] += stage_max; + for (i = 0; i < len; ++i) { + u8 orig = out_buf[i]; + /* Let's consult the effector map... */ - /* Two walking bits. */ + if (!eff_map[EFF_APOS(i)]) { - stage_name = "bitflip 2/1"; - stage_short = "flip2"; - stage_max = (len << 3) - 1; + stage_max -= 2 * ARITH_MAX; + continue; - orig_hit_cnt = new_hit_cnt; + } - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + stage_cur_byte = i; - stage_cur_byte = stage_cur >> 3; + for (j = 1; j <= ARITH_MAX; ++j) { - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); + u8 r = orig ^ (orig + j); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + /* Do arithmetic operations only if the result couldn't be a product + of a bitflip. */ - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); + if (!could_be_bitflip(r)) { - } + stage_cur_val = j; + out_buf[i] = orig + j; - new_hit_cnt = queued_paths + unique_crashes; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP2] += stage_max; + } else + --stage_max; - /* Four walking bits. */ + r = orig ^ (orig - j); - stage_name = "bitflip 4/1"; - stage_short = "flip4"; - stage_max = (len << 3) - 3; + if (!could_be_bitflip(r)) { + stage_cur_val = -j; + out_buf[i] = orig - j; - orig_hit_cnt = new_hit_cnt; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + } else - stage_cur_byte = stage_cur >> 3; + --stage_max; - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); + out_buf[i] = orig; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + } - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); + } - } + new_hit_cnt = queued_paths + unique_crashes; - new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH8] += stage_max; - stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP4] += stage_max; + /* 16-bit arithmetics, both endians. */ + if (len < 2) goto skip_arith; - /* Effector map setup. These macros calculate: + stage_name = "arith 16/8"; + stage_short = "arith16"; + stage_cur = 0; + stage_max = 4 * (len - 1) * ARITH_MAX; - EFF_APOS - position of a particular file offset in the map. - EFF_ALEN - length of a map with a particular number of bytes. - EFF_SPAN_ALEN - map span for a sequence of bytes. 
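The "couldn't be a product of a bitflip" test in the arith 8/8 loop above is purely a de-duplication filter: if old ^ new is one of the patterns the walking-bit stages already generate, re-executing it would be wasted effort. A tiny worked example (values chosen only for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void) {

  uint8_t j = 1;

  uint8_t a = 0x00;
  uint8_t xa = a ^ (uint8_t)(a + j);   /* 0x00 ^ 0x01 = 0x01: one-bit change,
                                          bitflip 1/1 already tried it, skip  */

  uint8_t b = 0x03;
  uint8_t xb = b ^ (uint8_t)(b + j);   /* 0x03 ^ 0x04 = 0x07: three bits flip,
                                          no bitflip stage produces that, keep */

  printf("%02x %02x\n", xa, xb);       /* prints "01 07" */
  return 0;

}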
+ orig_hit_cnt = new_hit_cnt; - */ + for (i = 0; i < len - 1; ++i) { -#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) -#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) -#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) -#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) + u16 orig = *(u16*)(out_buf + i); - /* Initialize effector map for the next step (see comments below). Always - flag first and last byte as doing something. */ + /* Let's consult the effector map... */ - eff_map = ck_alloc(EFF_ALEN(len)); - eff_map[0] = 1; + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - if (EFF_APOS(len - 1) != 0) { - eff_map[EFF_APOS(len - 1)] = 1; - ++eff_cnt; - } + stage_max -= 4 * ARITH_MAX; + continue; - /* Walking byte. */ + } - stage_name = "bitflip 8/8"; - stage_short = "flip8"; - stage_max = len; + stage_cur_byte = i; + for (j = 1; j <= ARITH_MAX; ++j) { - orig_hit_cnt = new_hit_cnt; + u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), + r3 = orig ^ SWAP16(SWAP16(orig) + j), + r4 = orig ^ SWAP16(SWAP16(orig) - j); - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + /* Try little endian addition and subtraction first. Do it only + if the operation would affect more than one byte (hence the + & 0xff overflow checks) and if it couldn't be a product of + a bitflip. */ - stage_cur_byte = stage_cur; + stage_val_type = STAGE_VAL_LE; - out_buf[stage_cur] ^= 0xFF; + if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + stage_cur_val = j; + *(u16*)(out_buf + i) = orig + j; - /* We also use this stage to pull off a simple trick: we identify - bytes that seem to have no effect on the current execution path - even when fully flipped - and we skip them during more expensive - deterministic stages, such as arithmetics or known ints. */ + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - if (!eff_map[EFF_APOS(stage_cur)]) { + } else - u32 cksum; + --stage_max; - /* If in dumb mode or if the file is very short, just flag everything - without wasting time on checksums. */ + if ((orig & 0xff) < j && !could_be_bitflip(r2)) { - if (!dumb_mode && len >= EFF_MIN_LEN) - cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - else - cksum = ~queue_cur->exec_cksum; + stage_cur_val = -j; + *(u16*)(out_buf + i) = orig - j; - if (cksum != queue_cur->exec_cksum) { - eff_map[EFF_APOS(stage_cur)] = 1; - ++eff_cnt; - } + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - } + } else - out_buf[stage_cur] ^= 0xFF; + --stage_max; - } + /* Big endian comes next. Same deal. */ - /* If the effector map is more than EFF_MAX_PERC dense, just flag the - whole thing as worth fuzzing, since we wouldn't be saving much time - anyway. 
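The density cut-off described above can be captured in a few lines. A sketch of the rule in isolation, illustrative only (max_perc stands in for EFF_MAX_PERC, 90 in the stock config, while slots and cnt correspond to EFF_ALEN(len) and eff_cnt):

#include <stdint.h>
#include <string.h>

static void maybe_saturate_eff_map(uint8_t* eff, uint32_t slots, uint32_t cnt,
                                   uint32_t max_perc) {

  /* If almost every slot is already flagged, flag them all: the skipping
     machinery would not save meaningful time at that point. */
  if (cnt != slots && cnt * 100 / slots > max_perc) memset(eff, 1, slots);

}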
*/ + stage_val_type = STAGE_VAL_BE; - if (eff_cnt != EFF_ALEN(len) && - eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { + if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { - memset(eff_map, 1, EFF_ALEN(len)); + stage_cur_val = j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); - blocks_eff_select += EFF_ALEN(len); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - } - else { + } else - blocks_eff_select += eff_cnt; + --stage_max; - } + if ((orig >> 8) < j && !could_be_bitflip(r4)) { - blocks_eff_total += EFF_ALEN(len); + stage_cur_val = -j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); - new_hit_cnt = queued_paths + unique_crashes; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP8] += stage_max; + } else + --stage_max; + *(u16*)(out_buf + i) = orig; - /* Two walking bytes. */ + } - if (len < 2) goto skip_bitflip; + } - stage_name = "bitflip 16/8"; - stage_short = "flip16"; - stage_cur = 0; - stage_max = len - 1; + new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH16] += stage_max; - orig_hit_cnt = new_hit_cnt; + /* 32-bit arithmetics, both endians. */ - for (i = 0; i < len - 1; ++i) { + if (len < 4) goto skip_arith; - /* Let's consult the effector map... */ + stage_name = "arith 32/8"; + stage_short = "arith32"; + stage_cur = 0; + stage_max = 4 * (len - 3) * ARITH_MAX; - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - --stage_max; - continue; - } + orig_hit_cnt = new_hit_cnt; - stage_cur_byte = i; + for (i = 0; i < len - 3; ++i) { - *(u16*)(out_buf + i) ^= 0xFFFF; + u32 orig = *(u32*)(out_buf + i); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + /* Let's consult the effector map... */ - *(u16*)(out_buf + i) ^= 0xFFFF; + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + stage_max -= 4 * ARITH_MAX; + continue; - } + } - new_hit_cnt = queued_paths + unique_crashes; + stage_cur_byte = i; - stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP16] += stage_max; + for (j = 1; j <= ARITH_MAX; ++j) { + u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), + r3 = orig ^ SWAP32(SWAP32(orig) + j), + r4 = orig ^ SWAP32(SWAP32(orig) - j); + /* Little endian first. Same deal as with 16-bit: we only want to + try if the operation would have effect on more than two bytes. */ - if (len < 4) goto skip_bitflip; + stage_val_type = STAGE_VAL_LE; - /* Four walking bytes. */ + if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { - stage_name = "bitflip 32/8"; - stage_short = "flip32"; - stage_cur = 0; - stage_max = len - 3; + stage_cur_val = j; + *(u32*)(out_buf + i) = orig + j; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - orig_hit_cnt = new_hit_cnt; + } else - for (i = 0; i < len - 3; ++i) { + --stage_max; - /* Let's consult the effector map... 
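The "& 0xff" and "& 0xffff" guards used throughout the arith 16/8 and arith 32/8 stages above encode one idea: only run the wider operation when the carry or borrow actually crosses a byte boundary, because otherwise the narrower stage has already tried the same change. A short self-checking example of the 16-bit little-endian case (values chosen for illustration):

#include <assert.h>
#include <stdint.h>

int main(void) {

  uint8_t  j = 1;

  uint16_t a = 0x01ff;                 /* low byte 0xff: adding 1 carries into
                                          the high byte, so the 16-bit attempt
                                          is genuinely new                     */
  assert((a & 0xff) + j > 0xff);

  uint16_t b = 0x0142;                 /* low byte 0x42: adding 1 only touches
                                          that byte, arith 8/8 covered it, so
                                          this candidate gets skipped          */
  assert(!((b & 0xff) + j > 0xff));

  return 0;

}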
*/ - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - --stage_max; - continue; - } + if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { - stage_cur_byte = i; + stage_cur_val = -j; + *(u32*)(out_buf + i) = orig - j; - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } else - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + --stage_max; - } + /* Big endian next. */ - new_hit_cnt = queued_paths + unique_crashes; + stage_val_type = STAGE_VAL_BE; - stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP32] += stage_max; + if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { + stage_cur_val = j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + } else - skip_bitflip: + --stage_max; - if (no_arith) goto skip_arith; + if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { - /********************** - * ARITHMETIC INC/DEC * - **********************/ + stage_cur_val = -j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); - /* 8-bit arithmetics. */ + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_name = "arith 8/8"; - stage_short = "arith8"; - stage_cur = 0; - stage_max = 2 * len * ARITH_MAX; + } else + --stage_max; - stage_val_type = STAGE_VAL_LE; + *(u32*)(out_buf + i) = orig; - orig_hit_cnt = new_hit_cnt; + } - for (i = 0; i < len; ++i) { + } - u8 orig = out_buf[i]; + new_hit_cnt = queued_paths + unique_crashes; - /* Let's consult the effector map... */ + stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH32] += stage_max; - if (!eff_map[EFF_APOS(i)]) { - stage_max -= 2 * ARITH_MAX; - continue; - } +skip_arith: - stage_cur_byte = i; + /********************** + * INTERESTING VALUES * + **********************/ - for (j = 1; j <= ARITH_MAX; ++j) { + stage_name = "interest 8/8"; + stage_short = "int8"; + stage_cur = 0; + stage_max = len * sizeof(interesting_8); - u8 r = orig ^ (orig + j); + stage_val_type = STAGE_VAL_LE; - /* Do arithmetic operations only if the result couldn't be a product - of a bitflip. */ + orig_hit_cnt = new_hit_cnt; - if (!could_be_bitflip(r)) { + /* Setting 8-bit integers. */ - stage_cur_val = j; - out_buf[i] = orig + j; + for (i = 0; i < len; ++i) { - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + u8 orig = out_buf[i]; - } else --stage_max; + /* Let's consult the effector map... */ - r = orig ^ (orig - j); + if (!eff_map[EFF_APOS(i)]) { - if (!could_be_bitflip(r)) { + stage_max -= sizeof(interesting_8); + continue; - stage_cur_val = -j; - out_buf[i] = orig - j; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_cur_byte = i; - } else --stage_max; + for (j = 0; j < sizeof(interesting_8); ++j) { - out_buf[i] = orig; + /* Skip if the value could be a product of bitflips or arithmetics. 
*/ - } + if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || + could_be_arith(orig, (u8)interesting_8[j], 1)) { - } + --stage_max; + continue; - new_hit_cnt = queued_paths + unique_crashes; + } - stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH8] += stage_max; + stage_cur_val = interesting_8[j]; + out_buf[i] = interesting_8[j]; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + out_buf[i] = orig; + ++stage_cur; + } - /* 16-bit arithmetics, both endians. */ + } - if (len < 2) goto skip_arith; + new_hit_cnt = queued_paths + unique_crashes; - stage_name = "arith 16/8"; - stage_short = "arith16"; - stage_cur = 0; - stage_max = 4 * (len - 1) * ARITH_MAX; + stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST8] += stage_max; + /* Setting 16-bit integers, both endians. */ - orig_hit_cnt = new_hit_cnt; + if (no_arith || len < 2) goto skip_interest; - for (i = 0; i < len - 1; ++i) { + stage_name = "interest 16/8"; + stage_short = "int16"; + stage_cur = 0; + stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); - u16 orig = *(u16*)(out_buf + i); + orig_hit_cnt = new_hit_cnt; - /* Let's consult the effector map... */ + for (i = 0; i < len - 1; ++i) { - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } + u16 orig = *(u16*)(out_buf + i); - stage_cur_byte = i; + /* Let's consult the effector map... */ - for (j = 1; j <= ARITH_MAX; ++j) { + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - u16 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP16(SWAP16(orig) + j), - r4 = orig ^ SWAP16(SWAP16(orig) - j); + stage_max -= sizeof(interesting_16); + continue; - /* Try little endian addition and subtraction first. Do it only - if the operation would affect more than one byte (hence the - & 0xff overflow checks) and if it couldn't be a product of - a bitflip. */ + } - stage_val_type = STAGE_VAL_LE; + stage_cur_byte = i; - if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { + for (j = 0; j < sizeof(interesting_16) / 2; ++j) { - stage_cur_val = j; - *(u16*)(out_buf + i) = orig + j; + stage_cur_val = interesting_16[j]; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + /* Skip if this could be a product of a bitflip, arithmetics, + or single-byte interesting value insertion. */ - } else --stage_max; + if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && + !could_be_arith(orig, (u16)interesting_16[j], 2) && + !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { - if ((orig & 0xff) < j && !could_be_bitflip(r2)) { + stage_val_type = STAGE_VAL_LE; - stage_cur_val = -j; - *(u16*)(out_buf + i) = orig - j; + *(u16*)(out_buf + i) = interesting_16[j]; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - } else --stage_max; + } else - /* Big endian comes next. Same deal. 
*/ + --stage_max; - stage_val_type = STAGE_VAL_BE; + if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && + !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && + !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && + !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { + stage_val_type = STAGE_VAL_BE; - if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { + *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_cur_val = j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); + } else - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + --stage_max; - } else --stage_max; + } - if ((orig >> 8) < j && !could_be_bitflip(r4)) { + *(u16*)(out_buf + i) = orig; - stage_cur_val = -j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + new_hit_cnt = queued_paths + unique_crashes; - } else --stage_max; + stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST16] += stage_max; - *(u16*)(out_buf + i) = orig; + if (len < 4) goto skip_interest; - } + /* Setting 32-bit integers, both endians. */ - } + stage_name = "interest 32/8"; + stage_short = "int32"; + stage_cur = 0; + stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); - new_hit_cnt = queued_paths + unique_crashes; + orig_hit_cnt = new_hit_cnt; - stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH16] += stage_max; + for (i = 0; i < len - 3; ++i) { + u32 orig = *(u32*)(out_buf + i); + /* Let's consult the effector map... */ - /* 32-bit arithmetics, both endians. */ + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - if (len < 4) goto skip_arith; + stage_max -= sizeof(interesting_32) >> 1; + continue; - stage_name = "arith 32/8"; - stage_short = "arith32"; - stage_cur = 0; - stage_max = 4 * (len - 3) * ARITH_MAX; + } - orig_hit_cnt = new_hit_cnt; + stage_cur_byte = i; - for (i = 0; i < len - 3; ++i) { + for (j = 0; j < sizeof(interesting_32) / 4; ++j) { - u32 orig = *(u32*)(out_buf + i); + stage_cur_val = interesting_32[j]; - /* Let's consult the effector map... */ + /* Skip if this could be a product of a bitflip, arithmetics, + or word interesting value insertion. */ - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } + if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && + !could_be_arith(orig, interesting_32[j], 4) && + !could_be_interest(orig, interesting_32[j], 4, 0)) { - stage_cur_byte = i; + stage_val_type = STAGE_VAL_LE; - for (j = 1; j <= ARITH_MAX; ++j) { + *(u32*)(out_buf + i) = interesting_32[j]; - u32 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP32(SWAP32(orig) + j), - r4 = orig ^ SWAP32(SWAP32(orig) - j); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - /* Little endian first. Same deal as with 16-bit: we only want to - try if the operation would have effect on more than two bytes. 
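The extra "interesting_16[j] != SWAP16(interesting_16[j])" guard in the interest 16/8 stage above avoids a subtle duplicate: if a value reads the same in both byte orders, the big-endian insertion would be identical to the little-endian one just executed. A quick illustration (swap16 is a local helper and 0x4141 an arbitrary palindromic example):

#include <stdint.h>
#include <stdio.h>

static uint16_t swap16(uint16_t x) {

  return (uint16_t)((x << 8) | (x >> 8));

}

int main(void) {

  uint16_t v1 = 512;       /* 0x0200 vs 0x0002: the byte orders differ, so
                              both insertions are worth running               */
  uint16_t v2 = 0x4141;    /* palindromic: the BE attempt would be a dupe     */

  printf("%d %d\n", v1 != swap16(v1), v2 != swap16(v2));   /* prints "1 0" */
  return 0;

}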
*/ + } else - stage_val_type = STAGE_VAL_LE; + --stage_max; - if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { + if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && + !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && + !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && + !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { - stage_cur_val = j; - *(u32*)(out_buf + i) = orig + j; + stage_val_type = STAGE_VAL_BE; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - } else --stage_max; + } else - if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { + --stage_max; - stage_cur_val = -j; - *(u32*)(out_buf + i) = orig - j; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + *(u32*)(out_buf + i) = orig; - } else --stage_max; + } - /* Big endian next. */ + new_hit_cnt = queued_paths + unique_crashes; - stage_val_type = STAGE_VAL_BE; + stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST32] += stage_max; - if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { +skip_interest: - stage_cur_val = j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); + /******************** + * DICTIONARY STUFF * + ********************/ - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + if (!extras_cnt) goto skip_user_extras; - } else --stage_max; + /* Overwrite with user-supplied extras. */ - if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { + stage_name = "user extras (over)"; + stage_short = "ext_UO"; + stage_cur = 0; + stage_max = extras_cnt * len; - stage_cur_val = -j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); + stage_val_type = STAGE_VAL_NONE; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + orig_hit_cnt = new_hit_cnt; - } else --stage_max; + for (i = 0; i < len; ++i) { - *(u32*)(out_buf + i) = orig; + u32 last_len = 0; - } + stage_cur_byte = i; - } + /* Extras are sorted by size, from smallest to largest. This means + that we don't have to worry about restoring the buffer in + between writes at a particular offset determined by the outer + loop. */ - new_hit_cnt = queued_paths + unique_crashes; + for (j = 0; j < extras_cnt; ++j) { - stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH32] += stage_max; + /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also + skip them if there's no room to insert the payload, if the token + is redundant, or if its entire span has no bytes set in the effector + map. */ + if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || + extras[j].len > len - i || + !memcmp(extras[j].data, out_buf + i, extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { + --stage_max; + continue; - skip_arith: + } - /********************** - * INTERESTING VALUES * - **********************/ + last_len = extras[j].len; + memcpy(out_buf + i, extras[j].data, last_len); - stage_name = "interest 8/8"; - stage_short = "int8"; - stage_cur = 0; - stage_max = len * sizeof(interesting_8); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + } - stage_val_type = STAGE_VAL_LE; + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); - orig_hit_cnt = new_hit_cnt; + } - /* Setting 8-bit integers. 
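The four skip conditions in the "user extras (over)" stage above (probabilistic cap, no room, token already present, whole span ineffective) are independent cheap rejections applied before each execution. A sketch of the same test in isolation, illustrative only (max_det stands in for MAX_DET_EXTRAS, eff_scale for EFF_MAP_SCALE2, rand() for UR(); tok_len is assumed to be at least 1 and i less than len):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int skip_extra(const uint8_t* buf, uint32_t len, uint32_t i,
                      const uint8_t* tok, uint32_t tok_len, uint32_t extras_cnt,
                      uint32_t max_det, const uint8_t* eff, uint32_t eff_scale) {

  if (extras_cnt > max_det && (uint32_t)rand() % extras_cnt >= max_det)
    return 1;                                     /* probabilistic cap        */

  if (tok_len > len - i) return 1;                /* token would run past EOF */

  if (!memcmp(tok, buf + i, tok_len)) return 1;   /* token already there      */

  uint32_t first = i >> eff_scale, last = (i + tok_len - 1) >> eff_scale;
  if (!memchr(eff + first, 1, last - first + 1))
    return 1;                                     /* whole span ineffective   */

  return 0;

}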
*/ + new_hit_cnt = queued_paths + unique_crashes; - for (i = 0; i < len; ++i) { + stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UO] += stage_max; - u8 orig = out_buf[i]; + /* Insertion of user-supplied extras. */ - /* Let's consult the effector map... */ + stage_name = "user extras (insert)"; + stage_short = "ext_UI"; + stage_cur = 0; + stage_max = extras_cnt * len; - if (!eff_map[EFF_APOS(i)]) { - stage_max -= sizeof(interesting_8); - continue; - } + orig_hit_cnt = new_hit_cnt; - stage_cur_byte = i; + ex_tmp = ck_alloc(len + MAX_DICT_FILE); - for (j = 0; j < sizeof(interesting_8); ++j) { + for (i = 0; i <= len; ++i) { - /* Skip if the value could be a product of bitflips or arithmetics. */ + stage_cur_byte = i; - if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || - could_be_arith(orig, (u8)interesting_8[j], 1)) { - --stage_max; - continue; - } + for (j = 0; j < extras_cnt; ++j) { - stage_cur_val = interesting_8[j]; - out_buf[i] = interesting_8[j]; + if (len + extras[j].len > MAX_FILE) { - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + --stage_max; + continue; - out_buf[i] = orig; - ++stage_cur; + } - } + /* Insert token */ + memcpy(ex_tmp + i, extras[j].data, extras[j].len); - } + /* Copy tail */ + memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); - new_hit_cnt = queued_paths + unique_crashes; + if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { - stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST8] += stage_max; + ck_free(ex_tmp); + goto abandon_entry; + } + ++stage_cur; - /* Setting 16-bit integers, both endians. */ + } - if (no_arith || len < 2) goto skip_interest; + /* Copy head */ + ex_tmp[i] = out_buf[i]; - stage_name = "interest 16/8"; - stage_short = "int16"; - stage_cur = 0; - stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); + } + ck_free(ex_tmp); - orig_hit_cnt = new_hit_cnt; + new_hit_cnt = queued_paths + unique_crashes; - for (i = 0; i < len - 1; ++i) { + stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UI] += stage_max; - u16 orig = *(u16*)(out_buf + i); +skip_user_extras: - /* Let's consult the effector map... */ + if (!a_extras_cnt) goto skip_extras; - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= sizeof(interesting_16); - continue; - } + stage_name = "auto extras (over)"; + stage_short = "ext_AO"; + stage_cur = 0; + stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; - stage_cur_byte = i; + stage_val_type = STAGE_VAL_NONE; - for (j = 0; j < sizeof(interesting_16) / 2; ++j) { + orig_hit_cnt = new_hit_cnt; - stage_cur_val = interesting_16[j]; + for (i = 0; i < len; ++i) { - /* Skip if this could be a product of a bitflip, arithmetics, - or single-byte interesting value insertion. */ + u32 last_len = 0; - if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && - !could_be_arith(orig, (u16)interesting_16[j], 2) && - !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { + stage_cur_byte = i; - stage_val_type = STAGE_VAL_LE; + for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { - *(u16*)(out_buf + i) = interesting_16[j]; + /* See the comment in the earlier code; extras are sorted by size. 
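The "user extras (insert)" stage above grows the file: for each position it assembles head, token, and tail into the ex_tmp scratch buffer and runs that. A minimal sketch of the buffer assembly, illustrative only (the real loop never re-copies the head, it extends it one byte per outer iteration via ex_tmp[i] = out_buf[i]; this version recopies it for clarity):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static uint8_t* insert_token(const uint8_t* buf, uint32_t len, uint32_t i,
                             const uint8_t* tok, uint32_t tok_len,
                             uint32_t* new_len) {

  uint8_t* tmp = malloc(len + tok_len);
  if (!tmp) return NULL;

  memcpy(tmp, buf, i);                            /* head  */
  memcpy(tmp + i, tok, tok_len);                  /* token */
  memcpy(tmp + i + tok_len, buf + i, len - i);    /* tail  */

  *new_len = len + tok_len;
  return tmp;

}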
*/ - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + if (a_extras[j].len > len - i || + !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, + EFF_SPAN_ALEN(i, a_extras[j].len))) { - } else --stage_max; + --stage_max; + continue; - if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && - !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && - !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && - !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { + } - stage_val_type = STAGE_VAL_BE; + last_len = a_extras[j].len; + memcpy(out_buf + i, a_extras[j].data, last_len); - *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - } else --stage_max; + ++stage_cur; - } + } - *(u16*)(out_buf + i) = orig; + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); - } + } - new_hit_cnt = queued_paths + unique_crashes; + new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST16] += stage_max; + stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_AO] += stage_max; +skip_extras: + /* If we made this to here without jumping to havoc_stage or abandon_entry, + we're properly done with deterministic steps and can mark it as such + in the .state/ directory. */ + if (!queue_cur->passed_det) mark_as_det_done(queue_cur); - if (len < 4) goto skip_interest; + /**************** + * RANDOM HAVOC * + ****************/ - /* Setting 32-bit integers, both endians. */ +havoc_stage: +pacemaker_fuzzing: - stage_name = "interest 32/8"; - stage_short = "int32"; - stage_cur = 0; - stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); + stage_cur_byte = -1; + /* The havoc stage mutation code is also invoked when splicing files; if the + splice_cycle variable is set, generate different descriptions and such. */ - orig_hit_cnt = new_hit_cnt; + if (!splice_cycle) { - for (i = 0; i < len - 3; ++i) { + stage_name = "MOpt-havoc"; + stage_short = "MOpt_havoc"; + stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score / + havoc_div / 100; - u32 orig = *(u32*)(out_buf + i); + } else { - /* Let's consult the effector map... */ + static u8 tmp[32]; - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= sizeof(interesting_32) >> 1; - continue; - } + perf_score = orig_perf; - stage_cur_byte = i; + sprintf(tmp, "MOpt-core-splice %u", splice_cycle); + stage_name = tmp; + stage_short = "MOpt_core_splice"; + stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - for (j = 0; j < sizeof(interesting_32) / 4; ++j) { + } - stage_cur_val = interesting_32[j]; + s32 temp_len_puppet; + cur_ms_lv = get_cur_time(); - /* Skip if this could be a product of a bitflip, arithmetics, - or word interesting value insertion. 
*/ + // for (; swarm_now < swarm_num; ++swarm_now) + { - if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && - !could_be_arith(orig, interesting_32[j], 4) && - !could_be_interest(orig, interesting_32[j], 4, 0)) { + if (key_puppet == 1) { - stage_val_type = STAGE_VAL_LE; + if (unlikely(orig_hit_cnt_puppet == 0)) { - *(u32*)(out_buf + i) = interesting_32[j]; + orig_hit_cnt_puppet = queued_paths + unique_crashes; + last_limit_time_start = get_cur_time(); + SPLICE_CYCLES_puppet = + (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + + SPLICE_CYCLES_puppet_low); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } - } else --stage_max; + } - if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && - !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && - !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && - !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { + { - stage_val_type = STAGE_VAL_BE; +#ifndef IGNORE_FINDS + havoc_stage_puppet: +#endif - *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_cur_byte = -1; - } else --stage_max; + /* The havoc stage mutation code is also invoked when splicing files; if + the splice_cycle variable is set, generate different descriptions and + such. */ - } + if (!splice_cycle) { - *(u32*)(out_buf + i) = orig; + stage_name = "MOpt core avoc"; + stage_short = "MOpt_core_havoc"; + stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * + perf_score / havoc_div / 100; - } + } else { - new_hit_cnt = queued_paths + unique_crashes; + static u8 tmp[32]; + perf_score = orig_perf; + sprintf(tmp, "MOpt core splice %u", splice_cycle); + stage_name = tmp; + stage_short = "MOpt_core_splice"; + stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST32] += stage_max; + } + if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; + temp_len = len; + orig_hit_cnt = queued_paths + unique_crashes; + havoc_queued = queued_paths; + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - skip_interest: + u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); + stage_cur_val = use_stacking; - /******************** - * DICTIONARY STUFF * - ********************/ + for (i = 0; i < operator_num; ++i) { - if (!extras_cnt) goto skip_user_extras; + core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet_v2[i]; - /* Overwrite with user-supplied extras. */ + } - stage_name = "user extras (over)"; - stage_short = "ext_UO"; - stage_cur = 0; - stage_max = extras_cnt * len; + for (i = 0; i < use_stacking; ++i) { + switch (select_algorithm()) { - stage_val_type = STAGE_VAL_NONE; + case 0: + /* Flip a single bit somewhere. Spooky! 
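Each havoc round applies a whole stack of the operators listed in this switch, not just one; the stack depth is drawn as a random power of two. A small sketch of that draw (rand() stands in for UR(), and HAVOC_STACK_POW2 is 7 in the stock config, giving depths of 2 through 128):

#include <stdio.h>
#include <stdlib.h>

#define HAVOC_STACK_POW2 7

int main(void) {

  for (int round = 0; round < 5; round++) {

    unsigned use_stacking = 1u << (1 + rand() % HAVOC_STACK_POW2);
    printf("round %d: stacking %u mutations\n", round, use_stacking);

  }

  return 0;

}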
*/ + FLIP_BIT(out_buf, UR(temp_len << 3)); + core_operator_cycles_puppet_v2[STAGE_FLIP1] += 1; + break; - orig_hit_cnt = new_hit_cnt; + case 1: + if (temp_len < 2) break; + temp_len_puppet = UR(temp_len << 3); + FLIP_BIT(out_buf, temp_len_puppet); + FLIP_BIT(out_buf, temp_len_puppet + 1); + core_operator_cycles_puppet_v2[STAGE_FLIP2] += 1; + break; - for (i = 0; i < len; ++i) { + case 2: + if (temp_len < 2) break; + temp_len_puppet = UR(temp_len << 3); + FLIP_BIT(out_buf, temp_len_puppet); + FLIP_BIT(out_buf, temp_len_puppet + 1); + FLIP_BIT(out_buf, temp_len_puppet + 2); + FLIP_BIT(out_buf, temp_len_puppet + 3); + core_operator_cycles_puppet_v2[STAGE_FLIP4] += 1; + break; - u32 last_len = 0; + case 3: + if (temp_len < 4) break; + out_buf[UR(temp_len)] ^= 0xFF; + core_operator_cycles_puppet_v2[STAGE_FLIP8] += 1; + break; - stage_cur_byte = i; + case 4: + if (temp_len < 8) break; + *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF; + core_operator_cycles_puppet_v2[STAGE_FLIP16] += 1; + break; - /* Extras are sorted by size, from smallest to largest. This means - that we don't have to worry about restoring the buffer in - between writes at a particular offset determined by the outer - loop. */ + case 5: + if (temp_len < 8) break; + *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF; + core_operator_cycles_puppet_v2[STAGE_FLIP32] += 1; + break; - for (j = 0; j < extras_cnt; ++j) { + case 6: + out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); + out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); + core_operator_cycles_puppet_v2[STAGE_ARITH8] += 1; + break; - /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also - skip them if there's no room to insert the payload, if the token - is redundant, or if its entire span has no bytes set in the effector - map. */ + case 7: + /* Randomly subtract from word, random endian. */ + if (temp_len < 8) break; + if (UR(2)) { - if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || - extras[j].len > len - i || - !memcmp(extras[j].data, out_buf + i, extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { + u32 pos = UR(temp_len - 1); + *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - --stage_max; - continue; + } else { - } + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); - last_len = extras[j].len; - memcpy(out_buf + i, extras[j].data, last_len); + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + /* Randomly add to word, random endian. */ + if (UR(2)) { - ++stage_cur; + u32 pos = UR(temp_len - 1); + *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } + } else { - /* Restore all the clobbered memory. */ - memcpy(out_buf + i, in_buf + i, last_len); + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); - } + } - new_hit_cnt = queued_paths + unique_crashes; + core_operator_cycles_puppet_v2[STAGE_ARITH16] += 1; + break; - stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UO] += stage_max; + case 8: + /* Randomly subtract from dword, random endian. */ + if (temp_len < 8) break; + if (UR(2)) { - /* Insertion of user-supplied extras. 
*/ + u32 pos = UR(temp_len - 3); + *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - stage_name = "user extras (insert)"; - stage_short = "ext_UI"; - stage_cur = 0; - stage_max = extras_cnt * len; + } else { + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); + } + /* Randomly add to dword, random endian. */ + if (UR(2)) { - orig_hit_cnt = new_hit_cnt; + u32 pos = UR(temp_len - 3); + *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); - ex_tmp = ck_alloc(len + MAX_DICT_FILE); + } else { - for (i = 0; i <= len; ++i) { + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); - stage_cur_byte = i; + } - for (j = 0; j < extras_cnt; ++j) { + core_operator_cycles_puppet_v2[STAGE_ARITH32] += 1; + break; - if (len + extras[j].len > MAX_FILE) { - --stage_max; - continue; - } + case 9: + /* Set byte to interesting value. */ + if (temp_len < 4) break; + out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; + core_operator_cycles_puppet_v2[STAGE_INTEREST8] += 1; + break; - /* Insert token */ - memcpy(ex_tmp + i, extras[j].data, extras[j].len); + case 10: + /* Set word to interesting value, randomly choosing endian. */ + if (temp_len < 8) break; + if (UR(2)) { - /* Copy tail */ - memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); + *(u16*)(out_buf + UR(temp_len - 1)) = + interesting_16[UR(sizeof(interesting_16) >> 1)]; - if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { - ck_free(ex_tmp); - goto abandon_entry; - } + } else { - ++stage_cur; + *(u16*)(out_buf + UR(temp_len - 1)) = + SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]); - } + } - /* Copy head */ - ex_tmp[i] = out_buf[i]; + core_operator_cycles_puppet_v2[STAGE_INTEREST16] += 1; + break; - } + case 11: + /* Set dword to interesting value, randomly choosing endian. */ - ck_free(ex_tmp); + if (temp_len < 8) break; - new_hit_cnt = queued_paths + unique_crashes; + if (UR(2)) { - stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UI] += stage_max; + *(u32*)(out_buf + UR(temp_len - 3)) = + interesting_32[UR(sizeof(interesting_32) >> 2)]; - skip_user_extras: + } else { - if (!a_extras_cnt) goto skip_extras; + *(u32*)(out_buf + UR(temp_len - 3)) = + SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]); - stage_name = "auto extras (over)"; - stage_short = "ext_AO"; - stage_cur = 0; - stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; + } + core_operator_cycles_puppet_v2[STAGE_INTEREST32] += 1; + break; - stage_val_type = STAGE_VAL_NONE; + case 12: - orig_hit_cnt = new_hit_cnt; + /* Just set a random byte to a random value. Because, + why not. We use XOR with 1-255 to eliminate the + possibility of a no-op. */ - for (i = 0; i < len; ++i) { + out_buf[UR(temp_len)] ^= 1 + UR(255); + core_operator_cycles_puppet_v2[STAGE_RANDOMBYTE] += 1; + break; - u32 last_len = 0; + case 13: { - stage_cur_byte = i; + /* Delete bytes. We're making this a bit more likely + than insertion (the next option) in hopes of keeping + files reasonably small. */ - for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { + u32 del_from, del_len; - /* See the comment in the earlier code; extras are sorted by size. */ + if (temp_len < 2) break; - if (a_extras[j].len > len - i || - !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { + /* Don't delete too much. 
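The delete operator described above removes a block by sliding the tail of the buffer left over it; only the length bookkeeping changes, no reallocation happens. A compact sketch of that operation, illustrative only (del_len would come from choose_block_len(temp_len - 1) in the real code, and rand() stands in for UR()):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static uint32_t delete_block(uint8_t* buf, uint32_t len, uint32_t del_len) {

  if (len < 2 || del_len >= len) return len;      /* nothing sensible to do */

  uint32_t del_from = (uint32_t)rand() % (len - del_len + 1);

  memmove(buf + del_from, buf + del_from + del_len,
          len - del_from - del_len);              /* slide the tail left    */

  return len - del_len;                           /* new length             */

}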
*/ - --stage_max; - continue; + del_len = choose_block_len(temp_len - 1); - } + del_from = UR(temp_len - del_len + 1); - last_len = a_extras[j].len; - memcpy(out_buf + i, a_extras[j].data, last_len); + memmove(out_buf + del_from, out_buf + del_from + del_len, + temp_len - del_from - del_len); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + temp_len -= del_len; + core_operator_cycles_puppet_v2[STAGE_DELETEBYTE] += 1; + break; - ++stage_cur; + } - } + case 14: - /* Restore all the clobbered memory. */ - memcpy(out_buf + i, in_buf + i, last_len); + if (temp_len + HAVOC_BLK_XL < MAX_FILE) { - } + /* Clone bytes (75%) or insert a block of constant bytes (25%). + */ - new_hit_cnt = queued_paths + unique_crashes; + u8 actually_clone = UR(4); + u32 clone_from, clone_to, clone_len; + u8* new_buf; - stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_AO] += stage_max; + if (actually_clone) { - skip_extras: + clone_len = choose_block_len(temp_len); + clone_from = UR(temp_len - clone_len + 1); - /* If we made this to here without jumping to havoc_stage or abandon_entry, - we're properly done with deterministic steps and can mark it as such - in the .state/ directory. */ + } else { - if (!queue_cur->passed_det) mark_as_det_done(queue_cur); + clone_len = choose_block_len(HAVOC_BLK_XL); + clone_from = 0; - /**************** - * RANDOM HAVOC * - ****************/ + } - havoc_stage: - pacemaker_fuzzing: + clone_to = UR(temp_len); + new_buf = ck_alloc_nozero(temp_len + clone_len); - stage_cur_byte = -1; + /* Head */ - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. */ + memcpy(new_buf, out_buf, clone_to); - if (!splice_cycle) { + /* Inserted part */ - stage_name = "MOpt-havoc"; - stage_short = "MOpt_havoc"; - stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; + if (actually_clone) + memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); + else + memset(new_buf + clone_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); - } else { + /* Tail */ + memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, + temp_len - clone_to); - static u8 tmp[32]; + ck_free(out_buf); + out_buf = new_buf; + temp_len += clone_len; + core_operator_cycles_puppet_v2[STAGE_Clone75] += 1; - perf_score = orig_perf; + } - sprintf(tmp, "MOpt-core-splice %u", splice_cycle); - stage_name = tmp; - stage_short = "MOpt_core_splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; + break; - } + case 15: { - s32 temp_len_puppet; - cur_ms_lv = get_cur_time(); + /* Overwrite bytes with a randomly selected chunk (75%) or fixed + bytes (25%). */ - //for (; swarm_now < swarm_num; ++swarm_now) - { - if (key_puppet == 1) { - if (unlikely(orig_hit_cnt_puppet == 0)) { - orig_hit_cnt_puppet = queued_paths + unique_crashes; - last_limit_time_start = get_cur_time(); - SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low); - } - } - { -#ifndef IGNORE_FINDS - havoc_stage_puppet: -#endif + u32 copy_from, copy_to, copy_len; - stage_cur_byte = -1; - - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. */ - - if (!splice_cycle) { - stage_name = "MOpt core avoc"; - stage_short = "MOpt_core_havoc"; - stage_max = (doing_det ? 
HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; - } else { - static u8 tmp[32]; - perf_score = orig_perf; - sprintf(tmp, "MOpt core splice %u", splice_cycle); - stage_name = tmp; - stage_short = "MOpt_core_splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - } - - if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; - temp_len = len; - orig_hit_cnt = queued_paths + unique_crashes; - havoc_queued = queued_paths; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); - stage_cur_val = use_stacking; - - for (i = 0; i < operator_num; ++i) { - core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet_v2[i]; - } - - for (i = 0; i < use_stacking; ++i) { - - switch (select_algorithm()) { - - case 0: - /* Flip a single bit somewhere. Spooky! */ - FLIP_BIT(out_buf, UR(temp_len << 3)); - core_operator_cycles_puppet_v2[STAGE_FLIP1] += 1; - break; - - - case 1: - if (temp_len < 2) break; - temp_len_puppet = UR(temp_len << 3); - FLIP_BIT(out_buf, temp_len_puppet); - FLIP_BIT(out_buf, temp_len_puppet + 1); - core_operator_cycles_puppet_v2[STAGE_FLIP2] += 1; - break; - - case 2: - if (temp_len < 2) break; - temp_len_puppet = UR(temp_len << 3); - FLIP_BIT(out_buf, temp_len_puppet); - FLIP_BIT(out_buf, temp_len_puppet + 1); - FLIP_BIT(out_buf, temp_len_puppet + 2); - FLIP_BIT(out_buf, temp_len_puppet + 3); - core_operator_cycles_puppet_v2[STAGE_FLIP4] += 1; - break; - - case 3: - if (temp_len < 4) break; - out_buf[UR(temp_len)] ^= 0xFF; - core_operator_cycles_puppet_v2[STAGE_FLIP8] += 1; - break; - - case 4: - if (temp_len < 8) break; - *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF; - core_operator_cycles_puppet_v2[STAGE_FLIP16] += 1; - break; - - case 5: - if (temp_len < 8) break; - *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF; - core_operator_cycles_puppet_v2[STAGE_FLIP32] += 1; - break; - - case 6: - out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); - out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); - core_operator_cycles_puppet_v2[STAGE_ARITH8] += 1; - break; - - case 7: - /* Randomly subtract from word, random endian. */ - if (temp_len < 8) break; - if (UR(2)) { - u32 pos = UR(temp_len - 1); - *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } else { - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); - } - /* Randomly add to word, random endian. */ - if (UR(2)) { - u32 pos = UR(temp_len - 1); - *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } else { - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); - } - core_operator_cycles_puppet_v2[STAGE_ARITH16] += 1; - break; - - - case 8: - /* Randomly subtract from dword, random endian. */ - if (temp_len < 8) break; - if (UR(2)) { - u32 pos = UR(temp_len - 3); - *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } else { - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); - } - /* Randomly add to dword, random endian. */ - if (UR(2)) { - u32 pos = UR(temp_len - 3); - *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } else { - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); - } - core_operator_cycles_puppet_v2[STAGE_ARITH32] += 1; - break; - - - case 9: - /* Set byte to interesting value. 
*/ - if (temp_len < 4) break; - out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; - core_operator_cycles_puppet_v2[STAGE_INTEREST8] += 1; - break; - - case 10: - /* Set word to interesting value, randomly choosing endian. */ - if (temp_len < 8) break; - if (UR(2)) { - *(u16*)(out_buf + UR(temp_len - 1)) = - interesting_16[UR(sizeof(interesting_16) >> 1)]; - } else { - *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16( - interesting_16[UR(sizeof(interesting_16) >> 1)]); - } - core_operator_cycles_puppet_v2[STAGE_INTEREST16] += 1; - break; - - - case 11: - /* Set dword to interesting value, randomly choosing endian. */ - - if (temp_len < 8) break; - - if (UR(2)) { - *(u32*)(out_buf + UR(temp_len - 3)) = - interesting_32[UR(sizeof(interesting_32) >> 2)]; - } else { - *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32( - interesting_32[UR(sizeof(interesting_32) >> 2)]); - } - core_operator_cycles_puppet_v2[STAGE_INTEREST32] += 1; - break; + if (temp_len < 2) break; + copy_len = choose_block_len(temp_len - 1); - case 12: + copy_from = UR(temp_len - copy_len + 1); + copy_to = UR(temp_len - copy_len + 1); - /* Just set a random byte to a random value. Because, - why not. We use XOR with 1-255 to eliminate the - possibility of a no-op. */ + if (UR(4)) { - out_buf[UR(temp_len)] ^= 1 + UR(255); - core_operator_cycles_puppet_v2[STAGE_RANDOMBYTE] += 1; - break; + if (copy_from != copy_to) + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + } else - case 13: { + memset(out_buf + copy_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); + core_operator_cycles_puppet_v2[STAGE_OverWrite75] += 1; + break; - /* Delete bytes. We're making this a bit more likely - than insertion (the next option) in hopes of keeping - files reasonably small. */ + } - u32 del_from, del_len; + } - if (temp_len < 2) break; + } - /* Don't delete too much. */ + tmp_core_time += 1; - del_len = choose_block_len(temp_len - 1); + u64 temp_total_found = queued_paths + unique_crashes; - del_from = UR(temp_len - del_len + 1); + if (common_fuzz_stuff(argv, out_buf, temp_len)) + goto abandon_entry_puppet; - memmove(out_buf + del_from, out_buf + del_from + del_len, - temp_len - del_from - del_len); + /* out_buf might have been mangled a bit, so let's restore it to its + original size and shape. */ - temp_len -= del_len; - core_operator_cycles_puppet_v2[STAGE_DELETEBYTE] += 1; - break; + if (temp_len < len) out_buf = ck_realloc(out_buf, len); + temp_len = len; + memcpy(out_buf, in_buf, len); - } + /* If we're finding new stuff, let's run for a bit longer, limits + permitting. */ - case 14: + if (queued_paths != havoc_queued) { - if (temp_len + HAVOC_BLK_XL < MAX_FILE) { + if (perf_score <= havoc_max_mult * 100) { - /* Clone bytes (75%) or insert a block of constant bytes (25%). 
*/ + stage_max *= 2; + perf_score *= 2; - u8 actually_clone = UR(4); - u32 clone_from, clone_to, clone_len; - u8* new_buf; + } - if (actually_clone) { + havoc_queued = queued_paths; - clone_len = choose_block_len(temp_len); - clone_from = UR(temp_len - clone_len + 1); + } - } else { + if (unlikely(queued_paths + unique_crashes > temp_total_found)) { - clone_len = choose_block_len(HAVOC_BLK_XL); - clone_from = 0; + u64 temp_temp_puppet = + queued_paths + unique_crashes - temp_total_found; + total_puppet_find = total_puppet_find + temp_temp_puppet; + for (i = 0; i < 16; ++i) { - } + if (core_operator_cycles_puppet_v2[i] > + core_operator_cycles_puppet_v3[i]) + core_operator_finds_puppet_v2[i] += temp_temp_puppet; - clone_to = UR(temp_len); + } - new_buf = ck_alloc_nozero(temp_len + clone_len); + } - /* Head */ + } - memcpy(new_buf, out_buf, clone_to); + new_hit_cnt = queued_paths + unique_crashes; - /* Inserted part */ +#ifndef IGNORE_FINDS - if (actually_clone) - memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); - else - memset(new_buf + clone_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); + /************ + * SPLICING * + ************/ - /* Tail */ - memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, - temp_len - clone_to); + retry_splicing_puppet: - ck_free(out_buf); - out_buf = new_buf; - temp_len += clone_len; - core_operator_cycles_puppet_v2[STAGE_Clone75] += 1; - } + if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet && + queued_paths > 1 && queue_cur->len > 1) { - break; + struct queue_entry* target; + u32 tid, split_at; + u8* new_buf; + s32 f_diff, l_diff; - case 15: { + /* First of all, if we've modified in_buf for havoc, let's clean that + up... */ - /* Overwrite bytes with a randomly selected chunk (75%) or fixed - bytes (25%). */ + if (in_buf != orig_in) { - u32 copy_from, copy_to, copy_len; + ck_free(in_buf); + in_buf = orig_in; + len = queue_cur->len; - if (temp_len < 2) break; + } - copy_len = choose_block_len(temp_len - 1); + /* Pick a random queue entry and seek to it. Don't splice with yourself. + */ - copy_from = UR(temp_len - copy_len + 1); - copy_to = UR(temp_len - copy_len + 1); + do { - if (UR(4)) { + tid = UR(queued_paths); - if (copy_from != copy_to) - memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + } while (tid == current_entry); - } - else memset(out_buf + copy_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); - core_operator_cycles_puppet_v2[STAGE_OverWrite75] += 1; - break; + splicing_with = tid; + target = queue; - } + while (tid >= 100) { + target = target->next_100; + tid -= 100; - } + } - } + while (tid--) + target = target->next; - tmp_core_time += 1; + /* Make sure that the target has a reasonable length. */ - u64 temp_total_found = queued_paths + unique_crashes; + while (target && (target->len < 2 || target == queue_cur)) { - if (common_fuzz_stuff(argv, out_buf, temp_len)) - goto abandon_entry_puppet; + target = target->next; + ++splicing_with; - /* out_buf might have been mangled a bit, so let's restore it to its - original size and shape. */ + } - if (temp_len < len) out_buf = ck_realloc(out_buf, len); - temp_len = len; - memcpy(out_buf, in_buf, len); + if (!target) goto retry_splicing_puppet; - /* If we're finding new stuff, let's run for a bit longer, limits - permitting. */ + /* Read the testcase into a new buffer. 
*/ - if (queued_paths != havoc_queued) { + fd = open(target->fname, O_RDONLY); - if (perf_score <= havoc_max_mult * 100) { - stage_max *= 2; - perf_score *= 2; - } + if (fd < 0) PFATAL("Unable to open '%s'", target->fname); - havoc_queued = queued_paths; + new_buf = ck_alloc_nozero(target->len); - } + ck_read(fd, new_buf, target->len, target->fname); - if (unlikely(queued_paths + unique_crashes > temp_total_found)) - { - u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found; - total_puppet_find = total_puppet_find + temp_temp_puppet; - for (i = 0; i < 16; ++i) - { - if (core_operator_cycles_puppet_v2[i] > core_operator_cycles_puppet_v3[i]) - core_operator_finds_puppet_v2[i] += temp_temp_puppet; - } - } + close(fd); - } + /* Find a suitable splicin g location, somewhere between the first and + the last differing byte. Bail out if the difference is just a single + byte or so. */ - new_hit_cnt = queued_paths + unique_crashes; + locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); + if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { -#ifndef IGNORE_FINDS + ck_free(new_buf); + goto retry_splicing_puppet; - /************ - * SPLICING * - ************/ + } + /* Split somewhere between the first and last differing byte. */ - retry_splicing_puppet: + split_at = f_diff + UR(l_diff - f_diff); + /* Do the thing. */ + len = target->len; + memcpy(new_buf, in_buf, split_at); + in_buf = new_buf; + ck_free(out_buf); + out_buf = ck_alloc_nozero(len); + memcpy(out_buf, in_buf, len); - if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet && - queued_paths > 1 && queue_cur->len > 1) { + goto havoc_stage_puppet; - struct queue_entry* target; - u32 tid, split_at; - u8* new_buf; - s32 f_diff, l_diff; + } - /* First of all, if we've modified in_buf for havoc, let's clean that - up... */ +#endif /* !IGNORE_FINDS */ - if (in_buf != orig_in) { - ck_free(in_buf); - in_buf = orig_in; - len = queue_cur->len; - } + ret_val = 0; + abandon_entry: + abandon_entry_puppet: - /* Pick a random queue entry and seek to it. Don't splice with yourself. */ + if (splice_cycle >= SPLICE_CYCLES_puppet) + SPLICE_CYCLES_puppet = + (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + + SPLICE_CYCLES_puppet_low); - do { tid = UR(queued_paths); } while (tid == current_entry); + splicing_with = -1; - splicing_with = tid; - target = queue; + munmap(orig_in, queue_cur->len); - while (tid >= 100) { target = target->next_100; tid -= 100; } - while (tid--) target = target->next; + if (in_buf != orig_in) ck_free(in_buf); + ck_free(out_buf); + ck_free(eff_map); - /* Make sure that the target has a reasonable length. */ + if (key_puppet == 1) { - while (target && (target->len < 2 || target == queue_cur)) { - target = target->next; - ++splicing_with; - } + if (unlikely(queued_paths + unique_crashes > + ((queued_paths + unique_crashes) * limit_time_bound + + orig_hit_cnt_puppet))) { - if (!target) goto retry_splicing_puppet; + key_puppet = 0; + cur_ms_lv = get_cur_time(); + new_hit_cnt = queued_paths + unique_crashes; + orig_hit_cnt_puppet = 0; + last_limit_time_start = 0; - /* Read the testcase into a new buffer. 
*/ + } - fd = open(target->fname, O_RDONLY); + } - if (fd < 0) PFATAL("Unable to open '%s'", target->fname); + if (unlikely(tmp_core_time > period_core)) { - new_buf = ck_alloc_nozero(target->len); + total_pacemaker_time += tmp_core_time; + tmp_core_time = 0; + temp_puppet_find = total_puppet_find; + new_hit_cnt = queued_paths + unique_crashes; - ck_read(fd, new_buf, target->len, target->fname); + u64 temp_stage_finds_puppet = 0; + for (i = 0; i < operator_num; ++i) { - close(fd); + core_operator_finds_puppet[i] = core_operator_finds_puppet_v2[i]; + core_operator_cycles_puppet[i] = core_operator_cycles_puppet_v2[i]; + temp_stage_finds_puppet += core_operator_finds_puppet[i]; - /* Find a suitable splicin g location, somewhere between the first and - the last differing byte. Bail out if the difference is just a single - byte or so. */ + } - locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); + key_module = 2; - if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { - ck_free(new_buf); - goto retry_splicing_puppet; - } + old_hit_count = new_hit_cnt; - /* Split somewhere between the first and last differing byte. */ + } - split_at = f_diff + UR(l_diff - f_diff); + return ret_val; - /* Do the thing. */ + } - len = target->len; - memcpy(new_buf, in_buf, split_at); - in_buf = new_buf; - ck_free(out_buf); - out_buf = ck_alloc_nozero(len); - memcpy(out_buf, in_buf, len); + } - goto havoc_stage_puppet; +#undef FLIP_BIT - } +} -#endif /* !IGNORE_FINDS */ +void pso_updating(void) { - ret_val = 0; - abandon_entry: - abandon_entry_puppet: + g_now += 1; + if (g_now > g_max) g_now = 0; + w_now = (w_init - w_end) * (g_max - g_now) / (g_max) + w_end; + int tmp_swarm, i, j; + u64 temp_operator_finds_puppet = 0; + for (i = 0; i < operator_num; ++i) { - if (splice_cycle >= SPLICE_CYCLES_puppet) - SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low); + operator_finds_puppet[i] = core_operator_finds_puppet[i]; + for (j = 0; j < swarm_num; ++j) { - splicing_with = -1; + operator_finds_puppet[i] = + operator_finds_puppet[i] + stage_finds_puppet[j][i]; + } - munmap(orig_in, queue_cur->len); + temp_operator_finds_puppet = + temp_operator_finds_puppet + operator_finds_puppet[i]; - if (in_buf != orig_in) ck_free(in_buf); - ck_free(out_buf); - ck_free(eff_map); + } + for (i = 0; i < operator_num; ++i) { - if (key_puppet == 1) - { - if (unlikely(queued_paths + unique_crashes > ((queued_paths + unique_crashes)*limit_time_bound + orig_hit_cnt_puppet))) - { - key_puppet = 0; - cur_ms_lv = get_cur_time(); - new_hit_cnt = queued_paths + unique_crashes; - orig_hit_cnt_puppet = 0; - last_limit_time_start = 0; - } - } + if (operator_finds_puppet[i]) + G_best[i] = (double)((double)(operator_finds_puppet[i]) / + (double)(temp_operator_finds_puppet)); + } - if (unlikely(tmp_core_time > period_core)) - { - total_pacemaker_time += tmp_core_time; - tmp_core_time = 0; - temp_puppet_find = total_puppet_find; - new_hit_cnt = queued_paths + unique_crashes; + for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) { - u64 temp_stage_finds_puppet = 0; - for (i = 0; i < operator_num; ++i) - { + double x_temp = 0.0; + for (i = 0; i < operator_num; ++i) { - core_operator_finds_puppet[i] = core_operator_finds_puppet_v2[i]; - core_operator_cycles_puppet[i] = core_operator_cycles_puppet_v2[i]; - temp_stage_finds_puppet += core_operator_finds_puppet[i]; - } + probability_now[tmp_swarm][i] = 0.0; + v_now[tmp_swarm][i] = + w_now * v_now[tmp_swarm][i] + + RAND_C * 
(L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + + RAND_C * (G_best[i] - x_now[tmp_swarm][i]); + x_now[tmp_swarm][i] += v_now[tmp_swarm][i]; + if (x_now[tmp_swarm][i] > v_max) + x_now[tmp_swarm][i] = v_max; + else if (x_now[tmp_swarm][i] < v_min) + x_now[tmp_swarm][i] = v_min; + x_temp += x_now[tmp_swarm][i]; - key_module = 2; + } - old_hit_count = new_hit_cnt; - } - return ret_val; - } - } + for (i = 0; i < operator_num; ++i) { + x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp; + if (likely(i != 0)) + probability_now[tmp_swarm][i] = + probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i]; + else + probability_now[tmp_swarm][i] = x_now[tmp_swarm][i]; -#undef FLIP_BIT + } -} + if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || + probability_now[tmp_swarm][operator_num - 1] > 1.01) + FATAL("ERROR probability"); + } -void pso_updating(void) { + swarm_now = 0; + key_module = 0; - g_now += 1; - if (g_now > g_max) g_now = 0; - w_now = (w_init - w_end)*(g_max - g_now) / (g_max)+w_end; - int tmp_swarm, i, j; - u64 temp_operator_finds_puppet = 0; - for (i = 0; i < operator_num; ++i) - { - operator_finds_puppet[i] = core_operator_finds_puppet[i]; - - for (j = 0; j < swarm_num; ++j) - { - operator_finds_puppet[i] = operator_finds_puppet[i] + stage_finds_puppet[j][i]; - } - temp_operator_finds_puppet = temp_operator_finds_puppet + operator_finds_puppet[i]; - } - - for (i = 0; i < operator_num; ++i) - { - if (operator_finds_puppet[i]) - G_best[i] = (double)((double)(operator_finds_puppet[i]) / (double)(temp_operator_finds_puppet)); - } - - for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) - { - double x_temp = 0.0; - for (i = 0; i < operator_num; ++i) - { - probability_now[tmp_swarm][i] = 0.0; - v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]); - x_now[tmp_swarm][i] += v_now[tmp_swarm][i]; - if (x_now[tmp_swarm][i] > v_max) - x_now[tmp_swarm][i] = v_max; - else if (x_now[tmp_swarm][i] < v_min) - x_now[tmp_swarm][i] = v_min; - x_temp += x_now[tmp_swarm][i]; - } - - for (i = 0; i < operator_num; ++i) - { - x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp; - if (likely(i != 0)) - probability_now[tmp_swarm][i] = probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i]; - else - probability_now[tmp_swarm][i] = x_now[tmp_swarm][i]; - } - if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || probability_now[tmp_swarm][operator_num - 1] > 1.01) FATAL("ERROR probability"); - } - swarm_now = 0; - key_module = 0; } - /* larger change for MOpt implementation: the original fuzz_one was renamed to fuzz_one_original. All documentation references to fuzz_one therefore mean fuzz_one_original */ u8 fuzz_one(char** argv) { - int key_val_lv = 0; - if (limit_time_sig == 0) { - key_val_lv = fuzz_one_original(argv); - } else { - if (key_module == 0) - key_val_lv = pilot_fuzzing(argv); - else if (key_module == 1) - key_val_lv = core_fuzzing(argv); - else if (key_module == 2) - pso_updating(); - } - - return key_val_lv; + + int key_val_lv = 0; + if (limit_time_sig == 0) { + + key_val_lv = fuzz_one_original(argv); + + } else { + + if (key_module == 0) + key_val_lv = pilot_fuzzing(argv); + else if (key_module == 1) + key_val_lv = core_fuzzing(argv); + else if (key_module == 2) + pso_updating(); + + } + + return key_val_lv; + } -- cgit 1.4.1
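A worked example for the stacked-havoc round in core_fuzzing() above, under the stock AFL configuration (HAVOC_STACK_POW2 = 7 in config.h, which is not part of this hunk): use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)), and since UR(n) draws uniformly below n, each round stacks a power-of-two number of mutations between 2 and 128. Every stacked step asks select_algorithm() for one of the 16 operators (cases 0 through 15) and bumps that operator's core_operator_cycles_puppet_v2 counter; if the subsequent execution yields new paths or crashes, each operator whose counter moved past the _v3 snapshot taken at the start of the round is credited with those finds.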
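As a reading aid for pso_updating(): the loop above is a standard inertia-weight particle-swarm step over each swarm's operator weights, restated below in equation form. The symbols mirror the globals in the hunk (w_init, w_end, g_now, g_max, v_min, v_max, x_now, L_best, G_best); r_1 and r_2 stand for the two RAND_C factors, treated here as independent random coefficients because the macro itself is not shown in this file, and finds_i is the per-operator total aggregated from the core module and every swarm.

    \[
    \begin{aligned}
      w &= w_{\mathrm{end}} + (w_{\mathrm{init}} - w_{\mathrm{end}})\,\frac{g_{\max} - g_{\mathrm{now}}}{g_{\max}}
          && \text{(inertia weight, shrinking as } g_{\mathrm{now}} \text{ grows)} \\
      G^{\mathrm{best}}_{i} &= \mathrm{finds}_{i} \Big/ \sum\nolimits_{j} \mathrm{finds}_{j}
          && \text{(updated only when } \mathrm{finds}_{i} > 0 \text{)} \\
      v_{s,i} &\leftarrow w\, v_{s,i} + r_{1}\,(L^{\mathrm{best}}_{s,i} - x_{s,i}) + r_{2}\,(G^{\mathrm{best}}_{i} - x_{s,i}) \\
      x_{s,i} &\leftarrow \mathrm{clamp}\bigl(x_{s,i} + v_{s,i},\; v_{\min},\; v_{\max}\bigr) \\
      x_{s,i} &\leftarrow x_{s,i} \Big/ \sum\nolimits_{j} x_{s,j}
          && \text{(normalize to a distribution)} \\
      P_{s,i} &= \sum\nolimits_{j \le i} x_{s,j}
          && \text{(cumulative form)}
    \end{aligned}
    \]

The last line is the cumulative distribution kept in probability_now[swarm][i]; its final entry is what the FATAL("ERROR probability") check guards, requiring it to land within [0.99, 1.01] after normalization.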