Diffstat (limited to 'src')
-rw-r--r--   src/afl-analyze.c        |  5
-rw-r--r--   src/afl-as.c             |  4
-rw-r--r--   src/afl-forkserver.c     |  6
-rw-r--r--   src/afl-fuzz-extras.c    |  8
-rw-r--r--   src/afl-fuzz-init.c      | 11
-rw-r--r--   src/afl-fuzz-one.c       | 99
-rw-r--r--   src/afl-fuzz-python.c    |  7
-rw-r--r--   src/afl-fuzz-redqueen.c  | 14
-rw-r--r--   src/afl-fuzz-run.c       |  4
-rw-r--r--   src/afl-fuzz-stats.c     |  6
-rw-r--r--   src/afl-fuzz.c           |  6
-rw-r--r--   src/afl-showmap.c        |  6
-rw-r--r--   src/afl-tmin.c           | 17
13 files changed, 107 insertions, 86 deletions
diff --git a/src/afl-analyze.c b/src/afl-analyze.c
index e6dd0fca..7c1c269a 100644
--- a/src/afl-analyze.c
+++ b/src/afl-analyze.c
@@ -384,7 +384,7 @@ static void show_legend(void) {
 /* Interpret and report a pattern in the input file. */
-static void dump_hex(u8 *buf, u32 len, u8 *b_data) {
+static void dump_hex(u32 len, u8 *b_data) {
   u32 i;
@@ -678,7 +678,7 @@ static void analyze(char **argv) {
   }
-  dump_hex(in_data, in_len, b_data);
+  dump_hex(in_len, b_data);
   SAYF("\n");
@@ -700,6 +700,7 @@ static void analyze(char **argv) {
 static void handle_stop_sig(int sig) {
+  (void)sig;
   stop_soon = 1;
   if (child_pid > 0) { kill(child_pid, SIGKILL); }
diff --git a/src/afl-as.c b/src/afl-as.c
index f16d6060..4ba4cc71 100644
--- a/src/afl-as.c
+++ b/src/afl-as.c
@@ -136,7 +136,7 @@ static void edit_params(int argc, char **argv) {
   as_params[argc] = 0;
-  for (i = 1; i < argc - 1; i++) {
+  for (i = 1; (s32)i < argc - 1; i++) {
     if (!strcmp(argv[i], "--64")) {
@@ -591,7 +591,7 @@ int main(int argc, char **argv) {
   rand_seed = tv.tv_sec ^ tv.tv_usec ^ getpid();
   // in fast systems where pids can repeat in the same seconds we need this
-  for (i = 1; i < argc; i++)
+  for (i = 1; (s32)i < argc; i++)
     for (j = 0; j < strlen(argv[i]); j++)
       rand_seed += argv[i][j];
diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c
index 47493eba..15935ab0 100644
--- a/src/afl-forkserver.c
+++ b/src/afl-forkserver.c
@@ -145,6 +145,10 @@ restart_select:
   if (likely(sret > 0)) {
 restart_read:
+    if (*stop_soon_p) {
+      // Early return - the user wants to quit.
+      return 0;
+    }
     len_read = read(fd, (u8 *)buf, 4);
     if (likely(len_read == 4)) {  // for speed we put this first
@@ -691,7 +695,7 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
   }
   offset = 0;
-  while (offset < status && (u8)dict[offset] + offset < status) {
+  while (offset < (u32)status && (u8)dict[offset] + offset < (u32)status) {
     fsrv->function_ptr(fsrv->function_opt, dict + offset + 1, (u8)dict[offset]);
diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c
index 12771cd7..097871c8 100644
--- a/src/afl-fuzz-extras.c
+++ b/src/afl-fuzz-extras.c
@@ -115,7 +115,7 @@ void load_extras_file(afl_state_t *afl, u8 *fname, u32 *min_len, u32 *max_len,
     if (*lptr == '@') {
       ++lptr;
-      if (atoi(lptr) > dict_level) { continue; }
+      if (atoi(lptr) > (s32)dict_level) { continue; }
       while (isdigit(*lptr)) {
         ++lptr;
@@ -402,7 +402,7 @@ void maybe_add_auto(void *afl_tmp, u8 *mem, u32 len) {
     while (i--) {
-      if (*((u32 *)mem) == interesting_32[i] ||
+      if (*((u32 *)mem) == (u32)interesting_32[i] ||
           *((u32 *)mem) == SWAP32(interesting_32[i])) {
         return;
@@ -480,7 +480,7 @@ sort_a_extras:
   /* Then, sort the top USE_AUTO_EXTRAS entries by size. */
-  qsort(afl->a_extras, MIN(USE_AUTO_EXTRAS, afl->a_extras_cnt),
+  qsort(afl->a_extras, MIN((u32)USE_AUTO_EXTRAS, afl->a_extras_cnt),
         sizeof(struct extra_data), compare_extras_len);
 }
@@ -494,7 +494,7 @@ void save_auto(afl_state_t *afl) {
   if (!afl->auto_changed) { return; }
   afl->auto_changed = 0;
-  for (i = 0; i < MIN(USE_AUTO_EXTRAS, afl->a_extras_cnt); ++i) {
+  for (i = 0; i < MIN((u32)USE_AUTO_EXTRAS, afl->a_extras_cnt); ++i) {
     u8 *fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", afl->out_dir, i);
diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c
index 2c17ffbb..1abdcede 100644
--- a/src/afl-fuzz-init.c
+++ b/src/afl-fuzz-init.c
@@ -110,7 +110,7 @@ void bind_to_free_cpu(afl_state_t *afl) {
   u8  cpu_used[4096] = {0};
   u8  lockfile[PATH_MAX] = "";
-  u32 i;
+  s32 i;
   if (afl->afl_env.afl_no_affinity) {
@@ -509,7 +509,7 @@ void read_foreign_testcases(afl_state_t *afl, int first) {
     afl->stage_cur = 0;
     afl->stage_max = 0;
-    for (i = 0; i < nl_cnt; ++i) {
+    for (i = 0; i < (u32)nl_cnt; ++i) {
       struct stat st;
@@ -667,7 +667,7 @@ void read_testcases(afl_state_t *afl) {
   }
-  for (i = 0; i < nl_cnt; ++i) {
+  for (i = 0; i < (u32)nl_cnt; ++i) {
     struct stat st;
@@ -2147,7 +2147,7 @@ void get_core_count(afl_state_t *afl) {
       WARNF("System under apparent load, performance may be spotty.");
-    } else if (cur_runnable + 1 <= afl->cpu_core_count) {
+    } else if ((s64)cur_runnable + 1 <= (s64)afl->cpu_core_count) {
       OKF("Try parallel jobs - see %s/parallel_fuzzing.md.", doc_path);
@@ -2201,6 +2201,7 @@ void fix_up_sync(afl_state_t *afl) {
 static void handle_resize(int sig) {
+  (void)sig;
   afl_states_clear_screen();
 }
@@ -2252,6 +2253,7 @@ void check_asan_opts(void) {
 static void handle_stop_sig(int sig) {
+  (void)sig;
   afl_states_stop();
 }
@@ -2260,6 +2262,7 @@ static void handle_stop_sig(int sig) {
 static void handle_skipreq(int sig) {
+  (void)sig;
   afl_states_request_skip();
 }
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index 1f0bf30e..9d09f6af 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -77,7 +77,7 @@ static int select_algorithm(afl_state_t *afl) {
 static u32 choose_block_len(afl_state_t *afl, u32 limit) {
   u32 min_value, max_value;
-  u32 rlim = MIN(afl->queue_cycle, 3);
+  u32 rlim = MIN(afl->queue_cycle, (u32)3);
   if (unlikely(!afl->run_over10m)) { rlim = 1; }
@@ -292,7 +292,7 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) {
   /* See if two-byte insertions over old_val could give us new_val. */
-  for (i = 0; i < blen - 1; ++i) {
+  for (i = 0; (s32)i < blen - 1; ++i) {
     for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
@@ -372,7 +372,9 @@ static void locate_diffs(u8 *ptr1, u8 *ptr2, u32 len, s32 *first, s32 *last) {
 u8 fuzz_one_original(afl_state_t *afl) {
-  s32 len, fd, temp_len, i, j;
+  s32 len, fd, temp_len;
+  u32 j;
+  u32 i;
   u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
   u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt = 0, prev_cksum;
   u32 splice_cycle = 0, perf_score = 100, orig_perf, eff_cnt = 1;
@@ -862,7 +864,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
      whole thing as worth fuzzing, since we wouldn't be saving much time
      anyway. */
-  if (eff_cnt != EFF_ALEN(len) &&
+  if (eff_cnt != (u32)EFF_ALEN(len) &&
       eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
     memset(eff_map, 1, EFF_ALEN(len));
@@ -893,7 +895,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len - 1; ++i) {
+  for (i = 0; (s32)i < len - 1; ++i) {
     /* Let's consult the effector map... */
@@ -931,7 +933,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len - 3; ++i) {
+  for (i = 0; (s32)i < len - 3; ++i) {
     /* Let's consult the effector map... */
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
@@ -977,7 +979,7 @@ skip_bitflip:
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len; ++i) {
+  for (i = 0; i < (u32)len; ++i) {
     u8 orig = out_buf[i];
@@ -1051,7 +1053,7 @@ skip_bitflip:
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len - 1; ++i) {
+  for (i = 0; i < (u32)len - 1; ++i) {
     u16 orig = *(u16 *)(out_buf + i);
@@ -1161,7 +1163,7 @@ skip_bitflip:
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len - 3; ++i) {
+  for (i = 0; i < (u32)len - 3; ++i) {
     u32 orig = *(u32 *)(out_buf + i);
@@ -1202,7 +1204,7 @@ skip_bitflip:
   }
-      if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
+      if ((orig & 0xffff) < (u32)j && !could_be_bitflip(r2)) {
        afl->stage_cur_val = -j;
        *(u32 *)(out_buf + i) = orig - j;
@@ -1234,7 +1236,7 @@ skip_bitflip:
   }
-      if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
+      if ((SWAP32(orig) & 0xffff) < (u32)j && !could_be_bitflip(r4)) {
        afl->stage_cur_val = -j;
        *(u32 *)(out_buf + i) = SWAP32(SWAP32(orig) - j);
@@ -1276,7 +1278,7 @@ skip_arith:
   /* Setting 8-bit integers. */
-  for (i = 0; i < len; ++i) {
+  for (i = 0; i < (u32)len; ++i) {
     u8 orig = out_buf[i];
@@ -1291,7 +1293,7 @@ skip_arith:
     afl->stage_cur_byte = i;
-    for (j = 0; j < sizeof(interesting_8); ++j) {
+    for (j = 0; j < (u32)sizeof(interesting_8); ++j) {
       /* Skip if the value could be a product of bitflips or arithmetics. */
@@ -1331,7 +1333,7 @@ skip_arith:
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len - 1; ++i) {
+  for (i = 0; (s32)i < len - 1; ++i) {
     u16 orig = *(u16 *)(out_buf + i);
@@ -1409,7 +1411,7 @@ skip_arith:
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len - 3; i++) {
+  for (i = 0; (s32)i < len - 3; i++) {
     u32 orig = *(u32 *)(out_buf + i);
@@ -1496,7 +1498,7 @@ skip_interest:
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len; ++i) {
+  for (i = 0; i < (u32)len; ++i) {
     u32 last_len = 0;
@@ -1556,7 +1558,7 @@ skip_interest:
   ex_tmp = ck_maybe_grow(BUF_PARAMS(ex), len + MAX_DICT_FILE);
-  for (i = 0; i <= len; ++i) {
+  for (i = 0; i <= (u32)len; ++i) {
     afl->stage_cur_byte = i;
@@ -1602,19 +1604,20 @@ skip_user_extras:
   afl->stage_name = "auto extras (over)";
   afl->stage_short = "ext_AO";
   afl->stage_cur = 0;
-  afl->stage_max = MIN(afl->a_extras_cnt, USE_AUTO_EXTRAS) * len;
+  afl->stage_max = MIN(afl->a_extras_cnt, (u32)USE_AUTO_EXTRAS) * len;
   afl->stage_val_type = STAGE_VAL_NONE;
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len; ++i) {
+  for (i = 0; i < (u32)len; ++i) {
     u32 last_len = 0;
     afl->stage_cur_byte = i;
-    for (j = 0; j < MIN(afl->a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
+    u32 min_extra_len = MIN(afl->a_extras_cnt, (u32)USE_AUTO_EXTRAS);
+    for (j = 0; j < min_extra_len; ++j) {
      /* See the comment in the earlier code; extras are sorted by size. */
@@ -2231,7 +2234,7 @@ havoc_stage:
          u32 extra_len = afl->a_extras[use_extra].len;
          u32 insert_at;
-          if (extra_len > temp_len) { break; }
+          if ((s32)extra_len > temp_len) { break; }
          insert_at = rand_below(afl, temp_len - extra_len + 1);
          memcpy(out_buf + insert_at, afl->a_extras[use_extra].data,
@@ -2245,7 +2248,7 @@ havoc_stage:
          u32 extra_len = afl->extras[use_extra].len;
          u32 insert_at;
-          if (extra_len > temp_len) { break; }
+          if ((s32)extra_len > temp_len) { break; }
          insert_at = rand_below(afl, temp_len - extra_len + 1);
          memcpy(out_buf + insert_at, afl->extras[use_extra].data,
@@ -2360,7 +2363,7 @@ havoc_stage:
          u32 copy_from, copy_to, copy_len;
          copy_len = choose_block_len(afl, new_len - 1);
-          if (copy_len > temp_len) copy_len = temp_len;
+          if ((s32)copy_len > temp_len) copy_len = temp_len;
          copy_from = rand_below(afl, new_len - copy_len + 1);
          copy_to = rand_below(afl, temp_len - copy_len + 1);
@@ -2517,7 +2520,7 @@ retry_splicing:
      the last differing byte. Bail out if the difference is just a single
      byte or so. */
-  locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
+  locate_diffs(in_buf, new_buf, MIN(len, (s64)target->len), &f_diff, &l_diff);
   if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { goto retry_splicing; }
@@ -2587,7 +2590,9 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
   }
-  s32 len, fd, temp_len, i, j;
+  s32 len, fd, temp_len;
+  u32 i;
+  u32 j;
   u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
   u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt = 0, cur_ms_lv, prev_cksum;
   u32 splice_cycle = 0, perf_score = 100, orig_perf, eff_cnt = 1;
@@ -2761,9 +2766,9 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
   cur_ms_lv = get_cur_time();
   if (!(afl->key_puppet == 0 &&
-        ((cur_ms_lv - afl->last_path_time < afl->limit_time_puppet) ||
+        ((cur_ms_lv - afl->last_path_time < (u32)afl->limit_time_puppet) ||
         (afl->last_crash_time != 0 &&
-          cur_ms_lv - afl->last_crash_time < afl->limit_time_puppet) ||
+          cur_ms_lv - afl->last_crash_time < (u32)afl->limit_time_puppet) ||
         afl->last_path_time == 0))) {
     afl->key_puppet = 1;
@@ -3058,7 +3063,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
      whole thing as worth fuzzing, since we wouldn't be saving much time
      anyway. */
-  if (eff_cnt != EFF_ALEN(len) &&
+  if (eff_cnt != (u32)EFF_ALEN(len) &&
       eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
     memset(eff_map, 1, EFF_ALEN(len));
@@ -3089,7 +3094,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len - 1; ++i) {
+  for (i = 0; (s32)i < len - 1; ++i) {
     /* Let's consult the effector map... */
@@ -3127,7 +3132,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len - 3; ++i) {
+  for (i = 0; (s32)i < len - 3; ++i) {
     /* Let's consult the effector map... */
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
@@ -3173,7 +3178,7 @@ skip_bitflip:
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len; ++i) {
+  for (i = 0; i < (u32)len; ++i) {
     u8 orig = out_buf[i];
@@ -3247,7 +3252,7 @@ skip_bitflip:
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len - 1; ++i) {
+  for (i = 0; (s32)i < len - 1; ++i) {
     u16 orig = *(u16 *)(out_buf + i);
@@ -3357,7 +3362,7 @@ skip_bitflip:
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len - 3; ++i) {
+  for (i = 0; (s32)i < len - 3; ++i) {
     u32 orig = *(u32 *)(out_buf + i);
@@ -3472,7 +3477,7 @@ skip_arith:
   /* Setting 8-bit integers. */
-  for (i = 0; i < len; ++i) {
+  for (i = 0; i < (u32)len; ++i) {
     u8 orig = out_buf[i];
@@ -3527,7 +3532,7 @@ skip_arith:
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len - 1; ++i) {
+  for (i = 0; (s32)i < len - 1; ++i) {
     u16 orig = *(u16 *)(out_buf + i);
@@ -3605,7 +3610,7 @@ skip_arith:
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len - 3; ++i) {
+  for (i = 0; (s32)i < len - 3; ++i) {
     u32 orig = *(u32 *)(out_buf + i);
@@ -3692,7 +3697,7 @@ skip_interest:
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len; ++i) {
+  for (i = 0; i < (u32)len; ++i) {
     u32 last_len = 0;
@@ -3752,7 +3757,7 @@ skip_interest:
   ex_tmp = ck_maybe_grow(BUF_PARAMS(ex), len + MAX_DICT_FILE);
-  for (i = 0; i <= len; ++i) {
+  for (i = 0; i <= (u32)len; ++i) {
     afl->stage_cur_byte = i;
@@ -3798,23 +3803,23 @@ skip_user_extras:
   afl->stage_name = "auto extras (over)";
   afl->stage_short = "ext_AO";
   afl->stage_cur = 0;
-  afl->stage_max = MIN(afl->a_extras_cnt, USE_AUTO_EXTRAS) * len;
+  afl->stage_max = MIN(afl->a_extras_cnt, (u32)USE_AUTO_EXTRAS) * len;
   afl->stage_val_type = STAGE_VAL_NONE;
   orig_hit_cnt = new_hit_cnt;
-  for (i = 0; i < len; ++i) {
+  for (i = 0; i < (u32)len; ++i) {
     u32 last_len = 0;
     afl->stage_cur_byte = i;
-    for (j = 0; j < MIN(afl->a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
+    for (j = 0; j < MIN(afl->a_extras_cnt, (u32)USE_AUTO_EXTRAS); ++j) {
      /* See the comment in the earlier code; extras are sorted by size. */
-      if (afl->a_extras[j].len > len - i ||
+      if ((s32)(afl->a_extras[j].len) > (s32)(len - i) ||
          !memcmp(afl->a_extras[j].data, out_buf + i, afl->a_extras[j].len) ||
          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, afl->a_extras[j].len))) {
@@ -4276,7 +4281,7 @@ pacemaker_fuzzing:
            u32 use_extra = rand_below(afl, afl->a_extras_cnt);
            u32 extra_len = afl->a_extras[use_extra].len;
-            if (extra_len > temp_len) break;
+            if (extra_len > (u32)temp_len) break;
            u32 insert_at = rand_below(afl, temp_len - extra_len + 1);
            memcpy(out_buf + insert_at, afl->a_extras[use_extra].data,
@@ -4289,7 +4294,7 @@ pacemaker_fuzzing:
            u32 use_extra = rand_below(afl, afl->extras_cnt);
            u32 extra_len = afl->extras[use_extra].len;
-            if (extra_len > temp_len) break;
+            if (extra_len > (u32)temp_len) break;
            u32 insert_at = rand_below(afl, temp_len - extra_len + 1);
            memcpy(out_buf + insert_at, afl->extras[use_extra].data,
@@ -4449,7 +4454,7 @@ pacemaker_fuzzing:
   retry_splicing_puppet:
-    if (afl->use_splicing && splice_cycle++ < afl->SPLICE_CYCLES_puppet &&
+    if (afl->use_splicing && splice_cycle++ < (u32)afl->SPLICE_CYCLES_puppet &&
        afl->queued_paths > 1 && afl->queue_cur->len > 1) {
      struct queue_entry *target;
@@ -4519,7 +4524,7 @@ pacemaker_fuzzing:
        the last differing byte. Bail out if the difference is just a single
        byte or so. */
-      locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
+      locate_diffs(in_buf, new_buf, MIN(len, (s32)target->len), &f_diff, &l_diff);
       if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
@@ -4551,7 +4556,7 @@ pacemaker_fuzzing:
   abandon_entry:
   abandon_entry_puppet:
-    if (splice_cycle >= afl->SPLICE_CYCLES_puppet) {
+    if ((s64)splice_cycle >= afl->SPLICE_CYCLES_puppet) {
      afl->SPLICE_CYCLES_puppet = (rand_below(
diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c
index 2044c97d..a077469e 100644
--- a/src/afl-fuzz-python.c
+++ b/src/afl-fuzz-python.c
@@ -30,6 +30,9 @@
 static void *unsupported(afl_state_t *afl, unsigned int seed) {
+  (void)afl;
+  (void)seed;
+
   FATAL("Python Mutator cannot be called twice yet");
   return NULL;
@@ -111,6 +114,8 @@ static size_t fuzz_py(void *py_mutator, u8 *buf, size_t buf_size, u8 **out_buf,
 static py_mutator_t *init_py_module(afl_state_t *afl, u8 *module_name) {
+  (void)afl;
+
   if (!module_name) { return NULL; }
   py_mutator_t *py = calloc(1, sizeof(py_mutator_t));
@@ -247,6 +252,8 @@ void finalize_py_module(void *py_mutator) {
 static void init_py(afl_state_t *afl, py_mutator_t *py_mutator,
                     unsigned int seed) {
+  (void)afl;
+
   PyObject *py_args, *py_value;
   /* Provide the init function a seed for the Python RNG */
diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c
index cb4c78df..1cfe8802 100644
--- a/src/afl-fuzz-redqueen.c
+++ b/src/afl-fuzz-redqueen.c
@@ -352,7 +352,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
   }
-  if (use_num && num == pattern) {
+  if (use_num && (u64) num == pattern) {
     size_t old_len = endptr - buf_8;
     size_t num_len = snprintf(NULL, 0, "%lld", num);
@@ -659,12 +659,12 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) {
 }
-static u8 rtn_extend_encoding(afl_state_t *afl, struct cmp_header *h,
+static u8 rtn_extend_encoding(afl_state_t *afl,
                               u8 *pattern, u8 *repl, u8 *o_pattern, u32 idx,
                               u8 *orig_buf, u8 *buf, u32 len, u8 *status) {
   u32 i;
-  u32 its_len = MIN(32, len - idx);
+  u32 its_len = MIN((u32)32, len - idx);
   u8 save[32];
   memcpy(save, &buf[idx], its_len);
@@ -728,7 +728,7 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) {
   for (idx = 0; idx < len && fails < 8; ++idx) {
-    if (unlikely(rtn_extend_encoding(afl, h, o->v0, o->v1, orig_o->v0, idx,
+    if (unlikely(rtn_extend_encoding(afl, o->v0, o->v1, orig_o->v0, idx,
                                     orig_buf, buf, len, &status))) {
      return 1;
@@ -745,7 +745,7 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) {
   }
-    if (unlikely(rtn_extend_encoding(afl, h, o->v1, o->v0, orig_o->v1, idx,
+    if (unlikely(rtn_extend_encoding(afl, o->v1, o->v0, orig_o->v1, idx,
                                     orig_buf, buf, len, &status))) {
      return 1;
@@ -853,12 +853,12 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len,
     if (afl->shm.cmp_map->headers[k].type == CMP_TYPE_INS) {
-      afl->stage_max += MIN((u32)afl->shm.cmp_map->headers[k].hits, CMP_MAP_H);
+      afl->stage_max += MIN((u32)(afl->shm.cmp_map->headers[k].hits), (u32)CMP_MAP_H);
     } else {
       afl->stage_max +=
-          MIN((u32)afl->shm.cmp_map->headers[k].hits, CMP_MAP_RTN_H);
+          MIN((u32)(afl->shm.cmp_map->headers[k].hits), (u32)CMP_MAP_RTN_H);
     }
diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c
index ed4a1081..8d652155 100644
--- a/src/afl-fuzz-run.c
+++ b/src/afl-fuzz-run.c
@@ -733,12 +733,12 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
   len_p2 = next_pow2(q->len);
-  remove_len = MAX(len_p2 / TRIM_START_STEPS, TRIM_MIN_BYTES);
+  remove_len = MAX(len_p2 / TRIM_START_STEPS, (u32)TRIM_MIN_BYTES);
   /* Continue until the number of steps gets too high or the stepover
      gets too small. */
-  while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, TRIM_MIN_BYTES)) {
+  while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, (u32)TRIM_MIN_BYTES)) {
     u32 remove_pos = remove_len;
diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
index 7b30b5ea..aeb290bd 100644
--- a/src/afl-fuzz-stats.c
+++ b/src/afl-fuzz-stats.c
@@ -890,12 +890,12 @@ void show_stats(afl_state_t *afl) {
   if (afl->cpu_aff >= 0) {
     SAYF("%s" cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, spacing,
-         MIN(afl->cpu_aff, 999), cpu_color, MIN(cur_utilization, 999));
+         MIN(afl->cpu_aff, 999), cpu_color, MIN(cur_utilization, (u32)999));
   } else {
     SAYF("%s" cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, spacing, cpu_color,
-         MIN(cur_utilization, 999));
+         MIN(cur_utilization, (u32)999));
   }
@@ -1081,7 +1081,7 @@ void show_init_stats(afl_state_t *afl) {
   if (afl->non_instrumented_mode && !(afl->afl_env.afl_hang_tmout)) {
-    afl->hang_tmout = MIN(EXEC_TIMEOUT, afl->fsrv.exec_tmout * 2 + 100);
+    afl->hang_tmout = MIN((u32)EXEC_TIMEOUT, afl->fsrv.exec_tmout * 2 + 100);
   }
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index c73a76c6..031c4049 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -79,7 +79,7 @@ static void at_exit() {
 /* Display usage hints. */
-static void usage(afl_state_t *afl, u8 *argv0, int more_help) {
+static void usage(u8 *argv0, int more_help) {
   SAYF(
      "\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n"
@@ -677,7 +677,7 @@ int main(int argc, char **argv_orig, char **envp) {
        u64 limit_time_puppet2 = afl->limit_time_puppet * 60 * 1000;
-        if (limit_time_puppet2 < afl->limit_time_puppet) {
+        if ((s32)limit_time_puppet2 < afl->limit_time_puppet) {
          FATAL("limit_time overflow");
@@ -811,7 +811,7 @@ int main(int argc, char **argv_orig, char **envp) {
   if (optind == argc || !afl->in_dir || !afl->out_dir || show_help) {
-    usage(afl, argv[0], show_help);
+    usage(argv[0], show_help);
   }
diff --git a/src/afl-showmap.c b/src/afl-showmap.c
index 71e975a1..4962006e 100644
--- a/src/afl-showmap.c
+++ b/src/afl-showmap.c
@@ -256,8 +256,7 @@ static u32 write_results_to_file(afl_forkserver_t *fsrv, u8 *outfile) {
 /* Execute target application. */
-static void showmap_run_target_forkserver(afl_forkserver_t *fsrv, char **argv,
-                                          u8 *mem, u32 len) {
+static void showmap_run_target_forkserver(afl_forkserver_t *fsrv, u8 *mem, u32 len) {
   afl_fsrv_write_to_testcase(fsrv, mem, len);
@@ -444,6 +443,7 @@ static void showmap_run_target(afl_forkserver_t *fsrv, char **argv) {
 static void handle_stop_sig(int sig) {
+  (void)sig;
   stop_soon = 1;
   afl_fsrv_killall();
@@ -1016,7 +1016,7 @@ int main(int argc, char **argv_orig, char **envp) {
   }
-  showmap_run_target_forkserver(fsrv, use_argv, in_data, in_len);
+  showmap_run_target_forkserver(fsrv, in_data, in_len);
   ck_free(in_data);
   tcnt = write_results_to_file(fsrv, outfile);
diff --git a/src/afl-tmin.c b/src/afl-tmin.c
index 68fcdd14..b50d8597 100644
--- a/src/afl-tmin.c
+++ b/src/afl-tmin.c
@@ -250,7 +250,7 @@ static s32 write_to_file(u8 *path, u8 *mem, u32 len) {
 /* Execute target application. Returns 0 if the changes are a dud, or
    1 if they should be kept. */
-static u8 tmin_run_target(afl_forkserver_t *fsrv, char **argv, u8 *mem, u32 len,
+static u8 tmin_run_target(afl_forkserver_t *fsrv, u8 *mem, u32 len,
                           u8 first_run) {
   afl_fsrv_write_to_testcase(fsrv, mem, len);
@@ -342,7 +342,7 @@ static u8 tmin_run_target(afl_forkserver_t *fsrv, char **argv, u8 *mem, u32 len,
 /* Actually minimize! */
-static void minimize(afl_forkserver_t *fsrv, char **argv) {
+static void minimize(afl_forkserver_t *fsrv) {
   static u32 alpha_map[256];
@@ -380,7 +380,7 @@ static void minimize(afl_forkserver_t *fsrv, char **argv) {
     memset(tmp_buf + set_pos, '0', use_len);
     u8 res;
-    res = tmin_run_target(fsrv, argv, tmp_buf, in_len, 0);
+    res = tmin_run_target(fsrv, tmp_buf, in_len, 0);
     if (res) {
@@ -453,7 +453,7 @@ next_del_blksize:
     /* Tail */
     memcpy(tmp_buf + del_pos, in_data + del_pos + del_len, tail_len);
-    res = tmin_run_target(fsrv, argv, tmp_buf, del_pos + tail_len, 0);
+    res = tmin_run_target(fsrv, tmp_buf, del_pos + tail_len, 0);
     if (res) {
@@ -524,7 +524,7 @@ next_del_blksize:
     }
-    res = tmin_run_target(fsrv, argv, tmp_buf, in_len, 0);
+    res = tmin_run_target(fsrv, tmp_buf, in_len, 0);
     if (res) {
@@ -560,7 +560,7 @@ next_del_blksize:
     if (orig == '0') { continue; }
     tmp_buf[i] = '0';
-    res = tmin_run_target(fsrv, argv, tmp_buf, in_len, 0);
+    res = tmin_run_target(fsrv, tmp_buf, in_len, 0);
     if (res) {
@@ -623,6 +623,7 @@ finalize_all:
 static void handle_stop_sig(int sig) {
+  (void)sig;
   stop_soon = 1;
   afl_fsrv_killall();
@@ -1131,7 +1132,7 @@ int main(int argc, char **argv_orig, char **envp) {
   ACTF("Performing dry run (mem limit = %llu MB, timeout = %u ms%s)...",
        fsrv->mem_limit, fsrv->exec_tmout, edges_only ? ", edges only" : "");
-  tmin_run_target(fsrv, use_argv, in_data, in_len, 1);
+  tmin_run_target(fsrv, in_data, in_len, 1);
   if (hang_mode && !fsrv->last_run_timed_out) {
@@ -1169,7 +1170,7 @@ int main(int argc, char **argv_orig, char **envp) {
   }
-  minimize(fsrv, use_argv);
+  minimize(fsrv);
   ACTF("Writing output to '%s'...", output_file);
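Almost every hunk above is one of two compiler-hygiene fixes: an explicit integer cast so that a signed and an unsigned operand are compared deliberately, or a (void) cast marking a parameter as intentionally unused (plus dropping parameters that were never read, as in usage(), dump_hex(), tmin_run_target() and rtn_extend_encoding()). The short standalone sketch below illustrates both patterns; it is not part of the AFL++ tree, and the helper names are invented for illustration only.

/* Illustrative sketch only - not AFL++ code. Shows the two warning fixes
   that recur in the hunks above, on made-up helpers. */

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef int32_t  s32;

/* Pattern 1: a signal handler must keep its (int) parameter even when it is
   unused; casting it to (void) silences -Wunused-parameter. */
static void example_stop_handler(int sig) {

  (void)sig;
  /* ... set a stop flag here ... */

}

/* Pattern 2: an unsigned index compared against a signed length. The explicit
   (s32) cast makes the mixed-sign comparison intentional for -Wsign-compare. */
static u32 example_count_pairs(const unsigned char *buf, s32 len) {

  u32 pairs = 0;

  for (u32 i = 0; (s32)i < len - 1; ++i) {

    if (buf[i] == buf[i + 1]) { ++pairs; }

  }

  return pairs;

}

int main(void) {

  example_stop_handler(2 /* SIGINT on most platforms */);
  printf("%u\n", (unsigned)example_count_pairs((const unsigned char *)"aabbc", 5));
  return 0;

}

Building the sketch with -Wall -Wextra, with and without the casts, reproduces the two warning classes this commit is silencing.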