57 files changed, 8578 insertions, 7029 deletions
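Apart from the first hunk, which narrows the code-format globs from src/* and include/* to src/*.c and include/*.h (so the formatter only touches C sources and headers), the changes below are mechanical restyling: trailing comments aligned to a fixed column, nested preprocessor directives indented after the hash (# define), and multi-line macros rewritten with aligned backslashes and blank continuation lines inside the do { } while (0) body. For orientation, debug.h's WARNF comes out of the formatter roughly as follows (reassembled onto separate lines here, backslash column approximate; cYEL/cBRI/cRST are the existing debug.h color macros):

#define WARNF(x...)                            \
  do {                                         \
                                               \
    SAYF(cYEL "[!] " cBRI "WARNING: " cRST x); \
    SAYF(cRST "\n");                           \
                                               \
  } while (0)
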
diff --git a/Makefile b/Makefile index 6eb6f871..edf3d99b 100644 --- a/Makefile +++ b/Makefile @@ -159,8 +159,8 @@ afl-gotcpu: src/afl-gotcpu.c $(COMM_HDR) | test_x86 code-format: - ./.custom-format.py -i src/* - ./.custom-format.py -i include/* + ./.custom-format.py -i src/*.c + ./.custom-format.py -i include/*.h ./.custom-format.py -i libdislocator/*.c ./.custom-format.py -i libtokencap/*.c ./.custom-format.py -i llvm_mode/*.c diff --git a/include/afl-as.h b/include/afl-as.h index 4748eda7..4f8fb640 100644 --- a/include/afl-as.h +++ b/include/afl-as.h @@ -37,7 +37,7 @@ #include "config.h" #include "types.h" -/* +/* ------------------ Performances notes ------------------ @@ -106,47 +106,47 @@ static const u8* trampoline_fmt_32 = - "\n" - "/* --- AFL TRAMPOLINE (32-BIT) --- */\n" - "\n" - ".align 4\n" - "\n" - "leal -16(%%esp), %%esp\n" - "movl %%edi, 0(%%esp)\n" - "movl %%edx, 4(%%esp)\n" - "movl %%ecx, 8(%%esp)\n" - "movl %%eax, 12(%%esp)\n" - "movl $0x%08x, %%ecx\n" - "call __afl_maybe_log\n" - "movl 12(%%esp), %%eax\n" - "movl 8(%%esp), %%ecx\n" - "movl 4(%%esp), %%edx\n" - "movl 0(%%esp), %%edi\n" - "leal 16(%%esp), %%esp\n" - "\n" - "/* --- END --- */\n" - "\n"; + "\n" + "/* --- AFL TRAMPOLINE (32-BIT) --- */\n" + "\n" + ".align 4\n" + "\n" + "leal -16(%%esp), %%esp\n" + "movl %%edi, 0(%%esp)\n" + "movl %%edx, 4(%%esp)\n" + "movl %%ecx, 8(%%esp)\n" + "movl %%eax, 12(%%esp)\n" + "movl $0x%08x, %%ecx\n" + "call __afl_maybe_log\n" + "movl 12(%%esp), %%eax\n" + "movl 8(%%esp), %%ecx\n" + "movl 4(%%esp), %%edx\n" + "movl 0(%%esp), %%edi\n" + "leal 16(%%esp), %%esp\n" + "\n" + "/* --- END --- */\n" + "\n"; static const u8* trampoline_fmt_64 = - "\n" - "/* --- AFL TRAMPOLINE (64-BIT) --- */\n" - "\n" - ".align 4\n" - "\n" - "leaq -(128+24)(%%rsp), %%rsp\n" - "movq %%rdx, 0(%%rsp)\n" - "movq %%rcx, 8(%%rsp)\n" - "movq %%rax, 16(%%rsp)\n" - "movq $0x%08x, %%rcx\n" - "call __afl_maybe_log\n" - "movq 16(%%rsp), %%rax\n" - "movq 8(%%rsp), %%rcx\n" - "movq 0(%%rsp), %%rdx\n" - "leaq (128+24)(%%rsp), %%rsp\n" - "\n" - "/* --- END --- */\n" - "\n"; + "\n" + "/* --- AFL TRAMPOLINE (64-BIT) --- */\n" + "\n" + ".align 4\n" + "\n" + "leaq -(128+24)(%%rsp), %%rsp\n" + "movq %%rdx, 0(%%rsp)\n" + "movq %%rcx, 8(%%rsp)\n" + "movq %%rax, 16(%%rsp)\n" + "movq $0x%08x, %%rcx\n" + "call __afl_maybe_log\n" + "movq 16(%%rsp), %%rax\n" + "movq 8(%%rsp), %%rcx\n" + "movq 0(%%rsp), %%rdx\n" + "leaq (128+24)(%%rsp), %%rsp\n" + "\n" + "/* --- END --- */\n" + "\n"; static const u8* main_payload_32 = @@ -398,9 +398,9 @@ static const u8* main_payload_32 = recognize .string. 
*/ #ifdef __APPLE__ -# define CALL_L64(str) "call _" str "\n" +# define CALL_L64(str) "call _" str "\n" #else -# define CALL_L64(str) "call " str "@PLT\n" +# define CALL_L64(str) "call " str "@PLT\n" #endif /* ^__APPLE__ */ static const u8* main_payload_64 = @@ -415,7 +415,7 @@ static const u8* main_payload_64 = "\n" "__afl_maybe_log:\n" "\n" -#if defined(__OpenBSD__) || (defined(__FreeBSD__) && (__FreeBSD__ < 9)) +#if defined(__OpenBSD__) || (defined(__FreeBSD__) && (__FreeBSD__ < 9)) " .byte 0x9f /* lahf */\n" #else " lahf\n" @@ -448,7 +448,7 @@ static const u8* main_payload_64 = "__afl_return:\n" "\n" " addb $127, %al\n" -#if defined(__OpenBSD__) || (defined(__FreeBSD__) && (__FreeBSD__ < 9)) +#if defined(__OpenBSD__) || (defined(__FreeBSD__) && (__FreeBSD__ < 9)) " .byte 0x9e /* sahf */\n" #else " sahf\n" @@ -737,9 +737,9 @@ static const u8* main_payload_64 = #ifdef __APPLE__ " .comm __afl_area_ptr, 8\n" -#ifndef COVERAGE_ONLY +# ifndef COVERAGE_ONLY " .comm __afl_prev_loc, 8\n" -#endif /* !COVERAGE_ONLY */ +# endif /* !COVERAGE_ONLY */ " .comm __afl_fork_pid, 4\n" " .comm __afl_temp, 4\n" " .comm __afl_setup_failure, 1\n" @@ -747,9 +747,9 @@ static const u8* main_payload_64 = #else " .lcomm __afl_area_ptr, 8\n" -#ifndef COVERAGE_ONLY +# ifndef COVERAGE_ONLY " .lcomm __afl_prev_loc, 8\n" -#endif /* !COVERAGE_ONLY */ +# endif /* !COVERAGE_ONLY */ " .lcomm __afl_fork_pid, 4\n" " .lcomm __afl_temp, 4\n" " .lcomm __afl_setup_failure, 1\n" @@ -765,3 +765,4 @@ static const u8* main_payload_64 = "\n"; #endif /* !_HAVE_AFL_AS_H */ + diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index ca22ef75..3e121851 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -27,12 +27,12 @@ #define MESSAGES_TO_STDOUT #ifndef _GNU_SOURCE -#define _GNU_SOURCE +# define _GNU_SOURCE #endif #define _FILE_OFFSET_BITS 64 #ifdef __ANDROID__ - #include "android-ashmem.h" +# include "android-ashmem.h" #endif #include "config.h" @@ -68,7 +68,7 @@ #include <sys/ioctl.h> #include <sys/file.h> -#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) +#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) # include <sys/sysctl.h> # define HAVE_ARC4RANDOM 1 #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ @@ -88,45 +88,47 @@ struct queue_entry { - u8* fname; /* File name for the test case */ - u32 len; /* Input length */ + u8* fname; /* File name for the test case */ + u32 len; /* Input length */ - u8 cal_failed, /* Calibration failed? */ - trim_done, /* Trimmed? */ - was_fuzzed, /* historical, but needed for MOpt */ - passed_det, /* Deterministic stages passed? */ - has_new_cov, /* Triggers new coverage? */ - var_behavior, /* Variable behavior? */ - favored, /* Currently favored? */ - fs_redundant; /* Marked as redundant in the fs? */ + u8 cal_failed, /* Calibration failed? */ + trim_done, /* Trimmed? */ + was_fuzzed, /* historical, but needed for MOpt */ + passed_det, /* Deterministic stages passed? */ + has_new_cov, /* Triggers new coverage? */ + var_behavior, /* Variable behavior? */ + favored, /* Currently favored? */ + fs_redundant; /* Marked as redundant in the fs? 
*/ - u32 bitmap_size, /* Number of bits set in bitmap */ - fuzz_level, /* Number of fuzzing iterations */ - exec_cksum; /* Checksum of the execution trace */ + u32 bitmap_size, /* Number of bits set in bitmap */ + fuzz_level, /* Number of fuzzing iterations */ + exec_cksum; /* Checksum of the execution trace */ - u64 exec_us, /* Execution time (us) */ - handicap, /* Number of queue cycles behind */ - n_fuzz, /* Number of fuzz, does not overflow */ - depth; /* Path depth */ + u64 exec_us, /* Execution time (us) */ + handicap, /* Number of queue cycles behind */ + n_fuzz, /* Number of fuzz, does not overflow */ + depth; /* Path depth */ - u8* trace_mini; /* Trace bytes, if kept */ - u32 tc_ref; /* Trace bytes ref count */ + u8* trace_mini; /* Trace bytes, if kept */ + u32 tc_ref; /* Trace bytes ref count */ - struct queue_entry *next, /* Next element, if any */ - *next_100; /* 100 elements ahead */ + struct queue_entry *next, /* Next element, if any */ + *next_100; /* 100 elements ahead */ }; struct extra_data { - u8* data; /* Dictionary token data */ - u32 len; /* Dictionary token length */ - u32 hit_cnt; /* Use count in the corpus */ -}; + u8* data; /* Dictionary token data */ + u32 len; /* Dictionary token length */ + u32 hit_cnt; /* Use count in the corpus */ + +}; /* Fuzzing stages */ enum { + /* 00 */ STAGE_FLIP1, /* 01 */ STAGE_FLIP2, /* 02 */ STAGE_FLIP4, @@ -146,72 +148,60 @@ enum { /* 16 */ STAGE_SPLICE, /* 17 */ STAGE_PYTHON, /* 18 */ STAGE_CUSTOM_MUTATOR + }; /* Stage value types */ enum { + /* 00 */ STAGE_VAL_NONE, /* 01 */ STAGE_VAL_LE, /* 02 */ STAGE_VAL_BE + }; /* Execution status fault codes */ enum { + /* 00 */ FAULT_NONE, /* 01 */ FAULT_TMOUT, /* 02 */ FAULT_CRASH, /* 03 */ FAULT_ERROR, /* 04 */ FAULT_NOINST, /* 05 */ FAULT_NOBITS -}; +}; /* MOpt: Lots of globals, but mostly for the status UI and other things where it really makes no sense to haul them around as function parameters. 
*/ -extern u64 limit_time_puppet, - orig_hit_cnt_puppet, - last_limit_time_start, - tmp_pilot_time, - total_pacemaker_time, - total_puppet_find, - temp_puppet_find, - most_time_key, - most_time, - most_execs_key, - most_execs, - old_hit_count; - -extern s32 SPLICE_CYCLES_puppet, - limit_time_sig, - key_puppet, - key_module; - -extern double w_init, - w_end, - w_now; +extern u64 limit_time_puppet, orig_hit_cnt_puppet, last_limit_time_start, + tmp_pilot_time, total_pacemaker_time, total_puppet_find, temp_puppet_find, + most_time_key, most_time, most_execs_key, most_execs, old_hit_count; + +extern s32 SPLICE_CYCLES_puppet, limit_time_sig, key_puppet, key_module; + +extern double w_init, w_end, w_now; extern s32 g_now; extern s32 g_max; #define operator_num 16 #define swarm_num 5 -#define period_core 500000 +#define period_core 500000 extern u64 tmp_core_time; extern s32 swarm_now; -extern double x_now[swarm_num][operator_num], - L_best[swarm_num][operator_num], - eff_best[swarm_num][operator_num], - G_best[operator_num], - v_now[swarm_num][operator_num], - probability_now[swarm_num][operator_num], - swarm_fitness[swarm_num]; +extern double x_now[swarm_num][operator_num], L_best[swarm_num][operator_num], + eff_best[swarm_num][operator_num], G_best[operator_num], + v_now[swarm_num][operator_num], probability_now[swarm_num][operator_num], + swarm_fitness[swarm_num]; -extern u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per fuzz stage */ +extern u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per + fuzz stage */ stage_finds_puppet_v2[swarm_num][operator_num], stage_cycles_puppet_v2[swarm_num][operator_num], stage_cycles_puppet_v3[swarm_num][operator_num], @@ -221,9 +211,9 @@ extern u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns fo core_operator_finds_puppet_v2[operator_num], core_operator_cycles_puppet[operator_num], core_operator_cycles_puppet_v2[operator_num], - core_operator_cycles_puppet_v3[operator_num]; /* Execs per fuzz stage */ + core_operator_cycles_puppet_v3[operator_num]; /* Execs per fuzz stage */ -#define RAND_C (rand()%1000*0.001) +#define RAND_C (rand() % 1000 * 0.001) #define v_max 1 #define v_min 0.05 #define limit_time_bound 1.1 @@ -236,225 +226,228 @@ extern u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns fo #define period_pilot 50000 extern double period_pilot_tmp; -extern s32 key_lv; - -extern u8 *in_dir, /* Input directory with test cases */ - *out_dir, /* Working & output directory */ - *tmp_dir , /* Temporary directory for input */ - *sync_dir, /* Synchronization directory */ - *sync_id, /* Fuzzer ID */ - *power_name, /* Power schedule name */ - *use_banner, /* Display banner */ - *in_bitmap, /* Input bitmap */ - *file_extension, /* File extension */ - *orig_cmdline; /* Original command line */ -extern u8 *doc_path, /* Path to documentation dir */ - *target_path, /* Path to target binary */ - *out_file; /* File to fuzz, if any */ - -extern u32 exec_tmout; /* Configurable exec timeout (ms) */ -extern u32 hang_tmout; /* Timeout used for hang det (ms) */ - -extern u64 mem_limit; /* Memory cap for child (MB) */ - -extern u8 cal_cycles, /* Calibration cycles defaults */ - cal_cycles_long, - debug, /* Debug mode */ - python_only; /* Python-only mode */ - -extern u32 stats_update_freq; /* Stats update frequency (execs) */ +extern s32 key_lv; + +extern u8 *in_dir, /* Input directory with test cases */ + *out_dir, /* Working & output directory */ + *tmp_dir, /* Temporary directory for input */ + *sync_dir, /* 
Synchronization directory */ + *sync_id, /* Fuzzer ID */ + *power_name, /* Power schedule name */ + *use_banner, /* Display banner */ + *in_bitmap, /* Input bitmap */ + *file_extension, /* File extension */ + *orig_cmdline; /* Original command line */ +extern u8 *doc_path, /* Path to documentation dir */ + *target_path, /* Path to target binary */ + *out_file; /* File to fuzz, if any */ + +extern u32 exec_tmout; /* Configurable exec timeout (ms) */ +extern u32 hang_tmout; /* Timeout used for hang det (ms) */ + +extern u64 mem_limit; /* Memory cap for child (MB) */ + +extern u8 cal_cycles, /* Calibration cycles defaults */ + cal_cycles_long, debug, /* Debug mode */ + python_only; /* Python-only mode */ + +extern u32 stats_update_freq; /* Stats update frequency (execs) */ enum { - /* 00 */ EXPLORE, /* AFL default, Exploration-based constant schedule */ - /* 01 */ FAST, /* Exponential schedule */ - /* 02 */ COE, /* Cut-Off Exponential schedule */ - /* 03 */ LIN, /* Linear schedule */ - /* 04 */ QUAD, /* Quadratic schedule */ - /* 05 */ EXPLOIT, /* AFL's exploitation-based const. */ - + + /* 00 */ EXPLORE, /* AFL default, Exploration-based constant schedule */ + /* 01 */ FAST, /* Exponential schedule */ + /* 02 */ COE, /* Cut-Off Exponential schedule */ + /* 03 */ LIN, /* Linear schedule */ + /* 04 */ QUAD, /* Quadratic schedule */ + /* 05 */ EXPLOIT, /* AFL's exploitation-based const. */ + POWER_SCHEDULES_NUM + }; -extern char *power_names[POWER_SCHEDULES_NUM]; +extern char* power_names[POWER_SCHEDULES_NUM]; -extern u8 schedule; /* Power schedule (default: EXPLORE)*/ +extern u8 schedule; /* Power schedule (default: EXPLORE)*/ extern u8 havoc_max_mult; -extern u8 skip_deterministic, /* Skip deterministic stages? */ - force_deterministic, /* Force deterministic stages? */ - use_splicing, /* Recombine input files? */ - dumb_mode, /* Run in non-instrumented mode? */ - score_changed, /* Scoring for favorites changed? */ - kill_signal, /* Signal that killed the child */ - resuming_fuzz, /* Resuming an older fuzzing job? */ - timeout_given, /* Specific timeout given? */ - not_on_tty, /* stdout is not a tty */ - term_too_small, /* terminal dimensions too small */ - no_forkserver, /* Disable forkserver? */ - crash_mode, /* Crash mode! Yeah! */ - in_place_resume, /* Attempt in-place resume? */ - auto_changed, /* Auto-generated tokens changed? */ - no_cpu_meter_red, /* Feng shui on the status screen */ - no_arith, /* Skip most arithmetic ops */ - shuffle_queue, /* Shuffle input queue? */ - bitmap_changed, /* Time to update bitmap? */ - qemu_mode, /* Running in QEMU mode? */ - unicorn_mode, /* Running in Unicorn mode? */ - skip_requested, /* Skip request, via SIGUSR1 */ - run_over10m, /* Run time over 10 minutes? */ - persistent_mode, /* Running in persistent mode? */ - deferred_mode, /* Deferred forkserver mode? */ - fixed_seed, /* do not reseed */ - fast_cal, /* Try to calibrate faster? */ - uses_asan; /* Target uses ASAN? */ - -extern s32 out_fd, /* Persistent fd for out_file */ +extern u8 skip_deterministic, /* Skip deterministic stages? */ + force_deterministic, /* Force deterministic stages? */ + use_splicing, /* Recombine input files? */ + dumb_mode, /* Run in non-instrumented mode? */ + score_changed, /* Scoring for favorites changed? */ + kill_signal, /* Signal that killed the child */ + resuming_fuzz, /* Resuming an older fuzzing job? */ + timeout_given, /* Specific timeout given? 
*/ + not_on_tty, /* stdout is not a tty */ + term_too_small, /* terminal dimensions too small */ + no_forkserver, /* Disable forkserver? */ + crash_mode, /* Crash mode! Yeah! */ + in_place_resume, /* Attempt in-place resume? */ + auto_changed, /* Auto-generated tokens changed? */ + no_cpu_meter_red, /* Feng shui on the status screen */ + no_arith, /* Skip most arithmetic ops */ + shuffle_queue, /* Shuffle input queue? */ + bitmap_changed, /* Time to update bitmap? */ + qemu_mode, /* Running in QEMU mode? */ + unicorn_mode, /* Running in Unicorn mode? */ + skip_requested, /* Skip request, via SIGUSR1 */ + run_over10m, /* Run time over 10 minutes? */ + persistent_mode, /* Running in persistent mode? */ + deferred_mode, /* Deferred forkserver mode? */ + fixed_seed, /* do not reseed */ + fast_cal, /* Try to calibrate faster? */ + uses_asan; /* Target uses ASAN? */ + +extern s32 out_fd, /* Persistent fd for out_file */ #ifndef HAVE_ARC4RANDOM - dev_urandom_fd, /* Persistent fd for /dev/urandom */ + dev_urandom_fd, /* Persistent fd for /dev/urandom */ #endif - dev_null_fd, /* Persistent fd for /dev/null */ - fsrv_ctl_fd, /* Fork server control pipe (write) */ - fsrv_st_fd; /* Fork server status pipe (read) */ - -extern s32 forksrv_pid, /* PID of the fork server */ - child_pid, /* PID of the fuzzed program */ - out_dir_fd; /* FD of the lock file */ - -extern u8* trace_bits; /* SHM with instrumentation bitmap */ - -extern u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */ - virgin_tmout[MAP_SIZE], /* Bits we haven't seen in tmouts */ - virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */ - -extern u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */ - -extern volatile u8 stop_soon, /* Ctrl-C pressed? */ - clear_screen, /* Window resized? */ - child_timed_out; /* Traced process timed out? 
*/ - -extern u32 queued_paths, /* Total number of queued testcases */ - queued_variable, /* Testcases with variable behavior */ - queued_at_start, /* Total number of initial inputs */ - queued_discovered, /* Items discovered during this run */ - queued_imported, /* Items imported via -S */ - queued_favored, /* Paths deemed favorable */ - queued_with_cov, /* Paths with new coverage bytes */ - pending_not_fuzzed, /* Queued but not done yet */ - pending_favored, /* Pending favored paths */ - cur_skipped_paths, /* Abandoned inputs in cur cycle */ - cur_depth, /* Current path depth */ - max_depth, /* Max path depth */ - useless_at_start, /* Number of useless starting paths */ - var_byte_count, /* Bitmap bytes with var behavior */ - current_entry, /* Current queue entry ID */ - havoc_div; /* Cycle count divisor for havoc */ - -extern u64 total_crashes, /* Total number of crashes */ - unique_crashes, /* Crashes with unique signatures */ - total_tmouts, /* Total number of timeouts */ - unique_tmouts, /* Timeouts with unique signatures */ - unique_hangs, /* Hangs with unique signatures */ - total_execs, /* Total execve() calls */ - slowest_exec_ms, /* Slowest testcase non hang in ms */ - start_time, /* Unix start time (ms) */ - last_path_time, /* Time for most recent path (ms) */ - last_crash_time, /* Time for most recent crash (ms) */ - last_hang_time, /* Time for most recent hang (ms) */ - last_crash_execs, /* Exec counter at last crash */ - queue_cycle, /* Queue round counter */ - cycles_wo_finds, /* Cycles without any new paths */ - trim_execs, /* Execs done to trim input files */ - bytes_trim_in, /* Bytes coming into the trimmer */ - bytes_trim_out, /* Bytes coming outa the trimmer */ - blocks_eff_total, /* Blocks subject to effector maps */ - blocks_eff_select; /* Blocks selected as fuzzable */ - -extern u32 subseq_tmouts; /* Number of timeouts in a row */ - -extern u8 *stage_name, /* Name of the current fuzz stage */ - *stage_short, /* Short stage name */ - *syncing_party; /* Currently syncing with... */ - -extern s32 stage_cur, stage_max; /* Stage progression */ -extern s32 splicing_with; /* Splicing with which test case? */ - -extern u32 master_id, master_max; /* Master instance job splitting */ - -extern u32 syncing_case; /* Syncing with case #... */ - -extern s32 stage_cur_byte, /* Byte offset of current stage op */ - stage_cur_val; /* Value used for stage op */ - -extern u8 stage_val_type; /* Value type (STAGE_VAL_*) */ - -extern u64 stage_finds[32], /* Patterns found per fuzz stage */ - stage_cycles[32]; /* Execs per fuzz stage */ + dev_null_fd, /* Persistent fd for /dev/null */ + fsrv_ctl_fd, /* Fork server control pipe (write) */ + fsrv_st_fd; /* Fork server status pipe (read) */ + +extern s32 forksrv_pid, /* PID of the fork server */ + child_pid, /* PID of the fuzzed program */ + out_dir_fd; /* FD of the lock file */ + +extern u8* trace_bits; /* SHM with instrumentation bitmap */ + +extern u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */ + virgin_tmout[MAP_SIZE], /* Bits we haven't seen in tmouts */ + virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */ + +extern u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */ + +extern volatile u8 stop_soon, /* Ctrl-C pressed? */ + clear_screen, /* Window resized? */ + child_timed_out; /* Traced process timed out? 
*/ + +extern u32 queued_paths, /* Total number of queued testcases */ + queued_variable, /* Testcases with variable behavior */ + queued_at_start, /* Total number of initial inputs */ + queued_discovered, /* Items discovered during this run */ + queued_imported, /* Items imported via -S */ + queued_favored, /* Paths deemed favorable */ + queued_with_cov, /* Paths with new coverage bytes */ + pending_not_fuzzed, /* Queued but not done yet */ + pending_favored, /* Pending favored paths */ + cur_skipped_paths, /* Abandoned inputs in cur cycle */ + cur_depth, /* Current path depth */ + max_depth, /* Max path depth */ + useless_at_start, /* Number of useless starting paths */ + var_byte_count, /* Bitmap bytes with var behavior */ + current_entry, /* Current queue entry ID */ + havoc_div; /* Cycle count divisor for havoc */ + +extern u64 total_crashes, /* Total number of crashes */ + unique_crashes, /* Crashes with unique signatures */ + total_tmouts, /* Total number of timeouts */ + unique_tmouts, /* Timeouts with unique signatures */ + unique_hangs, /* Hangs with unique signatures */ + total_execs, /* Total execve() calls */ + slowest_exec_ms, /* Slowest testcase non hang in ms */ + start_time, /* Unix start time (ms) */ + last_path_time, /* Time for most recent path (ms) */ + last_crash_time, /* Time for most recent crash (ms) */ + last_hang_time, /* Time for most recent hang (ms) */ + last_crash_execs, /* Exec counter at last crash */ + queue_cycle, /* Queue round counter */ + cycles_wo_finds, /* Cycles without any new paths */ + trim_execs, /* Execs done to trim input files */ + bytes_trim_in, /* Bytes coming into the trimmer */ + bytes_trim_out, /* Bytes coming outa the trimmer */ + blocks_eff_total, /* Blocks subject to effector maps */ + blocks_eff_select; /* Blocks selected as fuzzable */ + +extern u32 subseq_tmouts; /* Number of timeouts in a row */ + +extern u8 *stage_name, /* Name of the current fuzz stage */ + *stage_short, /* Short stage name */ + *syncing_party; /* Currently syncing with... */ + +extern s32 stage_cur, stage_max; /* Stage progression */ +extern s32 splicing_with; /* Splicing with which test case? */ + +extern u32 master_id, master_max; /* Master instance job splitting */ + +extern u32 syncing_case; /* Syncing with case #... 
*/ + +extern s32 stage_cur_byte, /* Byte offset of current stage op */ + stage_cur_val; /* Value used for stage op */ + +extern u8 stage_val_type; /* Value type (STAGE_VAL_*) */ + +extern u64 stage_finds[32], /* Patterns found per fuzz stage */ + stage_cycles[32]; /* Execs per fuzz stage */ #ifndef HAVE_ARC4RANDOM -extern u32 rand_cnt; /* Random number counter */ +extern u32 rand_cnt; /* Random number counter */ #endif -extern u64 total_cal_us, /* Total calibration time (us) */ - total_cal_cycles; /* Total calibration cycles */ +extern u64 total_cal_us, /* Total calibration time (us) */ + total_cal_cycles; /* Total calibration cycles */ -extern u64 total_bitmap_size, /* Total bit count for all bitmaps */ - total_bitmap_entries; /* Number of bitmaps counted */ +extern u64 total_bitmap_size, /* Total bit count for all bitmaps */ + total_bitmap_entries; /* Number of bitmaps counted */ -extern s32 cpu_core_count; /* CPU core count */ +extern s32 cpu_core_count; /* CPU core count */ #ifdef HAVE_AFFINITY -extern s32 cpu_aff; /* Selected CPU core */ +extern s32 cpu_aff; /* Selected CPU core */ #endif /* HAVE_AFFINITY */ -extern FILE* plot_file; /* Gnuplot output file */ - +extern FILE* plot_file; /* Gnuplot output file */ - -extern struct queue_entry *queue, /* Fuzzing queue (linked list) */ - *queue_cur, /* Current offset within the queue */ - *queue_top, /* Top of the list */ - *q_prev100; /* Previous 100 marker */ +extern struct queue_entry *queue, /* Fuzzing queue (linked list) */ + *queue_cur, /* Current offset within the queue */ + *queue_top, /* Top of the list */ + *q_prev100; /* Previous 100 marker */ extern struct queue_entry* - top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */ + top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */ -extern struct extra_data* extras; /* Extra tokens to fuzz with */ -extern u32 extras_cnt; /* Total number of tokens read */ +extern struct extra_data* extras; /* Extra tokens to fuzz with */ +extern u32 extras_cnt; /* Total number of tokens read */ -extern struct extra_data* a_extras; /* Automatically selected extras */ -extern u32 a_extras_cnt; /* Total number of tokens available */ +extern struct extra_data* a_extras; /* Automatically selected extras */ +extern u32 a_extras_cnt; /* Total number of tokens available */ u8* (*post_handler)(u8* buf, u32* len); /* hooks for the custom mutator function */ -size_t (*custom_mutator)(u8 *data, size_t size, u8* mutated_out, size_t max_size, unsigned int seed); -size_t (*pre_save_handler)(u8 *data, size_t size, u8 **new_data); +size_t (*custom_mutator)(u8* data, size_t size, u8* mutated_out, + size_t max_size, unsigned int seed); +size_t (*pre_save_handler)(u8* data, size_t size, u8** new_data); /* Interesting values, as per config.h */ extern s8 interesting_8[INTERESTING_8_LEN]; extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN]; -extern s32 interesting_32[INTERESTING_8_LEN + INTERESTING_16_LEN + INTERESTING_32_LEN]; +extern s32 + interesting_32[INTERESTING_8_LEN + INTERESTING_16_LEN + INTERESTING_32_LEN]; /* Python stuff */ #ifdef USE_PYTHON -#include <Python.h> +# include <Python.h> -extern PyObject *py_module; +extern PyObject* py_module; enum { + /* 00 */ PY_FUNC_INIT, /* 01 */ PY_FUNC_FUZZ, /* 02 */ PY_FUNC_INIT_TRIM, /* 03 */ PY_FUNC_POST_TRIM, /* 04 */ PY_FUNC_TRIM, PY_FUNC_COUNT + }; -extern PyObject *py_functions[PY_FUNC_COUNT]; +extern PyObject* py_functions[PY_FUNC_COUNT]; #endif @@ -462,13 +455,13 @@ extern PyObject *py_functions[PY_FUNC_COUNT]; /* Python */ #ifdef USE_PYTHON -int 
init_py(); +int init_py(); void finalize_py(); void fuzz_py(char*, size_t, char*, size_t, char**, size_t*); -u32 init_trim_py(char*, size_t); -u32 post_trim_py(char); +u32 init_trim_py(char*, size_t); +u32 post_trim_py(char); void trim_py(char**, size_t*); -u8 trim_case_python(char**, struct queue_entry*, u8*); +u8 trim_case_python(char**, struct queue_entry*, u8*); #endif /* Queue */ @@ -480,16 +473,16 @@ void add_to_queue(u8*, u32, u8); void destroy_queue(void); void update_bitmap_score(struct queue_entry*); void cull_queue(void); -u32 calculate_score(struct queue_entry*); +u32 calculate_score(struct queue_entry*); /* Bitmap */ void write_bitmap(void); void read_bitmap(u8*); -u8 has_new_bits(u8*); -u32 count_bits(u8*); -u32 count_bytes(u8*); -u32 count_non_255_bytes(u8*); +u8 has_new_bits(u8*); +u32 count_bits(u8*); +u32 count_bytes(u8*); +u32 count_non_255_bytes(u8*); #ifdef __x86_64__ void simplify_trace(u64*); void classify_counts(u64*); @@ -529,51 +522,51 @@ void show_init_stats(void); /* Run */ -u8 run_target(char**, u32); +u8 run_target(char**, u32); void write_to_testcase(void*, u32); void write_with_gap(void*, u32, u32, u32); -u8 calibrate_case(char**, struct queue_entry*, u8*, u32, u8); +u8 calibrate_case(char**, struct queue_entry*, u8*, u32, u8); void sync_fuzzers(char**); -u8 trim_case(char**, struct queue_entry*, u8*); -u8 common_fuzz_stuff(char**, u8*, u32); +u8 trim_case(char**, struct queue_entry*, u8*); +u8 common_fuzz_stuff(char**, u8*, u32); /* Fuzz one */ -u8 fuzz_one_original(char**); +u8 fuzz_one_original(char**); static u8 pilot_fuzzing(char**); -u8 core_fuzzing(char**); -void pso_updating(void); -u8 fuzz_one(char**); +u8 core_fuzzing(char**); +void pso_updating(void); +u8 fuzz_one(char**); /* Init */ #ifdef HAVE_AFFINITY void bind_to_free_cpu(void); #endif -void setup_post(void); -void setup_custom_mutator(void); -void read_testcases(void); -void perform_dry_run(char**); -void pivot_inputs(void); -u32 find_start_position(void); -void find_timeout(void); +void setup_post(void); +void setup_custom_mutator(void); +void read_testcases(void); +void perform_dry_run(char**); +void pivot_inputs(void); +u32 find_start_position(void); +void find_timeout(void); double get_runnable_processes(void); -void nuke_resume_dir(void); -void maybe_delete_out_dir(void); -void setup_dirs_fds(void); -void setup_cmdline_file(char**); -void setup_stdio_file(void); -void check_crash_handling(void); -void check_cpu_governor(void); -void get_core_count(void); -void fix_up_sync(void); -void check_asan_opts(void); -void check_binary(u8*); -void fix_up_banner(u8*); -void check_if_tty(void); -void setup_signal_handlers(void); +void nuke_resume_dir(void); +void maybe_delete_out_dir(void); +void setup_dirs_fds(void); +void setup_cmdline_file(char**); +void setup_stdio_file(void); +void check_crash_handling(void); +void check_cpu_governor(void); +void get_core_count(void); +void fix_up_sync(void); +void check_asan_opts(void); +void check_binary(u8*); +void fix_up_banner(u8*); +void check_if_tty(void); +void setup_signal_handlers(void); char** get_qemu_argv(u8*, char**, int); -void save_cmdline(u32, char**); +void save_cmdline(u32, char**); /**** Inline routines ****/ @@ -581,25 +574,27 @@ void save_cmdline(u32, char**); have slight bias. */ static inline u32 UR(u32 limit) { + #ifdef HAVE_ARC4RANDOM - if (fixed_seed) { - return random() % limit; - } + if (fixed_seed) { return random() % limit; } /* The boundary not being necessarily a power of 2, we need to ensure the result uniformity. 
*/ return arc4random_uniform(limit); #else if (!fixed_seed && unlikely(!rand_cnt--)) { + u32 seed[2]; ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom"); srandom(seed[0]); rand_cnt = (RESEED_RNG / 2) + (seed[1] % RESEED_RNG); + } return random() % limit; #endif + } /* Find first power of two greater or equal to val (assuming val under @@ -608,7 +603,8 @@ static inline u32 UR(u32 limit) { static u64 next_p2(u64 val) { u64 ret = 1; - while (val > ret) ret <<= 1; + while (val > ret) + ret <<= 1; return ret; } @@ -617,7 +613,7 @@ static u64 next_p2(u64 val) { static u64 get_cur_time(void) { - struct timeval tv; + struct timeval tv; struct timezone tz; gettimeofday(&tv, &tz); @@ -626,12 +622,11 @@ static u64 get_cur_time(void) { } - /* Get unix time in microseconds */ static u64 get_cur_time_us(void) { - struct timeval tv; + struct timeval tv; struct timezone tz; gettimeofday(&tv, &tz); diff --git a/include/alloc-inl.h b/include/alloc-inl.h index 2f98da0e..4a4beff1 100644 --- a/include/alloc-inl.h +++ b/include/alloc-inl.h @@ -31,82 +31,105 @@ /* User-facing macro to sprintf() to a dynamically allocated buffer. */ -#define alloc_printf(_str...) ({ \ - u8* _tmp; \ - s32 _len = snprintf(NULL, 0, _str); \ +#define alloc_printf(_str...) \ + ({ \ + \ + u8* _tmp; \ + s32 _len = snprintf(NULL, 0, _str); \ if (_len < 0) FATAL("Whoa, snprintf() fails?!"); \ - _tmp = ck_alloc(_len + 1); \ - snprintf((char*)_tmp, _len + 1, _str); \ - _tmp; \ + _tmp = ck_alloc(_len + 1); \ + snprintf((char*)_tmp, _len + 1, _str); \ + _tmp; \ + \ }) /* Macro to enforce allocation limits as a last-resort defense against integer overflows. */ -#define ALLOC_CHECK_SIZE(_s) do { \ - if ((_s) > MAX_ALLOC) \ - ABORT("Bad alloc request: %u bytes", (_s)); \ +#define ALLOC_CHECK_SIZE(_s) \ + do { \ + \ + if ((_s) > MAX_ALLOC) ABORT("Bad alloc request: %u bytes", (_s)); \ + \ } while (0) /* Macro to check malloc() failures and the like. */ -#define ALLOC_CHECK_RESULT(_r, _s) do { \ - if (!(_r)) \ - ABORT("Out of memory: can't allocate %u bytes", (_s)); \ +#define ALLOC_CHECK_RESULT(_r, _s) \ + do { \ + \ + if (!(_r)) ABORT("Out of memory: can't allocate %u bytes", (_s)); \ + \ } while (0) /* Magic tokens used to mark used / freed chunks. */ -#define ALLOC_MAGIC_C1 0xFF00FF00 /* Used head (dword) */ -#define ALLOC_MAGIC_F 0xFE00FE00 /* Freed head (dword) */ -#define ALLOC_MAGIC_C2 0xF0 /* Used tail (byte) */ +#define ALLOC_MAGIC_C1 0xFF00FF00 /* Used head (dword) */ +#define ALLOC_MAGIC_F 0xFE00FE00 /* Freed head (dword) */ +#define ALLOC_MAGIC_C2 0xF0 /* Used tail (byte) */ /* Positions of guard tokens in relation to the user-visible pointer. */ -#define ALLOC_C1(_ptr) (((u32*)(_ptr))[-2]) -#define ALLOC_S(_ptr) (((u32*)(_ptr))[-1]) -#define ALLOC_C2(_ptr) (((u8*)(_ptr))[ALLOC_S(_ptr)]) +#define ALLOC_C1(_ptr) (((u32*)(_ptr))[-2]) +#define ALLOC_S(_ptr) (((u32*)(_ptr))[-1]) +#define ALLOC_C2(_ptr) (((u8*)(_ptr))[ALLOC_S(_ptr)]) -#define ALLOC_OFF_HEAD 8 +#define ALLOC_OFF_HEAD 8 #define ALLOC_OFF_TOTAL (ALLOC_OFF_HEAD + 1) /* Allocator increments for ck_realloc_block(). */ -#define ALLOC_BLK_INC 256 +#define ALLOC_BLK_INC 256 /* Sanity-checking macros for pointers. 
*/ -#define CHECK_PTR(_p) do { \ - if (_p) { \ - if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) {\ - if (ALLOC_C1(_p) == ALLOC_MAGIC_F) \ - ABORT("Use after free."); \ - else ABORT("Corrupted head alloc canary."); \ - } \ - } \ +#define CHECK_PTR(_p) \ + do { \ + \ + if (_p) { \ + \ + if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) { \ + \ + if (ALLOC_C1(_p) == ALLOC_MAGIC_F) \ + ABORT("Use after free."); \ + else \ + ABORT("Corrupted head alloc canary."); \ + \ + } \ + \ + } \ + \ } while (0) /* #define CHECK_PTR(_p) do { \ + \ if (_p) { \ + \ if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) {\ + \ if (ALLOC_C1(_p) == ALLOC_MAGIC_F) \ ABORT("Use after free."); \ else ABORT("Corrupted head alloc canary."); \ + \ } \ if (ALLOC_C2(_p) ^ ALLOC_MAGIC_C2) \ ABORT("Corrupted tail alloc canary."); \ + \ } \ + \ } while (0) */ -#define CHECK_PTR_EXPR(_p) ({ \ - typeof (_p) _tmp = (_p); \ - CHECK_PTR(_tmp); \ - _tmp; \ +#define CHECK_PTR_EXPR(_p) \ + ({ \ + \ + typeof(_p) _tmp = (_p); \ + CHECK_PTR(_tmp); \ + _tmp; \ + \ }) - /* Allocate a buffer, explicitly not zeroing it. Returns NULL for zero-sized requests. */ @@ -123,14 +146,13 @@ static inline void* DFL_ck_alloc_nozero(u32 size) { ret += ALLOC_OFF_HEAD; ALLOC_C1(ret) = ALLOC_MAGIC_C1; - ALLOC_S(ret) = size; + ALLOC_S(ret) = size; ALLOC_C2(ret) = ALLOC_MAGIC_C2; - return (void *)ret; + return (void*)ret; } - /* Allocate a buffer, returning zeroed memory. */ static inline void* DFL_ck_alloc(u32 size) { @@ -144,7 +166,6 @@ static inline void* DFL_ck_alloc(u32 size) { } - /* Free memory, checking for double free and corrupted heap. When DEBUG_BUILD is set, the old memory will be also clobbered with 0xFF. */ @@ -163,20 +184,19 @@ static inline void DFL_ck_free(void* mem) { ALLOC_C1(mem) = ALLOC_MAGIC_F; - u8 *realStart = mem; + u8* realStart = mem; free(realStart - ALLOC_OFF_HEAD); } - /* Re-allocate a buffer, checking for issues and zeroing any newly-added tail. With DEBUG_BUILD, the buffer is always reallocated to a new addresses and the old memory is clobbered with 0xFF. */ static inline void* DFL_ck_realloc(void* orig, u32 size) { - u8* ret; - u32 old_size = 0; + u8* ret; + u32 old_size = 0; if (!size) { @@ -193,9 +213,9 @@ static inline void* DFL_ck_realloc(void* orig, u32 size) { ALLOC_C1(orig) = ALLOC_MAGIC_F; #endif /* !DEBUG_BUILD */ - old_size = ALLOC_S(orig); - u8 *origu8 = orig; - origu8 -= ALLOC_OFF_HEAD; + old_size = ALLOC_S(orig); + u8* origu8 = orig; + origu8 -= ALLOC_OFF_HEAD; orig = origu8; ALLOC_CHECK_SIZE(old_size); @@ -219,7 +239,7 @@ static inline void* DFL_ck_realloc(void* orig, u32 size) { if (orig) { - u8 *origu8 = orig; + u8* origu8 = orig; memcpy(ret + ALLOC_OFF_HEAD, origu8 + ALLOC_OFF_HEAD, MIN(size, old_size)); memset(origu8 + ALLOC_OFF_HEAD, 0xFF, old_size); @@ -234,17 +254,15 @@ static inline void* DFL_ck_realloc(void* orig, u32 size) { ret += ALLOC_OFF_HEAD; ALLOC_C1(ret) = ALLOC_MAGIC_C1; - ALLOC_S(ret) = size; + ALLOC_S(ret) = size; ALLOC_C2(ret) = ALLOC_MAGIC_C2; - if (size > old_size) - memset(ret + old_size, 0, size - old_size); + if (size > old_size) memset(ret + old_size, 0, size - old_size); - return (void *)ret; + return (void*)ret; } - /* Re-allocate a buffer with ALLOC_BLK_INC increments (used to speed up repeated small reallocs without complicating the user code). */ @@ -268,13 +286,12 @@ static inline void* DFL_ck_realloc_block(void* orig, u32 size) { } - /* Create a buffer with a copy of a string. Returns NULL for NULL inputs. 
*/ static inline u8* DFL_ck_strdup(u8* str) { - u8* ret; - u32 size; + u8* ret; + u32 size; if (!str) return NULL; @@ -287,38 +304,36 @@ static inline u8* DFL_ck_strdup(u8* str) { ret += ALLOC_OFF_HEAD; ALLOC_C1(ret) = ALLOC_MAGIC_C1; - ALLOC_S(ret) = size; + ALLOC_S(ret) = size; ALLOC_C2(ret) = ALLOC_MAGIC_C2; return memcpy(ret, str, size); } - /* Create a buffer with a copy of a memory block. Returns NULL for zero-sized or NULL inputs. */ static inline void* DFL_ck_memdup(void* mem, u32 size) { - u8* ret; + u8* ret; if (!mem || !size) return NULL; ALLOC_CHECK_SIZE(size); ret = malloc(size + ALLOC_OFF_TOTAL); ALLOC_CHECK_RESULT(ret, size); - + ret += ALLOC_OFF_HEAD; ALLOC_C1(ret) = ALLOC_MAGIC_C1; - ALLOC_S(ret) = size; + ALLOC_S(ret) = size; ALLOC_C2(ret) = ALLOC_MAGIC_C2; return memcpy(ret, mem, size); } - /* Create a buffer with a block of text, appending a NUL terminator at the end. Returns NULL for zero-sized or NULL inputs. */ @@ -331,11 +346,11 @@ static inline u8* DFL_ck_memdup_str(u8* mem, u32 size) { ALLOC_CHECK_SIZE(size); ret = malloc(size + ALLOC_OFF_TOTAL + 1); ALLOC_CHECK_RESULT(ret, size); - + ret += ALLOC_OFF_HEAD; ALLOC_C1(ret) = ALLOC_MAGIC_C1; - ALLOC_S(ret) = size; + ALLOC_S(ret) = size; ALLOC_C2(ret) = ALLOC_MAGIC_C2; memcpy(ret, mem, size); @@ -345,22 +360,21 @@ static inline u8* DFL_ck_memdup_str(u8* mem, u32 size) { } - #ifndef DEBUG_BUILD /* In non-debug mode, we just do straightforward aliasing of the above functions to user-visible names such as ck_alloc(). */ -#define ck_alloc DFL_ck_alloc -#define ck_alloc_nozero DFL_ck_alloc_nozero -#define ck_realloc DFL_ck_realloc -#define ck_realloc_block DFL_ck_realloc_block -#define ck_strdup DFL_ck_strdup -#define ck_memdup DFL_ck_memdup -#define ck_memdup_str DFL_ck_memdup_str -#define ck_free DFL_ck_free +# define ck_alloc DFL_ck_alloc +# define ck_alloc_nozero DFL_ck_alloc_nozero +# define ck_realloc DFL_ck_realloc +# define ck_realloc_block DFL_ck_realloc_block +# define ck_strdup DFL_ck_strdup +# define ck_memdup DFL_ck_memdup +# define ck_memdup_str DFL_ck_memdup_str +# define ck_free DFL_ck_free -#define alloc_report() +# define alloc_report() #else @@ -369,34 +383,35 @@ static inline u8* DFL_ck_memdup_str(u8* mem, u32 size) { /* Alloc tracking data structures: */ -#define ALLOC_BUCKETS 4096 +# define ALLOC_BUCKETS 4096 struct TRK_obj { - void *ptr; + + void* ptr; char *file, *func; - u32 line; + u32 line; + }; -#ifdef AFL_MAIN +# ifdef AFL_MAIN struct TRK_obj* TRK[ALLOC_BUCKETS]; u32 TRK_cnt[ALLOC_BUCKETS]; -# define alloc_report() TRK_report() +# define alloc_report() TRK_report() -#else +# else extern struct TRK_obj* TRK[ALLOC_BUCKETS]; -extern u32 TRK_cnt[ALLOC_BUCKETS]; +extern u32 TRK_cnt[ALLOC_BUCKETS]; -# define alloc_report() +# define alloc_report() -#endif /* ^AFL_MAIN */ +# endif /* ^AFL_MAIN */ /* Bucket-assigning function for a given pointer: */ -#define TRKH(_ptr) (((((u32)(_ptr)) >> 16) ^ ((u32)(_ptr))) % ALLOC_BUCKETS) - +# define TRKH(_ptr) (((((u32)(_ptr)) >> 16) ^ ((u32)(_ptr))) % ALLOC_BUCKETS) /* Add a new entry to the list of allocated objects. */ @@ -415,7 +430,7 @@ static inline void TRK_alloc_buf(void* ptr, const char* file, const char* func, if (!TRK[bucket][i].ptr) { - TRK[bucket][i].ptr = ptr; + TRK[bucket][i].ptr = ptr; TRK[bucket][i].file = (char*)file; TRK[bucket][i].func = (char*)func; TRK[bucket][i].line = line; @@ -425,10 +440,10 @@ static inline void TRK_alloc_buf(void* ptr, const char* file, const char* func, /* No space available - allocate more. 
*/ - TRK[bucket] = DFL_ck_realloc_block(TRK[bucket], - (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj)); + TRK[bucket] = DFL_ck_realloc_block( + TRK[bucket], (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj)); - TRK[bucket][i].ptr = ptr; + TRK[bucket][i].ptr = ptr; TRK[bucket][i].file = (char*)file; TRK[bucket][i].func = (char*)func; TRK[bucket][i].line = line; @@ -437,7 +452,6 @@ static inline void TRK_alloc_buf(void* ptr, const char* file, const char* func, } - /* Remove entry from the list of allocated objects. */ static inline void TRK_free_buf(void* ptr, const char* file, const char* func, @@ -460,12 +474,11 @@ static inline void TRK_free_buf(void* ptr, const char* file, const char* func, } - WARNF("ALLOC: Attempt to free non-allocated memory in %s (%s:%u)", - func, file, line); + WARNF("ALLOC: Attempt to free non-allocated memory in %s (%s:%u)", func, file, + line); } - /* Do a final report on all non-deallocated objects. */ static inline void TRK_report(void) { @@ -482,7 +495,6 @@ static inline void TRK_report(void) { } - /* Simple wrappers for non-debugging functions: */ static inline void* TRK_ck_alloc(u32 size, const char* file, const char* func, @@ -494,7 +506,6 @@ static inline void* TRK_ck_alloc(u32 size, const char* file, const char* func, } - static inline void* TRK_ck_realloc(void* orig, u32 size, const char* file, const char* func, u32 line) { @@ -505,7 +516,6 @@ static inline void* TRK_ck_realloc(void* orig, u32 size, const char* file, } - static inline void* TRK_ck_realloc_block(void* orig, u32 size, const char* file, const char* func, u32 line) { @@ -516,7 +526,6 @@ static inline void* TRK_ck_realloc_block(void* orig, u32 size, const char* file, } - static inline void* TRK_ck_strdup(u8* str, const char* file, const char* func, u32 line) { @@ -526,7 +535,6 @@ static inline void* TRK_ck_strdup(u8* str, const char* file, const char* func, } - static inline void* TRK_ck_memdup(void* mem, u32 size, const char* file, const char* func, u32 line) { @@ -536,7 +544,6 @@ static inline void* TRK_ck_memdup(void* mem, u32 size, const char* file, } - static inline void* TRK_ck_memdup_str(void* mem, u32 size, const char* file, const char* func, u32 line) { @@ -546,9 +553,8 @@ static inline void* TRK_ck_memdup_str(void* mem, u32 size, const char* file, } - -static inline void TRK_ck_free(void* ptr, const char* file, - const char* func, u32 line) { +static inline void TRK_ck_free(void* ptr, const char* file, const char* func, + u32 line) { TRK_free_buf(ptr, file, func, line); DFL_ck_free(ptr); @@ -557,30 +563,27 @@ static inline void TRK_ck_free(void* ptr, const char* file, /* Aliasing user-facing names to tracking functions: */ -#define ck_alloc(_p1) \ - TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__) +# define ck_alloc(_p1) TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__) -#define ck_alloc_nozero(_p1) \ - TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__) +#define ck_alloc_nozero(_p1) TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__) -#define ck_realloc(_p1, _p2) \ +# define ck_realloc(_p1, _p2)\ TRK_ck_realloc(_p1, _p2, __FILE__, __FUNCTION__, __LINE__) -#define ck_realloc_block(_p1, _p2) \ +# define ck_realloc_block(_p1, _p2)\ TRK_ck_realloc_block(_p1, _p2, __FILE__, __FUNCTION__, __LINE__) -#define ck_strdup(_p1) \ - TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__) +# define ck_strdup(_p1) TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__) -#define ck_memdup(_p1, _p2) \ +# define ck_memdup(_p1, _p2)\ TRK_ck_memdup(_p1, _p2, __FILE__, __FUNCTION__, __LINE__) -#define 
ck_memdup_str(_p1, _p2) \ +# define ck_memdup_str(_p1, _p2)\ TRK_ck_memdup_str(_p1, _p2, __FILE__, __FUNCTION__, __LINE__) -#define ck_free(_p1) \ - TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__) +# define ck_free(_p1) TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__) #endif /* ^!DEBUG_BUILD */ #endif /* ! _HAVE_ALLOC_INL_H */ + diff --git a/include/android-ashmem.h b/include/android-ashmem.h index a787c04b..a4b5bf30 100644 --- a/include/android-ashmem.h +++ b/include/android-ashmem.h @@ -8,74 +8,73 @@ #include <sys/mman.h> #if __ANDROID_API__ >= 26 -#define shmat bionic_shmat -#define shmctl bionic_shmctl -#define shmdt bionic_shmdt -#define shmget bionic_shmget +# define shmat bionic_shmat +# define shmctl bionic_shmctl +# define shmdt bionic_shmdt +# define shmget bionic_shmget #endif - #include <sys/shm.h> +#include <sys/shm.h> #undef shmat #undef shmctl #undef shmdt #undef shmget #include <stdio.h> -#define ASHMEM_DEVICE "/dev/ashmem" +#define ASHMEM_DEVICE "/dev/ashmem" -static inline int shmctl(int __shmid, int __cmd, struct shmid_ds *__buf) -{ - int ret = 0; - if (__cmd == IPC_RMID) { - int length = ioctl(__shmid, ASHMEM_GET_SIZE, NULL); - struct ashmem_pin pin = {0, length}; - ret = ioctl(__shmid, ASHMEM_UNPIN, &pin); - close(__shmid); - } +static inline int shmctl(int __shmid, int __cmd, struct shmid_ds *__buf) { + + int ret = 0; + if (__cmd == IPC_RMID) { + + int length = ioctl(__shmid, ASHMEM_GET_SIZE, NULL); + struct ashmem_pin pin = {0, length}; + ret = ioctl(__shmid, ASHMEM_UNPIN, &pin); + close(__shmid); + + } + + return ret; - return ret; } -static inline int shmget (key_t __key, size_t __size, int __shmflg) -{ - int fd,ret; - char ourkey[11]; +static inline int shmget(key_t __key, size_t __size, int __shmflg) { + + int fd, ret; + char ourkey[11]; - fd = open(ASHMEM_DEVICE, O_RDWR); - if (fd < 0) - return fd; + fd = open(ASHMEM_DEVICE, O_RDWR); + if (fd < 0) return fd; - sprintf(ourkey,"%d",__key); - ret = ioctl(fd, ASHMEM_SET_NAME, ourkey); - if (ret < 0) - goto error; + sprintf(ourkey, "%d", __key); + ret = ioctl(fd, ASHMEM_SET_NAME, ourkey); + if (ret < 0) goto error; - ret = ioctl(fd, ASHMEM_SET_SIZE, __size); - if (ret < 0) - goto error; + ret = ioctl(fd, ASHMEM_SET_SIZE, __size); + if (ret < 0) goto error; - return fd; + return fd; error: - close(fd); - return ret; + close(fd); + return ret; + } -static inline void *shmat (int __shmid, const void *__shmaddr, int __shmflg) -{ - int size; +static inline void *shmat(int __shmid, const void *__shmaddr, int __shmflg) { + + int size; void *ptr; - + size = ioctl(__shmid, ASHMEM_GET_SIZE, NULL); - if (size < 0) { - return NULL; - } + if (size < 0) { return NULL; } ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, __shmid, 0); - if (ptr == MAP_FAILED) { - return NULL; - } - + if (ptr == MAP_FAILED) { return NULL; } + return ptr; + } #endif + diff --git a/include/common.h b/include/common.h index 161caa39..9845c2af 100644 --- a/include/common.h +++ b/include/common.h @@ -4,3 +4,4 @@ void detect_file_args(char **argv, u8 *prog_in); #endif + diff --git a/include/config.h b/include/config.h index 17836151..babba3bd 100644 --- a/include/config.h +++ b/include/config.h @@ -21,7 +21,7 @@ /* Version string: */ -#define VERSION "++2.53d" // c = release, d = volatile github dev +#define VERSION "++2.53d" // c = release, d = volatile github dev /****************************************************** * * @@ -41,64 +41,64 @@ /* Default timeout for fuzzed code (milliseconds). 
This is the upper bound, also used for detecting hangs; the actual value is auto-scaled: */ -#define EXEC_TIMEOUT 1000 +#define EXEC_TIMEOUT 1000 /* Timeout rounding factor when auto-scaling (milliseconds): */ -#define EXEC_TM_ROUND 20 +#define EXEC_TM_ROUND 20 /* Default memory limit for child process (MB): */ -#ifndef __x86_64__ -# define MEM_LIMIT 25 +#ifndef __x86_64__ +# define MEM_LIMIT 25 #else -# define MEM_LIMIT 50 +# define MEM_LIMIT 50 #endif /* ^!__x86_64__ */ /* Default memory limit when running in QEMU mode (MB): */ -#define MEM_LIMIT_QEMU 200 +#define MEM_LIMIT_QEMU 200 /* Default memory limit when running in Unicorn mode (MB): */ -#define MEM_LIMIT_UNICORN 200 +#define MEM_LIMIT_UNICORN 200 /* Number of calibration cycles per every new test case (and for test cases that show variable behavior): */ -#define CAL_CYCLES 8 -#define CAL_CYCLES_LONG 40 +#define CAL_CYCLES 8 +#define CAL_CYCLES_LONG 40 /* Number of subsequent timeouts before abandoning an input file: */ -#define TMOUT_LIMIT 250 +#define TMOUT_LIMIT 250 /* Maximum number of unique hangs or crashes to record: */ -#define KEEP_UNIQUE_HANG 500 -#define KEEP_UNIQUE_CRASH 5000 +#define KEEP_UNIQUE_HANG 500 +#define KEEP_UNIQUE_CRASH 5000 /* Baseline number of random tweaks during a single 'havoc' stage: */ -#define HAVOC_CYCLES 256 -#define HAVOC_CYCLES_INIT 1024 +#define HAVOC_CYCLES 256 +#define HAVOC_CYCLES_INIT 1024 /* Maximum multiplier for the above (should be a power of two, beware of 32-bit int overflows): */ -#define HAVOC_MAX_MULT 16 +#define HAVOC_MAX_MULT 16 #define HAVOC_MAX_MULT_MOPT 32 /* Absolute minimum number of havoc cycles (after all adjustments): */ -#define HAVOC_MIN 16 +#define HAVOC_MIN 16 /* Power Schedule Divisor */ -#define POWER_BETA 1 -#define MAX_FACTOR (POWER_BETA * 32) +#define POWER_BETA 1 +#define MAX_FACTOR (POWER_BETA * 32) /* Maximum stacking for havoc-stage tweaks. The actual value is calculated - like this: + like this: n = random between 1 and HAVOC_STACK_POW2 stacking = 2^n @@ -106,116 +106,116 @@ In other words, the default (n = 7) produces 2, 4, 8, 16, 32, 64, or 128 stacked tweaks: */ -#define HAVOC_STACK_POW2 7 +#define HAVOC_STACK_POW2 7 /* Caps on block sizes for cloning and deletion operations. 
Each of these ranges has a 33% probability of getting picked, except for the first two cycles where smaller blocks are favored: */ -#define HAVOC_BLK_SMALL 32 -#define HAVOC_BLK_MEDIUM 128 -#define HAVOC_BLK_LARGE 1500 +#define HAVOC_BLK_SMALL 32 +#define HAVOC_BLK_MEDIUM 128 +#define HAVOC_BLK_LARGE 1500 /* Extra-large blocks, selected very rarely (<5% of the time): */ -#define HAVOC_BLK_XL 32768 +#define HAVOC_BLK_XL 32768 /* Probabilities of skipping non-favored entries in the queue, expressed as percentages: */ -#define SKIP_TO_NEW_PROB 99 /* ...when there are new, pending favorites */ -#define SKIP_NFAV_OLD_PROB 95 /* ...no new favs, cur entry already fuzzed */ -#define SKIP_NFAV_NEW_PROB 75 /* ...no new favs, cur entry not fuzzed yet */ +#define SKIP_TO_NEW_PROB 99 /* ...when there are new, pending favorites */ +#define SKIP_NFAV_OLD_PROB 95 /* ...no new favs, cur entry already fuzzed */ +#define SKIP_NFAV_NEW_PROB 75 /* ...no new favs, cur entry not fuzzed yet */ /* Splicing cycle count: */ -#define SPLICE_CYCLES 15 +#define SPLICE_CYCLES 15 /* Nominal per-splice havoc cycle length: */ -#define SPLICE_HAVOC 32 +#define SPLICE_HAVOC 32 /* Maximum offset for integer addition / subtraction stages: */ -#define ARITH_MAX 35 +#define ARITH_MAX 35 /* Limits for the test case trimmer. The absolute minimum chunk size; and the starting and ending divisors for chopping up the input file: */ -#define TRIM_MIN_BYTES 4 -#define TRIM_START_STEPS 16 -#define TRIM_END_STEPS 1024 +#define TRIM_MIN_BYTES 4 +#define TRIM_START_STEPS 16 +#define TRIM_END_STEPS 1024 /* Maximum size of input file, in bytes (keep under 100MB): */ -#define MAX_FILE (1 * 1024 * 1024) +#define MAX_FILE (1 * 1024 * 1024) /* The same, for the test case minimizer: */ -#define TMIN_MAX_FILE (10 * 1024 * 1024) +#define TMIN_MAX_FILE (10 * 1024 * 1024) /* Block normalization steps for afl-tmin: */ -#define TMIN_SET_MIN_SIZE 4 -#define TMIN_SET_STEPS 128 +#define TMIN_SET_MIN_SIZE 4 +#define TMIN_SET_STEPS 128 /* Maximum dictionary token size (-x), in bytes: */ -#define MAX_DICT_FILE 128 +#define MAX_DICT_FILE 128 /* Length limits for auto-detected dictionary tokens: */ -#define MIN_AUTO_EXTRA 3 -#define MAX_AUTO_EXTRA 32 +#define MIN_AUTO_EXTRA 3 +#define MAX_AUTO_EXTRA 32 /* Maximum number of user-specified dictionary tokens to use in deterministic steps; past this point, the "extras/user" step will be still carried out, but with proportionally lower odds: */ -#define MAX_DET_EXTRAS 200 +#define MAX_DET_EXTRAS 200 /* Maximum number of auto-extracted dictionary tokens to actually use in fuzzing (first value), and to keep in memory as candidates. The latter should be much higher than the former. */ -#define USE_AUTO_EXTRAS 50 -#define MAX_AUTO_EXTRAS (USE_AUTO_EXTRAS * 10) +#define USE_AUTO_EXTRAS 50 +#define MAX_AUTO_EXTRAS (USE_AUTO_EXTRAS * 10) /* Scaling factor for the effector map used to skip some of the more expensive deterministic steps. 
The actual divisor is set to 2^EFF_MAP_SCALE2 bytes: */ -#define EFF_MAP_SCALE2 3 +#define EFF_MAP_SCALE2 3 /* Minimum input file length at which the effector logic kicks in: */ -#define EFF_MIN_LEN 128 +#define EFF_MIN_LEN 128 /* Maximum effector density past which everything is just fuzzed unconditionally (%): */ -#define EFF_MAX_PERC 90 +#define EFF_MAX_PERC 90 /* UI refresh frequency (Hz): */ -#define UI_TARGET_HZ 5 +#define UI_TARGET_HZ 5 /* Fuzzer stats file and plot update intervals (sec): */ -#define STATS_UPDATE_SEC 60 -#define PLOT_UPDATE_SEC 5 +#define STATS_UPDATE_SEC 60 +#define PLOT_UPDATE_SEC 5 /* Smoothing divisor for CPU load and exec speed stats (1 - no smoothing). */ -#define AVG_SMOOTHING 16 +#define AVG_SMOOTHING 16 /* Sync interval (every n havoc cycles): */ -#define SYNC_INTERVAL 5 +#define SYNC_INTERVAL 5 /* Output directory reuse grace period (minutes): */ -#define OUTPUT_GRACE 25 +#define OUTPUT_GRACE 25 /* Uncomment to use simple file names (id_NNNNNN): */ @@ -223,42 +223,42 @@ /* List of interesting values to use in fuzzing. */ -#define INTERESTING_8 \ - -128, /* Overflow signed 8-bit when decremented */ \ - -1, /* */ \ - 0, /* */ \ - 1, /* */ \ - 16, /* One-off with common buffer size */ \ - 32, /* One-off with common buffer size */ \ - 64, /* One-off with common buffer size */ \ - 100, /* One-off with common buffer size */ \ - 127 /* Overflow signed 8-bit when incremented */ +#define INTERESTING_8 \ + -128, /* Overflow signed 8-bit when decremented */ \ + -1, /* */ \ + 0, /* */ \ + 1, /* */ \ + 16, /* One-off with common buffer size */ \ + 32, /* One-off with common buffer size */ \ + 64, /* One-off with common buffer size */ \ + 100, /* One-off with common buffer size */ \ + 127 /* Overflow signed 8-bit when incremented */ #define INTERESTING_8_LEN 9 -#define INTERESTING_16 \ - -32768, /* Overflow signed 16-bit when decremented */ \ - -129, /* Overflow signed 8-bit */ \ - 128, /* Overflow signed 8-bit */ \ - 255, /* Overflow unsig 8-bit when incremented */ \ - 256, /* Overflow unsig 8-bit */ \ - 512, /* One-off with common buffer size */ \ - 1000, /* One-off with common buffer size */ \ - 1024, /* One-off with common buffer size */ \ - 4096, /* One-off with common buffer size */ \ - 32767 /* Overflow signed 16-bit when incremented */ +#define INTERESTING_16 \ + -32768, /* Overflow signed 16-bit when decremented */ \ + -129, /* Overflow signed 8-bit */ \ + 128, /* Overflow signed 8-bit */ \ + 255, /* Overflow unsig 8-bit when incremented */ \ + 256, /* Overflow unsig 8-bit */ \ + 512, /* One-off with common buffer size */ \ + 1000, /* One-off with common buffer size */ \ + 1024, /* One-off with common buffer size */ \ + 4096, /* One-off with common buffer size */ \ + 32767 /* Overflow signed 16-bit when incremented */ #define INTERESTING_16_LEN 10 -#define INTERESTING_32 \ - -2147483648LL, /* Overflow signed 32-bit when decremented */ \ - -100663046, /* Large negative number (endian-agnostic) */ \ - -32769, /* Overflow signed 16-bit */ \ - 32768, /* Overflow signed 16-bit */ \ - 65535, /* Overflow unsig 16-bit when incremented */ \ - 65536, /* Overflow unsig 16 bit */ \ - 100663045, /* Large positive number (endian-agnostic) */ \ - 2147483647 /* Overflow signed 32-bit when incremented */ +#define INTERESTING_32 \ + -2147483648LL, /* Overflow signed 32-bit when decremented */ \ + -100663046, /* Large negative number (endian-agnostic) */ \ + -32769, /* Overflow signed 16-bit */ \ + 32768, /* Overflow signed 16-bit */ \ + 65535, /* Overflow unsig 16-bit when 
incremented */ \ + 65536, /* Overflow unsig 16 bit */ \ + 100663045, /* Large positive number (endian-agnostic) */ \ + 2147483647 /* Overflow signed 32-bit when incremented */ #define INTERESTING_32_LEN 8 @@ -270,57 +270,57 @@ /* Call count interval between reseeding the libc PRNG from /dev/urandom: */ -#define RESEED_RNG 10000 +#define RESEED_RNG 10000 /* Maximum line length passed from GCC to 'as' and used for parsing configuration files: */ -#define MAX_LINE 8192 +#define MAX_LINE 8192 /* Environment variable used to pass SHM ID to the called program. */ -#define SHM_ENV_VAR "__AFL_SHM_ID" +#define SHM_ENV_VAR "__AFL_SHM_ID" /* Other less interesting, internal-only variables. */ -#define CLANG_ENV_VAR "__AFL_CLANG_MODE" -#define AS_LOOP_ENV_VAR "__AFL_AS_LOOPCHECK" -#define PERSIST_ENV_VAR "__AFL_PERSISTENT" -#define DEFER_ENV_VAR "__AFL_DEFER_FORKSRV" +#define CLANG_ENV_VAR "__AFL_CLANG_MODE" +#define AS_LOOP_ENV_VAR "__AFL_AS_LOOPCHECK" +#define PERSIST_ENV_VAR "__AFL_PERSISTENT" +#define DEFER_ENV_VAR "__AFL_DEFER_FORKSRV" /* In-code signatures for deferred and persistent mode. */ -#define PERSIST_SIG "##SIG_AFL_PERSISTENT##" -#define DEFER_SIG "##SIG_AFL_DEFER_FORKSRV##" +#define PERSIST_SIG "##SIG_AFL_PERSISTENT##" +#define DEFER_SIG "##SIG_AFL_DEFER_FORKSRV##" /* Distinctive bitmap signature used to indicate failed execution: */ -#define EXEC_FAIL_SIG 0xfee1dead +#define EXEC_FAIL_SIG 0xfee1dead /* Distinctive exit code used to indicate MSAN trip condition: */ -#define MSAN_ERROR 86 +#define MSAN_ERROR 86 /* Designated file descriptors for forkserver commands (the application will use FORKSRV_FD and FORKSRV_FD + 1): */ -#define FORKSRV_FD 198 +#define FORKSRV_FD 198 /* Fork server init timeout multiplier: we'll wait the user-selected timeout plus this much for the fork server to spin up. */ -#define FORK_WAIT_MULT 10 +#define FORK_WAIT_MULT 10 /* Calibration timeout adjustments, to be a bit more generous when resuming fuzzing sessions or trying to calibrate already-added internal finds. The first value is a percentage, the other is in milliseconds: */ -#define CAL_TMOUT_PERC 125 -#define CAL_TMOUT_ADD 50 +#define CAL_TMOUT_PERC 125 +#define CAL_TMOUT_ADD 50 /* Number of chances to calibrate a case before giving up: */ -#define CAL_CHANCES 3 +#define CAL_CHANCES 3 /* Map size for the traced binary (2^MAP_SIZE_POW2). Must be greater than 2; you probably want to keep it under 18 or so for performance reasons @@ -328,28 +328,27 @@ problems with complex programs). You need to recompile the target binary after changing this - otherwise, SEGVs may ensue. */ -#define MAP_SIZE_POW2 16 -#define MAP_SIZE (1 << MAP_SIZE_POW2) +#define MAP_SIZE_POW2 16 +#define MAP_SIZE (1 << MAP_SIZE_POW2) /* Maximum allocator request size (keep well under INT_MAX): */ -#define MAX_ALLOC 0x40000000 +#define MAX_ALLOC 0x40000000 /* A made-up hashing seed: */ -#define HASH_CONST 0xa5b35705 +#define HASH_CONST 0xa5b35705 /* Constants for afl-gotcpu to control busy loop timing: */ -#define CTEST_TARGET_MS 5000 -#define CTEST_CORE_TRG_MS 1000 -#define CTEST_BUSY_CYCLES (10 * 1000 * 1000) +#define CTEST_TARGET_MS 5000 +#define CTEST_CORE_TRG_MS 1000 +#define CTEST_BUSY_CYCLES (10 * 1000 * 1000) /* Enable NeverZero counters in QEMU mode */ #define AFL_QEMU_NOT_ZERO - /* Uncomment this to use inferior block-coverage-based instrumentation. Note that you need to recompile the target binary for this to have any effect: */ @@ -368,3 +367,4 @@ // #define IGNORE_FINDS #endif /* ! 
_HAVE_CONFIG_H */ + diff --git a/include/debug.h b/include/debug.h index c0044280..6a59ad7a 100644 --- a/include/debug.h +++ b/include/debug.h @@ -108,39 +108,39 @@ #ifdef FANCY_BOXES -# define SET_G1 "\x1b)0" /* Set G1 for box drawing */ -# define RESET_G1 "\x1b)B" /* Reset G1 to ASCII */ -# define bSTART "\x0e" /* Enter G1 drawing mode */ -# define bSTOP "\x0f" /* Leave G1 drawing mode */ -# define bH "q" /* Horizontal line */ -# define bV "x" /* Vertical line */ -# define bLT "l" /* Left top corner */ -# define bRT "k" /* Right top corner */ -# define bLB "m" /* Left bottom corner */ -# define bRB "j" /* Right bottom corner */ -# define bX "n" /* Cross */ -# define bVR "t" /* Vertical, branch right */ -# define bVL "u" /* Vertical, branch left */ -# define bHT "v" /* Horizontal, branch top */ -# define bHB "w" /* Horizontal, branch bottom */ +# define SET_G1 "\x1b)0" /* Set G1 for box drawing */ +# define RESET_G1 "\x1b)B" /* Reset G1 to ASCII */ +# define bSTART "\x0e" /* Enter G1 drawing mode */ +# define bSTOP "\x0f" /* Leave G1 drawing mode */ +# define bH "q" /* Horizontal line */ +# define bV "x" /* Vertical line */ +# define bLT "l" /* Left top corner */ +# define bRT "k" /* Right top corner */ +# define bLB "m" /* Left bottom corner */ +# define bRB "j" /* Right bottom corner */ +# define bX "n" /* Cross */ +# define bVR "t" /* Vertical, branch right */ +# define bVL "u" /* Vertical, branch left */ +# define bHT "v" /* Horizontal, branch top */ +# define bHB "w" /* Horizontal, branch bottom */ #else -# define SET_G1 "" +# define SET_G1 "" # define RESET_G1 "" -# define bSTART "" -# define bSTOP "" -# define bH "-" -# define bV "|" -# define bLT "+" -# define bRT "+" -# define bLB "+" -# define bRB "+" -# define bX "+" -# define bVR "+" -# define bVL "+" -# define bHT "+" -# define bHB "+" +# define bSTART "" +# define bSTOP "" +# define bH "-" +# define bV "|" +# define bLT "+" +# define bRT "+" +# define bLB "+" +# define bRB "+" +# define bX "+" +# define bVR "+" +# define bVL "+" +# define bHT "+" +# define bHB "+" #endif /* ^FANCY_BOXES */ @@ -148,11 +148,11 @@ * Misc terminal codes * ***********************/ -#define TERM_HOME "\x1b[H" -#define TERM_CLEAR TERM_HOME "\x1b[2J" -#define cEOL "\x1b[0K" -#define CURSOR_HIDE "\x1b[?25l" -#define CURSOR_SHOW "\x1b[?25h" +#define TERM_HOME "\x1b[H" +#define TERM_CLEAR TERM_HOME "\x1b[2J" +#define cEOL "\x1b[0K" +#define CURSOR_HIDE "\x1b[?25l" +#define CURSOR_SHOW "\x1b[?25h" /************************ * Debug & error macros * @@ -161,91 +161,125 @@ /* Just print stuff to the appropriate stream. */ #ifdef MESSAGES_TO_STDOUT -# define SAYF(x...) printf(x) -#else -# define SAYF(x...) fprintf(stderr, x) +# define SAYF(x...) printf(x) +#else +# define SAYF(x...) fprintf(stderr, x) #endif /* ^MESSAGES_TO_STDOUT */ /* Show a prefixed warning. */ -#define WARNF(x...) do { \ +#define WARNF(x...) \ + do { \ + \ SAYF(cYEL "[!] " cBRI "WARNING: " cRST x); \ - SAYF(cRST "\n"); \ + SAYF(cRST "\n"); \ + \ } while (0) /* Show a prefixed "doing something" message. */ -#define ACTF(x...) do { \ +#define ACTF(x...) \ + do { \ + \ SAYF(cLBL "[*] " cRST x); \ - SAYF(cRST "\n"); \ + SAYF(cRST "\n"); \ + \ } while (0) /* Show a prefixed "success" message. */ -#define OKF(x...) do { \ +#define OKF(x...) \ + do { \ + \ SAYF(cLGN "[+] " cRST x); \ - SAYF(cRST "\n"); \ + SAYF(cRST "\n"); \ + \ } while (0) /* Show a prefixed fatal error message (not used in afl). */ -#define BADF(x...) do { \ +#define BADF(x...) 
\ + do { \ + \ SAYF(cLRD "\n[-] " cRST x); \ - SAYF(cRST "\n"); \ + SAYF(cRST "\n"); \ + \ } while (0) /* Die with a verbose non-OS fatal error message. */ -#define FATAL(x...) do { \ - SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \ - cRST x); \ - SAYF(cLRD "\n Location : " cRST "%s(), %s:%u\n\n", \ - __FUNCTION__, __FILE__, __LINE__); \ - exit(1); \ +#define FATAL(x...) \ + do { \ + \ + SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \ + "\n[-] PROGRAM ABORT : " cRST x); \ + SAYF(cLRD "\n Location : " cRST "%s(), %s:%u\n\n", __FUNCTION__, \ + __FILE__, __LINE__); \ + exit(1); \ + \ } while (0) /* Die by calling abort() to provide a core dump. */ -#define ABORT(x...) do { \ - SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \ - cRST x); \ - SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n\n", \ - __FUNCTION__, __FILE__, __LINE__); \ - abort(); \ +#define ABORT(x...) \ + do { \ + \ + SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \ + "\n[-] PROGRAM ABORT : " cRST x); \ + SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n\n", __FUNCTION__, \ + __FILE__, __LINE__); \ + abort(); \ + \ } while (0) /* Die while also including the output of perror(). */ -#define PFATAL(x...) do { \ - fflush(stdout); \ - SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] SYSTEM ERROR : " \ - cRST x); \ - SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n", \ - __FUNCTION__, __FILE__, __LINE__); \ - SAYF(cLRD " OS message : " cRST "%s\n", strerror(errno)); \ - exit(1); \ +#define PFATAL(x...) \ + do { \ + \ + fflush(stdout); \ + SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \ + "\n[-] SYSTEM ERROR : " cRST x); \ + SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n", __FUNCTION__, \ + __FILE__, __LINE__); \ + SAYF(cLRD " OS message : " cRST "%s\n", strerror(errno)); \ + exit(1); \ + \ } while (0) /* Die with FAULT() or PFAULT() depending on the value of res (used to interpret different failure modes for read(), write(), etc). */ -#define RPFATAL(res, x...) do { \ - if (res < 0) PFATAL(x); else FATAL(x); \ +#define RPFATAL(res, x...) \ + do { \ + \ + if (res < 0) \ + PFATAL(x); \ + else \ + FATAL(x); \ + \ } while (0) /* Error-checking versions of read() and write() that call RPFATAL() as appropriate. */ -#define ck_write(fd, buf, len, fn) do { \ - u32 _len = (len); \ - s32 _res = write(fd, buf, _len); \ +#define ck_write(fd, buf, len, fn) \ + do { \ + \ + u32 _len = (len); \ + s32 _res = write(fd, buf, _len); \ if (_res != _len) RPFATAL(_res, "Short write to %s", fn); \ + \ } while (0) -#define ck_read(fd, buf, len, fn) do { \ - u32 _len = (len); \ - s32 _res = read(fd, buf, _len); \ +#define ck_read(fd, buf, len, fn) \ + do { \ + \ + u32 _len = (len); \ + s32 _res = read(fd, buf, _len); \ if (_res != _len) RPFATAL(_res, "Short read from %s", fn); \ + \ } while (0) #endif /* ! _HAVE_DEBUG_H */ + diff --git a/include/forkserver.h b/include/forkserver.h index fa40d9c6..af5dab72 100644 --- a/include/forkserver.h +++ b/include/forkserver.h @@ -5,21 +5,21 @@ void handle_timeout(int sig); void init_forkserver(char **argv); #ifdef __APPLE__ -#define MSG_FORK_ON_APPLE \ - " - On MacOS X, the semantics of fork() syscalls are non-standard and " \ - "may\n" \ - " break afl-fuzz performance optimizations when running " \ - "platform-specific\n" \ +# define MSG_FORK_ON_APPLE \ + " - On MacOS X, the semantics of fork() syscalls are non-standard and " \ + "may\n" \ + " break afl-fuzz performance optimizations when running " \ + "platform-specific\n" \ " targets. 
To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n" #else -#define MSG_FORK_ON_APPLE "" +# define MSG_FORK_ON_APPLE "" #endif #ifdef RLIMIT_AS - #define MSG_ULIMIT_USAGE " ( ulimit -Sv $[%llu << 10];" +# define MSG_ULIMIT_USAGE " ( ulimit -Sv $[%llu << 10];" #else - #define MSG_ULIMIT_USAGE " ( ulimit -Sd $[%llu << 10];" +# define MSG_ULIMIT_USAGE " ( ulimit -Sd $[%llu << 10];" #endif /* ^RLIMIT_AS */ - #endif + diff --git a/include/hash.h b/include/hash.h index f39a8257..5d0512a6 100644 --- a/include/hash.h +++ b/include/hash.h @@ -31,12 +31,12 @@ #ifdef __x86_64__ -#define ROL64(_x, _r) ((((u64)(_x)) << (_r)) | (((u64)(_x)) >> (64 - (_r)))) +# define ROL64(_x, _r) ((((u64)(_x)) << (_r)) | (((u64)(_x)) >> (64 - (_r)))) static inline u32 hash32(const void* key, u32 len, u32 seed) { const u64* data = (u64*)key; - u64 h1 = seed ^ len; + u64 h1 = seed ^ len; len >>= 3; @@ -45,12 +45,12 @@ static inline u32 hash32(const void* key, u32 len, u32 seed) { u64 k1 = *data++; k1 *= 0x87c37b91114253d5ULL; - k1 = ROL64(k1, 31); + k1 = ROL64(k1, 31); k1 *= 0x4cf5ad432745937fULL; h1 ^= k1; - h1 = ROL64(h1, 27); - h1 = h1 * 5 + 0x52dce729; + h1 = ROL64(h1, 27); + h1 = h1 * 5 + 0x52dce729; } @@ -64,14 +64,14 @@ static inline u32 hash32(const void* key, u32 len, u32 seed) { } -#else +#else -#define ROL32(_x, _r) ((((u32)(_x)) << (_r)) | (((u32)(_x)) >> (32 - (_r)))) +# define ROL32(_x, _r) ((((u32)(_x)) << (_r)) | (((u32)(_x)) >> (32 - (_r)))) static inline u32 hash32(const void* key, u32 len, u32 seed) { - const u32* data = (u32*)key; - u32 h1 = seed ^ len; + const u32* data = (u32*)key; + u32 h1 = seed ^ len; len >>= 2; @@ -80,12 +80,12 @@ static inline u32 hash32(const void* key, u32 len, u32 seed) { u32 k1 = *data++; k1 *= 0xcc9e2d51; - k1 = ROL32(k1, 15); + k1 = ROL32(k1, 15); k1 *= 0x1b873593; h1 ^= k1; - h1 = ROL32(h1, 13); - h1 = h1 * 5 + 0xe6546b64; + h1 = ROL32(h1, 13); + h1 = h1 * 5 + 0xe6546b64; } @@ -102,3 +102,4 @@ static inline u32 hash32(const void* key, u32 len, u32 seed) { #endif /* ^__x86_64__ */ #endif /* !_HAVE_HASH_H */ + diff --git a/include/sharedmem.h b/include/sharedmem.h index 9aa44d0e..7e13b13b 100644 --- a/include/sharedmem.h +++ b/include/sharedmem.h @@ -5,3 +5,4 @@ void setup_shm(unsigned char dumb_mode); void remove_shm(void); #endif + diff --git a/include/types.h b/include/types.h index 7606d4ed..60ae64c2 100644 --- a/include/types.h +++ b/include/types.h @@ -46,26 +46,31 @@ typedef unsigned long long u64; typedef uint64_t u64; #endif /* ^__x86_64__ */ -typedef int8_t s8; -typedef int16_t s16; -typedef int32_t s32; -typedef int64_t s64; +typedef int8_t s8; +typedef int16_t s16; +typedef int32_t s32; +typedef int64_t s64; #ifndef MIN -# define MIN(_a,_b) ((_a) > (_b) ? (_b) : (_a)) -# define MAX(_a,_b) ((_a) > (_b) ? (_a) : (_b)) +# define MIN(_a, _b) ((_a) > (_b) ? (_b) : (_a)) +# define MAX(_a, _b) ((_a) > (_b) ? 
(_a) : (_b)) #endif /* !MIN */ -#define SWAP16(_x) ({ \ - u16 _ret = (_x); \ +#define SWAP16(_x) \ + ({ \ + \ + u16 _ret = (_x); \ (u16)((_ret << 8) | (_ret >> 8)); \ + \ }) -#define SWAP32(_x) ({ \ - u32 _ret = (_x); \ - (u32)((_ret << 24) | (_ret >> 24) | \ - ((_ret << 8) & 0x00FF0000) | \ - ((_ret >> 8) & 0x0000FF00)); \ +#define SWAP32(_x) \ + ({ \ + \ + u32 _ret = (_x); \ + (u32)((_ret << 24) | (_ret >> 24) | ((_ret << 8) & 0x00FF0000) | \ + ((_ret >> 8) & 0x0000FF00)); \ + \ }) #ifdef AFL_LLVM_PASS @@ -77,15 +82,15 @@ typedef int64_t s64; #define STRINGIFY_INTERNAL(x) #x #define STRINGIFY(x) STRINGIFY_INTERNAL(x) -#define MEM_BARRIER() \ - __asm__ volatile("" ::: "memory") +#define MEM_BARRIER() __asm__ volatile("" ::: "memory") #if __GNUC__ < 6 - #define likely(_x) (_x) - #define unlikely(_x) (_x) +# define likely(_x) (_x) +# define unlikely(_x) (_x) #else - #define likely(_x) __builtin_expect(!!(_x), 1) - #define unlikely(_x) __builtin_expect(!!(_x), 0) +# define likely(_x) __builtin_expect(!!(_x), 1) +# define unlikely(_x) __builtin_expect(!!(_x), 0) #endif #endif /* ! _HAVE_TYPES_H */ + diff --git a/libdislocator/libdislocator.so.c b/libdislocator/libdislocator.so.c index 71620b17..5104fed4 100644 --- a/libdislocator/libdislocator.so.c +++ b/libdislocator/libdislocator.so.c @@ -38,23 +38,35 @@ /* Error / message handling: */ -#define DEBUGF(_x...) do { \ - if (alloc_verbose) { \ - if (++call_depth == 1) { \ +#define DEBUGF(_x...) \ + do { \ + \ + if (alloc_verbose) { \ + \ + if (++call_depth == 1) { \ + \ fprintf(stderr, "[AFL] " _x); \ - fprintf(stderr, "\n"); \ - } \ - call_depth--; \ - } \ + fprintf(stderr, "\n"); \ + \ + } \ + call_depth--; \ + \ + } \ + \ } while (0) -#define FATAL(_x...) do { \ - if (++call_depth == 1) { \ +#define FATAL(_x...) \ + do { \ + \ + if (++call_depth == 1) { \ + \ fprintf(stderr, "*** [AFL] " _x); \ - fprintf(stderr, " ***\n"); \ - abort(); \ - } \ - call_depth--; \ + fprintf(stderr, " ***\n"); \ + abort(); \ + \ + } \ + call_depth--; \ + \ } while (0) /* Macro to count the number of pages needed to store a buffer: */ @@ -63,7 +75,7 @@ /* Canary & clobber bytes: */ -#define ALLOC_CANARY 0xAACCAACC +#define ALLOC_CANARY 0xAACCAACC #define ALLOC_CLOBBER 0xCC #define PTR_C(_p) (((u32*)(_p))[-1]) @@ -73,14 +85,13 @@ static u32 max_mem = MAX_ALLOC; /* Max heap usage to permit */ static u8 alloc_verbose, /* Additional debug messages */ - hard_fail, /* abort() when max_mem exceeded? */ - no_calloc_over; /* abort() on calloc() overflows? */ + hard_fail, /* abort() when max_mem exceeded? */ + no_calloc_over; /* abort() on calloc() overflows? */ static __thread size_t total_mem; /* Currently allocated mem */ static __thread u32 call_depth; /* To avoid recursion via fprintf() */ - /* This is the main alloc function. It allocates one page more than necessary, sets that tailing page to PROT_NONE, and then increments the return address so that it is right-aligned to that boundary. 
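(As an aside, a minimal free-standing sketch of that guard-page scheme, for
   illustration only; the real allocator below additionally records the length
   and an ALLOC_CANARY word in front of the returned buffer:

     #include <stdio.h>
     #include <string.h>
     #include <sys/mman.h>
     #include <unistd.h>

     static void* guarded_alloc(size_t len) {

       size_t page = (size_t)sysconf(_SC_PAGESIZE);
       size_t npages = (len + page - 1) / page;  // pages holding the payload
       size_t total = (npages + 1) * page;       // plus one guard page

       char* base = mmap(NULL, total, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
       if (base == MAP_FAILED) return NULL;

       // Make the trailing page inaccessible; touching it faults at once.
       if (mprotect(base + npages * page, page, PROT_NONE)) return NULL;

       // Right-align the buffer so it ends flush against the guard page.
       return base + npages * page - len;

     }

     int main(void) {

       char* p = guarded_alloc(10);
       memset(p, 'A', 10);  // in bounds: fine
       // p[10] = 'X';      // one past the end: SIGSEGV on the guard page
       printf("ok\n");
       return 0;

     }

   so a single-byte overflow past the end faults immediately.)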
Since it always uses mmap(), @@ -90,14 +101,11 @@ static void* __dislocator_alloc(size_t len) { void* ret; - if (total_mem + len > max_mem || total_mem + len < total_mem) { - if (hard_fail) - FATAL("total allocs exceed %u MB", max_mem / 1024 / 1024); + if (hard_fail) FATAL("total allocs exceed %u MB", max_mem / 1024 / 1024); - DEBUGF("total allocs exceed %u MB, returning NULL", - max_mem / 1024 / 1024); + DEBUGF("total allocs exceed %u MB, returning NULL", max_mem / 1024 / 1024); return NULL; @@ -142,7 +150,6 @@ static void* __dislocator_alloc(size_t len) { } - /* The "user-facing" wrapper for calloc(). This just checks for overflows and displays debug messages if requested. */ @@ -157,8 +164,11 @@ void* calloc(size_t elem_len, size_t elem_cnt) { if (elem_cnt && len / elem_cnt != elem_len) { if (no_calloc_over) { - DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len, elem_cnt); + + DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len, + elem_cnt); return NULL; + } FATAL("calloc(%zu, %zu) would overflow", elem_len, elem_cnt); @@ -174,7 +184,6 @@ void* calloc(size_t elem_len, size_t elem_cnt) { } - /* The wrapper for malloc(). Roughly the same, also clobbers the returned memory (unlike calloc(), malloc() is not guaranteed to return zeroed memory). */ @@ -193,7 +202,6 @@ void* malloc(size_t len) { } - /* The wrapper for free(). This simply marks the entire region as PROT_NONE. If the region is already freed, the code will segfault during the attempt to read the canary. Not very graceful, but works, right? */ @@ -224,7 +232,6 @@ void free(void* ptr) { } - /* Realloc is pretty straightforward, too. We forcibly reallocate the buffer, move data, and then free (aka mprotect()) the original one. */ @@ -249,7 +256,6 @@ void* realloc(void* ptr, size_t len) { } - __attribute__((constructor)) void __dislocator_init(void) { u8* tmp = getenv("AFL_LD_LIMIT_MB"); @@ -266,3 +272,4 @@ __attribute__((constructor)) void __dislocator_init(void) { no_calloc_over = !!getenv("AFL_LD_NO_CALLOC_OVER"); } + diff --git a/libtokencap/libtokencap.so.c b/libtokencap/libtokencap.so.c index 54072279..fa26447e 100644 --- a/libtokencap/libtokencap.so.c +++ b/libtokencap/libtokencap.so.c @@ -30,27 +30,23 @@ # error "Sorry, this library is Linux-specific for now!" #endif /* !__linux__ */ - /* Mapping data and such */ #define MAX_MAPPINGS 1024 -static struct mapping { - void *st, *en; -} __tokencap_ro[MAX_MAPPINGS]; +static struct mapping { void *st, *en; } __tokencap_ro[MAX_MAPPINGS]; static u32 __tokencap_ro_cnt; static u8 __tokencap_ro_loaded; static FILE* __tokencap_out_file; - /* Identify read-only regions in memory. Only parameters that fall into these ranges are worth dumping when passed to strcmp() and so on. Read-write regions are far more likely to contain user input instead. */ static void __tokencap_load_mappings(void) { - u8 buf[MAX_LINE]; + u8 buf[MAX_LINE]; FILE* f = fopen("/proc/self/maps", "r"); __tokencap_ro_loaded = 1; @@ -59,8 +55,8 @@ static void __tokencap_load_mappings(void) { while (fgets(buf, MAX_LINE, f)) { - u8 rf, wf; - void* st, *en; + u8 rf, wf; + void *st, *en; if (sscanf(buf, "%p-%p %c%c", &st, &en, &rf, &wf) != 4) continue; if (wf == 'w' || rf != 'r') continue; @@ -76,7 +72,6 @@ static void __tokencap_load_mappings(void) { } - /* Check an address against the list of read-only mappings. 
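(For illustration, the same check reduced to a self-contained sketch; the
   shipped code differs in that it parses the table once, caches the ranges in
   __tokencap_ro[], and sizes the buffer with MAX_LINE:

     #include <stdio.h>

     // Return 1 if ptr lies inside a mapping whose permission field starts
     // with "r-", i.e. readable but not writable.
     static int is_ro(const void* ptr) {

       char  buf[512];
       void *st, *en;
       char  rf, wf;
       FILE* f = fopen("/proc/self/maps", "r");
       if (!f) return 0;

       while (fgets(buf, sizeof(buf), f)) {

         if (sscanf(buf, "%p-%p %c%c", &st, &en, &rf, &wf) != 4) continue;
         if (rf != 'r' || wf == 'w') continue;
         if (ptr >= st && ptr <= en) { fclose(f); return 1; }

       }

       fclose(f);
       return 0;

     }

   so only pointers into read-only segments ever get dumped.)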
*/ static u8 __tokencap_is_ro(const void* ptr) { @@ -85,20 +80,19 @@ static u8 __tokencap_is_ro(const void* ptr) { if (!__tokencap_ro_loaded) __tokencap_load_mappings(); - for (i = 0; i < __tokencap_ro_cnt; i++) + for (i = 0; i < __tokencap_ro_cnt; i++) if (ptr >= __tokencap_ro[i].st && ptr <= __tokencap_ro[i].en) return 1; return 0; } - /* Dump an interesting token to output file, quoting and escaping it properly. */ static void __tokencap_dump(const u8* ptr, size_t len, u8 is_text) { - u8 buf[MAX_AUTO_EXTRA * 4 + 1]; + u8 buf[MAX_AUTO_EXTRA * 4 + 1]; u32 i; u32 pos = 0; @@ -120,9 +114,7 @@ static void __tokencap_dump(const u8* ptr, size_t len, u8 is_text) { pos += 4; break; - default: - - buf[pos++] = ptr[i]; + default: buf[pos++] = ptr[i]; } @@ -130,11 +122,10 @@ static void __tokencap_dump(const u8* ptr, size_t len, u8 is_text) { buf[pos] = 0; - fprintf(__tokencap_out_file, "\"%s\"\n", buf); + fprintf(__tokencap_out_file, "\"%s\"\n", buf); } - /* Replacements for strcmp(), memcmp(), and so on. Note that these will be used only if the target is compiled with -fno-builtins and linked dynamically. */ @@ -151,13 +142,13 @@ int strcmp(const char* str1, const char* str2) { if (c1 != c2) return (c1 > c2) ? 1 : -1; if (!c1) return 0; - str1++; str2++; + str1++; + str2++; } } - #undef strncmp int strncmp(const char* str1, const char* str2, size_t len) { @@ -171,7 +162,8 @@ int strncmp(const char* str1, const char* str2, size_t len) { if (!c1) return 0; if (c1 != c2) return (c1 > c2) ? 1 : -1; - str1++; str2++; + str1++; + str2++; } @@ -179,7 +171,6 @@ int strncmp(const char* str1, const char* str2, size_t len) { } - #undef strcasecmp int strcasecmp(const char* str1, const char* str2) { @@ -193,13 +184,13 @@ int strcasecmp(const char* str1, const char* str2) { if (c1 != c2) return (c1 > c2) ? 1 : -1; if (!c1) return 0; - str1++; str2++; + str1++; + str2++; } } - #undef strncasecmp int strncasecmp(const char* str1, const char* str2, size_t len) { @@ -213,7 +204,8 @@ int strncasecmp(const char* str1, const char* str2, size_t len) { if (!c1) return 0; if (c1 != c2) return (c1 > c2) ? 1 : -1; - str1++; str2++; + str1++; + str2++; } @@ -221,7 +213,6 @@ int strncasecmp(const char* str1, const char* str2, size_t len) { } - #undef memcmp int memcmp(const void* mem1, const void* mem2, size_t len) { @@ -233,7 +224,8 @@ int memcmp(const void* mem1, const void* mem2, size_t len) { unsigned char c1 = *(const char*)mem1, c2 = *(const char*)mem2; if (c1 != c2) return (c1 > c2) ? 
1 : -1; - mem1++; mem2++; + mem1++; + mem2++; } @@ -241,7 +233,6 @@ int memcmp(const void* mem1, const void* mem2, size_t len) { } - #undef strstr char* strstr(const char* haystack, const char* needle) { @@ -249,16 +240,17 @@ char* strstr(const char* haystack, const char* needle) { if (__tokencap_is_ro(haystack)) __tokencap_dump(haystack, strlen(haystack), 1); - if (__tokencap_is_ro(needle)) - __tokencap_dump(needle, strlen(needle), 1); + if (__tokencap_is_ro(needle)) __tokencap_dump(needle, strlen(needle), 1); do { + const char* n = needle; const char* h = haystack; - while(*n && *h && *n == *h) n++, h++; + while (*n && *h && *n == *h) + n++, h++; - if(!*n) return (char*)haystack; + if (!*n) return (char*)haystack; } while (*(haystack++)); @@ -266,7 +258,6 @@ char* strstr(const char* haystack, const char* needle) { } - #undef strcasestr char* strcasestr(const char* haystack, const char* needle) { @@ -274,25 +265,24 @@ char* strcasestr(const char* haystack, const char* needle) { if (__tokencap_is_ro(haystack)) __tokencap_dump(haystack, strlen(haystack), 1); - if (__tokencap_is_ro(needle)) - __tokencap_dump(needle, strlen(needle), 1); + if (__tokencap_is_ro(needle)) __tokencap_dump(needle, strlen(needle), 1); do { const char* n = needle; const char* h = haystack; - while(*n && *h && tolower(*n) == tolower(*h)) n++, h++; + while (*n && *h && tolower(*n) == tolower(*h)) + n++, h++; - if(!*n) return (char*)haystack; + if (!*n) return (char*)haystack; - } while(*(haystack++)); + } while (*(haystack++)); return 0; } - /* Init code to open the output file (or default to stderr). */ __attribute__((constructor)) void __tokencap_init(void) { diff --git a/llvm_mode/LLVMInsTrim.so.cc b/llvm_mode/LLVMInsTrim.so.cc index 95b52d48..4b5597e2 100644 --- a/llvm_mode/LLVMInsTrim.so.cc +++ b/llvm_mode/LLVMInsTrim.so.cc @@ -37,268 +37,349 @@ static cl::opt<bool> LoopHeadOpt("loophead", cl::desc("LoopHead"), cl::init(false)); namespace { - struct InsTrim : public ModulePass { - protected: - std::list<std::string> myWhitelist; +struct InsTrim : public ModulePass { - private: - std::mt19937 generator; - int total_instr = 0; + protected: + std::list<std::string> myWhitelist; - unsigned int genLabel() { - return generator() & (MAP_SIZE - 1); - } + private: + std::mt19937 generator; + int total_instr = 0; + + unsigned int genLabel() { + + return generator() & (MAP_SIZE - 1); + + } + + public: + static char ID; + InsTrim() : ModulePass(ID), generator(0) { - public: - static char ID; - InsTrim() : ModulePass(ID), generator(0) { - char* instWhiteListFilename = getenv("AFL_LLVM_WHITELIST"); - if (instWhiteListFilename) { - std::string line; - std::ifstream fileStream; - fileStream.open(instWhiteListFilename); - if (!fileStream) - report_fatal_error("Unable to open AFL_LLVM_WHITELIST"); + char *instWhiteListFilename = getenv("AFL_LLVM_WHITELIST"); + if (instWhiteListFilename) { + + std::string line; + std::ifstream fileStream; + fileStream.open(instWhiteListFilename); + if (!fileStream) report_fatal_error("Unable to open AFL_LLVM_WHITELIST"); + getline(fileStream, line); + while (fileStream) { + + myWhitelist.push_back(line); getline(fileStream, line); - while (fileStream) { - myWhitelist.push_back(line); - getline(fileStream, line); - } + } - } - void getAnalysisUsage(AnalysisUsage &AU) const override { - AU.addRequired<DominatorTreeWrapperPass>(); } + } + + void getAnalysisUsage(AnalysisUsage &AU) const override { + + AU.addRequired<DominatorTreeWrapperPass>(); + + } + #if LLVM_VERSION_MAJOR < 4 - const char * + const 
char * #else - StringRef + StringRef #endif - getPassName() const override { - return "InstTrim Instrumentation"; - } + getPassName() const override { + + return "InstTrim Instrumentation"; + + } + + bool runOnModule(Module &M) override { + + char be_quiet = 0; + + if (isatty(2) && !getenv("AFL_QUIET")) { + + SAYF(cCYA "LLVMInsTrim" VERSION cRST " by csienslab\n"); + + } else + + be_quiet = 1; - bool runOnModule(Module &M) override { - char be_quiet = 0; - - if (isatty(2) && !getenv("AFL_QUIET")) { - SAYF(cCYA "LLVMInsTrim" VERSION cRST " by csienslab\n"); - } else be_quiet = 1; - #if LLVM_VERSION_MAJOR < 9 - char* neverZero_counters_str; - if ((neverZero_counters_str = getenv("AFL_LLVM_NOT_ZERO")) != NULL) - OKF("LLVM neverZero activated (by hexcoder)\n"); + char *neverZero_counters_str; + if ((neverZero_counters_str = getenv("AFL_LLVM_NOT_ZERO")) != NULL) + OKF("LLVM neverZero activated (by hexcoder)\n"); #endif - - if (getenv("AFL_LLVM_INSTRIM_LOOPHEAD") != NULL || getenv("LOOPHEAD") != NULL) { - LoopHeadOpt = true; - } - // this is our default - MarkSetOpt = true; - -/* // I dont think this makes sense to port into LLVMInsTrim - char* inst_ratio_str = getenv("AFL_INST_RATIO"); - unsigned int inst_ratio = 100; - if (inst_ratio_str) { - if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || !inst_ratio || inst_ratio > 100) - FATAL("Bad value of AFL_INST_RATIO (must be between 1 and 100)"); - } -*/ + if (getenv("AFL_LLVM_INSTRIM_LOOPHEAD") != NULL || + getenv("LOOPHEAD") != NULL) { + + LoopHeadOpt = true; + + } + + // this is our default + MarkSetOpt = true; + + /* // I dont think this makes sense to port into LLVMInsTrim + char* inst_ratio_str = getenv("AFL_INST_RATIO"); + unsigned int inst_ratio = 100; + if (inst_ratio_str) { + + if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || !inst_ratio || + inst_ratio > 100) FATAL("Bad value of AFL_INST_RATIO (must be between 1 + and 100)"); + + } + + */ - LLVMContext &C = M.getContext(); - IntegerType *Int8Ty = IntegerType::getInt8Ty(C); - IntegerType *Int32Ty = IntegerType::getInt32Ty(C); + LLVMContext &C = M.getContext(); + IntegerType *Int8Ty = IntegerType::getInt8Ty(C); + IntegerType *Int32Ty = IntegerType::getInt32Ty(C); - GlobalVariable *CovMapPtr = new GlobalVariable( + GlobalVariable *CovMapPtr = new GlobalVariable( M, PointerType::getUnqual(Int8Ty), false, GlobalValue::ExternalLinkage, nullptr, "__afl_area_ptr"); - GlobalVariable *OldPrev = new GlobalVariable( - M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc", - 0, GlobalVariable::GeneralDynamicTLSModel, 0, false); + GlobalVariable *OldPrev = new GlobalVariable( + M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc", 0, + GlobalVariable::GeneralDynamicTLSModel, 0, false); - u64 total_rs = 0; - u64 total_hs = 0; + u64 total_rs = 0; + u64 total_hs = 0; + + for (Function &F : M) { + + if (!F.size()) { continue; } + + if (!myWhitelist.empty()) { + + bool instrumentBlock = false; + DebugLoc Loc; + StringRef instFilename; + + for (auto &BB : F) { + + BasicBlock::iterator IP = BB.getFirstInsertionPt(); + IRBuilder<> IRB(&(*IP)); + if (!Loc) Loc = IP->getDebugLoc(); - for (Function &F : M) { - if (!F.size()) { - continue; } - if (!myWhitelist.empty()) { - bool instrumentBlock = false; - DebugLoc Loc; - StringRef instFilename; + if (Loc) { + + DILocation *cDILoc = dyn_cast<DILocation>(Loc.getAsMDNode()); + + unsigned int instLine = cDILoc->getLine(); + instFilename = cDILoc->getFilename(); + + if (instFilename.str().empty()) { + + /* If the original location 
is empty, try using the inlined location + */ + DILocation *oDILoc = cDILoc->getInlinedAt(); + if (oDILoc) { + + instFilename = oDILoc->getFilename(); + instLine = oDILoc->getLine(); + + } - for (auto &BB : F) { - BasicBlock::iterator IP = BB.getFirstInsertionPt(); - IRBuilder<> IRB(&(*IP)); - if (!Loc) - Loc = IP->getDebugLoc(); } - if ( Loc ) { - DILocation *cDILoc = dyn_cast<DILocation>(Loc.getAsMDNode()); + /* Continue only if we know where we actually are */ + if (!instFilename.str().empty()) { - unsigned int instLine = cDILoc->getLine(); - instFilename = cDILoc->getFilename(); + for (std::list<std::string>::iterator it = myWhitelist.begin(); + it != myWhitelist.end(); ++it) { - if (instFilename.str().empty()) { - /* If the original location is empty, try using the inlined location */ - DILocation *oDILoc = cDILoc->getInlinedAt(); - if (oDILoc) { - instFilename = oDILoc->getFilename(); - instLine = oDILoc->getLine(); - } - } + if (instFilename.str().length() >= it->length()) { + + if (instFilename.str().compare( + instFilename.str().length() - it->length(), + it->length(), *it) == 0) { + + instrumentBlock = true; + break; + + } - /* Continue only if we know where we actually are */ - if (!instFilename.str().empty()) { - for (std::list<std::string>::iterator it = myWhitelist.begin(); it != myWhitelist.end(); ++it) { - if (instFilename.str().length() >= it->length()) { - if (instFilename.str().compare(instFilename.str().length() - it->length(), it->length(), *it) == 0) { - instrumentBlock = true; - break; - } - } - } } - } - /* Either we couldn't figure out our location or the location is - * not whitelisted, so we skip instrumentation. */ - if (!instrumentBlock) { - if (!instFilename.str().empty()) - SAYF(cYEL "[!] " cBRI "Not in whitelist, skipping %s ...\n", instFilename.str().c_str()); - else - SAYF(cYEL "[!] " cBRI "No filename information found, skipping it"); - continue; + } + } + } - std::unordered_set<BasicBlock *> MS; - if (!MarkSetOpt) { - for (auto &BB : F) { - MS.insert(&BB); - } - total_rs += F.size(); + /* Either we couldn't figure out our location or the location is + * not whitelisted, so we skip instrumentation. */ + if (!instrumentBlock) { + + if (!instFilename.str().empty()) + SAYF(cYEL "[!] " cBRI "Not in whitelist, skipping %s ...\n", + instFilename.str().c_str()); + else + SAYF(cYEL "[!] 
" cBRI "No filename information found, skipping it"); + continue; + + } + + } + + std::unordered_set<BasicBlock *> MS; + if (!MarkSetOpt) { + + for (auto &BB : F) { + + MS.insert(&BB); + + } + + total_rs += F.size(); + + } else { + + auto Result = markNodes(&F); + auto RS = Result.first; + auto HS = Result.second; + + MS.insert(RS.begin(), RS.end()); + if (!LoopHeadOpt) { + + MS.insert(HS.begin(), HS.end()); + total_rs += MS.size(); + } else { - auto Result = markNodes(&F); - auto RS = Result.first; - auto HS = Result.second; - - MS.insert(RS.begin(), RS.end()); - if (!LoopHeadOpt) { - MS.insert(HS.begin(), HS.end()); - total_rs += MS.size(); - } else { - DenseSet<std::pair<BasicBlock *, BasicBlock *>> EdgeSet; - DominatorTreeWrapperPass *DTWP = &getAnalysis<DominatorTreeWrapperPass>(F); - auto DT = &DTWP->getDomTree(); - - total_rs += RS.size(); - total_hs += HS.size(); - - for (BasicBlock *BB : HS) { - bool Inserted = false; - for (auto BI = pred_begin(BB), BE = pred_end(BB); - BI != BE; ++BI - ) { - auto Edge = BasicBlockEdge(*BI, BB); - if (Edge.isSingleEdge() && DT->dominates(Edge, BB)) { - EdgeSet.insert({*BI, BB}); - Inserted = true; - break; - } - } - if (!Inserted) { - MS.insert(BB); - total_rs += 1; - total_hs -= 1; + + DenseSet<std::pair<BasicBlock *, BasicBlock *>> EdgeSet; + DominatorTreeWrapperPass * DTWP = + &getAnalysis<DominatorTreeWrapperPass>(F); + auto DT = &DTWP->getDomTree(); + + total_rs += RS.size(); + total_hs += HS.size(); + + for (BasicBlock *BB : HS) { + + bool Inserted = false; + for (auto BI = pred_begin(BB), BE = pred_end(BB); BI != BE; ++BI) { + + auto Edge = BasicBlockEdge(*BI, BB); + if (Edge.isSingleEdge() && DT->dominates(Edge, BB)) { + + EdgeSet.insert({*BI, BB}); + Inserted = true; + break; + } + } - for (auto I = EdgeSet.begin(), E = EdgeSet.end(); I != E; ++I) { - auto PredBB = I->first; - auto SuccBB = I->second; - auto NewBB = SplitBlockPredecessors(SuccBB, {PredBB}, ".split", - DT, nullptr, -#if LLVM_VERSION_MAJOR >= 8 - nullptr, -#endif - false); - MS.insert(NewBB); + + if (!Inserted) { + + MS.insert(BB); + total_rs += 1; + total_hs -= 1; + } - } - auto *EBB = &F.getEntryBlock(); - if (succ_begin(EBB) == succ_end(EBB)) { - MS.insert(EBB); - total_rs += 1; } - for (BasicBlock &BB : F) { - if (MS.find(&BB) == MS.end()) { - continue; - } - IRBuilder<> IRB(&*BB.getFirstInsertionPt()); - IRB.CreateStore(ConstantInt::get(Int32Ty, genLabel()), OldPrev); + for (auto I = EdgeSet.begin(), E = EdgeSet.end(); I != E; ++I) { + + auto PredBB = I->first; + auto SuccBB = I->second; + auto NewBB = + SplitBlockPredecessors(SuccBB, {PredBB}, ".split", DT, nullptr, +#if LLVM_VERSION_MAJOR >= 8 + nullptr, +#endif + false); + MS.insert(NewBB); + } + + } + + auto *EBB = &F.getEntryBlock(); + if (succ_begin(EBB) == succ_end(EBB)) { + + MS.insert(EBB); + total_rs += 1; + } for (BasicBlock &BB : F) { - auto PI = pred_begin(&BB); - auto PE = pred_end(&BB); - if (MarkSetOpt && MS.find(&BB) == MS.end()) { - continue; - } + if (MS.find(&BB) == MS.end()) { continue; } IRBuilder<> IRB(&*BB.getFirstInsertionPt()); - Value *L = NULL; - if (PI == PE) { - L = ConstantInt::get(Int32Ty, genLabel()); - } else { - auto *PN = PHINode::Create(Int32Ty, 0, "", &*BB.begin()); - DenseMap<BasicBlock *, unsigned> PredMap; - for (auto PI = pred_begin(&BB), PE = pred_end(&BB); - PI != PE; ++PI - ) { - BasicBlock *PBB = *PI; - auto It = PredMap.insert({PBB, genLabel()}); - unsigned Label = It.first->second; - PN->addIncoming(ConstantInt::get(Int32Ty, Label), PBB); - } - L = PN; + 
IRB.CreateStore(ConstantInt::get(Int32Ty, genLabel()), OldPrev); + + } + + } + + for (BasicBlock &BB : F) { + + auto PI = pred_begin(&BB); + auto PE = pred_end(&BB); + if (MarkSetOpt && MS.find(&BB) == MS.end()) { continue; } + + IRBuilder<> IRB(&*BB.getFirstInsertionPt()); + Value * L = NULL; + if (PI == PE) { + + L = ConstantInt::get(Int32Ty, genLabel()); + + } else { + + auto *PN = PHINode::Create(Int32Ty, 0, "", &*BB.begin()); + DenseMap<BasicBlock *, unsigned> PredMap; + for (auto PI = pred_begin(&BB), PE = pred_end(&BB); PI != PE; ++PI) { + + BasicBlock *PBB = *PI; + auto It = PredMap.insert({PBB, genLabel()}); + unsigned Label = It.first->second; + PN->addIncoming(ConstantInt::get(Int32Ty, Label), PBB); + } - /* Load prev_loc */ - LoadInst *PrevLoc = IRB.CreateLoad(OldPrev); - PrevLoc->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); - Value *PrevLocCasted = IRB.CreateZExt(PrevLoc, IRB.getInt32Ty()); + L = PN; + + } + + /* Load prev_loc */ + LoadInst *PrevLoc = IRB.CreateLoad(OldPrev); + PrevLoc->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); + Value *PrevLocCasted = IRB.CreateZExt(PrevLoc, IRB.getInt32Ty()); + + /* Load SHM pointer */ + LoadInst *MapPtr = IRB.CreateLoad(CovMapPtr); + MapPtr->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); + Value *MapPtrIdx = + IRB.CreateGEP(MapPtr, IRB.CreateXor(PrevLocCasted, L)); - /* Load SHM pointer */ - LoadInst *MapPtr = IRB.CreateLoad(CovMapPtr); - MapPtr->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); - Value *MapPtrIdx = IRB.CreateGEP(MapPtr, IRB.CreateXor(PrevLocCasted, L)); + /* Update bitmap */ + LoadInst *Counter = IRB.CreateLoad(MapPtrIdx); + Counter->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); - /* Update bitmap */ - LoadInst *Counter = IRB.CreateLoad(MapPtrIdx); - Counter->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); - - Value *Incr = IRB.CreateAdd(Counter, ConstantInt::get(Int8Ty, 1)); + Value *Incr = IRB.CreateAdd(Counter, ConstantInt::get(Int8Ty, 1)); #if LLVM_VERSION_MAJOR < 9 - if (neverZero_counters_str != NULL) // with llvm 9 we make this the default as the bug in llvm is then fixed + if (neverZero_counters_str != + NULL) // with llvm 9 we make this the default as the bug in llvm is + // then fixed #else - if (1) // with llvm 9 we make this the default as the bug in llvm is then fixed + if (1) // with llvm 9 we make this the default as the bug in llvm is + // then fixed #endif - { + { + /* hexcoder: Realize a counter that skips zero during overflow. 
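(In plain C, the transformation described in the lines below amounts to:

      u8 counter = 255;
      counter = counter + 1;      // wraps to 0 on overflow
      counter += (counter == 0);  // add the carry back in

   so 255 + 1 lands on 1 rather than 0, and every other value is unchanged.
   The IR built underneath, ICmpEQ against zero, ZExt to i8, then Add, is the
   same computation spelled out as LLVM instructions.)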
- * Once this counter reaches its maximum value, it next increments to 1 + * Once this counter reaches its maximum value, it next increments to + * 1 * * Instead of * Counter + 1 -> Counter @@ -306,38 +387,52 @@ namespace { * Counter + 1 -> {Counter, OverflowFlag} * Counter + OverflowFlag -> Counter */ - auto cf = IRB.CreateICmpEQ(Incr, ConstantInt::get(Int8Ty, 0)); - auto carry = IRB.CreateZExt(cf, Int8Ty); - Incr = IRB.CreateAdd(Incr, carry); - } - - IRB.CreateStore(Incr, MapPtrIdx)->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); - - /* Set prev_loc to cur_loc >> 1 */ - /* - StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int32Ty, L >> 1), OldPrev); - Store->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); - */ - - total_instr++; + auto cf = IRB.CreateICmpEQ(Incr, ConstantInt::get(Int8Ty, 0)); + auto carry = IRB.CreateZExt(cf, Int8Ty); + Incr = IRB.CreateAdd(Incr, carry); + } + + IRB.CreateStore(Incr, MapPtrIdx) + ->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); + + /* Set prev_loc to cur_loc >> 1 */ + /* + StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int32Ty, L >> 1), + OldPrev); Store->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, + None)); + */ + + total_instr++; + } - OKF("Instrumented %u locations (%llu, %llu) (%s mode)\n"/*", ratio %u%%)."*/, - total_instr, total_rs, total_hs, - getenv("AFL_HARDEN") ? "hardened" : - ((getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) ? - "ASAN/MSAN" : "non-hardened")/*, inst_ratio*/); - return false; } - }; // end of struct InsTrim + + OKF("Instrumented %u locations (%llu, %llu) (%s mode)\n" /*", ratio + %u%%)."*/ + , + total_instr, total_rs, total_hs, + getenv("AFL_HARDEN") + ? "hardened" + : ((getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) + ? 
"ASAN/MSAN" + : "non-hardened") /*, inst_ratio*/); + return false; + + } + +}; // end of struct InsTrim + } // end of anonymous namespace char InsTrim::ID = 0; static void registerAFLPass(const PassManagerBuilder &, legacy::PassManagerBase &PM) { + PM.add(new InsTrim()); + } static RegisterStandardPasses RegisterAFLPass( @@ -345,3 +440,4 @@ static RegisterStandardPasses RegisterAFLPass( static RegisterStandardPasses RegisterAFLPass0( PassManagerBuilder::EP_EnabledOnOptLevel0, registerAFLPass); + diff --git a/llvm_mode/MarkNodes.cc b/llvm_mode/MarkNodes.cc index 348dc264..2aeeda8d 100644 --- a/llvm_mode/MarkNodes.cc +++ b/llvm_mode/MarkNodes.cc @@ -19,207 +19,267 @@ using namespace llvm; -DenseMap<BasicBlock *, uint32_t> LMap; -std::vector<BasicBlock *> Blocks; -std::set<uint32_t> Marked , Markabove; -std::vector< std::vector<uint32_t> > Succs , Preds; +DenseMap<BasicBlock *, uint32_t> LMap; +std::vector<BasicBlock *> Blocks; +std::set<uint32_t> Marked, Markabove; +std::vector<std::vector<uint32_t> > Succs, Preds; + +void reset() { -void reset(){ LMap.clear(); Blocks.clear(); Marked.clear(); Markabove.clear(); + } uint32_t start_point; void labelEachBlock(Function *F) { + // Fake single endpoint; LMap[NULL] = Blocks.size(); Blocks.push_back(NULL); - + // Assign the unique LabelID to each block; for (auto I = F->begin(), E = F->end(); I != E; ++I) { + BasicBlock *BB = &*I; LMap[BB] = Blocks.size(); Blocks.push_back(BB); + } - + start_point = LMap[&F->getEntryBlock()]; + } void buildCFG(Function *F) { - Succs.resize( Blocks.size() ); - Preds.resize( Blocks.size() ); - for( size_t i = 0 ; i < Succs.size() ; i ++ ){ - Succs[ i ].clear(); - Preds[ i ].clear(); + + Succs.resize(Blocks.size()); + Preds.resize(Blocks.size()); + for (size_t i = 0; i < Succs.size(); i++) { + + Succs[i].clear(); + Preds[i].clear(); + } - //uint32_t FakeID = 0; + // uint32_t FakeID = 0; for (auto S = F->begin(), E = F->end(); S != E; ++S) { + BasicBlock *BB = &*S; - uint32_t MyID = LMap[BB]; - //if (succ_begin(BB) == succ_end(BB)) { - //Succs[MyID].push_back(FakeID); - //Marked.insert(MyID); + uint32_t MyID = LMap[BB]; + // if (succ_begin(BB) == succ_end(BB)) { + + // Succs[MyID].push_back(FakeID); + // Marked.insert(MyID); //} for (auto I = succ_begin(BB), E = succ_end(BB); I != E; ++I) { + Succs[MyID].push_back(LMap[*I]); + } + } + } -std::vector< std::vector<uint32_t> > tSuccs; -std::vector<bool> tag , indfs; +std::vector<std::vector<uint32_t> > tSuccs; +std::vector<bool> tag, indfs; void DFStree(size_t now_id) { - if(tag[now_id]) return; - tag[now_id]=true; - indfs[now_id]=true; - for (auto succ: tSuccs[now_id]) { - if(tag[succ] and indfs[succ]) { + + if (tag[now_id]) return; + tag[now_id] = true; + indfs[now_id] = true; + for (auto succ : tSuccs[now_id]) { + + if (tag[succ] and indfs[succ]) { + Marked.insert(succ); Markabove.insert(succ); continue; + } + Succs[now_id].push_back(succ); Preds[succ].push_back(now_id); DFStree(succ); + } - indfs[now_id]=false; + + indfs[now_id] = false; + } + void turnCFGintoDAG(Function *F) { + tSuccs = Succs; tag.resize(Blocks.size()); indfs.resize(Blocks.size()); - for (size_t i = 0; i < Blocks.size(); ++ i) { + for (size_t i = 0; i < Blocks.size(); ++i) { + Succs[i].clear(); - tag[i]=false; - indfs[i]=false; + tag[i] = false; + indfs[i] = false; + } + DFStree(start_point); - for (size_t i = 0; i < Blocks.size(); ++ i) - if( Succs[i].empty() ){ + for (size_t i = 0; i < Blocks.size(); ++i) + if (Succs[i].empty()) { + Succs[i].push_back(0); Preds[0].push_back(i); + } + } uint32_t 
timeStamp; -namespace DominatorTree{ - std::vector< std::vector<uint32_t> > cov; - std::vector<uint32_t> dfn, nfd, par, sdom, idom, mom, mn; +namespace DominatorTree { + +std::vector<std::vector<uint32_t> > cov; +std::vector<uint32_t> dfn, nfd, par, sdom, idom, mom, mn; + +bool Compare(uint32_t u, uint32_t v) { + + return dfn[u] < dfn[v]; + +} + +uint32_t eval(uint32_t u) { + + if (mom[u] == u) return u; + uint32_t res = eval(mom[u]); + if (Compare(sdom[mn[mom[u]]], sdom[mn[u]])) { mn[u] = mn[mom[u]]; } + return mom[u] = res; + +} + +void DFS(uint32_t now) { + + timeStamp += 1; + dfn[now] = timeStamp; + nfd[timeStamp - 1] = now; + for (auto succ : Succs[now]) { + + if (dfn[succ] == 0) { + + par[succ] = now; + DFS(succ); - bool Compare(uint32_t u, uint32_t v) { - return dfn[u] < dfn[v]; - } - uint32_t eval(uint32_t u) { - if( mom[u] == u ) return u; - uint32_t res = eval( mom[u] ); - if(Compare(sdom[mn[mom[u]]] , sdom[mn[u]])) { - mn[u] = mn[mom[u]]; } - return mom[u] = res; + } - void DFS(uint32_t now) { - timeStamp += 1; - dfn[now] = timeStamp; - nfd[timeStamp - 1] = now; - for( auto succ : Succs[now] ) { - if( dfn[succ] == 0 ) { - par[succ] = now; - DFS(succ); - } - } +} + +void DominatorTree(Function *F) { + + if (Blocks.empty()) return; + uint32_t s = start_point; + + // Initialization + mn.resize(Blocks.size()); + cov.resize(Blocks.size()); + dfn.resize(Blocks.size()); + nfd.resize(Blocks.size()); + par.resize(Blocks.size()); + mom.resize(Blocks.size()); + sdom.resize(Blocks.size()); + idom.resize(Blocks.size()); + + for (uint32_t i = 0; i < Blocks.size(); i++) { + + dfn[i] = 0; + nfd[i] = Blocks.size(); + cov[i].clear(); + idom[i] = mom[i] = mn[i] = sdom[i] = i; + } - void DominatorTree(Function *F) { - if( Blocks.empty() ) return; - uint32_t s = start_point; - - // Initialization - mn.resize(Blocks.size()); - cov.resize(Blocks.size()); - dfn.resize(Blocks.size()); - nfd.resize(Blocks.size()); - par.resize(Blocks.size()); - mom.resize(Blocks.size()); - sdom.resize(Blocks.size()); - idom.resize(Blocks.size()); - - for( uint32_t i = 0 ; i < Blocks.size() ; i ++ ) { - dfn[i] = 0; - nfd[i] = Blocks.size(); - cov[i].clear(); - idom[i] = mom[i] = mn[i] = sdom[i] = i; - } + timeStamp = 0; + DFS(s); - timeStamp = 0; - DFS(s); + for (uint32_t i = Blocks.size() - 1; i >= 1u; i--) { + + uint32_t now = nfd[i]; + if (now == Blocks.size()) { continue; } + for (uint32_t pre : Preds[now]) { + + if (dfn[pre]) { + + eval(pre); + if (Compare(sdom[mn[pre]], sdom[now])) { sdom[now] = sdom[mn[pre]]; } - for( uint32_t i = Blocks.size() - 1 ; i >= 1u ; i -- ) { - uint32_t now = nfd[i]; - if( now == Blocks.size() ) { - continue; - } - for( uint32_t pre : Preds[ now ] ) { - if( dfn[ pre ] ) { - eval(pre); - if( Compare(sdom[mn[pre]], sdom[now]) ) { - sdom[now] = sdom[mn[pre]]; - } - } - } - cov[sdom[now]].push_back(now); - mom[now] = par[now]; - for( uint32_t x : cov[par[now]] ) { - eval(x); - if( Compare(sdom[mn[x]], par[now]) ) { - idom[x] = mn[x]; - } else { - idom[x] = par[now]; - } } + } - for( uint32_t i = 1 ; i < Blocks.size() ; i += 1 ) { - uint32_t now = nfd[i]; - if( now == Blocks.size() ) { - continue; + cov[sdom[now]].push_back(now); + mom[now] = par[now]; + for (uint32_t x : cov[par[now]]) { + + eval(x); + if (Compare(sdom[mn[x]], par[now])) { + + idom[x] = mn[x]; + + } else { + + idom[x] = par[now]; + } - if(idom[now] != sdom[now]) - idom[now] = idom[idom[now]]; + } + } -} // End of DominatorTree -std::vector<uint32_t> Visited, InStack; -std::vector<uint32_t> TopoOrder, InDeg; -std::vector< 
std::vector<uint32_t> > t_Succ , t_Pred; + for (uint32_t i = 1; i < Blocks.size(); i += 1) { + + uint32_t now = nfd[i]; + if (now == Blocks.size()) { continue; } + if (idom[now] != sdom[now]) idom[now] = idom[idom[now]]; + + } + +} + +} // namespace DominatorTree + +std::vector<uint32_t> Visited, InStack; +std::vector<uint32_t> TopoOrder, InDeg; +std::vector<std::vector<uint32_t> > t_Succ, t_Pred; void Go(uint32_t now, uint32_t tt) { - if( now == tt ) return; + + if (now == tt) return; Visited[now] = InStack[now] = timeStamp; - for(uint32_t nxt : Succs[now]) { - if(Visited[nxt] == timeStamp and InStack[nxt] == timeStamp) { + for (uint32_t nxt : Succs[now]) { + + if (Visited[nxt] == timeStamp and InStack[nxt] == timeStamp) { + Marked.insert(nxt); + } + t_Succ[now].push_back(nxt); t_Pred[nxt].push_back(now); InDeg[nxt] += 1; - if(Visited[nxt] == timeStamp) { - continue; - } + if (Visited[nxt] == timeStamp) { continue; } Go(nxt, tt); + } InStack[now] = 0; + } void TopologicalSort(uint32_t ss, uint32_t tt) { + timeStamp += 1; Go(ss, tt); @@ -227,76 +287,111 @@ void TopologicalSort(uint32_t ss, uint32_t tt) { TopoOrder.clear(); std::queue<uint32_t> wait; wait.push(ss); - while( not wait.empty() ) { - uint32_t now = wait.front(); wait.pop(); + while (not wait.empty()) { + + uint32_t now = wait.front(); + wait.pop(); TopoOrder.push_back(now); - for(uint32_t nxt : t_Succ[now]) { + for (uint32_t nxt : t_Succ[now]) { + InDeg[nxt] -= 1; - if(InDeg[nxt] == 0u) { - wait.push(nxt); - } + if (InDeg[nxt] == 0u) { wait.push(nxt); } + } + } + } -std::vector< std::set<uint32_t> > NextMarked; -bool Indistinguish(uint32_t node1, uint32_t node2) { - if(NextMarked[node1].size() > NextMarked[node2].size()){ +std::vector<std::set<uint32_t> > NextMarked; +bool Indistinguish(uint32_t node1, uint32_t node2) { + + if (NextMarked[node1].size() > NextMarked[node2].size()) { + uint32_t _swap = node1; node1 = node2; node2 = _swap; + } - for(uint32_t x : NextMarked[node1]) { - if( NextMarked[node2].find(x) != NextMarked[node2].end() ) { - return true; - } + + for (uint32_t x : NextMarked[node1]) { + + if (NextMarked[node2].find(x) != NextMarked[node2].end()) { return true; } + } + return false; + } void MakeUniq(uint32_t now) { + bool StopFlag = false; if (Marked.find(now) == Marked.end()) { - for(uint32_t pred1 : t_Pred[now]) { - for(uint32_t pred2 : t_Pred[now]) { - if(pred1 == pred2) continue; - if(Indistinguish(pred1, pred2)) { + + for (uint32_t pred1 : t_Pred[now]) { + + for (uint32_t pred2 : t_Pred[now]) { + + if (pred1 == pred2) continue; + if (Indistinguish(pred1, pred2)) { + Marked.insert(now); StopFlag = true; break; + } + } - if (StopFlag) { - break; - } + + if (StopFlag) { break; } + } + } - if(Marked.find(now) != Marked.end()) { + + if (Marked.find(now) != Marked.end()) { + NextMarked[now].insert(now); + } else { - for(uint32_t pred : t_Pred[now]) { - for(uint32_t x : NextMarked[pred]) { + + for (uint32_t pred : t_Pred[now]) { + + for (uint32_t x : NextMarked[pred]) { + NextMarked[now].insert(x); + } + } + } + } void MarkSubGraph(uint32_t ss, uint32_t tt) { + TopologicalSort(ss, tt); - if(TopoOrder.empty()) return; + if (TopoOrder.empty()) return; + + for (uint32_t i : TopoOrder) { - for(uint32_t i : TopoOrder) { NextMarked[i].clear(); + } NextMarked[TopoOrder[0]].insert(TopoOrder[0]); - for(uint32_t i = 1 ; i < TopoOrder.size() ; i += 1) { + for (uint32_t i = 1; i < TopoOrder.size(); i += 1) { + MakeUniq(TopoOrder[i]); + } + } void MarkVertice(Function *F) { + uint32_t s = start_point; 
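  /* Descriptive note: t starts at the fake exit block (label 0); the loop
     below marks the subgraph between idom[t] and t with MarkSubGraph(), then
     climbs to idom[t], so the CFG is covered one dominator-tree region at a
     time until the walk reaches the entry point s. */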
InDeg.resize(Blocks.size()); @@ -306,26 +401,32 @@ void MarkVertice(Function *F) { t_Pred.resize(Blocks.size()); NextMarked.resize(Blocks.size()); - for( uint32_t i = 0 ; i < Blocks.size() ; i += 1 ) { + for (uint32_t i = 0; i < Blocks.size(); i += 1) { + Visited[i] = InStack[i] = InDeg[i] = 0; t_Succ[i].clear(); t_Pred[i].clear(); + } + timeStamp = 0; uint32_t t = 0; - //MarkSubGraph(s, t); - //return; + // MarkSubGraph(s, t); + // return; + + while (s != t) { - while( s != t ) { MarkSubGraph(DominatorTree::idom[t], t); t = DominatorTree::idom[t]; + } } // return {marked nodes} -std::pair<std::vector<BasicBlock *>, - std::vector<BasicBlock *> >markNodes(Function *F) { +std::pair<std::vector<BasicBlock *>, std::vector<BasicBlock *> > markNodes( + Function *F) { + assert(F->size() > 0 && "Function can not be empty"); reset(); @@ -335,21 +436,30 @@ std::pair<std::vector<BasicBlock *>, DominatorTree::DominatorTree(F); MarkVertice(F); - std::vector<BasicBlock *> Result , ResultAbove; - for( uint32_t x : Markabove ) { - auto it = Marked.find( x ); - if( it != Marked.end() ) - Marked.erase( it ); - if( x ) - ResultAbove.push_back(Blocks[x]); + std::vector<BasicBlock *> Result, ResultAbove; + for (uint32_t x : Markabove) { + + auto it = Marked.find(x); + if (it != Marked.end()) Marked.erase(it); + if (x) ResultAbove.push_back(Blocks[x]); + } - for( uint32_t x : Marked ) { + + for (uint32_t x : Marked) { + if (x == 0) { + continue; + } else { + Result.push_back(Blocks[x]); + } + } - return { Result , ResultAbove }; + return {Result, ResultAbove}; + } + diff --git a/llvm_mode/MarkNodes.h b/llvm_mode/MarkNodes.h index e3bf3ce5..23316652 100644 --- a/llvm_mode/MarkNodes.h +++ b/llvm_mode/MarkNodes.h @@ -1,11 +1,12 @@ #ifndef __MARK_NODES__ -#define __MARK_NODES__ +# define __MARK_NODES__ -#include "llvm/IR/BasicBlock.h" -#include "llvm/IR/Function.h" -#include<vector> +# include "llvm/IR/BasicBlock.h" +# include "llvm/IR/Function.h" +# include <vector> -std::pair<std::vector<llvm::BasicBlock *>, - std::vector<llvm::BasicBlock *>> markNodes(llvm::Function *F); +std::pair<std::vector<llvm::BasicBlock *>, std::vector<llvm::BasicBlock *>> +markNodes(llvm::Function *F); #endif + diff --git a/llvm_mode/afl-clang-fast.c b/llvm_mode/afl-clang-fast.c index 1b810edf..666fd043 100644 --- a/llvm_mode/afl-clang-fast.c +++ b/llvm_mode/afl-clang-fast.c @@ -34,16 +34,15 @@ #include <string.h> #include <assert.h> -static u8* obj_path; /* Path to runtime libraries */ -static u8** cc_params; /* Parameters passed to the real CC */ -static u32 cc_par_cnt = 1; /* Param count, including argv0 */ - +static u8* obj_path; /* Path to runtime libraries */ +static u8** cc_params; /* Parameters passed to the real CC */ +static u32 cc_par_cnt = 1; /* Param count, including argv0 */ /* Try to find the runtime libraries. If that fails, abort. 
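   The probe order implemented here: first $AFL_PATH/afl-llvm-rt.o when
   AFL_PATH is set in the environment, then the directory holding argv[0]
   when the invocation path contains a slash, and finally the compile-time
   AFL_PATH default; each candidate is simply tested with access(tmp, R_OK).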
*/ static void find_obj(u8* argv0) { - u8 *afl_path = getenv("AFL_PATH"); + u8* afl_path = getenv("AFL_PATH"); u8 *slash, *tmp; if (afl_path) { @@ -51,9 +50,11 @@ static void find_obj(u8* argv0) { tmp = alloc_printf("%s/afl-llvm-rt.o", afl_path); if (!access(tmp, R_OK)) { + obj_path = afl_path; ck_free(tmp); return; + } ck_free(tmp); @@ -64,7 +65,7 @@ static void find_obj(u8* argv0) { if (slash) { - u8 *dir; + u8* dir; *slash = 0; dir = ck_strdup(argv0); @@ -73,9 +74,11 @@ static void find_obj(u8* argv0) { tmp = alloc_printf("%s/afl-llvm-rt.o", dir); if (!access(tmp, R_OK)) { + obj_path = dir; ck_free(tmp); return; + } ck_free(tmp); @@ -84,33 +87,43 @@ static void find_obj(u8* argv0) { } if (!access(AFL_PATH "/afl-llvm-rt.o", R_OK)) { + obj_path = AFL_PATH; return; + } - FATAL("Unable to find 'afl-llvm-rt.o' or 'afl-llvm-pass.so.cc'. Please set AFL_PATH"); - -} + FATAL( + "Unable to find 'afl-llvm-rt.o' or 'afl-llvm-pass.so.cc'. Please set " + "AFL_PATH"); +} /* Copy argv to cc_params, making the necessary edits. */ static void edit_params(u32 argc, char** argv) { - u8 fortify_set = 0, asan_set = 0, x_set = 0, maybe_linking = 1, bit_mode = 0; - u8 *name; + u8 fortify_set = 0, asan_set = 0, x_set = 0, maybe_linking = 1, bit_mode = 0; + u8* name; cc_params = ck_alloc((argc + 128) * sizeof(u8*)); name = strrchr(argv[0], '/'); - if (!name) name = argv[0]; else name++; + if (!name) + name = argv[0]; + else + name++; if (!strcmp(name, "afl-clang-fast++")) { + u8* alt_cxx = getenv("AFL_CXX"); cc_params[0] = alt_cxx ? alt_cxx : (u8*)"clang++"; + } else { + u8* alt_cc = getenv("AFL_CC"); cc_params[0] = alt_cc ? alt_cc : (u8*)"clang"; + } /* There are three ways to compile with afl-clang-fast. In the traditional @@ -118,36 +131,50 @@ static void edit_params(u32 argc, char** argv) { much faster but has less coverage. Finally tere is the experimental 'trace-pc-guard' mode, we use native LLVM instrumentation callbacks instead. 
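(In the trace-pc-guard case the compiler plants a callback per edge and a
   runtime supplies the receivers; sketched from the documented
   SanitizerCoverage interface, roughly:

      // invoked at startup with the module's guard range, so that each
      // guard slot can be assigned an ID
      void __sanitizer_cov_trace_pc_guard_init(uint32_t* start,
                                               uint32_t* stop);

      // invoked on every instrumented edge with that edge's guard slot
      void __sanitizer_cov_trace_pc_guard(uint32_t* guard);

   The natural home for the real receivers is the afl-llvm-rt object that
   find_obj() above locates and the link step pulls in.)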
For trace-pc-guard see: - http://clang.llvm.org/docs/SanitizerCoverage.html#tracing-pcs-with-guards */ + http://clang.llvm.org/docs/SanitizerCoverage.html#tracing-pcs-with-guards + */ // laf - if (getenv("LAF_SPLIT_SWITCHES")||getenv("AFL_LLVM_LAF_SPLIT_SWITCHES")) { + if (getenv("LAF_SPLIT_SWITCHES") || getenv("AFL_LLVM_LAF_SPLIT_SWITCHES")) { + cc_params[cc_par_cnt++] = "-Xclang"; cc_params[cc_par_cnt++] = "-load"; cc_params[cc_par_cnt++] = "-Xclang"; - cc_params[cc_par_cnt++] = alloc_printf("%s/split-switches-pass.so", obj_path); + cc_params[cc_par_cnt++] = + alloc_printf("%s/split-switches-pass.so", obj_path); + } - if (getenv("LAF_TRANSFORM_COMPARES")||getenv("AFL_LLVM_LAF_TRANSFORM_COMPARES")) { + if (getenv("LAF_TRANSFORM_COMPARES") || + getenv("AFL_LLVM_LAF_TRANSFORM_COMPARES")) { + cc_params[cc_par_cnt++] = "-Xclang"; cc_params[cc_par_cnt++] = "-load"; cc_params[cc_par_cnt++] = "-Xclang"; - cc_params[cc_par_cnt++] = alloc_printf("%s/compare-transform-pass.so", obj_path); + cc_params[cc_par_cnt++] = + alloc_printf("%s/compare-transform-pass.so", obj_path); + } - if (getenv("LAF_SPLIT_COMPARES")||getenv("AFL_LLVM_LAF_SPLIT_COMPARES")) { + if (getenv("LAF_SPLIT_COMPARES") || getenv("AFL_LLVM_LAF_SPLIT_COMPARES")) { + cc_params[cc_par_cnt++] = "-Xclang"; cc_params[cc_par_cnt++] = "-load"; cc_params[cc_par_cnt++] = "-Xclang"; - cc_params[cc_par_cnt++] = alloc_printf("%s/split-compares-pass.so", obj_path); + cc_params[cc_par_cnt++] = + alloc_printf("%s/split-compares-pass.so", obj_path); + } + // /laf #ifdef USE_TRACE_PC - cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-pc-guard"; // edge coverage by default - //cc_params[cc_par_cnt++] = "-mllvm"; - //cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-cmp,trace-div,trace-gep"; - //cc_params[cc_par_cnt++] = "-sanitizer-coverage-block-threshold=0"; + cc_params[cc_par_cnt++] = + "-fsanitize-coverage=trace-pc-guard"; // edge coverage by default + // cc_params[cc_par_cnt++] = "-mllvm"; + // cc_params[cc_par_cnt++] = + // "-fsanitize-coverage=trace-cmp,trace-div,trace-gep"; cc_params[cc_par_cnt++] + // = "-sanitizer-coverage-block-threshold=0"; #else cc_params[cc_par_cnt++] = "-Xclang"; cc_params[cc_par_cnt++] = "-load"; @@ -165,6 +192,7 @@ static void edit_params(u32 argc, char** argv) { if (argc == 1 && !strcmp(argv[1], "-v")) maybe_linking = 0; while (--argc) { + u8* cur = *(++argv); if (!strcmp(cur, "-m32")) bit_mode = 32; @@ -175,15 +203,15 @@ static void edit_params(u32 argc, char** argv) { if (!strcmp(cur, "-c") || !strcmp(cur, "-S") || !strcmp(cur, "-E")) maybe_linking = 0; - if (!strcmp(cur, "-fsanitize=address") || - !strcmp(cur, "-fsanitize=memory")) asan_set = 1; + if (!strcmp(cur, "-fsanitize=address") || !strcmp(cur, "-fsanitize=memory")) + asan_set = 1; if (strstr(cur, "FORTIFY_SOURCE")) fortify_set = 1; if (!strcmp(cur, "-shared")) maybe_linking = 0; - if (!strcmp(cur, "-Wl,-z,defs") || - !strcmp(cur, "-Wl,--no-undefined")) continue; + if (!strcmp(cur, "-Wl,-z,defs") || !strcmp(cur, "-Wl,--no-undefined")) + continue; cc_params[cc_par_cnt++] = cur; @@ -193,8 +221,7 @@ static void edit_params(u32 argc, char** argv) { cc_params[cc_par_cnt++] = "-fstack-protector-all"; - if (!fortify_set) - cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2"; + if (!fortify_set) cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2"; } @@ -202,8 +229,7 @@ static void edit_params(u32 argc, char** argv) { if (getenv("AFL_USE_ASAN")) { - if (getenv("AFL_USE_MSAN")) - FATAL("ASAN and MSAN are mutually exclusive"); + if (getenv("AFL_USE_MSAN")) FATAL("ASAN 
and MSAN are mutually exclusive"); if (getenv("AFL_HARDEN")) FATAL("ASAN and AFL_HARDEN are mutually exclusive"); @@ -213,8 +239,7 @@ static void edit_params(u32 argc, char** argv) { } else if (getenv("AFL_USE_MSAN")) { - if (getenv("AFL_USE_ASAN")) - FATAL("ASAN and MSAN are mutually exclusive"); + if (getenv("AFL_USE_ASAN")) FATAL("ASAN and MSAN are mutually exclusive"); if (getenv("AFL_HARDEN")) FATAL("MSAN and AFL_HARDEN are mutually exclusive"); @@ -279,35 +304,41 @@ static void edit_params(u32 argc, char** argv) { */ - cc_params[cc_par_cnt++] = "-D__AFL_LOOP(_A)=" - "({ static volatile char *_B __attribute__((used)); " - " _B = (char*)\"" PERSIST_SIG "\"; " + cc_params[cc_par_cnt++] = + "-D__AFL_LOOP(_A)=" + "({ static volatile char *_B __attribute__((used)); " + " _B = (char*)\"" PERSIST_SIG + "\"; " #ifdef __APPLE__ - "__attribute__((visibility(\"default\"))) " - "int _L(unsigned int) __asm__(\"___afl_persistent_loop\"); " + "__attribute__((visibility(\"default\"))) " + "int _L(unsigned int) __asm__(\"___afl_persistent_loop\"); " #else - "__attribute__((visibility(\"default\"))) " - "int _L(unsigned int) __asm__(\"__afl_persistent_loop\"); " + "__attribute__((visibility(\"default\"))) " + "int _L(unsigned int) __asm__(\"__afl_persistent_loop\"); " #endif /* ^__APPLE__ */ - "_L(_A); })"; + "_L(_A); })"; - cc_params[cc_par_cnt++] = "-D__AFL_INIT()=" - "do { static volatile char *_A __attribute__((used)); " - " _A = (char*)\"" DEFER_SIG "\"; " + cc_params[cc_par_cnt++] = + "-D__AFL_INIT()=" + "do { static volatile char *_A __attribute__((used)); " + " _A = (char*)\"" DEFER_SIG + "\"; " #ifdef __APPLE__ - "__attribute__((visibility(\"default\"))) " - "void _I(void) __asm__(\"___afl_manual_init\"); " + "__attribute__((visibility(\"default\"))) " + "void _I(void) __asm__(\"___afl_manual_init\"); " #else - "__attribute__((visibility(\"default\"))) " - "void _I(void) __asm__(\"__afl_manual_init\"); " + "__attribute__((visibility(\"default\"))) " + "void _I(void) __asm__(\"__afl_manual_init\"); " #endif /* ^__APPLE__ */ - "_I(); } while (0)"; + "_I(); } while (0)"; if (maybe_linking) { if (x_set) { + cc_params[cc_par_cnt++] = "-x"; cc_params[cc_par_cnt++] = "none"; + } switch (bit_mode) { @@ -340,7 +371,6 @@ static void edit_params(u32 argc, char** argv) { } - /* Main entry point */ int main(int argc, char** argv) { @@ -348,46 +378,53 @@ int main(int argc, char** argv) { if (isatty(2) && !getenv("AFL_QUIET")) { #ifdef USE_TRACE_PC - SAYF(cCYA "afl-clang-fast" VERSION cRST " [tpcg] by <lszekeres@google.com>\n"); + SAYF(cCYA "afl-clang-fast" VERSION cRST + " [tpcg] by <lszekeres@google.com>\n"); #else - SAYF(cCYA "afl-clang-fast" VERSION cRST " by <lszekeres@google.com>\n"); + SAYF(cCYA "afl-clang-fast" VERSION cRST " by <lszekeres@google.com>\n"); #endif /* ^USE_TRACE_PC */ } if (argc < 2) { - SAYF("\n" - "This is a helper application for afl-fuzz. It serves as a drop-in replacement\n" - "for clang, letting you recompile third-party code with the required runtime\n" - "instrumentation. A common use pattern would be one of the following:\n\n" + SAYF( + "\n" + "This is a helper application for afl-fuzz. It serves as a drop-in " + "replacement\n" + "for clang, letting you recompile third-party code with the required " + "runtime\n" + "instrumentation. 
A common use pattern would be one of the " + "following:\n\n" - " CC=%s/afl-clang-fast ./configure\n" - " CXX=%s/afl-clang-fast++ ./configure\n\n" + " CC=%s/afl-clang-fast ./configure\n" + " CXX=%s/afl-clang-fast++ ./configure\n\n" - "In contrast to the traditional afl-clang tool, this version is implemented as\n" - "an LLVM pass and tends to offer improved performance with slow programs.\n\n" + "In contrast to the traditional afl-clang tool, this version is " + "implemented as\n" + "an LLVM pass and tends to offer improved performance with slow " + "programs.\n\n" - "You can specify custom next-stage toolchain via AFL_CC and AFL_CXX. Setting\n" - "AFL_HARDEN enables hardening optimizations in the compiled code.\n\n", - BIN_PATH, BIN_PATH); + "You can specify custom next-stage toolchain via AFL_CC and AFL_CXX. " + "Setting\n" + "AFL_HARDEN enables hardening optimizations in the compiled code.\n\n", + BIN_PATH, BIN_PATH); exit(1); } - find_obj(argv[0]); edit_params(argc, argv); -/* - int i = 0; - printf("EXEC:"); - while (cc_params[i] != NULL) - printf(" %s", cc_params[i++]); - printf("\n"); -*/ + /* + int i = 0; + printf("EXEC:"); + while (cc_params[i] != NULL) + printf(" %s", cc_params[i++]); + printf("\n"); + */ execvp(cc_params[0], (char**)cc_params); @@ -396,3 +433,4 @@ int main(int argc, char** argv) { return 0; } + diff --git a/llvm_mode/afl-llvm-pass.so.cc b/llvm_mode/afl-llvm-pass.so.cc index b242163e..5d531a87 100644 --- a/llvm_mode/afl-llvm-pass.so.cc +++ b/llvm_mode/afl-llvm-pass.so.cc @@ -48,50 +48,52 @@ using namespace llvm; namespace { - class AFLCoverage : public ModulePass { - - public: - - static char ID; - AFLCoverage() : ModulePass(ID) { - char* instWhiteListFilename = getenv("AFL_LLVM_WHITELIST"); - if (instWhiteListFilename) { - std::string line; - std::ifstream fileStream; - fileStream.open(instWhiteListFilename); - if (!fileStream) - report_fatal_error("Unable to open AFL_LLVM_WHITELIST"); - getline(fileStream, line); - while (fileStream) { - myWhitelist.push_back(line); - getline(fileStream, line); - } - } +class AFLCoverage : public ModulePass { + + public: + static char ID; + AFLCoverage() : ModulePass(ID) { + + char *instWhiteListFilename = getenv("AFL_LLVM_WHITELIST"); + if (instWhiteListFilename) { + + std::string line; + std::ifstream fileStream; + fileStream.open(instWhiteListFilename); + if (!fileStream) report_fatal_error("Unable to open AFL_LLVM_WHITELIST"); + getline(fileStream, line); + while (fileStream) { + + myWhitelist.push_back(line); + getline(fileStream, line); + } - bool runOnModule(Module &M) override; + } - // StringRef getPassName() const override { - // return "American Fuzzy Lop Instrumentation"; - // } + } - protected: + bool runOnModule(Module &M) override; - std::list<std::string> myWhitelist; + // StringRef getPassName() const override { - }; + // return "American Fuzzy Lop Instrumentation"; + // } -} + protected: + std::list<std::string> myWhitelist; +}; -char AFLCoverage::ID = 0; +} // namespace +char AFLCoverage::ID = 0; bool AFLCoverage::runOnModule(Module &M) { LLVMContext &C = M.getContext(); - IntegerType *Int8Ty = IntegerType::getInt8Ty(C); + IntegerType *Int8Ty = IntegerType::getInt8Ty(C); IntegerType *Int32Ty = IntegerType::getInt32Ty(C); unsigned int cur_loc = 0; @@ -103,11 +105,13 @@ bool AFLCoverage::runOnModule(Module &M) { SAYF(cCYA "afl-llvm-pass" VERSION cRST " by <lszekeres@google.com>\n"); - } else be_quiet = 1; + } else + + be_quiet = 1; /* Decide instrumentation ratio */ - char* inst_ratio_str = 
getenv("AFL_INST_RATIO"); + char * inst_ratio_str = getenv("AFL_INST_RATIO"); unsigned int inst_ratio = 100; if (inst_ratio_str) { @@ -119,7 +123,7 @@ bool AFLCoverage::runOnModule(Module &M) { } #if LLVM_VERSION_MAJOR < 9 - char* neverZero_counters_str = getenv("AFL_LLVM_NOT_ZERO"); + char *neverZero_counters_str = getenv("AFL_LLVM_NOT_ZERO"); #endif /* Get globals for the SHM region and the previous location. Note that @@ -134,8 +138,8 @@ bool AFLCoverage::runOnModule(Module &M) { M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc"); #else GlobalVariable *AFLPrevLoc = new GlobalVariable( - M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc", - 0, GlobalVariable::GeneralDynamicTLSModel, 0, false); + M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc", 0, + GlobalVariable::GeneralDynamicTLSModel, 0, false); #endif /* Instrument all the things! */ @@ -146,58 +150,77 @@ bool AFLCoverage::runOnModule(Module &M) { for (auto &BB : F) { BasicBlock::iterator IP = BB.getFirstInsertionPt(); - IRBuilder<> IRB(&(*IP)); - + IRBuilder<> IRB(&(*IP)); + if (!myWhitelist.empty()) { - bool instrumentBlock = false; - - /* Get the current location using debug information. - * For now, just instrument the block if we are not able - * to determine our location. */ - DebugLoc Loc = IP->getDebugLoc(); - if ( Loc ) { - DILocation *cDILoc = dyn_cast<DILocation>(Loc.getAsMDNode()); - - unsigned int instLine = cDILoc->getLine(); - StringRef instFilename = cDILoc->getFilename(); - - if (instFilename.str().empty()) { - /* If the original location is empty, try using the inlined location */ - DILocation *oDILoc = cDILoc->getInlinedAt(); - if (oDILoc) { - instFilename = oDILoc->getFilename(); - instLine = oDILoc->getLine(); - } - } - /* Continue only if we know where we actually are */ - if (!instFilename.str().empty()) { - for (std::list<std::string>::iterator it = myWhitelist.begin(); it != myWhitelist.end(); ++it) { - /* We don't check for filename equality here because - * filenames might actually be full paths. Instead we - * check that the actual filename ends in the filename - * specified in the list. */ - if (instFilename.str().length() >= it->length()) { - if (instFilename.str().compare(instFilename.str().length() - it->length(), it->length(), *it) == 0) { - instrumentBlock = true; - break; - } - } - } + bool instrumentBlock = false; + + /* Get the current location using debug information. + * For now, just instrument the block if we are not able + * to determine our location. */ + DebugLoc Loc = IP->getDebugLoc(); + if (Loc) { + + DILocation *cDILoc = dyn_cast<DILocation>(Loc.getAsMDNode()); + + unsigned int instLine = cDILoc->getLine(); + StringRef instFilename = cDILoc->getFilename(); + + if (instFilename.str().empty()) { + + /* If the original location is empty, try using the inlined location + */ + DILocation *oDILoc = cDILoc->getInlinedAt(); + if (oDILoc) { + + instFilename = oDILoc->getFilename(); + instLine = oDILoc->getLine(); + + } + + } + + /* Continue only if we know where we actually are */ + if (!instFilename.str().empty()) { + + for (std::list<std::string>::iterator it = myWhitelist.begin(); + it != myWhitelist.end(); ++it) { + + /* We don't check for filename equality here because + * filenames might actually be full paths. Instead we + * check that the actual filename ends in the filename + * specified in the list. 
*/ + if (instFilename.str().length() >= it->length()) { + + if (instFilename.str().compare( + instFilename.str().length() - it->length(), + it->length(), *it) == 0) { + + instrumentBlock = true; + break; + + } + } + + } + } - /* Either we couldn't figure out our location or the location is - * not whitelisted, so we skip instrumentation. */ - if (!instrumentBlock) continue; - } + } + + /* Either we couldn't figure out our location or the location is + * not whitelisted, so we skip instrumentation. */ + if (!instrumentBlock) continue; + } if (AFL_R(100) >= inst_ratio) continue; /* Make up cur_loc */ - //cur_loc++; + // cur_loc++; cur_loc = AFL_R(MAP_SIZE); // only instrument if this basic block is the destination of a previous @@ -205,24 +228,27 @@ bool AFLCoverage::runOnModule(Module &M) { // this gets rid of ~5-10% of instrumentations that are unnecessary // result: a little more speed and less map pollution int more_than_one = -1; - //fprintf(stderr, "BB %u: ", cur_loc); + // fprintf(stderr, "BB %u: ", cur_loc); for (BasicBlock *Pred : predecessors(&BB)) { + int count = 0; - if (more_than_one == -1) - more_than_one = 0; - //fprintf(stderr, " %p=>", Pred); + if (more_than_one == -1) more_than_one = 0; + // fprintf(stderr, " %p=>", Pred); for (BasicBlock *Succ : successors(Pred)) { - //if (count > 0) + + // if (count > 0) // fprintf(stderr, "|"); if (Succ != NULL) count++; - //fprintf(stderr, "%p", Succ); + // fprintf(stderr, "%p", Succ); + } - if (count > 1) - more_than_one = 1; + + if (count > 1) more_than_one = 1; + } - //fprintf(stderr, " == %d\n", more_than_one); - if (more_than_one != 1) - continue; + + // fprintf(stderr, " == %d\n", more_than_one); + if (more_than_one != 1) continue; ConstantInt *CurLoc = ConstantInt::get(Int32Ty, cur_loc); @@ -236,7 +262,8 @@ bool AFLCoverage::runOnModule(Module &M) { LoadInst *MapPtr = IRB.CreateLoad(AFLMapPtr); MapPtr->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); - Value *MapPtrIdx = IRB.CreateGEP(MapPtr, IRB.CreateXor(PrevLocCasted, CurLoc)); + Value *MapPtrIdx = + IRB.CreateGEP(MapPtr, IRB.CreateXor(PrevLocCasted, CurLoc)); /* Update bitmap */ @@ -246,7 +273,9 @@ bool AFLCoverage::runOnModule(Module &M) { Value *Incr = IRB.CreateAdd(Counter, ConstantInt::get(Int8Ty, 1)); #if LLVM_VERSION_MAJOR < 9 - if (neverZero_counters_str != NULL) { // with llvm 9 we make this the default as the bug in llvm is then fixed + if (neverZero_counters_str != + NULL) { // with llvm 9 we make this the default as the bug in llvm is + // then fixed #endif /* hexcoder: Realize a counter that skips zero during overflow. 
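
Reviewer note: solution #3 below materializes the wrap-around as a
carry and adds it back in, so the counter cycles through 1..255 and
never rests at zero. In plain C terms the emitted IR computes
(variable names are mine):

    unsigned char incr  = counter + 1;
    unsigned char carry = (incr == 0);  /* did we wrap from 255 to 0? */
    counter = incr + carry;             /* 255 -> 1 instead of 0 */
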
* Once this counter reaches its maximum value, it next increments to 1 @@ -257,48 +286,67 @@ bool AFLCoverage::runOnModule(Module &M) { * Counter + 1 -> {Counter, OverflowFlag} * Counter + OverflowFlag -> Counter */ -/* // we keep the old solutions just in case - // Solution #1 - if (neverZero_counters_str[0] == '1') { - CallInst *AddOv = IRB.CreateBinaryIntrinsic(Intrinsic::uadd_with_overflow, Counter, ConstantInt::get(Int8Ty, 1)); - AddOv->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); - Value *SumWithOverflowBit = AddOv; - Incr = IRB.CreateAdd(IRB.CreateExtractValue(SumWithOverflowBit, 0), // sum - IRB.CreateZExt( // convert from one bit type to 8 bits type - IRB.CreateExtractValue(SumWithOverflowBit, 1), // overflow - Int8Ty)); - // Solution #2 - } else if (neverZero_counters_str[0] == '2') { - auto cf = IRB.CreateICmpEQ(Counter, ConstantInt::get(Int8Ty, 255)); - Value *HowMuch = IRB.CreateAdd(ConstantInt::get(Int8Ty, 1), cf); - Incr = IRB.CreateAdd(Counter, HowMuch); - // Solution #3 - } else if (neverZero_counters_str[0] == '3') { -*/ - // this is the solution we choose because llvm9 should do the right thing here - auto cf = IRB.CreateICmpEQ(Incr, ConstantInt::get(Int8Ty, 0)); - auto carry = IRB.CreateZExt(cf, Int8Ty); - Incr = IRB.CreateAdd(Incr, carry); + /* // we keep the old solutions just in case + // Solution #1 + if (neverZero_counters_str[0] == '1') { + + CallInst *AddOv = + IRB.CreateBinaryIntrinsic(Intrinsic::uadd_with_overflow, Counter, + ConstantInt::get(Int8Ty, 1)); + AddOv->setMetadata(M.getMDKindID("nosanitize"), + MDNode::get(C, None)); Value *SumWithOverflowBit = AddOv; Incr = + IRB.CreateAdd(IRB.CreateExtractValue(SumWithOverflowBit, 0), // sum + IRB.CreateZExt( // convert from one bit + type to 8 bits type IRB.CreateExtractValue(SumWithOverflowBit, 1), // + overflow Int8Ty)); + // Solution #2 + + } else if (neverZero_counters_str[0] == '2') { + + auto cf = IRB.CreateICmpEQ(Counter, + ConstantInt::get(Int8Ty, 255)); Value *HowMuch = + IRB.CreateAdd(ConstantInt::get(Int8Ty, 1), cf); Incr = + IRB.CreateAdd(Counter, HowMuch); + // Solution #3 + + } else if (neverZero_counters_str[0] == '3') { + + */ + // this is the solution we choose because llvm9 should do the right + // thing here + auto cf = IRB.CreateICmpEQ(Incr, ConstantInt::get(Int8Ty, 0)); + auto carry = IRB.CreateZExt(cf, Int8Ty); + Incr = IRB.CreateAdd(Incr, carry); /* // Solution #4 + } else if (neverZero_counters_str[0] == '4') { + auto cf = IRB.CreateICmpULT(Incr, ConstantInt::get(Int8Ty, 1)); auto carry = IRB.CreateZExt(cf, Int8Ty); Incr = IRB.CreateAdd(Incr, carry); + } else { - fprintf(stderr, "Error: unknown value for AFL_NZERO_COUNTS: %s (valid is 1-4)\n", neverZero_counters_str); - exit(-1); + + fprintf(stderr, "Error: unknown value for AFL_NZERO_COUNTS: %s + (valid is 1-4)\n", neverZero_counters_str); exit(-1); + } + */ #if LLVM_VERSION_MAJOR < 9 + } + #endif - IRB.CreateStore(Incr, MapPtrIdx)->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); + IRB.CreateStore(Incr, MapPtrIdx) + ->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); /* Set prev_loc to cur_loc >> 1 */ - StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int32Ty, cur_loc >> 1), AFLPrevLoc); + StoreInst *Store = + IRB.CreateStore(ConstantInt::get(Int32Ty, cur_loc >> 1), AFLPrevLoc); Store->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); inst_blocks++; @@ -309,11 +357,16 @@ bool AFLCoverage::runOnModule(Module &M) { if (!be_quiet) { - if (!inst_blocks) WARNF("No 
instrumentation targets found."); - else OKF("Instrumented %u locations (%s mode, ratio %u%%).", - inst_blocks, getenv("AFL_HARDEN") ? "hardened" : - ((getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) ? - "ASAN/MSAN" : "non-hardened"), inst_ratio); + if (!inst_blocks) + WARNF("No instrumentation targets found."); + else + OKF("Instrumented %u locations (%s mode, ratio %u%%).", inst_blocks, + getenv("AFL_HARDEN") + ? "hardened" + : ((getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) + ? "ASAN/MSAN" + : "non-hardened"), + inst_ratio); } @@ -321,7 +374,6 @@ bool AFLCoverage::runOnModule(Module &M) { } - static void registerAFLPass(const PassManagerBuilder &, legacy::PassManagerBase &PM) { @@ -329,9 +381,9 @@ static void registerAFLPass(const PassManagerBuilder &, } - static RegisterStandardPasses RegisterAFLPass( PassManagerBuilder::EP_OptimizerLast, registerAFLPass); static RegisterStandardPasses RegisterAFLPass0( PassManagerBuilder::EP_EnabledOnOptLevel0, registerAFLPass); + diff --git a/llvm_mode/afl-llvm-rt.o.c b/llvm_mode/afl-llvm-rt.o.c index e6d9b993..bc38f1ec 100644 --- a/llvm_mode/afl-llvm-rt.o.c +++ b/llvm_mode/afl-llvm-rt.o.c @@ -20,7 +20,7 @@ */ #ifdef __ANDROID__ - #include "android-ashmem.h" +# include "android-ashmem.h" #endif #include "config.h" #include "types.h" @@ -50,10 +50,9 @@ #include <sys/mman.h> #include <fcntl.h> - /* Globals needed by the injected instrumentation. The __afl_area_initial region - is used for instrumentation output before __afl_map_shm() has a chance to run. - It will end up as .comm, so it shouldn't be too wasteful. */ + is used for instrumentation output before __afl_map_shm() has a chance to + run. It will end up as .comm, so it shouldn't be too wasteful. */ u8 __afl_area_initial[MAP_SIZE]; u8* __afl_area_ptr = __afl_area_initial; @@ -64,43 +63,46 @@ u32 __afl_prev_loc; __thread u32 __afl_prev_loc; #endif - /* Running in persistent mode? */ static u8 is_persistent; - /* SHM setup. */ static void __afl_map_shm(void) { - u8 *id_str = getenv(SHM_ENV_VAR); + u8* id_str = getenv(SHM_ENV_VAR); /* If we're running under AFL, attach to the appropriate region, replacing the early-stage __afl_area_initial region that is needed to allow some really hacky .init code to work correctly in projects such as OpenSSL. */ if (id_str) { + #ifdef USEMMAP - const char *shm_file_path = id_str; - int shm_fd = -1; - unsigned char *shm_base = NULL; + const char* shm_file_path = id_str; + int shm_fd = -1; + unsigned char* shm_base = NULL; /* create the shared memory segment as if it was a file */ shm_fd = shm_open(shm_file_path, O_RDWR, 0600); if (shm_fd == -1) { + printf("shm_open() failed\n"); exit(1); + } /* map the shared memory segment to the address space of the process */ shm_base = mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0); if (shm_base == MAP_FAILED) { + close(shm_fd); shm_fd = -1; printf("mmap() failed\n"); exit(2); + } __afl_area_ptr = shm_base; @@ -112,7 +114,7 @@ static void __afl_map_shm(void) { /* Whooooops. */ - if (__afl_area_ptr == (void *)-1) _exit(1); + if (__afl_area_ptr == (void*)-1) _exit(1); /* Write something into the bitmap so that even with low AFL_INST_RATIO, our parent doesn't give up on us. */ @@ -123,16 +125,15 @@ static void __afl_map_shm(void) { } - /* Fork server logic. 
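
Reviewer recap, since the comment above is terse: afl-fuzz requests a
run by writing four bytes to FORKSRV_FD; the server fork()s, the child
closes both descriptors and resumes main(), and the parent reports the
child pid and then the raw waitpid() status on FORKSRV_FD + 1. Per
iteration this is roughly (sketch only, error handling elided):

    read(FORKSRV_FD, &was_killed, 4);
    child_pid = fork();
    if (!child_pid) return;                 /* child runs the target */
    write(FORKSRV_FD + 1, &child_pid, 4);
    waitpid(child_pid, &status, is_persistent ? WUNTRACED : 0);
    write(FORKSRV_FD + 1, &status, 4);
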
*/ static void __afl_start_forkserver(void) { static u8 tmp[4]; - s32 child_pid; + s32 child_pid; + + u8 child_stopped = 0; - u8 child_stopped = 0; - void (*old_sigchld_handler)(int) = signal(SIGCHLD, SIG_DFL); /* Phone home and tell the parent that we're OK. If parent isn't there, @@ -154,8 +155,10 @@ static void __afl_start_forkserver(void) { process. */ if (child_stopped && was_killed) { + child_stopped = 0; if (waitpid(child_pid, &status, 0) < 0) _exit(1); + } if (!child_stopped) { @@ -168,12 +171,13 @@ static void __afl_start_forkserver(void) { /* In child process: close fds, resume execution. */ if (!child_pid) { + signal(SIGCHLD, old_sigchld_handler); close(FORKSRV_FD); close(FORKSRV_FD + 1); return; - + } } else { @@ -207,7 +211,6 @@ static void __afl_start_forkserver(void) { } - /* A simplified persistent mode handler, used as explained in README.llvm. */ int __afl_persistent_loop(unsigned int max_cnt) { @@ -227,9 +230,10 @@ int __afl_persistent_loop(unsigned int max_cnt) { memset(__afl_area_ptr, 0, MAP_SIZE); __afl_area_ptr[0] = 1; __afl_prev_loc = 0; + } - cycle_cnt = max_cnt; + cycle_cnt = max_cnt; first_pass = 0; return 1; @@ -262,7 +266,6 @@ int __afl_persistent_loop(unsigned int max_cnt) { } - /* This one can be called from user code when deferred forkserver mode is enabled. */ @@ -280,7 +283,6 @@ void __afl_manual_init(void) { } - /* Proper initialization routine. */ __attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) { @@ -293,7 +295,6 @@ __attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) { } - /* The following stuff deals with supporting -fsanitize-coverage=trace-pc-guard. It remains non-operational in the traditional, plugin-backed LLVM mode. For more info about 'trace-pc-guard', see README.llvm. @@ -302,9 +303,10 @@ __attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) { edge (as opposed to every basic block). */ void __sanitizer_cov_trace_pc_guard(uint32_t* guard) { + __afl_area_ptr[*guard]++; -} +} /* Init callback. Populates instrumentation IDs. Note that we're using ID of 0 as a special value to indicate non-instrumented bits. 
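
Reviewer note: the init callback that follows assigns every guard a
random map index in [1, MAP_SIZE - 1], or 0 when AFL_INST_RATIO says
to skip that edge; index 0 lands on the always-set first map byte and
is effectively a no-op. That keeps the per-edge hit as cheap as the
callback quoted in the hunk above:

    void __sanitizer_cov_trace_pc_guard(uint32_t* guard) {

      __afl_area_ptr[*guard]++;

    }
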
That may @@ -321,8 +323,10 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t* start, uint32_t* stop) { if (x) inst_ratio = atoi(x); if (!inst_ratio || inst_ratio > 100) { + fprintf(stderr, "[-] ERROR: Invalid AFL_INST_RATIO (must be 1-100).\n"); abort(); + } /* Make sure that the first element in the range is always set - we use that @@ -333,11 +337,14 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t* start, uint32_t* stop) { while (start < stop) { - if (R(100) < inst_ratio) *start = R(MAP_SIZE - 1) + 1; - else *start = 0; + if (R(100) < inst_ratio) + *start = R(MAP_SIZE - 1) + 1; + else + *start = 0; start++; } } + diff --git a/llvm_mode/compare-transform-pass.so.cc b/llvm_mode/compare-transform-pass.so.cc index e7886db1..e1b6e671 100644 --- a/llvm_mode/compare-transform-pass.so.cc +++ b/llvm_mode/compare-transform-pass.so.cc @@ -36,202 +36,236 @@ using namespace llvm; namespace { - class CompareTransform : public ModulePass { +class CompareTransform : public ModulePass { - public: - static char ID; - CompareTransform() : ModulePass(ID) { - } + public: + static char ID; + CompareTransform() : ModulePass(ID) { - bool runOnModule(Module &M) override; + } + + bool runOnModule(Module &M) override; #if LLVM_VERSION_MAJOR < 4 - const char * getPassName() const override { + const char *getPassName() const override { + #else - StringRef getPassName() const override { + StringRef getPassName() const override { + #endif - return "transforms compare functions"; - } - private: - bool transformCmps(Module &M, const bool processStrcmp, const bool processMemcmp - ,const bool processStrncmp, const bool processStrcasecmp, const bool processStrncasecmp); - }; -} + return "transforms compare functions"; + } + + private: + bool transformCmps(Module &M, const bool processStrcmp, + const bool processMemcmp, const bool processStrncmp, + const bool processStrcasecmp, + const bool processStrncasecmp); + +}; + +} // namespace char CompareTransform::ID = 0; -bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const bool processMemcmp - , const bool processStrncmp, const bool processStrcasecmp, const bool processStrncasecmp) { +bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, + const bool processMemcmp, + const bool processStrncmp, + const bool processStrcasecmp, + const bool processStrncasecmp) { - std::vector<CallInst*> calls; - LLVMContext &C = M.getContext(); - IntegerType *Int8Ty = IntegerType::getInt8Ty(C); - IntegerType *Int32Ty = IntegerType::getInt32Ty(C); - IntegerType *Int64Ty = IntegerType::getInt64Ty(C); + std::vector<CallInst *> calls; + LLVMContext & C = M.getContext(); + IntegerType * Int8Ty = IntegerType::getInt8Ty(C); + IntegerType * Int32Ty = IntegerType::getInt32Ty(C); + IntegerType * Int64Ty = IntegerType::getInt64Ty(C); #if LLVM_VERSION_MAJOR < 9 - Constant* + Constant * #else FunctionCallee #endif - c = M.getOrInsertFunction("tolower", - Int32Ty, - Int32Ty + c = M.getOrInsertFunction("tolower", Int32Ty, Int32Ty #if LLVM_VERSION_MAJOR < 5 - , nullptr + , + nullptr #endif - ); + ); #if LLVM_VERSION_MAJOR < 9 - Function* tolowerFn = cast<Function>(c); + Function *tolowerFn = cast<Function>(c); #else FunctionCallee tolowerFn = c; #endif - /* iterate over all functions, bbs and instruction and add suitable calls to strcmp/memcmp/strncmp/strcasecmp/strncasecmp */ + /* iterate over all functions, bbs and instruction and add suitable calls to + * strcmp/memcmp/strncmp/strcasecmp/strncasecmp */ for (auto &F : M) { + for (auto &BB : F) { - for(auto 
&IN: BB) { - CallInst* callInst = nullptr; + + for (auto &IN : BB) { + + CallInst *callInst = nullptr; if ((callInst = dyn_cast<CallInst>(&IN))) { - bool isStrcmp = processStrcmp; - bool isMemcmp = processMemcmp; - bool isStrncmp = processStrncmp; - bool isStrcasecmp = processStrcasecmp; + bool isStrcmp = processStrcmp; + bool isMemcmp = processMemcmp; + bool isStrncmp = processStrncmp; + bool isStrcasecmp = processStrcasecmp; bool isStrncasecmp = processStrncasecmp; Function *Callee = callInst->getCalledFunction(); - if (!Callee) - continue; - if (callInst->getCallingConv() != llvm::CallingConv::C) - continue; + if (!Callee) continue; + if (callInst->getCallingConv() != llvm::CallingConv::C) continue; StringRef FuncName = Callee->getName(); - isStrcmp &= !FuncName.compare(StringRef("strcmp")); - isMemcmp &= !FuncName.compare(StringRef("memcmp")); - isStrncmp &= !FuncName.compare(StringRef("strncmp")); - isStrcasecmp &= !FuncName.compare(StringRef("strcasecmp")); + isStrcmp &= !FuncName.compare(StringRef("strcmp")); + isMemcmp &= !FuncName.compare(StringRef("memcmp")); + isStrncmp &= !FuncName.compare(StringRef("strncmp")); + isStrcasecmp &= !FuncName.compare(StringRef("strcasecmp")); isStrncasecmp &= !FuncName.compare(StringRef("strncasecmp")); - if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp && !isStrncasecmp) + if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp && + !isStrncasecmp) continue; - /* Verify the strcmp/memcmp/strncmp/strcasecmp/strncasecmp function prototype */ + /* Verify the strcmp/memcmp/strncmp/strcasecmp/strncasecmp function + * prototype */ FunctionType *FT = Callee->getFunctionType(); - - isStrcmp &= FT->getNumParams() == 2 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext()); - isStrcasecmp &= FT->getNumParams() == 2 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext()); - isMemcmp &= FT->getNumParams() == 3 && + isStrcmp &= + FT->getNumParams() == 2 && FT->getReturnType()->isIntegerTy(32) && + FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext()); + isStrcasecmp &= + FT->getNumParams() == 2 && FT->getReturnType()->isIntegerTy(32) && + FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext()); + isMemcmp &= FT->getNumParams() == 3 && FT->getReturnType()->isIntegerTy(32) && FT->getParamType(0)->isPointerTy() && FT->getParamType(1)->isPointerTy() && FT->getParamType(2)->isIntegerTy(); - isStrncmp &= FT->getNumParams() == 3 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext()) && - FT->getParamType(2)->isIntegerTy(); + isStrncmp &= FT->getNumParams() == 3 && + FT->getReturnType()->isIntegerTy(32) && + FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == + IntegerType::getInt8PtrTy(M.getContext()) && + FT->getParamType(2)->isIntegerTy(); isStrncasecmp &= FT->getNumParams() == 3 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext()) && - FT->getParamType(2)->isIntegerTy(); - - if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp && !isStrncasecmp) + FT->getReturnType()->isIntegerTy(32) && + 
FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == + IntegerType::getInt8PtrTy(M.getContext()) && + FT->getParamType(2)->isIntegerTy(); + + if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp && + !isStrncasecmp) continue; /* is a str{n,}{case,}cmp/memcmp, check if we have * str{case,}cmp(x, "const") or str{case,}cmp("const", x) * strn{case,}cmp(x, "const", ..) or strn{case,}cmp("const", x, ..) * memcmp(x, "const", ..) or memcmp("const", x, ..) */ - Value *Str1P = callInst->getArgOperand(0), *Str2P = callInst->getArgOperand(1); + Value *Str1P = callInst->getArgOperand(0), + *Str2P = callInst->getArgOperand(1); StringRef Str1, Str2; - bool HasStr1 = getConstantStringInfo(Str1P, Str1); - bool HasStr2 = getConstantStringInfo(Str2P, Str2); + bool HasStr1 = getConstantStringInfo(Str1P, Str1); + bool HasStr2 = getConstantStringInfo(Str2P, Str2); /* handle cases of one string is const, one string is variable */ - if (!(HasStr1 ^ HasStr2)) - continue; + if (!(HasStr1 ^ HasStr2)) continue; if (isMemcmp || isStrncmp || isStrncasecmp) { + /* check if third operand is a constant integer * strlen("constStr") and sizeof() are treated as constant */ - Value *op2 = callInst->getArgOperand(2); - ConstantInt* ilen = dyn_cast<ConstantInt>(op2); - if (!ilen) - continue; - /* final precaution: if size of compare is larger than constant string skip it*/ - uint64_t literalLength = HasStr1 ? GetStringLength(Str1P) : GetStringLength(Str2P); - if (literalLength < ilen->getZExtValue()) - continue; + Value * op2 = callInst->getArgOperand(2); + ConstantInt *ilen = dyn_cast<ConstantInt>(op2); + if (!ilen) continue; + /* final precaution: if size of compare is larger than constant + * string skip it*/ + uint64_t literalLength = + HasStr1 ? GetStringLength(Str1P) : GetStringLength(Str2P); + if (literalLength < ilen->getZExtValue()) continue; + } calls.push_back(callInst); + } + } + } + } - if (!calls.size()) - return false; - errs() << "Replacing " << calls.size() << " calls to strcmp/memcmp/strncmp/strcasecmp/strncasecmp\n"; + if (!calls.size()) return false; + errs() << "Replacing " << calls.size() + << " calls to strcmp/memcmp/strncmp/strcasecmp/strncasecmp\n"; - for (auto &callInst: calls) { + for (auto &callInst : calls) { - Value *Str1P = callInst->getArgOperand(0), *Str2P = callInst->getArgOperand(1); - StringRef Str1, Str2, ConstStr; + Value *Str1P = callInst->getArgOperand(0), + *Str2P = callInst->getArgOperand(1); + StringRef Str1, Str2, ConstStr; std::string TmpConstStr; - Value *VarStr; - bool HasStr1 = getConstantStringInfo(Str1P, Str1); + Value * VarStr; + bool HasStr1 = getConstantStringInfo(Str1P, Str1); getConstantStringInfo(Str2P, Str2); uint64_t constLen, sizedLen; - bool isMemcmp = !callInst->getCalledFunction()->getName().compare(StringRef("memcmp")); - bool isSizedcmp = isMemcmp - || !callInst->getCalledFunction()->getName().compare(StringRef("strncmp")) - || !callInst->getCalledFunction()->getName().compare(StringRef("strncasecmp")); - bool isCaseInsensitive = !callInst->getCalledFunction()->getName().compare(StringRef("strcasecmp")) - || !callInst->getCalledFunction()->getName().compare(StringRef("strncasecmp")); + bool isMemcmp = + !callInst->getCalledFunction()->getName().compare(StringRef("memcmp")); + bool isSizedcmp = isMemcmp || + !callInst->getCalledFunction()->getName().compare( + StringRef("strncmp")) || + !callInst->getCalledFunction()->getName().compare( + StringRef("strncasecmp")); + bool isCaseInsensitive = !callInst->getCalledFunction()->getName().compare( + 
StringRef("strcasecmp")) || + !callInst->getCalledFunction()->getName().compare( + StringRef("strncasecmp")); if (isSizedcmp) { - Value *op2 = callInst->getArgOperand(2); - ConstantInt* ilen = dyn_cast<ConstantInt>(op2); + + Value * op2 = callInst->getArgOperand(2); + ConstantInt *ilen = dyn_cast<ConstantInt>(op2); sizedLen = ilen->getZExtValue(); + } if (HasStr1) { + TmpConstStr = Str1.str(); VarStr = Str2P; constLen = isMemcmp ? sizedLen : GetStringLength(Str1P); - } - else { + + } else { + TmpConstStr = Str2.str(); VarStr = Str1P; constLen = isMemcmp ? sizedLen : GetStringLength(Str2P); + } /* properly handle zero terminated C strings by adding the terminating 0 to * the StringRef (in comparison to std::string a StringRef has built-in * runtime bounds checking, which makes debugging easier) */ - TmpConstStr.append("\0", 1); ConstStr = StringRef(TmpConstStr); + TmpConstStr.append("\0", 1); + ConstStr = StringRef(TmpConstStr); - if (isSizedcmp && constLen > sizedLen) { - constLen = sizedLen; - } + if (isSizedcmp && constLen > sizedLen) { constLen = sizedLen; } - errs() << callInst->getCalledFunction()->getName() << ": len " << constLen << ": " << ConstStr << "\n"; + errs() << callInst->getCalledFunction()->getName() << ": len " << constLen + << ": " << ConstStr << "\n"; /* split before the call instruction */ BasicBlock *bb = callInst->getParent(); BasicBlock *end_bb = bb->splitBasicBlock(BasicBlock::iterator(callInst)); - BasicBlock *next_bb = BasicBlock::Create(C, "cmp_added", end_bb->getParent(), end_bb); + BasicBlock *next_bb = + BasicBlock::Create(C, "cmp_added", end_bb->getParent(), end_bb); BranchInst::Create(end_bb, next_bb); PHINode *PN = PHINode::Create(Int32Ty, constLen + 1, "cmp_phi"); @@ -249,71 +283,81 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const char c = isCaseInsensitive ? tolower(ConstStr[i]) : ConstStr[i]; - BasicBlock::iterator IP = next_bb->getFirstInsertionPt(); - IRBuilder<> IRB(&*IP); + IRBuilder<> IRB(&*IP); - Value* v = ConstantInt::get(Int64Ty, i); - Value *ele = IRB.CreateInBoundsGEP(VarStr, v, "empty"); + Value *v = ConstantInt::get(Int64Ty, i); + Value *ele = IRB.CreateInBoundsGEP(VarStr, v, "empty"); Value *load = IRB.CreateLoad(ele); if (isCaseInsensitive) { + // load >= 'A' && load <= 'Z' ? 
load | 0x020 : load std::vector<Value *> args; args.push_back(load); load = IRB.CreateCall(tolowerFn, args, "tmp"); load = IRB.CreateTrunc(load, Int8Ty); + } + Value *isub; if (HasStr1) isub = IRB.CreateSub(ConstantInt::get(Int8Ty, c), load); else isub = IRB.CreateSub(load, ConstantInt::get(Int8Ty, c)); - Value *sext = IRB.CreateSExt(isub, Int32Ty); + Value *sext = IRB.CreateSExt(isub, Int32Ty); PN->addIncoming(sext, cur_bb); - if (i < constLen - 1) { - next_bb = BasicBlock::Create(C, "cmp_added", end_bb->getParent(), end_bb); + + next_bb = + BasicBlock::Create(C, "cmp_added", end_bb->getParent(), end_bb); BranchInst::Create(end_bb, next_bb); Value *icmp = IRB.CreateICmpEQ(isub, ConstantInt::get(Int8Ty, 0)); IRB.CreateCondBr(icmp, next_bb, end_bb); cur_bb->getTerminator()->eraseFromParent(); + } else { - //IRB.CreateBr(end_bb); + + // IRB.CreateBr(end_bb); + } - //add offset to varstr - //create load - //create signed isub - //create icmp - //create jcc - //create next_bb + // add offset to varstr + // create load + // create signed isub + // create icmp + // create jcc + // create next_bb + } /* since the call is the first instruction of the bb it is safe to * replace it with a phi instruction */ BasicBlock::iterator ii(callInst); ReplaceInstWithInst(callInst->getParent()->getInstList(), ii, PN); - } + } return true; + } bool CompareTransform::runOnModule(Module &M) { if (getenv("AFL_QUIET") == NULL) - llvm::errs() << "Running compare-transform-pass by laf.intel@gmail.com, extended by heiko@hexco.de\n"; + llvm::errs() << "Running compare-transform-pass by laf.intel@gmail.com, " + "extended by heiko@hexco.de\n"; transformCmps(M, true, true, true, true, true); verifyModule(M); return true; + } static void registerCompTransPass(const PassManagerBuilder &, - legacy::PassManagerBase &PM) { + legacy::PassManagerBase &PM) { auto p = new CompareTransform(); PM.add(p); diff --git a/llvm_mode/split-compares-pass.so.cc b/llvm_mode/split-compares-pass.so.cc index a74b60fa..1e9d6542 100644 --- a/llvm_mode/split-compares-pass.so.cc +++ b/llvm_mode/split-compares-pass.so.cc @@ -27,117 +27,126 @@ using namespace llvm; namespace { - class SplitComparesTransform : public ModulePass { - public: - static char ID; - SplitComparesTransform() : ModulePass(ID) {} - bool runOnModule(Module &M) override; +class SplitComparesTransform : public ModulePass { + + public: + static char ID; + SplitComparesTransform() : ModulePass(ID) { + + } + + bool runOnModule(Module &M) override; #if LLVM_VERSION_MAJOR >= 4 - StringRef getPassName() const override { + StringRef getPassName() const override { + #else - const char * getPassName() const override { + const char *getPassName() const override { + #endif - return "simplifies and splits ICMP instructions"; - } - private: - bool splitCompares(Module &M, unsigned bitw); - bool simplifyCompares(Module &M); - bool simplifySignedness(Module &M); + return "simplifies and splits ICMP instructions"; - }; -} + } + + private: + bool splitCompares(Module &M, unsigned bitw); + bool simplifyCompares(Module &M); + bool simplifySignedness(Module &M); + +}; + +} // namespace char SplitComparesTransform::ID = 0; -/* This function splits ICMP instructions with xGE or xLE predicates into two +/* This function splits ICMP instructions with xGE or xLE predicates into two * ICMP instructions with predicate xGT or xLT and EQ */ bool SplitComparesTransform::simplifyCompares(Module &M) { - LLVMContext &C = M.getContext(); - std::vector<Instruction*> icomps; - IntegerType *Int1Ty = 
IntegerType::getInt1Ty(C); + + LLVMContext & C = M.getContext(); + std::vector<Instruction *> icomps; + IntegerType * Int1Ty = IntegerType::getInt1Ty(C); /* iterate over all functions, bbs and instruction and add * all integer comparisons with >= and <= predicates to the icomps vector */ for (auto &F : M) { + for (auto &BB : F) { - for (auto &IN: BB) { - CmpInst* selectcmpInst = nullptr; + + for (auto &IN : BB) { + + CmpInst *selectcmpInst = nullptr; if ((selectcmpInst = dyn_cast<CmpInst>(&IN))) { if (selectcmpInst->getPredicate() != CmpInst::ICMP_UGE && selectcmpInst->getPredicate() != CmpInst::ICMP_SGE && selectcmpInst->getPredicate() != CmpInst::ICMP_ULE && - selectcmpInst->getPredicate() != CmpInst::ICMP_SLE ) { + selectcmpInst->getPredicate() != CmpInst::ICMP_SLE) { + continue; + } auto op0 = selectcmpInst->getOperand(0); auto op1 = selectcmpInst->getOperand(1); - IntegerType* intTyOp0 = dyn_cast<IntegerType>(op0->getType()); - IntegerType* intTyOp1 = dyn_cast<IntegerType>(op1->getType()); + IntegerType *intTyOp0 = dyn_cast<IntegerType>(op0->getType()); + IntegerType *intTyOp1 = dyn_cast<IntegerType>(op1->getType()); /* this is probably not needed but we do it anyway */ - if (!intTyOp0 || !intTyOp1) { - continue; - } + if (!intTyOp0 || !intTyOp1) { continue; } icomps.push_back(selectcmpInst); + } + } + } - } - if (!icomps.size()) { - return false; } + if (!icomps.size()) { return false; } + + for (auto &IcmpInst : icomps) { - for (auto &IcmpInst: icomps) { - BasicBlock* bb = IcmpInst->getParent(); + BasicBlock *bb = IcmpInst->getParent(); auto op0 = IcmpInst->getOperand(0); auto op1 = IcmpInst->getOperand(1); /* find out what the new predicate is going to be */ - auto pred = dyn_cast<CmpInst>(IcmpInst)->getPredicate(); + auto pred = dyn_cast<CmpInst>(IcmpInst)->getPredicate(); CmpInst::Predicate new_pred; - switch(pred) { - case CmpInst::ICMP_UGE: - new_pred = CmpInst::ICMP_UGT; - break; - case CmpInst::ICMP_SGE: - new_pred = CmpInst::ICMP_SGT; - break; - case CmpInst::ICMP_ULE: - new_pred = CmpInst::ICMP_ULT; - break; - case CmpInst::ICMP_SLE: - new_pred = CmpInst::ICMP_SLT; - break; - default: // keep the compiler happy + switch (pred) { + + case CmpInst::ICMP_UGE: new_pred = CmpInst::ICMP_UGT; break; + case CmpInst::ICMP_SGE: new_pred = CmpInst::ICMP_SGT; break; + case CmpInst::ICMP_ULE: new_pred = CmpInst::ICMP_ULT; break; + case CmpInst::ICMP_SLE: new_pred = CmpInst::ICMP_SLT; break; + default: // keep the compiler happy continue; + } /* split before the icmp instruction */ - BasicBlock* end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst)); + BasicBlock *end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst)); /* the old bb now contains a unconditional jump to the new one (end_bb) * we need to delete it later */ /* create the ICMP instruction with new_pred and add it to the old basic * block bb it is now at the position where the old IcmpInst was */ - Instruction* icmp_np; + Instruction *icmp_np; icmp_np = CmpInst::Create(Instruction::ICmp, new_pred, op0, op1); bb->getInstList().insert(bb->getTerminator()->getIterator(), icmp_np); /* create a new basic block which holds the new EQ icmp */ Instruction *icmp_eq; /* insert middle_bb before end_bb */ - BasicBlock* middle_bb = BasicBlock::Create(C, "injected", - end_bb->getParent(), end_bb); + BasicBlock *middle_bb = + BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb); icmp_eq = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, op0, op1); middle_bb->getInstList().push_back(icmp_eq); /* add an unconditional 
branch to the end of middle_bb with destination @@ -150,7 +159,6 @@ bool SplitComparesTransform::simplifyCompares(Module &M) { BranchInst::Create(end_bb, middle_bb, icmp_np, bb); term->eraseFromParent(); - /* replace the old IcmpInst (which is the first inst in end_bb) with a PHI * inst to wire up the loose ends */ PHINode *PN = PHINode::Create(Int1Ty, 2, ""); @@ -162,118 +170,139 @@ bool SplitComparesTransform::simplifyCompares(Module &M) { /* replace the old IcmpInst with our new and shiny PHI inst */ BasicBlock::iterator ii(IcmpInst); ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN); + } return true; + } /* this function transforms signed compares to equivalent unsigned compares */ bool SplitComparesTransform::simplifySignedness(Module &M) { - LLVMContext &C = M.getContext(); - std::vector<Instruction*> icomps; - IntegerType *Int1Ty = IntegerType::getInt1Ty(C); + + LLVMContext & C = M.getContext(); + std::vector<Instruction *> icomps; + IntegerType * Int1Ty = IntegerType::getInt1Ty(C); /* iterate over all functions, bbs and instruction and add * all signed compares to icomps vector */ for (auto &F : M) { + for (auto &BB : F) { - for(auto &IN: BB) { - CmpInst* selectcmpInst = nullptr; + + for (auto &IN : BB) { + + CmpInst *selectcmpInst = nullptr; if ((selectcmpInst = dyn_cast<CmpInst>(&IN))) { if (selectcmpInst->getPredicate() != CmpInst::ICMP_SGT && - selectcmpInst->getPredicate() != CmpInst::ICMP_SLT - ) { + selectcmpInst->getPredicate() != CmpInst::ICMP_SLT) { + continue; + } auto op0 = selectcmpInst->getOperand(0); auto op1 = selectcmpInst->getOperand(1); - IntegerType* intTyOp0 = dyn_cast<IntegerType>(op0->getType()); - IntegerType* intTyOp1 = dyn_cast<IntegerType>(op1->getType()); + IntegerType *intTyOp0 = dyn_cast<IntegerType>(op0->getType()); + IntegerType *intTyOp1 = dyn_cast<IntegerType>(op1->getType()); /* see above */ - if (!intTyOp0 || !intTyOp1) { - continue; - } + if (!intTyOp0 || !intTyOp1) { continue; } /* i think this is not possible but to lazy to look it up */ - if (intTyOp0->getBitWidth() != intTyOp1->getBitWidth()) { - continue; - } + if (intTyOp0->getBitWidth() != intTyOp1->getBitWidth()) { continue; } icomps.push_back(selectcmpInst); + } + } + } - } - if (!icomps.size()) { - return false; } - for (auto &IcmpInst: icomps) { - BasicBlock* bb = IcmpInst->getParent(); + if (!icomps.size()) { return false; } + + for (auto &IcmpInst : icomps) { + + BasicBlock *bb = IcmpInst->getParent(); auto op0 = IcmpInst->getOperand(0); auto op1 = IcmpInst->getOperand(1); - IntegerType* intTyOp0 = dyn_cast<IntegerType>(op0->getType()); - unsigned bitw = intTyOp0->getBitWidth(); + IntegerType *intTyOp0 = dyn_cast<IntegerType>(op0->getType()); + unsigned bitw = intTyOp0->getBitWidth(); IntegerType *IntType = IntegerType::get(C, bitw); - /* get the new predicate */ - auto pred = dyn_cast<CmpInst>(IcmpInst)->getPredicate(); + auto pred = dyn_cast<CmpInst>(IcmpInst)->getPredicate(); CmpInst::Predicate new_pred; if (pred == CmpInst::ICMP_SGT) { + new_pred = CmpInst::ICMP_UGT; + } else { + new_pred = CmpInst::ICMP_ULT; + } - BasicBlock* end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst)); + BasicBlock *end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst)); /* create a 1 bit compare for the sign bit. 
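
Reviewer note: this is the standard reduction of a signed compare to
sign-bit tests plus an unsigned compare. With sa, sb denoting the
sign bits of op0 and op1:

    a <s b  ==  (sa != sb) ? sa : (a <u b)

If exactly one operand is negative, the negative one is smaller;
otherwise the unsigned ordering already equals the signed one, both
for two non-negative and for two negative two's-complement values.
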
to do this shift and trunc * the original operands so only the first bit remains.*/ Instruction *s_op0, *t_op0, *s_op1, *t_op1, *icmp_sign_bit; - s_op0 = BinaryOperator::Create(Instruction::LShr, op0, ConstantInt::get(IntType, bitw - 1)); + s_op0 = BinaryOperator::Create(Instruction::LShr, op0, + ConstantInt::get(IntType, bitw - 1)); bb->getInstList().insert(bb->getTerminator()->getIterator(), s_op0); t_op0 = new TruncInst(s_op0, Int1Ty); bb->getInstList().insert(bb->getTerminator()->getIterator(), t_op0); - s_op1 = BinaryOperator::Create(Instruction::LShr, op1, ConstantInt::get(IntType, bitw - 1)); + s_op1 = BinaryOperator::Create(Instruction::LShr, op1, + ConstantInt::get(IntType, bitw - 1)); bb->getInstList().insert(bb->getTerminator()->getIterator(), s_op1); t_op1 = new TruncInst(s_op1, Int1Ty); bb->getInstList().insert(bb->getTerminator()->getIterator(), t_op1); /* compare of the sign bits */ - icmp_sign_bit = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, t_op0, t_op1); + icmp_sign_bit = + CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, t_op0, t_op1); bb->getInstList().insert(bb->getTerminator()->getIterator(), icmp_sign_bit); /* create a new basic block which is executed if the signedness bit is - * different */ + * different */ Instruction *icmp_inv_sig_cmp; - BasicBlock* sign_bb = BasicBlock::Create(C, "sign", end_bb->getParent(), end_bb); + BasicBlock * sign_bb = + BasicBlock::Create(C, "sign", end_bb->getParent(), end_bb); if (pred == CmpInst::ICMP_SGT) { + /* if we check for > and the op0 positive and op1 negative then the final * result is true. if op0 negative and op1 pos, the cmp must result * in false */ - icmp_inv_sig_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT, t_op0, t_op1); + icmp_inv_sig_cmp = + CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT, t_op0, t_op1); + } else { + /* just the inverse of the above statement */ - icmp_inv_sig_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT, t_op0, t_op1); + icmp_inv_sig_cmp = + CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT, t_op0, t_op1); + } + sign_bb->getInstList().push_back(icmp_inv_sig_cmp); BranchInst::Create(end_bb, sign_bb); /* create a new bb which is executed if signedness is equal */ Instruction *icmp_usign_cmp; - BasicBlock* middle_bb = BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb); + BasicBlock * middle_bb = + BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb); /* we can do a normal unsigned compare now */ icmp_usign_cmp = CmpInst::Create(Instruction::ICmp, new_pred, op0, op1); middle_bb->getInstList().push_back(icmp_usign_cmp); @@ -285,7 +314,6 @@ bool SplitComparesTransform::simplifySignedness(Module &M) { BranchInst::Create(middle_bb, sign_bb, icmp_sign_bit, bb); term->eraseFromParent(); - PHINode *PN = PHINode::Create(Int1Ty, 2, ""); PN->addIncoming(icmp_usign_cmp, middle_bb); @@ -293,91 +321,100 @@ bool SplitComparesTransform::simplifySignedness(Module &M) { BasicBlock::iterator ii(IcmpInst); ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN); + } return true; + } /* splits icmps of size bitw into two nested icmps with bitw/2 size each */ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) { + LLVMContext &C = M.getContext(); IntegerType *Int1Ty = IntegerType::getInt1Ty(C); IntegerType *OldIntType = IntegerType::get(C, bitw); IntegerType *NewIntType = IntegerType::get(C, bitw / 2); - std::vector<Instruction*> icomps; + std::vector<Instruction *> icomps; - if (bitw % 2) { - return false; - } + if (bitw % 2) 
{ return false; } /* not supported yet */ - if (bitw > 64) { - return false; - } + if (bitw > 64) { return false; } - /* get all EQ, NE, UGT, and ULT icmps of width bitw. if the other two + /* get all EQ, NE, UGT, and ULT icmps of width bitw. if the other two * unctions were executed only these four predicates should exist */ for (auto &F : M) { + for (auto &BB : F) { - for(auto &IN: BB) { - CmpInst* selectcmpInst = nullptr; + + for (auto &IN : BB) { + + CmpInst *selectcmpInst = nullptr; if ((selectcmpInst = dyn_cast<CmpInst>(&IN))) { - if(selectcmpInst->getPredicate() != CmpInst::ICMP_EQ && - selectcmpInst->getPredicate() != CmpInst::ICMP_NE && - selectcmpInst->getPredicate() != CmpInst::ICMP_UGT && - selectcmpInst->getPredicate() != CmpInst::ICMP_ULT - ) { + if (selectcmpInst->getPredicate() != CmpInst::ICMP_EQ && + selectcmpInst->getPredicate() != CmpInst::ICMP_NE && + selectcmpInst->getPredicate() != CmpInst::ICMP_UGT && + selectcmpInst->getPredicate() != CmpInst::ICMP_ULT) { + continue; + } auto op0 = selectcmpInst->getOperand(0); auto op1 = selectcmpInst->getOperand(1); - IntegerType* intTyOp0 = dyn_cast<IntegerType>(op0->getType()); - IntegerType* intTyOp1 = dyn_cast<IntegerType>(op1->getType()); + IntegerType *intTyOp0 = dyn_cast<IntegerType>(op0->getType()); + IntegerType *intTyOp1 = dyn_cast<IntegerType>(op1->getType()); - if (!intTyOp0 || !intTyOp1) { - continue; - } + if (!intTyOp0 || !intTyOp1) { continue; } /* check if the bitwidths are the one we are looking for */ - if (intTyOp0->getBitWidth() != bitw || intTyOp1->getBitWidth() != bitw) { + if (intTyOp0->getBitWidth() != bitw || + intTyOp1->getBitWidth() != bitw) { + continue; + } icomps.push_back(selectcmpInst); + } + } + } - } - if (!icomps.size()) { - return false; } - for (auto &IcmpInst: icomps) { - BasicBlock* bb = IcmpInst->getParent(); + if (!icomps.size()) { return false; } + + for (auto &IcmpInst : icomps) { + + BasicBlock *bb = IcmpInst->getParent(); auto op0 = IcmpInst->getOperand(0); auto op1 = IcmpInst->getOperand(1); auto pred = dyn_cast<CmpInst>(IcmpInst)->getPredicate(); - BasicBlock* end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst)); + BasicBlock *end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst)); /* create the comparison of the top halves of the original operands */ Instruction *s_op0, *op0_high, *s_op1, *op1_high, *icmp_high; - s_op0 = BinaryOperator::Create(Instruction::LShr, op0, ConstantInt::get(OldIntType, bitw / 2)); + s_op0 = BinaryOperator::Create(Instruction::LShr, op0, + ConstantInt::get(OldIntType, bitw / 2)); bb->getInstList().insert(bb->getTerminator()->getIterator(), s_op0); op0_high = new TruncInst(s_op0, NewIntType); bb->getInstList().insert(bb->getTerminator()->getIterator(), op0_high); - s_op1 = BinaryOperator::Create(Instruction::LShr, op1, ConstantInt::get(OldIntType, bitw / 2)); + s_op1 = BinaryOperator::Create(Instruction::LShr, op1, + ConstantInt::get(OldIntType, bitw / 2)); bb->getInstList().insert(bb->getTerminator()->getIterator(), s_op1); op1_high = new TruncInst(s_op1, NewIntType); bb->getInstList().insert(bb->getTerminator()->getIterator(), op1_high); @@ -387,11 +424,13 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) { /* now we have to destinguish between == != and > < */ if (pred == CmpInst::ICMP_EQ || pred == CmpInst::ICMP_NE) { + /* transformation for == and != icmps */ /* create a compare for the lower half of the original operands */ Instruction *op0_low, *op1_low, *icmp_low; - BasicBlock* cmp_low_bb = BasicBlock::Create(C, 
"injected", end_bb->getParent(), end_bb); + BasicBlock * cmp_low_bb = + BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb); op0_low = new TruncInst(op0, NewIntType); cmp_low_bb->getInstList().push_back(op0_low); @@ -407,21 +446,30 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) { * the comparison */ auto term = bb->getTerminator(); if (pred == CmpInst::ICMP_EQ) { + BranchInst::Create(cmp_low_bb, end_bb, icmp_high, bb); + } else { + /* CmpInst::ICMP_NE */ BranchInst::Create(end_bb, cmp_low_bb, icmp_high, bb); + } + term->eraseFromParent(); /* create the PHI and connect the edges accordingly */ PHINode *PN = PHINode::Create(Int1Ty, 2, ""); PN->addIncoming(icmp_low, cmp_low_bb); if (pred == CmpInst::ICMP_EQ) { + PN->addIncoming(ConstantInt::get(Int1Ty, 0), bb); + } else { + /* CmpInst::ICMP_NE */ PN->addIncoming(ConstantInt::get(Int1Ty, 1), bb); + } /* replace the old icmp with the new PHI */ @@ -429,19 +477,28 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) { ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN); } else { + /* CmpInst::ICMP_UGT and CmpInst::ICMP_ULT */ /* transformations for < and > */ - /* create a basic block which checks for the inverse predicate. + /* create a basic block which checks for the inverse predicate. * if this is true we can go to the end if not we have to got to the * bb which checks the lower half of the operands */ Instruction *icmp_inv_cmp, *op0_low, *op1_low, *icmp_low; - BasicBlock* inv_cmp_bb = BasicBlock::Create(C, "inv_cmp", end_bb->getParent(), end_bb); + BasicBlock * inv_cmp_bb = + BasicBlock::Create(C, "inv_cmp", end_bb->getParent(), end_bb); if (pred == CmpInst::ICMP_UGT) { - icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT, op0_high, op1_high); + + icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT, + op0_high, op1_high); + } else { - icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT, op0_high, op1_high); + + icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT, + op0_high, op1_high); + } + inv_cmp_bb->getInstList().push_back(icmp_inv_cmp); auto term = bb->getTerminator(); @@ -449,7 +506,8 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) { BranchInst::Create(end_bb, inv_cmp_bb, icmp_high, bb); /* create a bb which handles the cmp of the lower halves */ - BasicBlock* cmp_low_bb = BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb); + BasicBlock *cmp_low_bb = + BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb); op0_low = new TruncInst(op0, NewIntType); cmp_low_bb->getInstList().push_back(op0_low); op1_low = new TruncInst(op1, NewIntType); @@ -468,57 +526,64 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) { BasicBlock::iterator ii(IcmpInst); ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN); + } + } - return true; + + return true; + } bool SplitComparesTransform::runOnModule(Module &M) { + int bitw = 64; - char* bitw_env = getenv("LAF_SPLIT_COMPARES_BITW"); - if (!bitw_env) - bitw_env = getenv("AFL_LLVM_LAF_SPLIT_COMPARES_BITW"); - if (bitw_env) { - bitw = atoi(bitw_env); - } + char *bitw_env = getenv("LAF_SPLIT_COMPARES_BITW"); + if (!bitw_env) bitw_env = getenv("AFL_LLVM_LAF_SPLIT_COMPARES_BITW"); + if (bitw_env) { bitw = atoi(bitw_env); } simplifyCompares(M); simplifySignedness(M); if (getenv("AFL_QUIET") == NULL) - errs() << "Split-compare-pass by laf.intel@gmail.com\n"; + errs() << "Split-compare-pass by 
laf.intel@gmail.com\n"; switch (bitw) { + case 64: - errs() << "Running split-compare-pass " << 64 << "\n"; + errs() << "Running split-compare-pass " << 64 << "\n"; splitCompares(M, 64); - [[clang::fallthrough]]; /*FALLTHRU*/ /* FALLTHROUGH */ + [[clang::fallthrough]]; /*FALLTHRU*/ /* FALLTHROUGH */ case 32: - errs() << "Running split-compare-pass " << 32 << "\n"; + errs() << "Running split-compare-pass " << 32 << "\n"; splitCompares(M, 32); - [[clang::fallthrough]]; /*FALLTHRU*/ /* FALLTHROUGH */ + [[clang::fallthrough]]; /*FALLTHRU*/ /* FALLTHROUGH */ case 16: - errs() << "Running split-compare-pass " << 16 << "\n"; + errs() << "Running split-compare-pass " << 16 << "\n"; splitCompares(M, 16); break; default: - errs() << "NOT Running split-compare-pass \n"; + errs() << "NOT Running split-compare-pass \n"; return false; break; + } verifyModule(M); return true; + } static void registerSplitComparesPass(const PassManagerBuilder &, - legacy::PassManagerBase &PM) { + legacy::PassManagerBase &PM) { + PM.add(new SplitComparesTransform()); + } static RegisterStandardPasses RegisterSplitComparesPass( @@ -526,3 +591,4 @@ static RegisterStandardPasses RegisterSplitComparesPass( static RegisterStandardPasses RegisterSplitComparesTransPass0( PassManagerBuilder::EP_EnabledOnOptLevel0, registerSplitComparesPass); + diff --git a/llvm_mode/split-switches-pass.so.cc b/llvm_mode/split-switches-pass.so.cc index 1ace3185..2743a71a 100644 --- a/llvm_mode/split-switches-pass.so.cc +++ b/llvm_mode/split-switches-pass.so.cc @@ -36,54 +36,65 @@ using namespace llvm; namespace { - class SplitSwitchesTransform : public ModulePass { +class SplitSwitchesTransform : public ModulePass { - public: - static char ID; - SplitSwitchesTransform() : ModulePass(ID) { - } + public: + static char ID; + SplitSwitchesTransform() : ModulePass(ID) { - bool runOnModule(Module &M) override; + } + + bool runOnModule(Module &M) override; #if LLVM_VERSION_MAJOR >= 4 - StringRef getPassName() const override { + StringRef getPassName() const override { + #else - const char * getPassName() const override { + const char *getPassName() const override { + #endif - return "splits switch constructs"; - } - struct CaseExpr { - ConstantInt* Val; - BasicBlock* BB; - - CaseExpr(ConstantInt *val = nullptr, BasicBlock *bb = nullptr) : - Val(val), BB(bb) { } - }; - - typedef std::vector<CaseExpr> CaseVector; - - private: - bool splitSwitches(Module &M); - bool transformCmps(Module &M, const bool processStrcmp, const bool processMemcmp); - BasicBlock* switchConvert(CaseVector Cases, std::vector<bool> bytesChecked, - BasicBlock* OrigBlock, BasicBlock* NewDefault, - Value* Val, unsigned level); + return "splits switch constructs"; + + } + + struct CaseExpr { + + ConstantInt *Val; + BasicBlock * BB; + + CaseExpr(ConstantInt *val = nullptr, BasicBlock *bb = nullptr) + : Val(val), BB(bb) { + + } + }; -} + typedef std::vector<CaseExpr> CaseVector; -char SplitSwitchesTransform::ID = 0; + private: + bool splitSwitches(Module &M); + bool transformCmps(Module &M, const bool processStrcmp, + const bool processMemcmp); + BasicBlock *switchConvert(CaseVector Cases, std::vector<bool> bytesChecked, + BasicBlock *OrigBlock, BasicBlock *NewDefault, + Value *Val, unsigned level); + +}; +} // namespace + +char SplitSwitchesTransform::ID = 0; /* switchConvert - Transform simple list of Cases into list of CaseRange's */ -BasicBlock* SplitSwitchesTransform::switchConvert(CaseVector Cases, std::vector<bool> bytesChecked, - BasicBlock* OrigBlock, BasicBlock* NewDefault, - 
Value* Val, unsigned level) { - - unsigned ValTypeBitWidth = Cases[0].Val->getBitWidth(); - IntegerType *ValType = IntegerType::get(OrigBlock->getContext(), ValTypeBitWidth); - IntegerType *ByteType = IntegerType::get(OrigBlock->getContext(), 8); - unsigned BytesInValue = bytesChecked.size(); +BasicBlock *SplitSwitchesTransform::switchConvert( + CaseVector Cases, std::vector<bool> bytesChecked, BasicBlock *OrigBlock, + BasicBlock *NewDefault, Value *Val, unsigned level) { + + unsigned ValTypeBitWidth = Cases[0].Val->getBitWidth(); + IntegerType *ValType = + IntegerType::get(OrigBlock->getContext(), ValTypeBitWidth); + IntegerType * ByteType = IntegerType::get(OrigBlock->getContext(), 8); + unsigned BytesInValue = bytesChecked.size(); std::vector<uint8_t> setSizes; std::vector<std::set<uint8_t>> byteSets(BytesInValue, std::set<uint8_t>()); @@ -91,43 +102,54 @@ BasicBlock* SplitSwitchesTransform::switchConvert(CaseVector Cases, std::vector< /* for each of the possible cases we iterate over all bytes of the values * build a set of possible values at each byte position in byteSets */ - for (CaseExpr& Case: Cases) { + for (CaseExpr &Case : Cases) { + for (unsigned i = 0; i < BytesInValue; i++) { - uint8_t byte = (Case.Val->getZExtValue() >> (i*8)) & 0xFF; + uint8_t byte = (Case.Val->getZExtValue() >> (i * 8)) & 0xFF; byteSets[i].insert(byte); + } + } /* find the index of the first byte position that was not yet checked. then * save the number of possible values at that byte position */ unsigned smallestIndex = 0; unsigned smallestSize = 257; - for(unsigned i = 0; i < byteSets.size(); i++) { - if (bytesChecked[i]) - continue; + for (unsigned i = 0; i < byteSets.size(); i++) { + + if (bytesChecked[i]) continue; if (byteSets[i].size() < smallestSize) { + smallestIndex = i; smallestSize = byteSets[i].size(); + } + } + assert(bytesChecked[smallestIndex] == false); /* there are only smallestSize different bytes at index smallestIndex */ - + Instruction *Shift, *Trunc; - Function* F = OrigBlock->getParent(); - BasicBlock* NewNode = BasicBlock::Create(Val->getContext(), "NodeBlock", F); - Shift = BinaryOperator::Create(Instruction::LShr, Val, ConstantInt::get(ValType, smallestIndex * 8)); + Function * F = OrigBlock->getParent(); + BasicBlock * NewNode = BasicBlock::Create(Val->getContext(), "NodeBlock", F); + Shift = BinaryOperator::Create(Instruction::LShr, Val, + ConstantInt::get(ValType, smallestIndex * 8)); NewNode->getInstList().push_back(Shift); if (ValTypeBitWidth > 8) { + Trunc = new TruncInst(Shift, ByteType); NewNode->getInstList().push_back(Trunc); - } - else { + + } else { + /* not necessary to trunc */ Trunc = Shift; + } /* this is a trivial case, we can directly check for the byte, @@ -135,118 +157,155 @@ BasicBlock* SplitSwitchesTransform::switchConvert(CaseVector Cases, std::vector< * mark the byte as checked. 
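
Reviewer note: the net effect is that a wide switch lowers to a tree
of 8-bit checks. For example, with the two 32-bit cases 0x41424344
and 0x41424345, the three upper byte positions each admit one value
and become chained equality tests; only the low byte needs a pivot
split. Roughly (control flow simplified, any failed test falls
through to the default; case_a/case_b are hypothetical labels):

    if (((x >>  8) & 0xff) == 0x43 && ((x >> 16) & 0xff) == 0x42 &&
        ((x >> 24) & 0xff) == 0x41) {

      if ((x & 0xff) < 0x45) { if ((x & 0xff) == 0x44) goto case_a; }
      else                   { if ((x & 0xff) == 0x45) goto case_b; }

    }

This is exactly the shape afl-fuzz can solve one byte at a time.
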
if this was the last byte to check * we can finally execute the block belonging to this case */ - if (smallestSize == 1) { + uint8_t byte = *(byteSets[smallestIndex].begin()); - /* insert instructions to check whether the value we are switching on is equal to byte */ - ICmpInst* Comp = new ICmpInst(ICmpInst::ICMP_EQ, Trunc, ConstantInt::get(ByteType, byte), "byteMatch"); + /* insert instructions to check whether the value we are switching on is + * equal to byte */ + ICmpInst *Comp = + new ICmpInst(ICmpInst::ICMP_EQ, Trunc, ConstantInt::get(ByteType, byte), + "byteMatch"); NewNode->getInstList().push_back(Comp); bytesChecked[smallestIndex] = true; - if (std::all_of(bytesChecked.begin(), bytesChecked.end(), [](bool b){return b;} )) { + if (std::all_of(bytesChecked.begin(), bytesChecked.end(), + [](bool b) { return b; })) { + assert(Cases.size() == 1); BranchInst::Create(Cases[0].BB, NewDefault, Comp, NewNode); /* we have to update the phi nodes! */ - for (BasicBlock::iterator I = Cases[0].BB->begin(); I != Cases[0].BB->end(); ++I) { - if (!isa<PHINode>(&*I)) { - continue; - } + for (BasicBlock::iterator I = Cases[0].BB->begin(); + I != Cases[0].BB->end(); ++I) { + + if (!isa<PHINode>(&*I)) { continue; } PHINode *PN = cast<PHINode>(I); /* Only update the first occurrence. */ unsigned Idx = 0, E = PN->getNumIncomingValues(); for (; Idx != E; ++Idx) { + if (PN->getIncomingBlock(Idx) == OrigBlock) { + PN->setIncomingBlock(Idx, NewNode); break; + } + } + } - } - else { - BasicBlock* BB = switchConvert(Cases, bytesChecked, OrigBlock, NewDefault, Val, level + 1); + + } else { + + BasicBlock *BB = switchConvert(Cases, bytesChecked, OrigBlock, NewDefault, + Val, level + 1); BranchInst::Create(BB, NewDefault, Comp, NewNode); + } + } + /* there is no byte which we can directly check on, split the tree */ else { std::vector<uint8_t> byteVector; - std::copy(byteSets[smallestIndex].begin(), byteSets[smallestIndex].end(), std::back_inserter(byteVector)); + std::copy(byteSets[smallestIndex].begin(), byteSets[smallestIndex].end(), + std::back_inserter(byteVector)); std::sort(byteVector.begin(), byteVector.end()); uint8_t pivot = byteVector[byteVector.size() / 2]; - /* we already chose to divide the cases based on the value of byte at index smallestIndex - * the pivot value determines the threshold for the decicion; if a case value - * is smaller at this byte index move it to the LHS vector, otherwise to the RHS vector */ + /* we already chose to divide the cases based on the value of byte at index + * smallestIndex the pivot value determines the threshold for the decicion; + * if a case value + * is smaller at this byte index move it to the LHS vector, otherwise to the + * RHS vector */ CaseVector LHSCases, RHSCases; - for (CaseExpr& Case: Cases) { - uint8_t byte = (Case.Val->getZExtValue() >> (smallestIndex*8)) & 0xFF; + for (CaseExpr &Case : Cases) { + + uint8_t byte = (Case.Val->getZExtValue() >> (smallestIndex * 8)) & 0xFF; if (byte < pivot) { + LHSCases.push_back(Case); - } - else { + + } else { + RHSCases.push_back(Case); + } + } - BasicBlock *LBB, *RBB; - LBB = switchConvert(LHSCases, bytesChecked, OrigBlock, NewDefault, Val, level + 1); - RBB = switchConvert(RHSCases, bytesChecked, OrigBlock, NewDefault, Val, level + 1); - /* insert instructions to check whether the value we are switching on is equal to byte */ - ICmpInst* Comp = new ICmpInst(ICmpInst::ICMP_ULT, Trunc, ConstantInt::get(ByteType, pivot), "byteMatch"); + BasicBlock *LBB, *RBB; + LBB = switchConvert(LHSCases, bytesChecked, OrigBlock, 
NewDefault, Val, + level + 1); + RBB = switchConvert(RHSCases, bytesChecked, OrigBlock, NewDefault, Val, + level + 1); + + /* insert instructions to check whether the value we are switching on is + * equal to byte */ + ICmpInst *Comp = + new ICmpInst(ICmpInst::ICMP_ULT, Trunc, + ConstantInt::get(ByteType, pivot), "byteMatch"); NewNode->getInstList().push_back(Comp); BranchInst::Create(LBB, RBB, Comp, NewNode); } return NewNode; + } bool SplitSwitchesTransform::splitSwitches(Module &M) { - std::vector<SwitchInst*> switches; + std::vector<SwitchInst *> switches; /* iterate over all functions, bbs and instruction and add * all switches to switches vector for later processing */ for (auto &F : M) { + for (auto &BB : F) { - SwitchInst* switchInst = nullptr; + + SwitchInst *switchInst = nullptr; if ((switchInst = dyn_cast<SwitchInst>(BB.getTerminator()))) { - if (switchInst->getNumCases() < 1) - continue; - switches.push_back(switchInst); + + if (switchInst->getNumCases() < 1) continue; + switches.push_back(switchInst); + } + } + } - if (!switches.size()) - return false; - errs() << "Rewriting " << switches.size() << " switch statements " << "\n"; + if (!switches.size()) return false; + errs() << "Rewriting " << switches.size() << " switch statements " + << "\n"; - for (auto &SI: switches) { + for (auto &SI : switches) { BasicBlock *CurBlock = SI->getParent(); BasicBlock *OrigBlock = CurBlock; - Function *F = CurBlock->getParent(); + Function * F = CurBlock->getParent(); /* this is the value we are switching on */ - Value *Val = SI->getCondition(); - BasicBlock* Default = SI->getDefaultDest(); - unsigned bitw = Val->getType()->getIntegerBitWidth(); + Value * Val = SI->getCondition(); + BasicBlock *Default = SI->getDefaultDest(); + unsigned bitw = Val->getType()->getIntegerBitWidth(); errs() << "switch: " << SI->getNumCases() << " cases " << bitw << " bit\n"; - /* If there is only the default destination or the condition checks 8 bit or less, don't bother with the code below. */ + /* If there is only the default destination or the condition checks 8 bit or + * less, don't bother with the code below. */ if (!SI->getNumCases() || bitw <= 8) { - if (getenv("AFL_QUIET") == NULL) - errs() << "skip trivial switch..\n"; + + if (getenv("AFL_QUIET") == NULL) errs() << "skip trivial switch..\n"; continue; + } /* Create a new, empty default block so that the new hierarchy of @@ -258,10 +317,10 @@ bool SplitSwitchesTransform::splitSwitches(Module &M) { NewDefault->insertInto(F, Default); BranchInst::Create(Default, NewDefault); - /* Prepare cases vector. */ CaseVector Cases; - for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i) + for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; + ++i) #if LLVM_VERSION_MAJOR < 5 Cases.push_back(CaseExpr(i.getCaseValue(), i.getCaseSuccessor())); #else @@ -269,8 +328,10 @@ bool SplitSwitchesTransform::splitSwitches(Module &M) { #endif /* bugfix thanks to pbst * round up bytesChecked (in case getBitWidth() % 8 != 0) */ - std::vector<bool> bytesChecked((7 + Cases[0].Val->getBitWidth()) / 8, false); - BasicBlock* SwitchBlock = switchConvert(Cases, bytesChecked, OrigBlock, NewDefault, Val, 0); + std::vector<bool> bytesChecked((7 + Cases[0].Val->getBitWidth()) / 8, + false); + BasicBlock * SwitchBlock = + switchConvert(Cases, bytesChecked, OrigBlock, NewDefault, Val, 0); /* Branch to our shiny new if-then stuff... 
*/ BranchInst::Create(SwitchBlock, OrigBlock); @@ -278,41 +339,47 @@ bool SplitSwitchesTransform::splitSwitches(Module &M) { /* We are now done with the switch instruction, delete it. */ CurBlock->getInstList().erase(SI); + /* we have to update the phi nodes! */ + for (BasicBlock::iterator I = Default->begin(); I != Default->end(); ++I) { + + if (!isa<PHINode>(&*I)) { continue; } + PHINode *PN = cast<PHINode>(I); + + /* Only update the first occurrence. */ + unsigned Idx = 0, E = PN->getNumIncomingValues(); + for (; Idx != E; ++Idx) { + + if (PN->getIncomingBlock(Idx) == OrigBlock) { + + PN->setIncomingBlock(Idx, NewDefault); + break; + + } + + } + + } + + } + + verifyModule(M); + return true; - /* we have to update the phi nodes! */ - for (BasicBlock::iterator I = Default->begin(); I != Default->end(); ++I) { - if (!isa<PHINode>(&*I)) { - continue; - } - PHINode *PN = cast<PHINode>(I); - - /* Only update the first occurrence. */ - unsigned Idx = 0, E = PN->getNumIncomingValues(); - for (; Idx != E; ++Idx) { - if (PN->getIncomingBlock(Idx) == OrigBlock) { - PN->setIncomingBlock(Idx, NewDefault); - break; - } - } - } - } - - verifyModule(M); - return true; } bool SplitSwitchesTransform::runOnModule(Module &M) { if (getenv("AFL_QUIET") == NULL) - llvm::errs() << "Running split-switches-pass by laf.intel@gmail.com\n"; + llvm::errs() << "Running split-switches-pass by laf.intel@gmail.com\n"; splitSwitches(M); verifyModule(M); return true; + } static void registerSplitSwitchesTransPass(const PassManagerBuilder &, - legacy::PassManagerBase &PM) { + legacy::PassManagerBase &PM) { auto p = new SplitSwitchesTransform(); PM.add(p); @@ -324,3 +391,4 @@ static RegisterStandardPasses RegisterSplitSwitchesTransPass( static RegisterStandardPasses RegisterSplitSwitchesTransPass0( PassManagerBuilder::EP_EnabledOnOptLevel0, registerSplitSwitchesTransPass); + diff --git a/qemu_mode/libcompcov/compcovtest.cc b/qemu_mode/libcompcov/compcovtest.cc index fd1fda00..171e4526 100644 --- a/qemu_mode/libcompcov/compcovtest.cc +++ b/qemu_mode/libcompcov/compcovtest.cc @@ -3,13 +3,13 @@ // Author: Mateusz Jurczyk (mjurczyk@google.com) // // Copyright 2019 Google LLC -// +// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at -// +// // https://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -17,7 +17,8 @@ // limitations under the License. 
// -// solution: echo -ne 'The quick brown fox jumps over the lazy dog\xbe\xba\xfe\xca\xbe\xba\xfe\xca\xde\xc0\xad\xde\xef\xbe' | ./compcovtest +// solution: echo -ne 'The quick brown fox jumps over the lazy +// dog\xbe\xba\xfe\xca\xbe\xba\xfe\xca\xde\xc0\xad\xde\xef\xbe' | ./compcovtest #include <cstdint> #include <cstdio> @@ -25,39 +26,40 @@ #include <cstring> int main() { - char buffer[44] = { /* zero padding */ }; + + char buffer[44] = {/* zero padding */}; fread(buffer, 1, sizeof(buffer) - 1, stdin); if (memcmp(&buffer[0], "The quick brown fox ", 20) != 0 || strncmp(&buffer[20], "jumps over ", 11) != 0 || strcmp(&buffer[31], "the lazy dog") != 0) { + return 1; + } uint64_t x = 0; fread(&x, sizeof(x), 1, stdin); - if (x != 0xCAFEBABECAFEBABE) { - return 2; - } + if (x != 0xCAFEBABECAFEBABE) { return 2; } uint32_t y = 0; fread(&y, sizeof(y), 1, stdin); - if (y != 0xDEADC0DE) { - return 3; - } + if (y != 0xDEADC0DE) { return 3; } uint16_t z = 0; fread(&z, sizeof(z), 1, stdin); switch (z) { - case 0xBEEF: - break; + + case 0xBEEF: break; - default: - return 4; + default: return 4; + } printf("Puzzle solved, congrats!\n"); abort(); return 0; + } + diff --git a/qemu_mode/libcompcov/libcompcov.so.c b/qemu_mode/libcompcov/libcompcov.so.c index 9e44067e..e758c034 100644 --- a/qemu_mode/libcompcov/libcompcov.so.c +++ b/qemu_mode/libcompcov/libcompcov.so.c @@ -40,10 +40,9 @@ #define MAX_CMP_LENGTH 32 -static void *__compcov_code_start, - *__compcov_code_end; +static void *__compcov_code_start, *__compcov_code_end; -static u8 *__compcov_afl_map; +static u8* __compcov_afl_map; static u32 __compcov_level; @@ -55,15 +54,11 @@ static int (*__libc_memcmp)(const void*, const void*, size_t); static int debug_fd = -1; - #define MAX_MAPPINGS 1024 -static struct mapping { - void *st, *en; -} __compcov_ro[MAX_MAPPINGS]; - -static u32 __compcov_ro_cnt; +static struct mapping { void *st, *en; } __compcov_ro[MAX_MAPPINGS]; +static u32 __compcov_ro_cnt; /* Check an address against the list of read-only mappings. 
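One behavioral detail hiding in these declarations: at CompCov level 1 the wrappers only trace comparisons whose operands live in read-only mappings, the heuristic being that read-only data is likely a constant token worth matching against. The gate itself is a linear range scan; a minimal standalone version, with a stand-in range instead of real /proc/self/maps data:

/* Minimal version of the __compcov_is_ro() gate: scan recorded [st, en]
   ranges for the pointer. The single range registered here is a stand-in
   for what __compcov_load() harvests from /proc/self/maps. */
#include <stdio.h>

#define MAX_MAPPINGS 1024

static struct mapping { const void *st, *en; } ro[MAX_MAPPINGS];
static unsigned ro_cnt;

static int is_ro(const void* ptr) {

  for (unsigned i = 0; i < ro_cnt; i++)
    if (ptr >= ro[i].st && ptr <= ro[i].en) return 1;
  return 0;

}

int main(void) {

  static const char rodata[] = "constant";   /* stands in for .rodata */
  char stackbuf[] = "variable";

  ro[0].st = rodata;
  ro[0].en = rodata + sizeof(rodata);
  ro_cnt   = 1;

  printf("rodata: %d, stack: %d\n", is_ro(rodata), is_ro(stackbuf));
  return 0;

}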
*/ @@ -71,42 +66,42 @@ static u8 __compcov_is_ro(const void* ptr) { u32 i; - for (i = 0; i < __compcov_ro_cnt; i++) + for (i = 0; i < __compcov_ro_cnt; i++) if (ptr >= __compcov_ro[i].st && ptr <= __compcov_ro[i].en) return 1; return 0; + } +static size_t __strlen2(const char* s1, const char* s2, size_t max_length) { -static size_t __strlen2(const char *s1, const char *s2, size_t max_length) { // from https://github.com/googleprojectzero/CompareCoverage - + size_t len = 0; - for (; len < max_length && s1[len] != '\0' && s2[len] != '\0'; len++) { } + for (; len < max_length && s1[len] != '\0' && s2[len] != '\0'; len++) {} return len; + } /* Identify the binary boundaries in the memory mapping */ static void __compcov_load(void) { - + __libc_strcmp = dlsym(RTLD_NEXT, "strcmp"); __libc_strncmp = dlsym(RTLD_NEXT, "strncmp"); __libc_strcasecmp = dlsym(RTLD_NEXT, "strcasecmp"); __libc_strncasecmp = dlsym(RTLD_NEXT, "strncasecmp"); __libc_memcmp = dlsym(RTLD_NEXT, "memcmp"); - if (getenv("AFL_QEMU_COMPCOV")) { - - __compcov_level = 1; - } + if (getenv("AFL_QEMU_COMPCOV")) { __compcov_level = 1; } if (getenv("AFL_COMPCOV_LEVEL")) { __compcov_level = atoi(getenv("AFL_COMPCOV_LEVEL")); + } - - char *id_str = getenv(SHM_ENV_VAR); - int shm_id; + + char* id_str = getenv(SHM_ENV_VAR); + int shm_id; if (id_str) { @@ -114,61 +109,72 @@ static void __compcov_load(void) { __compcov_afl_map = shmat(shm_id, NULL, 0); if (__compcov_afl_map == (void*)-1) exit(1); + } else { - + __compcov_afl_map = calloc(1, MAP_SIZE); + } if (getenv("AFL_INST_LIBS")) { - + __compcov_code_start = (void*)0; __compcov_code_end = (void*)-1; return; + } char* bin_name = getenv("AFL_COMPCOV_BINNAME"); procmaps_iterator* maps = pmparser_parse(-1); - procmaps_struct* maps_tmp = NULL; + procmaps_struct* maps_tmp = NULL; while ((maps_tmp = pmparser_next(maps)) != NULL) { - + /* If AFL_COMPCOV_BINNAME is not set pick the first executable segment */ if (!bin_name || strstr(maps_tmp->pathname, bin_name) != NULL) { - + if (maps_tmp->is_x) { - if (!__compcov_code_start) - __compcov_code_start = maps_tmp->addr_start; - if (!__compcov_code_end) - __compcov_code_end = maps_tmp->addr_end; + + if (!__compcov_code_start) __compcov_code_start = maps_tmp->addr_start; + if (!__compcov_code_end) __compcov_code_end = maps_tmp->addr_end; + } + } - + if ((maps_tmp->is_w && !maps_tmp->is_r) || __compcov_ro_cnt == MAX_MAPPINGS) continue; - + __compcov_ro[__compcov_ro_cnt].st = maps_tmp->addr_start; __compcov_ro[__compcov_ro_cnt].en = maps_tmp->addr_end; + } pmparser_free(maps); -} +} static void __compcov_trace(u64 cur_loc, const u8* v0, const u8* v1, size_t n) { size_t i; - + if (debug_fd != 1) { + char debugbuf[4096]; - snprintf(debugbuf, sizeof(debugbuf), "0x%llx %s %s %lu\n", cur_loc, v0 == NULL ? "(null)" : (char*)v0, v1 == NULL ? "(null)" : (char*)v1, n); + snprintf(debugbuf, sizeof(debugbuf), "0x%llx %s %s %lu\n", cur_loc, + v0 == NULL ? "(null)" : (char*)v0, + v1 == NULL ? "(null)" : (char*)v1, n); write(debug_fd, debugbuf, strlen(debugbuf)); + } - + for (i = 0; i < n && v0[i] == v1[i]; ++i) { - - __compcov_afl_map[cur_loc +i]++; + + __compcov_afl_map[cur_loc + i]++; + } + } /* Check an address against the list of read-only mappings. */ @@ -176,8 +182,8 @@ static void __compcov_trace(u64 cur_loc, const u8* v0, const u8* v1, size_t n) { static u8 __compcov_is_in_bound(const void* ptr) { return ptr >= __compcov_code_start && ptr < __compcov_code_end; -} +} /* Replacements for strcmp(), memcmp(), and so on. 
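Before the wrappers themselves, it is worth seeing their shared core in isolation: hash the caller's return address into a map slot, then credit one counter per matching leading byte, so afl-fuzz can observe partial progress through a comparison. A self-contained sketch (the shift/xor scramble is the one used above; MAP_SIZE is assumed to be AFL's usual 1 << 16):

/* Shared core of the libcompcov wrappers: scramble a code address into a
   map index, then bump one counter per matching prefix byte, as
   __compcov_trace() does. Values in main() are made up for the demo. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAP_SIZE (1 << 16)   /* assumed; AFL's default bitmap size */
static uint8_t afl_map[MAP_SIZE];

static uint32_t scramble(uint64_t retaddr) {

  uint64_t cur_loc = (retaddr >> 4) ^ (retaddr << 8);
  return (uint32_t)(cur_loc & (MAP_SIZE - 1));

}

static void compcov_trace(uint64_t retaddr, const uint8_t* v0,
                          const uint8_t* v1, size_t n) {

  uint32_t cur_loc = scramble(retaddr);
  for (size_t i = 0; i < n && v0[i] == v1[i]; i++)
    afl_map[cur_loc + i]++;

}

int main(void) {

  const char* a = "The quick brown fox";
  const char* b = "The quick brown cat";
  compcov_trace(0x400123, (const uint8_t*)a, (const uint8_t*)b, strlen(a));

  /* 16 leading bytes agree, so 16 consecutive map slots got credit */
  printf("slots hit starting at index %u\n", scramble(0x400123));
  return 0;

}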
Note that these will be used only if the target is compiled with -fno-builtins and linked dynamically. */ @@ -187,127 +193,145 @@ static u8 __compcov_is_in_bound(const void* ptr) { int strcmp(const char* str1, const char* str2) { void* retaddr = __builtin_return_address(0); - - if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 && - !__compcov_is_ro(str1) && !__compcov_is_ro(str2))) { - size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1); - + if (__compcov_is_in_bound(retaddr) && + !(__compcov_level < 2 && !__compcov_is_ro(str1) && + !__compcov_is_ro(str2))) { + + size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH + 1); + if (n <= MAX_CMP_LENGTH) { - + u64 cur_loc = (u64)retaddr; - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 1; - + __compcov_trace(cur_loc, str1, str2, n); + } + } return __libc_strcmp(str1, str2); -} +} #undef strncmp int strncmp(const char* str1, const char* str2, size_t len) { void* retaddr = __builtin_return_address(0); - - if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 && - !__compcov_is_ro(str1) && !__compcov_is_ro(str2))) { - size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1); + if (__compcov_is_in_bound(retaddr) && + !(__compcov_level < 2 && !__compcov_is_ro(str1) && + !__compcov_is_ro(str2))) { + + size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH + 1); n = MIN(n, len); - + if (n <= MAX_CMP_LENGTH) { - + u64 cur_loc = (u64)retaddr; - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 1; - + __compcov_trace(cur_loc, str1, str2, n); + } + } - + return __libc_strncmp(str1, str2, len); -} +} #undef strcasecmp int strcasecmp(const char* str1, const char* str2) { void* retaddr = __builtin_return_address(0); - - if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 && - !__compcov_is_ro(str1) && !__compcov_is_ro(str2))) { + + if (__compcov_is_in_bound(retaddr) && + !(__compcov_level < 2 && !__compcov_is_ro(str1) && + !__compcov_is_ro(str2))) { + /* Fallback to strcmp, maybe improve in future */ - size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1); - + size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH + 1); + if (n <= MAX_CMP_LENGTH) { - + u64 cur_loc = (u64)retaddr; - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 1; - + __compcov_trace(cur_loc, str1, str2, n); + } + } return __libc_strcasecmp(str1, str2); -} +} #undef strncasecmp int strncasecmp(const char* str1, const char* str2, size_t len) { void* retaddr = __builtin_return_address(0); - - if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 && - !__compcov_is_ro(str1) && !__compcov_is_ro(str2))) { + + if (__compcov_is_in_bound(retaddr) && + !(__compcov_level < 2 && !__compcov_is_ro(str1) && + !__compcov_is_ro(str2))) { + /* Fallback to strncmp, maybe improve in future */ - size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1); + size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH + 1); n = MIN(n, len); - + if (n <= MAX_CMP_LENGTH) { - + u64 cur_loc = (u64)retaddr; - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 1; - + __compcov_trace(cur_loc, str1, str2, n); + } + } return __libc_strncasecmp(str1, str2, len); -} +} #undef memcmp int memcmp(const void* mem1, const void* mem2, size_t len) { void* retaddr = __builtin_return_address(0); - - if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 && - !__compcov_is_ro(mem1) && 
!__compcov_is_ro(mem2))) { + + if (__compcov_is_in_bound(retaddr) && + !(__compcov_level < 2 && !__compcov_is_ro(mem1) && + !__compcov_is_ro(mem2))) { size_t n = len; - + if (n <= MAX_CMP_LENGTH) { - + u64 cur_loc = (u64)retaddr; - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 1; - + __compcov_trace(cur_loc, mem1, mem2, n); + } + } return __libc_memcmp(mem1, mem2, len); + } /* Init code to open init the library. */ @@ -315,9 +339,10 @@ int memcmp(const void* mem1, const void* mem2, size_t len) { __attribute__((constructor)) void __compcov_init(void) { if (getenv("AFL_QEMU_COMPCOV_DEBUG") != NULL) - debug_fd = open("compcov.debug", O_WRONLY | O_CREAT | O_TRUNC | O_SYNC, 0644); + debug_fd = + open("compcov.debug", O_WRONLY | O_CREAT | O_TRUNC | O_SYNC, 0644); __compcov_load(); -} +} diff --git a/qemu_mode/libcompcov/pmparser.h b/qemu_mode/libcompcov/pmparser.h index 34d0cd50..91dfd032 100644 --- a/qemu_mode/libcompcov/pmparser.h +++ b/qemu_mode/libcompcov/pmparser.h @@ -13,54 +13,60 @@ implied warranty. */ #ifndef H_PMPARSER -#define H_PMPARSER -#include <stdio.h> -#include <stdlib.h> -#include <unistd.h> -#include <string.h> -#include <sys/types.h> -#include <sys/stat.h> -#include <fcntl.h> -#include <errno.h> -#include <linux/limits.h> - -//maximum line length in a procmaps file -#define PROCMAPS_LINE_MAX_LENGTH (PATH_MAX + 100) +# define H_PMPARSER +# include <stdio.h> +# include <stdlib.h> +# include <unistd.h> +# include <string.h> +# include <sys/types.h> +# include <sys/stat.h> +# include <fcntl.h> +# include <errno.h> +# include <linux/limits.h> + +// maximum line length in a procmaps file +# define PROCMAPS_LINE_MAX_LENGTH (PATH_MAX + 100) /** * procmaps_struct * @desc hold all the information about an area in the process's VM */ -typedef struct procmaps_struct{ - void* addr_start; //< start address of the area - void* addr_end; //< end address - unsigned long length; //< size of the range - - char perm[5]; //< permissions rwxp - short is_r; //< rewrote of perm with short flags - short is_w; - short is_x; - short is_p; - - long offset; //< offset - char dev[12]; //< dev major:minor - int inode; //< inode of the file that backs the area - - char pathname[600]; //< the path of the file that backs the area - //chained list - struct procmaps_struct* next; //<handler of the chinaed list +typedef struct procmaps_struct { + + void* addr_start; //< start address of the area + void* addr_end; //< end address + unsigned long length; //< size of the range + + char perm[5]; //< permissions rwxp + short is_r; //< rewrote of perm with short flags + short is_w; + short is_x; + short is_p; + + long offset; //< offset + char dev[12]; //< dev major:minor + int inode; //< inode of the file that backs the area + + char pathname[600]; //< the path of the file that backs the area + // chained list + struct procmaps_struct* next; //<handler of the chinaed list + } procmaps_struct; /** * procmaps_iterator * @desc holds iterating information */ -typedef struct procmaps_iterator{ - procmaps_struct* head; - procmaps_struct* current; +typedef struct procmaps_iterator { + + procmaps_struct* head; + procmaps_struct* current; + } procmaps_iterator; + /** * pmparser_parse - * @param pid the process id whose memory map to be parser. the current process if pid<0 + * @param pid the process id whose memory map to be parser. 
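pmparser_parse() below walks each maps line by hand, one character at a time. For readers who only need the shape of the data, the same fields can be lifted with sscanf; this is a rough equivalent of one iteration, not the pmparser API (Linux-only, pathnames containing spaces not handled):

/* Rough sscanf equivalent of one pmparser_parse() iteration over
   /proc/self/maps: address range, permissions, offset, device, inode,
   optional pathname. Minimal error handling; a sketch, not the real API. */
#include <stdio.h>

int main(void) {

  FILE* f = fopen("/proc/self/maps", "r");
  if (!f) { perror("fopen"); return 1; }

  char line[600];
  while (fgets(line, sizeof(line), f)) {

    unsigned long start, end, offset;
    char          perm[5], dev[12], path[512];
    int           inode;

    path[0] = '\0';
    if (sscanf(line, "%lx-%lx %4s %lx %11s %d %511s", &start, &end, perm,
               &offset, dev, &inode, path) < 6)
      continue;

    /* same executable-segment flag pmparser derives from perm[2] */
    printf("%lx-%lx %s x=%d %s\n", start, end, perm, perm[2] == 'x', path);

  }

  fclose(f);
  return 0;

}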
the current process + * if pid<0 * @return an iterator over all the nodes */ procmaps_iterator* pmparser_parse(int pid); @@ -83,198 +89,238 @@ void pmparser_free(procmaps_iterator* p_procmaps_it); * _pmparser_split_line * @description internal usage */ -void _pmparser_split_line(char*buf,char*addr1,char*addr2,char*perm, char* offset, char* device,char*inode,char* pathname); +void _pmparser_split_line(char* buf, char* addr1, char* addr2, char* perm, + char* offset, char* device, char* inode, + char* pathname); /** * pmparser_print * @param map the head of the list * @order the order of the area to print, -1 to print everything */ -void pmparser_print(procmaps_struct* map,int order); - +void pmparser_print(procmaps_struct* map, int order); /** * gobal variables */ -//procmaps_struct* g_last_head=NULL; -//procmaps_struct* g_current=NULL; - - -procmaps_iterator* pmparser_parse(int pid){ - procmaps_iterator* maps_it = malloc(sizeof(procmaps_iterator)); - char maps_path[500]; - if(pid>=0 ){ - sprintf(maps_path,"/proc/%d/maps",pid); - }else{ - sprintf(maps_path,"/proc/self/maps"); - } - FILE* file=fopen(maps_path,"r"); - if(!file){ - fprintf(stderr,"pmparser : cannot open the memory maps, %s\n",strerror(errno)); - return NULL; - } - int ind=0;char buf[PROCMAPS_LINE_MAX_LENGTH]; - //int c; - procmaps_struct* list_maps=NULL; - procmaps_struct* tmp; - procmaps_struct* current_node=list_maps; - char addr1[20],addr2[20], perm[8], offset[20], dev[10],inode[30],pathname[PATH_MAX]; - while( !feof(file) ){ - fgets(buf,PROCMAPS_LINE_MAX_LENGTH,file); - //allocate a node - tmp=(procmaps_struct*)malloc(sizeof(procmaps_struct)); - //fill the node - _pmparser_split_line(buf,addr1,addr2,perm,offset, dev,inode,pathname); - //printf("#%s",buf); - //printf("%s-%s %s %s %s %s\t%s\n",addr1,addr2,perm,offset,dev,inode,pathname); - //addr_start & addr_end - //unsigned long l_addr_start; - sscanf(addr1,"%lx",(long unsigned *)&tmp->addr_start ); - sscanf(addr2,"%lx",(long unsigned *)&tmp->addr_end ); - //size - tmp->length=(unsigned long)(tmp->addr_end-tmp->addr_start); - //perm - strcpy(tmp->perm,perm); - tmp->is_r=(perm[0]=='r'); - tmp->is_w=(perm[1]=='w'); - tmp->is_x=(perm[2]=='x'); - tmp->is_p=(perm[3]=='p'); - - //offset - sscanf(offset,"%lx",&tmp->offset ); - //device - strcpy(tmp->dev,dev); - //inode - tmp->inode=atoi(inode); - //pathname - strcpy(tmp->pathname,pathname); - tmp->next=NULL; - //attach the node - if(ind==0){ - list_maps=tmp; - list_maps->next=NULL; - current_node=list_maps; - } - current_node->next=tmp; - current_node=tmp; - ind++; - //printf("%s",buf); - } - - //close file - fclose(file); - - - //g_last_head=list_maps; - maps_it->head = list_maps; - maps_it->current = list_maps; - return maps_it; +// procmaps_struct* g_last_head=NULL; +// procmaps_struct* g_current=NULL; + +procmaps_iterator* pmparser_parse(int pid) { + + procmaps_iterator* maps_it = malloc(sizeof(procmaps_iterator)); + char maps_path[500]; + if (pid >= 0) { + + sprintf(maps_path, "/proc/%d/maps", pid); + + } else { + + sprintf(maps_path, "/proc/self/maps"); + + } + + FILE* file = fopen(maps_path, "r"); + if (!file) { + + fprintf(stderr, "pmparser : cannot open the memory maps, %s\n", + strerror(errno)); + return NULL; + + } + + int ind = 0; + char buf[PROCMAPS_LINE_MAX_LENGTH]; + // int c; + procmaps_struct* list_maps = NULL; + procmaps_struct* tmp; + procmaps_struct* current_node = list_maps; + char addr1[20], addr2[20], perm[8], offset[20], dev[10], inode[30], + pathname[PATH_MAX]; + while (!feof(file)) { + + fgets(buf, 
PROCMAPS_LINE_MAX_LENGTH, file); + // allocate a node + tmp = (procmaps_struct*)malloc(sizeof(procmaps_struct)); + // fill the node + _pmparser_split_line(buf, addr1, addr2, perm, offset, dev, inode, pathname); + // printf("#%s",buf); + // printf("%s-%s %s %s %s + // %s\t%s\n",addr1,addr2,perm,offset,dev,inode,pathname); addr_start & + // addr_end unsigned long l_addr_start; + sscanf(addr1, "%lx", (long unsigned*)&tmp->addr_start); + sscanf(addr2, "%lx", (long unsigned*)&tmp->addr_end); + // size + tmp->length = (unsigned long)(tmp->addr_end - tmp->addr_start); + // perm + strcpy(tmp->perm, perm); + tmp->is_r = (perm[0] == 'r'); + tmp->is_w = (perm[1] == 'w'); + tmp->is_x = (perm[2] == 'x'); + tmp->is_p = (perm[3] == 'p'); + + // offset + sscanf(offset, "%lx", &tmp->offset); + // device + strcpy(tmp->dev, dev); + // inode + tmp->inode = atoi(inode); + // pathname + strcpy(tmp->pathname, pathname); + tmp->next = NULL; + // attach the node + if (ind == 0) { + + list_maps = tmp; + list_maps->next = NULL; + current_node = list_maps; + + } + + current_node->next = tmp; + current_node = tmp; + ind++; + // printf("%s",buf); + + } + + // close file + fclose(file); + + // g_last_head=list_maps; + maps_it->head = list_maps; + maps_it->current = list_maps; + return maps_it; + } +procmaps_struct* pmparser_next(procmaps_iterator* p_procmaps_it) { -procmaps_struct* pmparser_next(procmaps_iterator* p_procmaps_it){ - if(p_procmaps_it->current == NULL) - return NULL; - procmaps_struct* p_current = p_procmaps_it->current; - p_procmaps_it->current = p_procmaps_it->current->next; - return p_current; - /* - if(g_current==NULL){ - g_current=g_last_head; - }else - g_current=g_current->next; - - return g_current; - */ -} + if (p_procmaps_it->current == NULL) return NULL; + procmaps_struct* p_current = p_procmaps_it->current; + p_procmaps_it->current = p_procmaps_it->current->next; + return p_current; + /* + if(g_current==NULL){ + + g_current=g_last_head; + }else + g_current=g_current->next; -void pmparser_free(procmaps_iterator* p_procmaps_it){ - procmaps_struct* maps_list = p_procmaps_it->head; - if(maps_list==NULL) return ; - procmaps_struct* act=maps_list; - procmaps_struct* nxt=act->next; - while(act!=NULL){ - free(act); - act=nxt; - if(nxt!=NULL) - nxt=nxt->next; - } + return g_current; + */ } +void pmparser_free(procmaps_iterator* p_procmaps_it) { + + procmaps_struct* maps_list = p_procmaps_it->head; + if (maps_list == NULL) return; + procmaps_struct* act = maps_list; + procmaps_struct* nxt = act->next; + while (act != NULL) { -void _pmparser_split_line( - char*buf,char*addr1,char*addr2, - char*perm,char* offset,char* device,char*inode, - char* pathname){ - // - int orig=0; - int i=0; - //addr1 - while(buf[i]!='-'){ - addr1[i-orig]=buf[i]; - i++; - } - addr1[i]='\0'; - i++; - //addr2 - orig=i; - while(buf[i]!='\t' && buf[i]!=' '){ - addr2[i-orig]=buf[i]; - i++; - } - addr2[i-orig]='\0'; - - //perm - while(buf[i]=='\t' || buf[i]==' ') - i++; - orig=i; - while(buf[i]!='\t' && buf[i]!=' '){ - perm[i-orig]=buf[i]; - i++; - } - perm[i-orig]='\0'; - //offset - while(buf[i]=='\t' || buf[i]==' ') - i++; - orig=i; - while(buf[i]!='\t' && buf[i]!=' '){ - offset[i-orig]=buf[i]; - i++; - } - offset[i-orig]='\0'; - //dev - while(buf[i]=='\t' || buf[i]==' ') - i++; - orig=i; - while(buf[i]!='\t' && buf[i]!=' '){ - device[i-orig]=buf[i]; - i++; - } - device[i-orig]='\0'; - //inode - while(buf[i]=='\t' || buf[i]==' ') - i++; - orig=i; - while(buf[i]!='\t' && buf[i]!=' '){ - inode[i-orig]=buf[i]; - i++; - } - 
inode[i-orig]='\0'; - //pathname - pathname[0]='\0'; - while(buf[i]=='\t' || buf[i]==' ') - i++; - orig=i; - while(buf[i]!='\t' && buf[i]!=' ' && buf[i]!='\n'){ - pathname[i-orig]=buf[i]; - i++; - } - pathname[i-orig]='\0'; + free(act); + act = nxt; + if (nxt != NULL) nxt = nxt->next; + + } } +void _pmparser_split_line(char* buf, char* addr1, char* addr2, char* perm, + char* offset, char* device, char* inode, + char* pathname) { + + // + int orig = 0; + int i = 0; + // addr1 + while (buf[i] != '-') { + + addr1[i - orig] = buf[i]; + i++; + + } + + addr1[i] = '\0'; + i++; + // addr2 + orig = i; + while (buf[i] != '\t' && buf[i] != ' ') { + + addr2[i - orig] = buf[i]; + i++; + + } + + addr2[i - orig] = '\0'; + + // perm + while (buf[i] == '\t' || buf[i] == ' ') + i++; + orig = i; + while (buf[i] != '\t' && buf[i] != ' ') { + + perm[i - orig] = buf[i]; + i++; + + } + + perm[i - orig] = '\0'; + // offset + while (buf[i] == '\t' || buf[i] == ' ') + i++; + orig = i; + while (buf[i] != '\t' && buf[i] != ' ') { + + offset[i - orig] = buf[i]; + i++; + + } + + offset[i - orig] = '\0'; + // dev + while (buf[i] == '\t' || buf[i] == ' ') + i++; + orig = i; + while (buf[i] != '\t' && buf[i] != ' ') { + + device[i - orig] = buf[i]; + i++; + + } + + device[i - orig] = '\0'; + // inode + while (buf[i] == '\t' || buf[i] == ' ') + i++; + orig = i; + while (buf[i] != '\t' && buf[i] != ' ') { + + inode[i - orig] = buf[i]; + i++; + + } + + inode[i - orig] = '\0'; + // pathname + pathname[0] = '\0'; + while (buf[i] == '\t' || buf[i] == ' ') + i++; + orig = i; + while (buf[i] != '\t' && buf[i] != ' ' && buf[i] != '\n') { + + pathname[i - orig] = buf[i]; + i++; + + } + + pathname[i - orig] = '\0'; + +} #endif + diff --git a/qemu_mode/patches/afl-qemu-common.h b/qemu_mode/patches/afl-qemu-common.h index c475cb58..c87bacb6 100644 --- a/qemu_mode/patches/afl-qemu-common.h +++ b/qemu_mode/patches/afl-qemu-common.h @@ -33,19 +33,17 @@ #include "../../config.h" -/* NeverZero */ +/* NeverZero */ #if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) -# define INC_AFL_AREA(loc) \ - asm volatile ( \ - "incb (%0, %1, 1)\n" \ - "adcb $0, (%0, %1, 1)\n" \ - : /* no out */ \ - : "r" (afl_area_ptr), "r" (loc) \ - : "memory", "eax" \ - ) +# define INC_AFL_AREA(loc) \ + asm volatile( \ + "incb (%0, %1, 1)\n" \ + "adcb $0, (%0, %1, 1)\n" \ + : /* no out */ \ + : "r"(afl_area_ptr), "r"(loc) \ + : "memory", "eax") #else -# define INC_AFL_AREA(loc) \ - afl_area_ptr[loc]++ +# define INC_AFL_AREA(loc) afl_area_ptr[loc]++ #endif diff --git a/qemu_mode/patches/afl-qemu-cpu-inl.h b/qemu_mode/patches/afl-qemu-cpu-inl.h index 4ad31b60..2a1331cb 100644 --- a/qemu_mode/patches/afl-qemu-cpu-inl.h +++ b/qemu_mode/patches/afl-qemu-cpu-inl.h @@ -42,11 +42,16 @@ _start and does the usual forkserver stuff, not very different from regular instrumentation injected via afl-as.h. 
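The INC_AFL_AREA pair above (incb, then adcb $0) is the NeverZero counter trick: incrementing 255 wraps the byte to 0 and sets the carry flag, and adding the carry back in leaves the counter at 1, so a frequently hit edge can never fall out of the bitmap entirely. The portable restatement:

/* Portable restatement of the incb/adcb NeverZero increment: a counter
   that wraps 255 -> 0 is nudged back to 1. */
#include <stdint.h>
#include <stdio.h>

static void inc_never_zero(uint8_t* p) {

  uint8_t v = (uint8_t)(*p + 1);
  *p = v + (v == 0);   /* the (v == 0) term is the folded-back carry */

}

int main(void) {

  uint8_t counter = 255;
  inc_never_zero(&counter);
  printf("255 + 1 -> %u (never 0)\n", counter);   /* prints 1 */
  return 0;

}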
*/ -#define AFL_QEMU_CPU_SNIPPET2 do { \ - if(itb->pc == afl_entry_point) { \ - afl_setup(); \ - afl_forkserver(cpu); \ - } \ +#define AFL_QEMU_CPU_SNIPPET2 \ + do { \ + \ + if (itb->pc == afl_entry_point) { \ + \ + afl_setup(); \ + afl_forkserver(cpu); \ + \ + } \ + \ } while (0) /* We use one additional file descriptor to relay "needs translation" @@ -56,60 +61,71 @@ /* This is equivalent to afl-as.h: */ -static unsigned char dummy[MAP_SIZE]; /* costs MAP_SIZE but saves a few instructions */ -unsigned char *afl_area_ptr = dummy; /* Exported for afl_gen_trace */ +static unsigned char + dummy[MAP_SIZE]; /* costs MAP_SIZE but saves a few instructions */ +unsigned char *afl_area_ptr = dummy; /* Exported for afl_gen_trace */ /* Exported variables populated by the code patched into elfload.c: */ -abi_ulong afl_entry_point, /* ELF entry point (_start) */ - afl_start_code, /* .text start pointer */ - afl_end_code; /* .text end pointer */ +abi_ulong afl_entry_point, /* ELF entry point (_start) */ + afl_start_code, /* .text start pointer */ + afl_end_code; /* .text end pointer */ u8 afl_compcov_level; /* Set in the child process in forkserver mode: */ -static int forkserver_installed = 0; +static int forkserver_installed = 0; static unsigned char afl_fork_child; -unsigned int afl_forksrv_pid; +unsigned int afl_forksrv_pid; /* Instrumentation ratio: */ -unsigned int afl_inst_rms = MAP_SIZE; /* Exported for afl_gen_trace */ +unsigned int afl_inst_rms = MAP_SIZE; /* Exported for afl_gen_trace */ /* Function declarations. */ static void afl_setup(void); -static void afl_forkserver(CPUState*); +static void afl_forkserver(CPUState *); -static void afl_wait_tsl(CPUState*, int); -static void afl_request_tsl(target_ulong, target_ulong, uint32_t, uint32_t, TranslationBlock*, int); +static void afl_wait_tsl(CPUState *, int); +static void afl_request_tsl(target_ulong, target_ulong, uint32_t, uint32_t, + TranslationBlock *, int); /* Data structures passed around by the translate handlers: */ struct afl_tb { + target_ulong pc; target_ulong cs_base; - uint32_t flags; - uint32_t cf_mask; + uint32_t flags; + uint32_t cf_mask; + }; struct afl_tsl { + struct afl_tb tb; - char is_chain; + char is_chain; + }; struct afl_chain { + struct afl_tb last_tb; - uint32_t cf_mask; - int tb_exit; + uint32_t cf_mask; + int tb_exit; + }; /* Some forward decls: */ -TranslationBlock *tb_htable_lookup(CPUState*, target_ulong, target_ulong, uint32_t, uint32_t); -static inline TranslationBlock *tb_find(CPUState*, TranslationBlock*, int, uint32_t); -static inline void tb_add_jump(TranslationBlock *tb, int n, TranslationBlock *tb_next); +TranslationBlock *tb_htable_lookup(CPUState *, target_ulong, target_ulong, + uint32_t, uint32_t); +static inline TranslationBlock *tb_find(CPUState *, TranslationBlock *, int, + uint32_t); +static inline void tb_add_jump(TranslationBlock *tb, int n, + TranslationBlock *tb_next); /************************* * ACTUAL IMPLEMENTATION * @@ -119,8 +135,7 @@ static inline void tb_add_jump(TranslationBlock *tb, int n, TranslationBlock *tb static void afl_setup(void) { - char *id_str = getenv(SHM_ENV_VAR), - *inst_r = getenv("AFL_INST_RATIO"); + char *id_str = getenv(SHM_ENV_VAR), *inst_r = getenv("AFL_INST_RATIO"); int shm_id; @@ -142,7 +157,7 @@ static void afl_setup(void) { shm_id = atoi(id_str); afl_area_ptr = shmat(shm_id, NULL, 0); - if (afl_area_ptr == (void*)-1) exit(1); + if (afl_area_ptr == (void *)-1) exit(1); /* With AFL_INST_RATIO set to a low value, we want to touch the bitmap so that the parent 
doesn't give up on us. */ @@ -154,18 +169,16 @@ static void afl_setup(void) { if (getenv("AFL_INST_LIBS")) { afl_start_code = 0; - afl_end_code = (abi_ulong)-1; + afl_end_code = (abi_ulong)-1; } - - /* Maintain for compatibility */ - if (getenv("AFL_QEMU_COMPCOV")) { - afl_compcov_level = 1; - } + /* Maintain for compatibility */ + if (getenv("AFL_QEMU_COMPCOV")) { afl_compcov_level = 1; } if (getenv("AFL_COMPCOV_LEVEL")) { afl_compcov_level = atoi(getenv("AFL_COMPCOV_LEVEL")); + } /* pthread_atfork() seems somewhat broken in util/rcu.c, and I'm @@ -176,17 +189,15 @@ static void afl_setup(void) { } - /* Fork server logic, invoked once we hit _start. */ static void afl_forkserver(CPUState *cpu) { static unsigned char tmp[4]; - if (forkserver_installed == 1) - return; + if (forkserver_installed == 1) return; forkserver_installed = 1; - //if (!afl_area_ptr) return; // not necessary because of fixed dummy buffer + // if (!afl_area_ptr) return; // not necessary because of fixed dummy buffer /* Tell the parent that we're alive. If the parent doesn't want to talk, assume that we're not running in forkserver mode. */ @@ -200,7 +211,7 @@ static void afl_forkserver(CPUState *cpu) { while (1) { pid_t child_pid; - int status, t_fd[2]; + int status, t_fd[2]; /* Whoops, parent dead? */ @@ -246,59 +257,60 @@ static void afl_forkserver(CPUState *cpu) { } - /* This code is invoked whenever QEMU decides that it doesn't have a translation of a particular block and needs to compute it, or when it decides to chain two TBs together. When this happens, we tell the parent to mirror the operation, so that the next fork() has a cached copy. */ -static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags, uint32_t cf_mask, - TranslationBlock *last_tb, int tb_exit) { +static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags, + uint32_t cf_mask, TranslationBlock *last_tb, + int tb_exit) { - struct afl_tsl t; + struct afl_tsl t; struct afl_chain c; if (!afl_fork_child) return; - t.tb.pc = pc; + t.tb.pc = pc; t.tb.cs_base = cb; - t.tb.flags = flags; + t.tb.flags = flags; t.tb.cf_mask = cf_mask; - t.is_chain = (last_tb != NULL); + t.is_chain = (last_tb != NULL); if (write(TSL_FD, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) return; if (t.is_chain) { - c.last_tb.pc = last_tb->pc; + + c.last_tb.pc = last_tb->pc; c.last_tb.cs_base = last_tb->cs_base; - c.last_tb.flags = last_tb->flags; - c.cf_mask = cf_mask; - c.tb_exit = tb_exit; + c.last_tb.flags = last_tb->flags; + c.cf_mask = cf_mask; + c.tb_exit = tb_exit; if (write(TSL_FD, &c, sizeof(struct afl_chain)) != sizeof(struct afl_chain)) return; + } } - /* Check if an address is valid in the current mapping */ static inline int is_valid_addr(target_ulong addr) { - int l, flags; - target_ulong page; - void * p; - - page = addr & TARGET_PAGE_MASK; - l = (page + TARGET_PAGE_SIZE) - addr; - - flags = page_get_flags(page); - if (!(flags & PAGE_VALID) || !(flags & PAGE_READ)) - return 0; - - return 1; + int l, flags; + target_ulong page; + void * p; + + page = addr & TARGET_PAGE_MASK; + l = (page + TARGET_PAGE_SIZE) - addr; + + flags = page_get_flags(page); + if (!(flags & PAGE_VALID) || !(flags & PAGE_READ)) return 0; + + return 1; + } /* This is the other side of the same channel. 
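afl_forkserver() above is the standard AFL handshake dressed in QEMU clothes: announce yourself on the status pipe, then loop reading a go signal, forking, reporting the child pid, and relaying the wait status; the extra t_fd pipe carries the translation requests serviced by afl_wait_tsl(). A bare skeleton of that control flow, with the TSL plumbing omitted and the descriptor numbers assumed to follow AFL's conventional FORKSRV_FD = 198 (they are not taken from the code above):

/* Bare fork-server skeleton: FD 198 carries go signals from afl-fuzz,
   FD 199 carries status back. */
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

#define FORKSRV_FD 198   /* assumed AFL convention */

int main(void) {

  static unsigned char tmp[4];

  /* Phone home; if nobody listens, we are not running under afl-fuzz. */
  if (write(FORKSRV_FD + 1, tmp, 4) != 4) return 0;

  while (1) {

    int status;
    if (read(FORKSRV_FD, tmp, 4) != 4) exit(2);   /* parent gone */

    pid_t child_pid = fork();
    if (child_pid < 0) exit(4);

    if (!child_pid) {

      /* child: close server descriptors, fall through to the target */
      close(FORKSRV_FD);
      close(FORKSRV_FD + 1);
      return 0;

    }

    if (write(FORKSRV_FD + 1, &child_pid, 4) != 4) exit(5);
    if (waitpid(child_pid, &status, 0) < 0) exit(6);
    if (write(FORKSRV_FD + 1, &status, 4) != 4) exit(7);

  }

}

In the real forkserver the child additionally opens the TSL pipe, and the parent calls afl_wait_tsl() between fork and waitpid so translated blocks survive into later forks.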
Since timeouts are handled by @@ -306,8 +318,8 @@ static inline int is_valid_addr(target_ulong addr) { static void afl_wait_tsl(CPUState *cpu, int fd) { - struct afl_tsl t; - struct afl_chain c; + struct afl_tsl t; + struct afl_chain c; TranslationBlock *tb, *last_tb; while (1) { @@ -316,30 +328,33 @@ static void afl_wait_tsl(CPUState *cpu, int fd) { /* Broken pipe means it's time to return to the fork server routine. */ - if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) - break; + if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) break; tb = tb_htable_lookup(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask); - if(!tb) { - + if (!tb) { + /* The child may request to transate a block of memory that is not mapped in the parent (e.g. jitted code or dlopened code). This causes a SIGSEV in gen_intermediate_code() and associated subroutines. We simply avoid caching of such blocks. */ if (is_valid_addr(t.tb.pc)) { - + mmap_lock(); tb = tb_gen_code(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask); mmap_unlock(); + } else { - - invalid_pc = 1; + + invalid_pc = 1; + } + } if (t.is_chain) { + if (read(fd, &c, sizeof(struct afl_chain)) != sizeof(struct afl_chain)) break; @@ -347,10 +362,10 @@ static void afl_wait_tsl(CPUState *cpu, int fd) { last_tb = tb_htable_lookup(cpu, c.last_tb.pc, c.last_tb.cs_base, c.last_tb.flags, c.cf_mask); - if (last_tb) { - tb_add_jump(last_tb, c.tb_exit, tb); - } + if (last_tb) { tb_add_jump(last_tb, c.tb_exit, tb); } + } + } } @@ -358,3 +373,4 @@ static void afl_wait_tsl(CPUState *cpu, int fd) { close(fd); } + diff --git a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h index 09ecb9d2..3d3c1b6b 100644 --- a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h +++ b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h @@ -37,9 +37,9 @@ /* Declared in afl-qemu-cpu-inl.h */ extern unsigned char *afl_area_ptr; -extern unsigned int afl_inst_rms; -extern abi_ulong afl_start_code, afl_end_code; -extern u8 afl_compcov_level; +extern unsigned int afl_inst_rms; +extern abi_ulong afl_start_code, afl_end_code; +extern u8 afl_compcov_level; void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2); @@ -47,81 +47,93 @@ void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, static void afl_compcov_log_16(target_ulong cur_loc, target_ulong arg1, target_ulong arg2) { - if ((arg1 & 0xff) == (arg2 & 0xff)) { - INC_AFL_AREA(cur_loc); - } + if ((arg1 & 0xff) == (arg2 & 0xff)) { INC_AFL_AREA(cur_loc); } + } static void afl_compcov_log_32(target_ulong cur_loc, target_ulong arg1, target_ulong arg2) { if ((arg1 & 0xff) == (arg2 & 0xff)) { + INC_AFL_AREA(cur_loc); if ((arg1 & 0xffff) == (arg2 & 0xffff)) { - INC_AFL_AREA(cur_loc +1); - if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { - INC_AFL_AREA(cur_loc +2); - } + + INC_AFL_AREA(cur_loc + 1); + if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { INC_AFL_AREA(cur_loc + 2); } + } + } + } static void afl_compcov_log_64(target_ulong cur_loc, target_ulong arg1, target_ulong arg2) { if ((arg1 & 0xff) == (arg2 & 0xff)) { + INC_AFL_AREA(cur_loc); if ((arg1 & 0xffff) == (arg2 & 0xffff)) { - INC_AFL_AREA(cur_loc +1); + + INC_AFL_AREA(cur_loc + 1); if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { - INC_AFL_AREA(cur_loc +2); + + INC_AFL_AREA(cur_loc + 2); if ((arg1 & 0xffffffff) == (arg2 & 0xffffffff)) { - INC_AFL_AREA(cur_loc +3); + + INC_AFL_AREA(cur_loc + 3); if ((arg1 & 0xffffffffff) == (arg2 & 0xffffffffff)) { - 
INC_AFL_AREA(cur_loc +4); + + INC_AFL_AREA(cur_loc + 4); if ((arg1 & 0xffffffffffff) == (arg2 & 0xffffffffffff)) { - INC_AFL_AREA(cur_loc +5); + + INC_AFL_AREA(cur_loc + 5); if ((arg1 & 0xffffffffffffff) == (arg2 & 0xffffffffffffff)) { - INC_AFL_AREA(cur_loc +6); + + INC_AFL_AREA(cur_loc + 6); + } + } + } + } + } + } + } -} +} static void afl_gen_compcov(target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2, TCGMemOp ot, int is_imm) { void *func; - + if (!afl_compcov_level || cur_loc > afl_end_code || cur_loc < afl_start_code) return; - - if (!is_imm && afl_compcov_level < 2) - return; + + if (!is_imm && afl_compcov_level < 2) return; switch (ot) { - case MO_64: - func = &afl_compcov_log_64; - break; - case MO_32: - func = &afl_compcov_log_32; - break; - case MO_16: - func = &afl_compcov_log_16; - break; - default: - return; + + case MO_64: func = &afl_compcov_log_64; break; + case MO_32: func = &afl_compcov_log_32; break; + case MO_16: func = &afl_compcov_log_16; break; + default: return; + } - - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 7; - + if (cur_loc >= afl_inst_rms) return; - + tcg_gen_afl_compcov_log_call(func, cur_loc, arg1, arg2); + } + diff --git a/qemu_mode/patches/afl-qemu-tcg-inl.h b/qemu_mode/patches/afl-qemu-tcg-inl.h index a9c53b8c..d53a1ccf 100644 --- a/qemu_mode/patches/afl-qemu-tcg-inl.h +++ b/qemu_mode/patches/afl-qemu-tcg-inl.h @@ -31,275 +31,343 @@ */ -void afl_maybe_log(void* cur_loc); +void afl_maybe_log(void *cur_loc); /* Note: we convert the 64 bit args to 32 bit and do some alignment and endian swap. Maybe it would be better to do the alignment and endian swap in tcg_reg_alloc_call(). */ -void tcg_gen_afl_maybe_log_call(target_ulong cur_loc) -{ - int real_args, pi; - unsigned sizemask, flags; - TCGOp *op; - - TCGTemp *arg = tcgv_i64_temp( tcg_const_tl(cur_loc) ); - - flags = 0; - sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1); - -#if defined(__sparc__) && !defined(__arch64__) \ - && !defined(CONFIG_TCG_INTERPRETER) - /* We have 64-bit values in one register, but need to pass as two - separate parameters. Split them. */ - int orig_sizemask = sizemask; - TCGv_i64 retl, reth; - TCGTemp *split_args[MAX_OPC_PARAM]; - - retl = NULL; - reth = NULL; - if (sizemask != 0) { - real_args = 0; - int is_64bit = sizemask & (1 << 2); - if (is_64bit) { - TCGv_i64 orig = temp_tcgv_i64(arg); - TCGv_i32 h = tcg_temp_new_i32(); - TCGv_i32 l = tcg_temp_new_i32(); - tcg_gen_extr_i64_i32(l, h, orig); - split_args[real_args++] = tcgv_i32_temp(h); - split_args[real_args++] = tcgv_i32_temp(l); - } else { - split_args[real_args++] = arg; - } - nargs = real_args; - args = split_args; - sizemask = 0; +void tcg_gen_afl_maybe_log_call(target_ulong cur_loc) { + + int real_args, pi; + unsigned sizemask, flags; + TCGOp * op; + + TCGTemp *arg = tcgv_i64_temp(tcg_const_tl(cur_loc)); + + flags = 0; + sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1); + +#if defined(__sparc__) && !defined(__arch64__) && \ + !defined(CONFIG_TCG_INTERPRETER) + /* We have 64-bit values in one register, but need to pass as two + separate parameters. Split them. 
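afl_compcov_log_64 above unrolls this idea to seven nested checks: each additional low-order byte that agrees between the two operands earns one more map increment, handing the fuzzer a byte-at-a-time gradient toward equality. The cascade collapses to a short loop:

/* Loop form of the unrolled afl_compcov_log_* cascade: one map increment
   per matching low-order byte, up to size - 1 checks. */
#include <stdint.h>
#include <stdio.h>

#define MAP_SIZE (1 << 16)   /* assumed AFL default */
static uint8_t afl_map[MAP_SIZE];

static void compcov_log(uint32_t cur_loc, uint64_t a, uint64_t b, int size) {

  for (int i = 0; i < size - 1; i++) {

    uint64_t mask = (1ULL << ((i + 1) * 8)) - 1;
    if ((a & mask) != (b & mask)) break;
    afl_map[cur_loc + i]++;

  }

}

int main(void) {

  /* low six bytes agree, the seventh differs (values made up) */
  compcov_log(100, 0x1122334455667788ULL, 0x99AA334455667788ULL, 8);

  int credited = 0;
  for (int i = 0; i < 7; i++) credited += afl_map[100 + i];
  printf("%d of 7 prefix checks passed\n", credited);   /* prints 6 */
  return 0;

}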
*/ + int orig_sizemask = sizemask; + TCGv_i64 retl, reth; + TCGTemp *split_args[MAX_OPC_PARAM]; + + retl = NULL; + reth = NULL; + if (sizemask != 0) { + + real_args = 0; + int is_64bit = sizemask & (1 << 2); + if (is_64bit) { + + TCGv_i64 orig = temp_tcgv_i64(arg); + TCGv_i32 h = tcg_temp_new_i32(); + TCGv_i32 l = tcg_temp_new_i32(); + tcg_gen_extr_i64_i32(l, h, orig); + split_args[real_args++] = tcgv_i32_temp(h); + split_args[real_args++] = tcgv_i32_temp(l); + + } else { + + split_args[real_args++] = arg; + } + + nargs = real_args; + args = split_args; + sizemask = 0; + + } + #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 - int is_64bit = sizemask & (1 << 2); - int is_signed = sizemask & (2 << 2); - if (!is_64bit) { - TCGv_i64 temp = tcg_temp_new_i64(); - TCGv_i64 orig = temp_tcgv_i64(arg); - if (is_signed) { - tcg_gen_ext32s_i64(temp, orig); - } else { - tcg_gen_ext32u_i64(temp, orig); - } - arg = tcgv_i64_temp(temp); + int is_64bit = sizemask & (1 << 2); + int is_signed = sizemask & (2 << 2); + if (!is_64bit) { + + TCGv_i64 temp = tcg_temp_new_i64(); + TCGv_i64 orig = temp_tcgv_i64(arg); + if (is_signed) { + + tcg_gen_ext32s_i64(temp, orig); + + } else { + + tcg_gen_ext32u_i64(temp, orig); + } + + arg = tcgv_i64_temp(temp); + + } + #endif /* TCG_TARGET_EXTEND_ARGS */ - op = tcg_emit_op(INDEX_op_call); + op = tcg_emit_op(INDEX_op_call); - pi = 0; + pi = 0; - TCGOP_CALLO(op) = 0; + TCGOP_CALLO(op) = 0; + + real_args = 0; + int is_64bit = sizemask & (1 << 2); + if (TCG_TARGET_REG_BITS < 64 && is_64bit) { - real_args = 0; - int is_64bit = sizemask & (1 << 2); - if (TCG_TARGET_REG_BITS < 64 && is_64bit) { #ifdef TCG_TARGET_CALL_ALIGN_ARGS - /* some targets want aligned 64 bit args */ - if (real_args & 1) { - op->args[pi++] = TCG_CALL_DUMMY_ARG; - real_args++; - } + /* some targets want aligned 64 bit args */ + if (real_args & 1) { + + op->args[pi++] = TCG_CALL_DUMMY_ARG; + real_args++; + + } + #endif - /* If stack grows up, then we will be placing successive - arguments at lower addresses, which means we need to - reverse the order compared to how we would normally - treat either big or little-endian. For those arguments - that will wind up in registers, this still works for - HPPA (the only current STACK_GROWSUP target) since the - argument registers are *also* allocated in decreasing - order. If another such target is added, this logic may - have to get more complicated to differentiate between - stack arguments and register arguments. */ + /* If stack grows up, then we will be placing successive + arguments at lower addresses, which means we need to + reverse the order compared to how we would normally + treat either big or little-endian. For those arguments + that will wind up in registers, this still works for + HPPA (the only current STACK_GROWSUP target) since the + argument registers are *also* allocated in decreasing + order. If another such target is added, this logic may + have to get more complicated to differentiate between + stack arguments and register arguments. 
*/ #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP) - op->args[pi++] = temp_arg(arg + 1); - op->args[pi++] = temp_arg(arg); + op->args[pi++] = temp_arg(arg + 1); + op->args[pi++] = temp_arg(arg); #else - op->args[pi++] = temp_arg(arg); - op->args[pi++] = temp_arg(arg + 1); + op->args[pi++] = temp_arg(arg); + op->args[pi++] = temp_arg(arg + 1); #endif - real_args += 2; - } + real_args += 2; + + } + + op->args[pi++] = temp_arg(arg); + real_args++; + + op->args[pi++] = (uintptr_t)&afl_maybe_log; + op->args[pi++] = flags; + TCGOP_CALLI(op) = real_args; + + /* Make sure the fields didn't overflow. */ + tcg_debug_assert(TCGOP_CALLI(op) == real_args); + tcg_debug_assert(pi <= ARRAY_SIZE(op->args)); + +#if defined(__sparc__) && !defined(__arch64__) && \ + !defined(CONFIG_TCG_INTERPRETER) + /* Free all of the parts we allocated above. */ + real_args = 0; + int is_64bit = orig_sizemask & (1 << 2); + if (is_64bit) { + + tcg_temp_free_internal(args[real_args++]); + tcg_temp_free_internal(args[real_args++]); + + } else { - op->args[pi++] = temp_arg(arg); real_args++; - op->args[pi++] = (uintptr_t)&afl_maybe_log; - op->args[pi++] = flags; - TCGOP_CALLI(op) = real_args; + } - /* Make sure the fields didn't overflow. */ - tcg_debug_assert(TCGOP_CALLI(op) == real_args); - tcg_debug_assert(pi <= ARRAY_SIZE(op->args)); + if (orig_sizemask & 1) { + + /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them. + Note that describing these as TCGv_i64 eliminates an unnecessary + zero-extension that tcg_gen_concat_i32_i64 would create. */ + tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth); + tcg_temp_free_i64(retl); + tcg_temp_free_i64(reth); + + } -#if defined(__sparc__) && !defined(__arch64__) \ - && !defined(CONFIG_TCG_INTERPRETER) - /* Free all of the parts we allocated above. */ - real_args = 0; - int is_64bit = orig_sizemask & (1 << 2); - if (is_64bit) { - tcg_temp_free_internal(args[real_args++]); - tcg_temp_free_internal(args[real_args++]); - } else { - real_args++; - } - if (orig_sizemask & 1) { - /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them. - Note that describing these as TCGv_i64 eliminates an unnecessary - zero-extension that tcg_gen_concat_i32_i64 would create. */ - tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth); - tcg_temp_free_i64(retl); - tcg_temp_free_i64(reth); - } #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 - int is_64bit = sizemask & (1 << 2); - if (!is_64bit) { - tcg_temp_free_internal(arg); - } + int is_64bit = sizemask & (1 << 2); + if (!is_64bit) { tcg_temp_free_internal(arg); } #endif /* TCG_TARGET_EXTEND_ARGS */ + } -void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2) -{ - int i, real_args, nb_rets, pi; - unsigned sizemask, flags; - TCGOp *op; - - const int nargs = 3; - TCGTemp *args[3] = { tcgv_i64_temp( tcg_const_tl(cur_loc) ), - tcgv_i64_temp(arg1), - tcgv_i64_temp(arg2) }; - - flags = 0; - sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1) | - dh_sizemask(i64, 2) | dh_sizemask(i64, 3); - -#if defined(__sparc__) && !defined(__arch64__) \ - && !defined(CONFIG_TCG_INTERPRETER) - /* We have 64-bit values in one register, but need to pass as two - separate parameters. Split them. 
*/ - int orig_sizemask = sizemask; - int orig_nargs = nargs; - TCGv_i64 retl, reth; - TCGTemp *split_args[MAX_OPC_PARAM]; - - retl = NULL; - reth = NULL; - if (sizemask != 0) { - for (i = real_args = 0; i < nargs; ++i) { - int is_64bit = sizemask & (1 << (i+1)*2); - if (is_64bit) { - TCGv_i64 orig = temp_tcgv_i64(args[i]); - TCGv_i32 h = tcg_temp_new_i32(); - TCGv_i32 l = tcg_temp_new_i32(); - tcg_gen_extr_i64_i32(l, h, orig); - split_args[real_args++] = tcgv_i32_temp(h); - split_args[real_args++] = tcgv_i32_temp(l); - } else { - split_args[real_args++] = args[i]; - } - } - nargs = real_args; - args = split_args; - sizemask = 0; +void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, + TCGv_i64 arg1, TCGv_i64 arg2) { + + int i, real_args, nb_rets, pi; + unsigned sizemask, flags; + TCGOp * op; + + const int nargs = 3; + TCGTemp *args[3] = {tcgv_i64_temp(tcg_const_tl(cur_loc)), tcgv_i64_temp(arg1), + tcgv_i64_temp(arg2)}; + + flags = 0; + sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1) | dh_sizemask(i64, 2) | + dh_sizemask(i64, 3); + +#if defined(__sparc__) && !defined(__arch64__) && \ + !defined(CONFIG_TCG_INTERPRETER) + /* We have 64-bit values in one register, but need to pass as two + separate parameters. Split them. */ + int orig_sizemask = sizemask; + int orig_nargs = nargs; + TCGv_i64 retl, reth; + TCGTemp *split_args[MAX_OPC_PARAM]; + + retl = NULL; + reth = NULL; + if (sizemask != 0) { + + for (i = real_args = 0; i < nargs; ++i) { + + int is_64bit = sizemask & (1 << (i + 1) * 2); + if (is_64bit) { + + TCGv_i64 orig = temp_tcgv_i64(args[i]); + TCGv_i32 h = tcg_temp_new_i32(); + TCGv_i32 l = tcg_temp_new_i32(); + tcg_gen_extr_i64_i32(l, h, orig); + split_args[real_args++] = tcgv_i32_temp(h); + split_args[real_args++] = tcgv_i32_temp(l); + + } else { + + split_args[real_args++] = args[i]; + + } + } + + nargs = real_args; + args = split_args; + sizemask = 0; + + } + #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 - for (i = 0; i < nargs; ++i) { - int is_64bit = sizemask & (1 << (i+1)*2); - int is_signed = sizemask & (2 << (i+1)*2); - if (!is_64bit) { - TCGv_i64 temp = tcg_temp_new_i64(); - TCGv_i64 orig = temp_tcgv_i64(args[i]); - if (is_signed) { - tcg_gen_ext32s_i64(temp, orig); - } else { - tcg_gen_ext32u_i64(temp, orig); - } - args[i] = tcgv_i64_temp(temp); - } + for (i = 0; i < nargs; ++i) { + + int is_64bit = sizemask & (1 << (i + 1) * 2); + int is_signed = sizemask & (2 << (i + 1) * 2); + if (!is_64bit) { + + TCGv_i64 temp = tcg_temp_new_i64(); + TCGv_i64 orig = temp_tcgv_i64(args[i]); + if (is_signed) { + + tcg_gen_ext32s_i64(temp, orig); + + } else { + + tcg_gen_ext32u_i64(temp, orig); + + } + + args[i] = tcgv_i64_temp(temp); + } + + } + #endif /* TCG_TARGET_EXTEND_ARGS */ - op = tcg_emit_op(INDEX_op_call); + op = tcg_emit_op(INDEX_op_call); - pi = 0; - nb_rets = 0; - TCGOP_CALLO(op) = nb_rets; + pi = 0; + nb_rets = 0; + TCGOP_CALLO(op) = nb_rets; + + real_args = 0; + for (i = 0; i < nargs; i++) { + + int is_64bit = sizemask & (1 << (i + 1) * 2); + if (TCG_TARGET_REG_BITS < 64 && is_64bit) { - real_args = 0; - for (i = 0; i < nargs; i++) { - int is_64bit = sizemask & (1 << (i+1)*2); - if (TCG_TARGET_REG_BITS < 64 && is_64bit) { #ifdef TCG_TARGET_CALL_ALIGN_ARGS - /* some targets want aligned 64 bit args */ - if (real_args & 1) { - op->args[pi++] = TCG_CALL_DUMMY_ARG; - real_args++; - } + /* some targets want aligned 64 bit args */ + if (real_args & 1) { + + op->args[pi++] = TCG_CALL_DUMMY_ARG; + real_args++; + + } + #endif - /* If stack 
grows up, then we will be placing successive - arguments at lower addresses, which means we need to - reverse the order compared to how we would normally - treat either big or little-endian. For those arguments - that will wind up in registers, this still works for - HPPA (the only current STACK_GROWSUP target) since the - argument registers are *also* allocated in decreasing - order. If another such target is added, this logic may - have to get more complicated to differentiate between - stack arguments and register arguments. */ + /* If stack grows up, then we will be placing successive + arguments at lower addresses, which means we need to + reverse the order compared to how we would normally + treat either big or little-endian. For those arguments + that will wind up in registers, this still works for + HPPA (the only current STACK_GROWSUP target) since the + argument registers are *also* allocated in decreasing + order. If another such target is added, this logic may + have to get more complicated to differentiate between + stack arguments and register arguments. */ #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP) - op->args[pi++] = temp_arg(args[i] + 1); - op->args[pi++] = temp_arg(args[i]); + op->args[pi++] = temp_arg(args[i] + 1); + op->args[pi++] = temp_arg(args[i]); #else - op->args[pi++] = temp_arg(args[i]); - op->args[pi++] = temp_arg(args[i] + 1); + op->args[pi++] = temp_arg(args[i]); + op->args[pi++] = temp_arg(args[i] + 1); #endif - real_args += 2; - continue; - } + real_args += 2; + continue; - op->args[pi++] = temp_arg(args[i]); - real_args++; - } - op->args[pi++] = (uintptr_t)func; - op->args[pi++] = flags; - TCGOP_CALLI(op) = real_args; - - /* Make sure the fields didn't overflow. */ - tcg_debug_assert(TCGOP_CALLI(op) == real_args); - tcg_debug_assert(pi <= ARRAY_SIZE(op->args)); - -#if defined(__sparc__) && !defined(__arch64__) \ - && !defined(CONFIG_TCG_INTERPRETER) - /* Free all of the parts we allocated above. */ - for (i = real_args = 0; i < orig_nargs; ++i) { - int is_64bit = orig_sizemask & (1 << (i+1)*2); - if (is_64bit) { - tcg_temp_free_internal(args[real_args++]); - tcg_temp_free_internal(args[real_args++]); - } else { - real_args++; - } } - if (orig_sizemask & 1) { - /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them. - Note that describing these as TCGv_i64 eliminates an unnecessary - zero-extension that tcg_gen_concat_i32_i64 would create. */ - tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth); - tcg_temp_free_i64(retl); - tcg_temp_free_i64(reth); + + op->args[pi++] = temp_arg(args[i]); + real_args++; + + } + + op->args[pi++] = (uintptr_t)func; + op->args[pi++] = flags; + TCGOP_CALLI(op) = real_args; + + /* Make sure the fields didn't overflow. */ + tcg_debug_assert(TCGOP_CALLI(op) == real_args); + tcg_debug_assert(pi <= ARRAY_SIZE(op->args)); + +#if defined(__sparc__) && !defined(__arch64__) && \ + !defined(CONFIG_TCG_INTERPRETER) + /* Free all of the parts we allocated above. */ + for (i = real_args = 0; i < orig_nargs; ++i) { + + int is_64bit = orig_sizemask & (1 << (i + 1) * 2); + if (is_64bit) { + + tcg_temp_free_internal(args[real_args++]); + tcg_temp_free_internal(args[real_args++]); + + } else { + + real_args++; + } + + } + + if (orig_sizemask & 1) { + + /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them. + Note that describing these as TCGv_i64 eliminates an unnecessary + zero-extension that tcg_gen_concat_i32_i64 would create. 
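All the split_args, retl, and reth ceremony in both functions exists because 32-bit hosts pass one 64-bit TCG value in two 32-bit registers: the argument is cut into halves before the call op is emitted, and a 64-bit result is glued back together afterwards. Stripped of TCG types, the transformation is just:

/* What the 32-bit-host path does with a 64-bit helper argument: split it
   into halves for the call (tcg_gen_extr_i64_i32) and reassemble the
   result (tcg_gen_concat32_i64). Plain-integer sketch, demo value only. */
#include <stdint.h>
#include <stdio.h>

int main(void) {

  uint64_t arg = 0x1122334455667788ULL;

  uint32_t lo = (uint32_t)arg;           /* goes in one register */
  uint32_t hi = (uint32_t)(arg >> 32);   /* goes in another      */

  uint64_t back = ((uint64_t)hi << 32) | lo;
  printf("split %08x:%08x -> %016llx\n", hi, lo, (unsigned long long)back);
  return 0;

}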
*/ + tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth); + tcg_temp_free_i64(retl); + tcg_temp_free_i64(reth); + + } + #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 - for (i = 0; i < nargs; ++i) { - int is_64bit = sizemask & (1 << (i+1)*2); - if (!is_64bit) { - tcg_temp_free_internal(args[i]); - } - } + for (i = 0; i < nargs; ++i) { + + int is_64bit = sizemask & (1 << (i + 1) * 2); + if (!is_64bit) { tcg_temp_free_internal(args[i]); } + + } + #endif /* TCG_TARGET_EXTEND_ARGS */ + } diff --git a/qemu_mode/patches/afl-qemu-translate-inl.h b/qemu_mode/patches/afl-qemu-translate-inl.h index ffe43dba..9abaa961 100644 --- a/qemu_mode/patches/afl-qemu-translate-inl.h +++ b/qemu_mode/patches/afl-qemu-translate-inl.h @@ -36,8 +36,8 @@ /* Declared in afl-qemu-cpu-inl.h */ extern unsigned char *afl_area_ptr; -extern unsigned int afl_inst_rms; -extern abi_ulong afl_start_code, afl_end_code; +extern unsigned int afl_inst_rms; +extern abi_ulong afl_start_code, afl_end_code; void tcg_gen_afl_maybe_log_call(target_ulong cur_loc); @@ -59,14 +59,16 @@ static void afl_gen_trace(target_ulong cur_loc) { /* Optimize for cur_loc > afl_end_code, which is the most likely case on Linux systems. */ - if (cur_loc > afl_end_code || cur_loc < afl_start_code /*|| !afl_area_ptr*/) // not needed because of static dummy buffer + if (cur_loc > afl_end_code || + cur_loc < afl_start_code /*|| !afl_area_ptr*/) // not needed because of + // static dummy buffer return; /* Looks like QEMU always maps to fixed locations, so ASLR is not a concern. Phew. But instruction addresses may be aligned. Let's mangle the value to get something quasi-uniform. */ - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 1; /* Implement probabilistic instrumentation by looking at scrambled block @@ -75,5 +77,6 @@ static void afl_gen_trace(target_ulong cur_loc) { if (cur_loc >= afl_inst_rms) return; tcg_gen_afl_maybe_log_call(cur_loc); - + } + diff --git a/src/afl-analyze.c b/src/afl-analyze.c index 5bb96154..e3014256 100644 --- a/src/afl-analyze.c +++ b/src/afl-analyze.c @@ -22,7 +22,7 @@ #define AFL_MAIN #ifdef __ANDROID__ - #include "android-ashmem.h" +# include "android-ashmem.h" #endif #include "config.h" #include "types.h" @@ -50,61 +50,59 @@ #include <sys/types.h> #include <sys/resource.h> -static s32 child_pid; /* PID of the tested program */ +static s32 child_pid; /* PID of the tested program */ - u8* trace_bits; /* SHM with instrumentation bitmap */ +u8* trace_bits; /* SHM with instrumentation bitmap */ -static u8 *in_file, /* Analyzer input test case */ - *prog_in, /* Targeted program input file */ - *target_path, /* Path to target binary */ - *doc_path; /* Path to docs */ +static u8 *in_file, /* Analyzer input test case */ + *prog_in, /* Targeted program input file */ + *target_path, /* Path to target binary */ + *doc_path; /* Path to docs */ -static u8 *in_data; /* Input data for analysis */ +static u8* in_data; /* Input data for analysis */ -static u32 in_len, /* Input data length */ - orig_cksum, /* Original checksum */ - total_execs, /* Total number of execs */ - exec_hangs, /* Total number of hangs */ - exec_tmout = EXEC_TIMEOUT; /* Exec timeout (ms) */ +static u32 in_len, /* Input data length */ + orig_cksum, /* Original checksum */ + total_execs, /* Total number of execs */ + exec_hangs, /* Total number of hangs */ + exec_tmout = EXEC_TIMEOUT; /* Exec timeout (ms) */ -static u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */ +static u64 mem_limit = 
MEM_LIMIT; /* Memory limit (MB) */ -static s32 dev_null_fd = -1; /* FD to /dev/null */ +static s32 dev_null_fd = -1; /* FD to /dev/null */ -static u8 edges_only, /* Ignore hit counts? */ - use_hex_offsets, /* Show hex offsets? */ - use_stdin = 1; /* Use stdin for program input? */ - -static volatile u8 - stop_soon, /* Ctrl-C pressed? */ - child_timed_out; /* Child timed out? */ +static u8 edges_only, /* Ignore hit counts? */ + use_hex_offsets, /* Show hex offsets? */ + use_stdin = 1; /* Use stdin for program input? */ +static volatile u8 stop_soon, /* Ctrl-C pressed? */ + child_timed_out; /* Child timed out? */ /* Constants used for describing byte behavior. */ -#define RESP_NONE 0x00 /* Changing byte is a no-op. */ -#define RESP_MINOR 0x01 /* Some changes have no effect. */ -#define RESP_VARIABLE 0x02 /* Changes produce variable paths. */ -#define RESP_FIXED 0x03 /* Changes produce fixed patterns. */ - -#define RESP_LEN 0x04 /* Potential length field */ -#define RESP_CKSUM 0x05 /* Potential checksum */ -#define RESP_SUSPECT 0x06 /* Potential "suspect" blob */ +#define RESP_NONE 0x00 /* Changing byte is a no-op. */ +#define RESP_MINOR 0x01 /* Some changes have no effect. */ +#define RESP_VARIABLE 0x02 /* Changes produce variable paths. */ +#define RESP_FIXED 0x03 /* Changes produce fixed patterns. */ +#define RESP_LEN 0x04 /* Potential length field */ +#define RESP_CKSUM 0x05 /* Potential checksum */ +#define RESP_SUSPECT 0x06 /* Potential "suspect" blob */ -/* Classify tuple counts. This is a slow & naive version, but good enough here. */ +/* Classify tuple counts. This is a slow & naive version, but good enough here. + */ static u8 count_class_lookup[256] = { - [0] = 0, - [1] = 1, - [2] = 2, - [3] = 4, - [4 ... 7] = 8, - [8 ... 15] = 16, - [16 ... 31] = 32, - [32 ... 127] = 64, - [128 ... 255] = 128 + [0] = 0, + [1] = 1, + [2] = 2, + [3] = 4, + [4 ... 7] = 8, + [8 ... 15] = 16, + [16 ... 31] = 32, + [32 ... 127] = 64, + [128 ... 255] = 128 }; @@ -115,61 +113,62 @@ static void classify_counts(u8* mem) { if (edges_only) { while (i--) { + if (*mem) *mem = 1; mem++; + } } else { while (i--) { + *mem = count_class_lookup[*mem]; mem++; + } } } - /* See if any bytes are set in the bitmap. */ static inline u8 anything_set(void) { u32* ptr = (u32*)trace_bits; - u32 i = (MAP_SIZE >> 2); + u32 i = (MAP_SIZE >> 2); - while (i--) if (*(ptr++)) return 1; + while (i--) + if (*(ptr++)) return 1; return 0; } - /* Get rid of temp files (atexit handler). */ static void at_exit_handler(void) { - unlink(prog_in); /* Ignore errors */ + unlink(prog_in); /* Ignore errors */ } - /* Read initial file. */ static void read_initial_file(void) { struct stat st; - s32 fd = open(in_file, O_RDONLY); + s32 fd = open(in_file, O_RDONLY); if (fd < 0) PFATAL("Unable to open '%s'", in_file); - if (fstat(fd, &st) || !st.st_size) - FATAL("Zero-sized input file."); + if (fstat(fd, &st) || !st.st_size) FATAL("Zero-sized input file."); if (st.st_size >= TMIN_MAX_FILE) FATAL("Input file is too large (%u MB max)", TMIN_MAX_FILE / 1024 / 1024); - in_len = st.st_size; + in_len = st.st_size; in_data = ck_alloc_nozero(in_len); ck_read(fd, in_data, in_len, in_file); @@ -180,14 +179,13 @@ static void read_initial_file(void) { } - /* Write output file. 
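
The count_class_lookup table above collapses raw tuple hit counts into coarse buckets, so that noisy counts (say, 5 versus 7 executions of the same edge) do not read as new behavior. A runnable translation of the same designated-initializer ranges:

    #include <stdio.h>

    static unsigned char bucket(unsigned char hits) {
      if (hits == 0)   return 0;
      if (hits == 1)   return 1;
      if (hits == 2)   return 2;
      if (hits == 3)   return 4;
      if (hits <= 7)   return 8;    /* [4 ... 7]     */
      if (hits <= 15)  return 16;   /* [8 ... 15]    */
      if (hits <= 31)  return 32;   /* [16 ... 31]   */
      if (hits <= 127) return 64;   /* [32 ... 127]  */
      return 128;                   /* [128 ... 255] */
    }

    int main(void) {
      printf("%u %u %u\n", bucket(5), bucket(7), bucket(200)); /* 8 8 128 */
      return 0;
    }

classify_counts above simply applies this mapping in place to every byte of the trace bitmap, or clamps each byte to 0/1 when -e restricts the run to edge coverage.
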
*/ static s32 write_to_file(u8* path, u8* mem, u32 len) { s32 ret; - unlink(path); /* Ignore errors */ + unlink(path); /* Ignore errors */ ret = open(path, O_RDWR | O_CREAT | O_EXCL, 0600); @@ -201,7 +199,6 @@ static s32 write_to_file(u8* path, u8* mem, u32 len) { } - /* Handle timeout signal. */ static void handle_timeout(int sig) { @@ -211,14 +208,13 @@ static void handle_timeout(int sig) { } - /* Execute target application. Returns exec checksum, or 0 if program times out. */ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) { static struct itimerval it; - int status = 0; + int status = 0; s32 prog_in_fd; u32 cksum; @@ -237,8 +233,7 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) { struct rlimit r; if (dup2(use_stdin ? prog_in_fd : dev_null_fd, 0) < 0 || - dup2(dev_null_fd, 1) < 0 || - dup2(dev_null_fd, 2) < 0) { + dup2(dev_null_fd, 1) < 0 || dup2(dev_null_fd, 2) < 0) { *(u32*)trace_bits = EXEC_FAIL_SIG; PFATAL("dup2() failed"); @@ -254,18 +249,18 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) { #ifdef RLIMIT_AS - setrlimit(RLIMIT_AS, &r); /* Ignore errors */ + setrlimit(RLIMIT_AS, &r); /* Ignore errors */ #else - setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ + setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ #endif /* ^RLIMIT_AS */ } r.rlim_max = r.rlim_cur = 0; - setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ + setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ execv(target_path, argv); @@ -303,8 +298,10 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) { total_execs++; if (stop_soon) { + SAYF(cRST cLRD "\n+++ Analysis aborted by user +++\n" cRST); exit(1); + } /* Always discard inputs that time out. */ @@ -335,7 +332,6 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) { } - #ifdef USE_COLOR /* Helper function to display a human-readable character. */ @@ -353,24 +349,25 @@ static void show_char(u8 val) { } - /* Show the legend */ static void show_legend(void) { - SAYF(" " cLGR bgGRA " 01 " cRST " - no-op block " - cBLK bgLGN " 01 " cRST " - suspected length field\n" - " " cBRI bgGRA " 01 " cRST " - superficial content " - cBLK bgYEL " 01 " cRST " - suspected cksum or magic int\n" - " " cBLK bgCYA " 01 " cRST " - critical stream " - cBLK bgLRD " 01 " cRST " - suspected checksummed block\n" + SAYF(" " cLGR bgGRA " 01 " cRST " - no-op block " cBLK bgLGN + " 01 " cRST + " - suspected length field\n" + " " cBRI bgGRA " 01 " cRST " - superficial content " cBLK bgYEL + " 01 " cRST + " - suspected cksum or magic int\n" + " " cBLK bgCYA " 01 " cRST " - critical stream " cBLK bgLRD + " 01 " cRST + " - suspected checksummed block\n" " " cBLK bgMGN " 01 " cRST " - \"magic value\" section\n\n"); } #endif /* USE_COLOR */ - /* Interpret and report a pattern in the input file. */ static void dump_hex(u8* buf, u32 len, u8* b_data) { @@ -385,7 +382,7 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) { u32 rlen = 1; #endif /* ^USE_COLOR */ - u8 rtype = b_data[i] & 0x0f; + u8 rtype = b_data[i] & 0x0f; /* Look ahead to determine the length of run. */ @@ -404,51 +401,61 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) { case 2: { - u16 val = *(u16*)(in_data + i); + u16 val = *(u16*)(in_data + i); + + /* Small integers may be length fields. */ - /* Small integers may be length fields. 
*/ + if (val && (val <= in_len || SWAP16(val) <= in_len)) { - if (val && (val <= in_len || SWAP16(val) <= in_len)) { - rtype = RESP_LEN; - break; - } + rtype = RESP_LEN; + break; + + } - /* Uniform integers may be checksums. */ + /* Uniform integers may be checksums. */ - if (val && abs(in_data[i] - in_data[i + 1]) > 32) { - rtype = RESP_CKSUM; - break; - } + if (val && abs(in_data[i] - in_data[i + 1]) > 32) { + rtype = RESP_CKSUM; break; } + break; + + } + case 4: { - u32 val = *(u32*)(in_data + i); + u32 val = *(u32*)(in_data + i); - /* Small integers may be length fields. */ + /* Small integers may be length fields. */ - if (val && (val <= in_len || SWAP32(val) <= in_len)) { - rtype = RESP_LEN; - break; - } + if (val && (val <= in_len || SWAP32(val) <= in_len)) { - /* Uniform integers may be checksums. */ + rtype = RESP_LEN; + break; - if (val && (in_data[i] >> 7 != in_data[i + 1] >> 7 || - in_data[i] >> 7 != in_data[i + 2] >> 7 || - in_data[i] >> 7 != in_data[i + 3] >> 7)) { - rtype = RESP_CKSUM; - break; - } + } + /* Uniform integers may be checksums. */ + + if (val && (in_data[i] >> 7 != in_data[i + 1] >> 7 || + in_data[i] >> 7 != in_data[i + 2] >> 7 || + in_data[i] >> 7 != in_data[i + 3] >> 7)) { + + rtype = RESP_CKSUM; break; } - case 1: case 3: case 5 ... MAX_AUTO_EXTRA - 1: break; + break; + + } + + case 1: + case 3: + case 5 ... MAX_AUTO_EXTRA - 1: break; default: rtype = RESP_SUSPECT; @@ -477,19 +484,22 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) { switch (rtype) { - case RESP_NONE: SAYF(cLGR bgGRA); break; - case RESP_MINOR: SAYF(cBRI bgGRA); break; + case RESP_NONE: SAYF(cLGR bgGRA); break; + case RESP_MINOR: SAYF(cBRI bgGRA); break; case RESP_VARIABLE: SAYF(cBLK bgCYA); break; - case RESP_FIXED: SAYF(cBLK bgMGN); break; - case RESP_LEN: SAYF(cBLK bgLGN); break; - case RESP_CKSUM: SAYF(cBLK bgYEL); break; - case RESP_SUSPECT: SAYF(cBLK bgLRD); break; + case RESP_FIXED: SAYF(cBLK bgMGN); break; + case RESP_LEN: SAYF(cBLK bgLGN); break; + case RESP_CKSUM: SAYF(cBLK bgYEL); break; + case RESP_SUSPECT: SAYF(cBLK bgLRD); break; } show_char(in_data[i + off]); - if (off != rlen - 1 && (i + off + 1) % 16) SAYF(" "); else SAYF(cRST " "); + if (off != rlen - 1 && (i + off + 1) % 16) + SAYF(" "); + else + SAYF(cRST " "); } @@ -502,13 +512,13 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) { switch (rtype) { - case RESP_NONE: SAYF("no-op block\n"); break; - case RESP_MINOR: SAYF("superficial content\n"); break; + case RESP_NONE: SAYF("no-op block\n"); break; + case RESP_MINOR: SAYF("superficial content\n"); break; case RESP_VARIABLE: SAYF("critical stream\n"); break; - case RESP_FIXED: SAYF("\"magic value\" section\n"); break; - case RESP_LEN: SAYF("suspected length field\n"); break; - case RESP_CKSUM: SAYF("suspected cksum or magic int\n"); break; - case RESP_SUSPECT: SAYF("suspected checksummed block\n"); break; + case RESP_FIXED: SAYF("\"magic value\" section\n"); break; + case RESP_LEN: SAYF("suspected length field\n"); break; + case RESP_CKSUM: SAYF("suspected cksum or magic int\n"); break; + case RESP_SUSPECT: SAYF("suspected checksummed block\n"); break; } @@ -524,8 +534,6 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) { } - - /* Actually analyze! */ static void analyze(char** argv) { @@ -536,7 +544,7 @@ static void analyze(char** argv) { u8* b_data = ck_alloc(in_len + 1); u8 seq_byte = 0; - b_data[in_len] = 0xff; /* Intentional terminator. */ + b_data[in_len] = 0xff; /* Intentional terminator. 
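
The heuristics in dump_hex above are worth spelling out: a 16- or 32-bit field is flagged as a potential length if its value, in either byte order, could plausibly index into the input, while fields whose bytes disagree wildly are flagged as potential checksums. A self-contained sketch of the 16-bit length test (SWAP16 is redefined locally; the inputs are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define SWAP16(x) ((uint16_t)((((uint16_t)(x)) << 8) | (((uint16_t)(x)) >> 8)))

    static int looks_like_len16(uint16_t val, uint32_t in_len) {
      /* Same test as above: nonzero and within bounds in either endianness. */
      return val && (val <= in_len || SWAP16(val) <= in_len);
    }

    int main(void) {
      printf("%d\n", looks_like_len16(0x0020, 100)); /* 1: 32 fits in 100 bytes */
      printf("%d\n", looks_like_len16(0xFFFF, 100)); /* 0: too large either way */
      return 0;
    }
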
*/ ACTF("Analyzing input file (this may take a while)...\n"); @@ -587,12 +595,15 @@ static void analyze(char** argv) { b_data[i] = RESP_FIXED; - } else b_data[i] = RESP_VARIABLE; + } else + + b_data[i] = RESP_VARIABLE; /* When all checksums change, flip most significant bit of b_data. */ - if (prev_xff != xor_ff && prev_x01 != xor_01 && - prev_s10 != sub_10 && prev_a10 != add_10) seq_byte ^= 0x80; + if (prev_xff != xor_ff && prev_x01 != xor_01 && prev_s10 != sub_10 && + prev_a10 != add_10) + seq_byte ^= 0x80; b_data[i] |= seq_byte; @@ -601,7 +612,7 @@ static void analyze(char** argv) { prev_s10 = sub_10; prev_a10 = add_10; - } + } dump_hex(in_data, in_len, b_data); @@ -618,8 +629,6 @@ static void analyze(char** argv) { } - - /* Handle Ctrl-C and the like. */ static void handle_stop_sig(int sig) { @@ -630,7 +639,6 @@ static void handle_stop_sig(int sig) { } - /* Do basic preparations - persistent fds, filenames, etc. */ static void set_up_environment(void) { @@ -674,18 +682,20 @@ static void set_up_environment(void) { if (x) { if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR))) - FATAL("Custom MSAN_OPTIONS set without exit_code=" - STRINGIFY(MSAN_ERROR) " - please fix!"); + FATAL("Custom MSAN_OPTIONS set without exit_code=" STRINGIFY( + MSAN_ERROR) " - please fix!"); if (!strstr(x, "symbolize=0")) FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!"); } - setenv("ASAN_OPTIONS", "abort_on_error=1:" - "detect_leaks=0:" - "symbolize=0:" - "allocator_may_return_null=1", 0); + setenv("ASAN_OPTIONS", + "abort_on_error=1:" + "detect_leaks=0:" + "symbolize=0:" + "allocator_may_return_null=1", + 0); setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":" "symbolize=0:" @@ -694,21 +704,22 @@ static void set_up_environment(void) { "msan_track_origins=0", 0); if (getenv("AFL_PRELOAD")) { + setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1); setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1); + } } - /* Setup signal handlers, duh. */ static void setup_signal_handlers(void) { struct sigaction sa; - sa.sa_handler = NULL; - sa.sa_flags = SA_RESTART; + sa.sa_handler = NULL; + sa.sa_flags = SA_RESTART; sa.sa_sigaction = NULL; sigemptyset(&sa.sa_mask); @@ -727,43 +738,42 @@ static void setup_signal_handlers(void) { } - /* Display usage hints. */ static void usage(u8* argv0) { - SAYF("\n%s [ options ] -- /path/to/target_app [ ... ]\n\n" + SAYF( + "\n%s [ options ] -- /path/to/target_app [ ... 
]\n\n" - "Required parameters:\n\n" + "Required parameters:\n\n" - " -i file - input test case to be analyzed by the tool\n" + " -i file - input test case to be analyzed by the tool\n" - "Execution control settings:\n\n" + "Execution control settings:\n\n" - " -f file - input file read by the tested program (stdin)\n" - " -t msec - timeout for each run (%d ms)\n" - " -m megs - memory limit for child process (%d MB)\n" - " -Q - use binary-only instrumentation (QEMU mode)\n" - " -U - use unicorn-based instrumentation (Unicorn mode)\n\n" + " -f file - input file read by the tested program (stdin)\n" + " -t msec - timeout for each run (%d ms)\n" + " -m megs - memory limit for child process (%d MB)\n" + " -Q - use binary-only instrumentation (QEMU mode)\n" + " -U - use unicorn-based instrumentation (Unicorn mode)\n\n" - "Analysis settings:\n\n" + "Analysis settings:\n\n" - " -e - look for edge coverage only, ignore hit counts\n\n" + " -e - look for edge coverage only, ignore hit counts\n\n" - "For additional tips, please consult %s/README.\n\n", + "For additional tips, please consult %s/README.\n\n", - argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path); + argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path); exit(1); } - /* Find binary. */ static void find_binary(u8* fname) { - u8* env_path = 0; + u8* env_path = 0; struct stat st; if (strchr(fname, '/') || !(env_path = getenv("PATH"))) { @@ -786,7 +796,9 @@ static void find_binary(u8* fname) { memcpy(cur_elem, env_path, delim - env_path); delim++; - } else cur_elem = ck_strdup(env_path); + } else + + cur_elem = ck_strdup(env_path); env_path = delim; @@ -798,7 +810,8 @@ static void find_binary(u8* fname) { ck_free(cur_elem); if (!stat(target_path, &st) && S_ISREG(st.st_mode) && - (st.st_mode & 0111) && st.st_size >= 4) break; + (st.st_mode & 0111) && st.st_size >= 4) + break; ck_free(target_path); target_path = 0; @@ -811,13 +824,12 @@ static void find_binary(u8* fname) { } - /* Fix up argv for QEMU. 
*/ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { char** new_argv = ck_alloc(sizeof(char*) * (argc + 4)); - u8 *tmp, *cp, *rsl, *own_copy; + u8 * tmp, *cp, *rsl, *own_copy; memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc); @@ -832,8 +844,7 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { cp = alloc_printf("%s/afl-qemu-trace", tmp); - if (access(cp, X_OK)) - FATAL("Unable to find '%s'", tmp); + if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp); target_path = new_argv[0] = cp; return new_argv; @@ -857,7 +868,9 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { } - } else ck_free(own_copy); + } else + + ck_free(own_copy); if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) { @@ -882,7 +895,7 @@ int main(int argc, char** argv) { SAYF(cCYA "afl-analyze" VERSION cRST " by <lcamtuf@google.com>\n"); - while ((opt = getopt(argc,argv,"+i:f:m:t:eQU")) > 0) + while ((opt = getopt(argc, argv, "+i:f:m:t:eQU")) > 0) switch (opt) { @@ -896,7 +909,7 @@ int main(int argc, char** argv) { if (prog_in) FATAL("Multiple -f options not supported"); use_stdin = 0; - prog_in = optarg; + prog_in = optarg; break; case 'e': @@ -907,40 +920,41 @@ int main(int argc, char** argv) { case 'm': { - u8 suffix = 'M'; + u8 suffix = 'M'; - if (mem_limit_given) FATAL("Multiple -m options not supported"); - mem_limit_given = 1; + if (mem_limit_given) FATAL("Multiple -m options not supported"); + mem_limit_given = 1; - if (!strcmp(optarg, "none")) { + if (!strcmp(optarg, "none")) { - mem_limit = 0; - break; + mem_limit = 0; + break; - } + } - if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 || - optarg[0] == '-') FATAL("Bad syntax used for -m"); + if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 || + optarg[0] == '-') + FATAL("Bad syntax used for -m"); - switch (suffix) { + switch (suffix) { - case 'T': mem_limit *= 1024 * 1024; break; - case 'G': mem_limit *= 1024; break; - case 'k': mem_limit /= 1024; break; - case 'M': break; + case 'T': mem_limit *= 1024 * 1024; break; + case 'G': mem_limit *= 1024; break; + case 'k': mem_limit /= 1024; break; + case 'M': break; - default: FATAL("Unsupported suffix or bad syntax for -m"); + default: FATAL("Unsupported suffix or bad syntax for -m"); - } + } - if (mem_limit < 5) FATAL("Dangerously low value of -m"); + if (mem_limit < 5) FATAL("Dangerously low value of -m"); - if (sizeof(rlim_t) == 4 && mem_limit > 2000) - FATAL("Value of -m out of range on 32-bit systems"); + if (sizeof(rlim_t) == 4 && mem_limit > 2000) + FATAL("Value of -m out of range on 32-bit systems"); - } + } - break; + break; case 't': @@ -970,9 +984,7 @@ int main(int argc, char** argv) { unicorn_mode = 1; break; - default: - - usage(argv[0]); + default: usage(argv[0]); } diff --git a/src/afl-as.c b/src/afl-as.c index 94595f24..57f4c4a3 100644 --- a/src/afl-as.c +++ b/src/afl-as.c @@ -48,39 +48,38 @@ #include <sys/wait.h> #include <sys/time.h> -static u8** as_params; /* Parameters passed to the real 'as' */ +static u8** as_params; /* Parameters passed to the real 'as' */ -static u8* input_file; /* Originally specified input file */ -static u8* modified_file; /* Instrumented file for the real 'as' */ +static u8* input_file; /* Originally specified input file */ +static u8* modified_file; /* Instrumented file for the real 'as' */ -static u8 be_quiet, /* Quiet mode (no stderr output) */ - clang_mode, /* Running in clang mode? */ - pass_thru, /* Just pass data through? */ - just_version, /* Just show version? 
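
The -m handler above accepts a number with an optional T/G/k/M suffix and normalizes everything to megabytes; "none" lifts the limit entirely. The same grammar as a small helper (illustrative, minus the 32-bit rlim_t range check):

    #include <stdio.h>

    static int parse_mem_limit(const char *arg, unsigned long long *out_mb) {
      unsigned long long v;
      char suffix = 'M';                    /* bare numbers mean megabytes */
      if (sscanf(arg, "%llu%c", &v, &suffix) < 1 || arg[0] == '-') return -1;
      switch (suffix) {
        case 'T': v *= 1024 * 1024; break;  /* TB -> MB */
        case 'G': v *= 1024; break;         /* GB -> MB */
        case 'k': v /= 1024; break;         /* kB -> MB */
        case 'M': break;
        default: return -1;                 /* unsupported suffix */
      }
      if (v < 5) return -1;                 /* dangerously low */
      *out_mb = v;
      return 0;
    }

    int main(void) {
      unsigned long long mb;
      if (!parse_mem_limit("2G", &mb)) printf("%llu MB\n", mb); /* 2048 MB */
      return 0;
    }
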
*/ - sanitizer; /* Using ASAN / MSAN */ +static u8 be_quiet, /* Quiet mode (no stderr output) */ + clang_mode, /* Running in clang mode? */ + pass_thru, /* Just pass data through? */ + just_version, /* Just show version? */ + sanitizer; /* Using ASAN / MSAN */ -static u32 inst_ratio = 100, /* Instrumentation probability (%) */ - as_par_cnt = 1; /* Number of params to 'as' */ +static u32 inst_ratio = 100, /* Instrumentation probability (%) */ + as_par_cnt = 1; /* Number of params to 'as' */ -/* If we don't find --32 or --64 in the command line, default to +/* If we don't find --32 or --64 in the command line, default to instrumentation for whichever mode we were compiled with. This is not perfect, but should do the trick for almost all use cases. */ #ifdef __x86_64__ -static u8 use_64bit = 1; +static u8 use_64bit = 1; #else -static u8 use_64bit = 0; +static u8 use_64bit = 0; -#ifdef __APPLE__ -# error "Sorry, 32-bit Apple platforms are not supported." -#endif /* __APPLE__ */ +# ifdef __APPLE__ +# error "Sorry, 32-bit Apple platforms are not supported." +# endif /* __APPLE__ */ #endif /* ^__x86_64__ */ - /* Examine and modify parameters to pass to 'as'. Note that the file name is always the last parameter passed by GCC, so we exploit this property to keep the code simple. */ @@ -134,8 +133,10 @@ static void edit_params(int argc, char** argv) { for (i = 1; i < argc - 1; i++) { - if (!strcmp(argv[i], "--64")) use_64bit = 1; - else if (!strcmp(argv[i], "--32")) use_64bit = 0; + if (!strcmp(argv[i], "--64")) + use_64bit = 1; + else if (!strcmp(argv[i], "--32")) + use_64bit = 0; #ifdef __APPLE__ @@ -143,7 +144,8 @@ static void edit_params(int argc, char** argv) { if (!strcmp(argv[i], "-arch") && i + 1 < argc) { - if (!strcmp(argv[i + 1], "x86_64")) use_64bit = 1; + if (!strcmp(argv[i + 1], "x86_64")) + use_64bit = 1; else if (!strcmp(argv[i + 1], "i386")) FATAL("Sorry, 32-bit Apple platforms are not supported."); @@ -181,13 +183,17 @@ static void edit_params(int argc, char** argv) { if (input_file[0] == '-') { if (!strcmp(input_file + 1, "-version")) { + just_version = 1; modified_file = input_file; goto wrap_things_up; + } - if (input_file[1]) FATAL("Incorrect use (not called through afl-gcc?)"); - else input_file = NULL; + if (input_file[1]) + FATAL("Incorrect use (not called through afl-gcc?)"); + else + input_file = NULL; } else { @@ -197,22 +203,21 @@ static void edit_params(int argc, char** argv) { NSS. */ if (strncmp(input_file, tmp_dir, strlen(tmp_dir)) && - strncmp(input_file, "/var/tmp/", 9) && - strncmp(input_file, "/tmp/", 5)) pass_thru = 1; + strncmp(input_file, "/var/tmp/", 9) && strncmp(input_file, "/tmp/", 5)) + pass_thru = 1; } - modified_file = alloc_printf("%s/.afl-%u-%u.s", tmp_dir, getpid(), - (u32)time(NULL)); + modified_file = + alloc_printf("%s/.afl-%u-%u.s", tmp_dir, getpid(), (u32)time(NULL)); wrap_things_up: as_params[as_par_cnt++] = modified_file; - as_params[as_par_cnt] = NULL; + as_params[as_par_cnt] = NULL; } - /* Process input file, generate modified_file. Insert instrumentation in all the appropriate places. 
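
The pass-thru test in edit_params above encodes a simple assumption: compiler drivers place their intermediate .s files under a temp directory, so any input outside /tmp, /var/tmp, or the configured tmp_dir is likely hand-written assembly (the comment above cites NSS) that must not be instrumented. A sketch of that decision, assuming tmp_dir resolves to $TMPDIR with a /tmp fallback (the actual resolution happens outside this hunk):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int should_pass_thru(const char *input_file) {
      const char *tmp_dir = getenv("TMPDIR");
      if (!tmp_dir || !*tmp_dir) tmp_dir = "/tmp"; /* assumed fallback */
      return strncmp(input_file, tmp_dir, strlen(tmp_dir)) &&
             strncmp(input_file, "/var/tmp/", 9) &&
             strncmp(input_file, "/tmp/", 5);
    }

    int main(void) {
      printf("%d\n", should_pass_thru("/home/user/chacha.s")); /* 1: pass through */
      printf("%d\n", should_pass_thru("/tmp/ccXh81Zq.s"));     /* 0: instrument   */
      return 0;
    }
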
*/ @@ -222,11 +227,11 @@ static void add_instrumentation(void) { FILE* inf; FILE* outf; - s32 outfd; - u32 ins_lines = 0; + s32 outfd; + u32 ins_lines = 0; - u8 instr_ok = 0, skip_csect = 0, skip_next_label = 0, - skip_intel = 0, skip_app = 0, instrument_next = 0; + u8 instr_ok = 0, skip_csect = 0, skip_next_label = 0, skip_intel = 0, + skip_app = 0, instrument_next = 0; #ifdef __APPLE__ @@ -239,7 +244,9 @@ static void add_instrumentation(void) { inf = fopen(input_file, "r"); if (!inf) PFATAL("Unable to read '%s'", input_file); - } else inf = stdin; + } else + + inf = stdin; outfd = open(modified_file, O_WRONLY | O_EXCL | O_CREAT, 0600); @@ -247,7 +254,7 @@ static void add_instrumentation(void) { outf = fdopen(outfd, "w"); - if (!outf) PFATAL("fdopen() failed"); + if (!outf) PFATAL("fdopen() failed"); while (fgets(line, MAX_LINE, inf)) { @@ -284,22 +291,26 @@ static void add_instrumentation(void) { around them, so we use that as a signal. */ if (!clang_mode && instr_ok && !strncmp(line + 2, "p2align ", 8) && - isdigit(line[10]) && line[11] == '\n') skip_next_label = 1; + isdigit(line[10]) && line[11] == '\n') + skip_next_label = 1; if (!strncmp(line + 2, "text\n", 5) || !strncmp(line + 2, "section\t.text", 13) || !strncmp(line + 2, "section\t__TEXT,__text", 21) || !strncmp(line + 2, "section __TEXT,__text", 21)) { + instr_ok = 1; - continue; + continue; + } if (!strncmp(line + 2, "section\t", 8) || - !strncmp(line + 2, "section ", 8) || - !strncmp(line + 2, "bss\n", 4) || + !strncmp(line + 2, "section ", 8) || !strncmp(line + 2, "bss\n", 4) || !strncmp(line + 2, "data\n", 5)) { + instr_ok = 0; continue; + } } @@ -354,8 +365,9 @@ static void add_instrumentation(void) { */ - if (skip_intel || skip_app || skip_csect || !instr_ok || - line[0] == '#' || line[0] == ' ') continue; + if (skip_intel || skip_app || skip_csect || !instr_ok || line[0] == '#' || + line[0] == ' ') + continue; /* Conditional branch instruction (jnz, etc). We append the instrumentation right after the branch (to instrument the not-taken path) and at the @@ -404,15 +416,16 @@ static void add_instrumentation(void) { /* Apple: L<num> / LBB<num> */ - if ((isdigit(line[1]) || (clang_mode && !strncmp(line, "LBB", 3))) - && R(100) < inst_ratio) { + if ((isdigit(line[1]) || (clang_mode && !strncmp(line, "LBB", 3))) && + R(100) < inst_ratio) { #else /* Apple: .L<num> / .LBB<num> */ - if ((isdigit(line[2]) || (clang_mode && !strncmp(line + 1, "LBB", 3))) - && R(100) < inst_ratio) { + if ((isdigit(line[2]) || + (clang_mode && !strncmp(line + 1, "LBB", 3))) && + R(100) < inst_ratio) { #endif /* __APPLE__ */ @@ -427,7 +440,10 @@ static void add_instrumentation(void) { .Lfunc_begin0-style exception handling calculations (a problem on MacOS X). */ - if (!skip_next_label) instrument_next = 1; else skip_next_label = 0; + if (!skip_next_label) + instrument_next = 1; + else + skip_next_label = 0; } @@ -436,34 +452,34 @@ static void add_instrumentation(void) { /* Function label (always instrumented, deferred mode). */ instrument_next = 1; - + } } } - if (ins_lines) - fputs(use_64bit ? main_payload_64 : main_payload_32, outf); + if (ins_lines) fputs(use_64bit ? main_payload_64 : main_payload_32, outf); if (input_file) fclose(inf); fclose(outf); if (!be_quiet) { - if (!ins_lines) WARNF("No instrumentation targets found%s.", - pass_thru ? " (pass-thru mode)" : ""); - else OKF("Instrumented %u locations (%s-bit, %s mode, ratio %u%%).", - ins_lines, use_64bit ? "64" : "32", - getenv("AFL_HARDEN") ? "hardened" : - (sanitizer ? 
"ASAN/MSAN" : "non-hardened"), - inst_ratio); - + if (!ins_lines) + WARNF("No instrumentation targets found%s.", + pass_thru ? " (pass-thru mode)" : ""); + else + OKF("Instrumented %u locations (%s-bit, %s mode, ratio %u%%).", ins_lines, + use_64bit ? "64" : "32", + getenv("AFL_HARDEN") ? "hardened" + : (sanitizer ? "ASAN/MSAN" : "non-hardened"), + inst_ratio); + } } - /* Main entry point */ int main(int argc, char** argv) { @@ -473,7 +489,7 @@ int main(int argc, char** argv) { int status; u8* inst_ratio_str = getenv("AFL_INST_RATIO"); - struct timeval tv; + struct timeval tv; struct timezone tz; clang_mode = !!getenv(CLANG_ENV_VAR); @@ -481,19 +497,26 @@ int main(int argc, char** argv) { if (isatty(2) && !getenv("AFL_QUIET")) { SAYF(cCYA "afl-as" VERSION cRST " by <lcamtuf@google.com>\n"); - - } else be_quiet = 1; + + } else + + be_quiet = 1; if (argc < 2) { - SAYF("\n" - "This is a helper application for afl-fuzz. It is a wrapper around GNU 'as',\n" - "executed by the toolchain whenever using afl-gcc or afl-clang. You probably\n" - "don't want to run this program directly.\n\n" + SAYF( + "\n" + "This is a helper application for afl-fuzz. It is a wrapper around GNU " + "'as',\n" + "executed by the toolchain whenever using afl-gcc or afl-clang. You " + "probably\n" + "don't want to run this program directly.\n\n" - "Rarely, when dealing with extremely complex projects, it may be advisable to\n" - "set AFL_INST_RATIO to a value less than 100 in order to reduce the odds of\n" - "instrumenting every discovered branch.\n\n"); + "Rarely, when dealing with extremely complex projects, it may be " + "advisable to\n" + "set AFL_INST_RATIO to a value less than 100 in order to reduce the " + "odds of\n" + "instrumenting every discovered branch.\n\n"); exit(1); @@ -509,7 +532,7 @@ int main(int argc, char** argv) { if (inst_ratio_str) { - if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || inst_ratio > 100) + if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || inst_ratio > 100) FATAL("Bad value of AFL_INST_RATIO (must be between 0 and 100)"); } @@ -524,9 +547,10 @@ int main(int argc, char** argv) { that... */ if (getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) { + sanitizer = 1; - if (!getenv("AFL_INST_RATIO")) - inst_ratio /= 3; + if (!getenv("AFL_INST_RATIO")) inst_ratio /= 3; + } if (!just_version) add_instrumentation(); diff --git a/src/afl-common.c b/src/afl-common.c index f3bbdfb4..9f1f45eb 100644 --- a/src/afl-common.c +++ b/src/afl-common.c @@ -13,25 +13,29 @@ /* Detect @@ in args. */ #ifndef __glibc__ -#include <unistd.h> +# include <unistd.h> #endif - void detect_file_args(char** argv, u8* prog_in) { u32 i = 0; #ifdef __GLIBC__ - u8* cwd = getcwd(NULL, 0); /* non portable glibc extension */ + u8* cwd = getcwd(NULL, 0); /* non portable glibc extension */ #else - u8* cwd; - char *buf; - long size = pathconf(".", _PC_PATH_MAX); - if ((buf = (char *)malloc((size_t)size)) != NULL) { - cwd = getcwd(buf, (size_t)size); /* portable version */ + u8* cwd; + char* buf; + long size = pathconf(".", _PC_PATH_MAX); + if ((buf = (char*)malloc((size_t)size)) != NULL) { + + cwd = getcwd(buf, (size_t)size); /* portable version */ + } else { + PFATAL("getcwd() failed"); - cwd = 0; /* for dumb compilers */ + cwd = 0; /* for dumb compilers */ + } + #endif if (!cwd) PFATAL("getcwd() failed"); @@ -48,8 +52,10 @@ void detect_file_args(char** argv, u8* prog_in) { /* Be sure that we're always using fully-qualified paths. 
*/ - if (prog_in[0] == '/') aa_subst = prog_in; - else aa_subst = alloc_printf("%s/%s", cwd, prog_in); + if (prog_in[0] == '/') + aa_subst = prog_in; + else + aa_subst = alloc_printf("%s/%s", cwd, prog_in); /* Construct a replacement argv value. */ @@ -66,7 +72,7 @@ void detect_file_args(char** argv, u8* prog_in) { } - free(cwd); /* not tracked */ + free(cwd); /* not tracked */ } diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c index 0051f6b0..152ae802 100644 --- a/src/afl-forkserver.c +++ b/src/afl-forkserver.c @@ -15,34 +15,39 @@ #include <sys/resource.h> /* a program that includes afl-forkserver needs to define these */ -extern u8 uses_asan; +extern u8 uses_asan; extern u8 *trace_bits; extern s32 forksrv_pid, child_pid, fsrv_ctl_fd, fsrv_st_fd; -extern s32 out_fd, out_dir_fd, dev_urandom_fd, dev_null_fd; /* initialize these with -1 */ -extern u32 exec_tmout; -extern u64 mem_limit; -extern u8 *out_file, *target_path, *doc_path; +extern s32 out_fd, out_dir_fd, dev_urandom_fd, + dev_null_fd; /* initialize these with -1 */ +extern u32 exec_tmout; +extern u64 mem_limit; +extern u8 * out_file, *target_path, *doc_path; extern FILE *plot_file; -/* we need this internally but can be defined and read extern in the main source */ +/* we need this internally but can be defined and read extern in the main source + */ u8 child_timed_out; - /* Describe integer as memory size. */ -u8* forkserver_DMS(u64 val) { +u8 *forkserver_DMS(u64 val) { static u8 tmp[12][16]; static u8 cur; -#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \ - if (val < (_divisor) * (_limit_mult)) { \ +#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) \ + do { \ + \ + if (val < (_divisor) * (_limit_mult)) { \ + \ sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \ - return tmp[cur]; \ - } \ + return tmp[cur]; \ + \ + } \ + \ } while (0) - cur = (cur + 1) % 12; /* 0-9999 */ @@ -86,20 +91,23 @@ u8* forkserver_DMS(u64 val) { } - - /* the timeout handler */ void handle_timeout(int sig) { + if (child_pid > 0) { - child_timed_out = 1; + + child_timed_out = 1; kill(child_pid, SIGKILL); + } else if (child_pid == -1 && forksrv_pid > 0) { - child_timed_out = 1; + + child_timed_out = 1; kill(forksrv_pid, SIGKILL); + } -} +} /* Spin up fork server (instrumented mode only). The idea is explained here: @@ -112,20 +120,18 @@ void handle_timeout(int sig) { void init_forkserver(char **argv) { static struct itimerval it; - int st_pipe[2], ctl_pipe[2]; - int status; - s32 rlen; + int st_pipe[2], ctl_pipe[2]; + int status; + s32 rlen; ACTF("Spinning up the fork server..."); - if (pipe(st_pipe) || pipe(ctl_pipe)) - PFATAL("pipe() failed"); + if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed"); child_timed_out = 0; forksrv_pid = fork(); - if (forksrv_pid < 0) - PFATAL("fork() failed"); + if (forksrv_pid < 0) PFATAL("fork() failed"); if (!forksrv_pid) { @@ -137,29 +143,33 @@ void init_forkserver(char **argv) { soft 128. Let's try to fix that... 
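
init_forkserver, continuing below, wires up the classic AFL fork server: a control pipe and a status pipe are parked on fixed descriptors, and a single 4-byte read tells the parent the server is alive before any fuzzing starts. A heavily simplified, runnable sketch of just that handshake (error handling and fd hygiene omitted; the real protocol then loops on run requests):

    #include <stdio.h>
    #include <unistd.h>

    #define FORKSRV_FD 198 /* control fd; status is FORKSRV_FD + 1, as in AFL */

    int main(void) {
      int st_pipe[2], ctl_pipe[2];
      if (pipe(st_pipe) || pipe(ctl_pipe)) return 1;

      if (fork() == 0) {
        /* "Fork server" side: announce readiness on the status pipe. */
        dup2(ctl_pipe[0], FORKSRV_FD);
        dup2(st_pipe[1], FORKSRV_FD + 1);
        write(FORKSRV_FD + 1, "live", 4); /* any four bytes work */
        _exit(0);
      }

      /* Parent side: a 4-byte hello means the server came up. */
      char buf[4];
      ssize_t rlen = read(st_pipe[0], buf, 4);
      puts(rlen == 4 ? "All right - fork server is up." : "handshake failed");
      return 0;
    }
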
*/ if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) { + r.rlim_cur = FORKSRV_FD + 2; - setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */ + setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */ + } if (mem_limit) { + r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20; #ifdef RLIMIT_AS - setrlimit(RLIMIT_AS, &r); /* Ignore errors */ + setrlimit(RLIMIT_AS, &r); /* Ignore errors */ #else /* This takes care of OpenBSD, which doesn't have RLIMIT_AS, but according to reliable sources, RLIMIT_DATA covers anonymous maps - so we should be getting good protection against OOM bugs. */ - setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ + setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ #endif /* ^RLIMIT_AS */ + } /* Dumping cores is slow and can lead to anomalies if SIGKILL is delivered before the dump is complete. */ -// r.rlim_max = r.rlim_cur = 0; -// setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ + // r.rlim_max = r.rlim_cur = 0; + // setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ /* Isolate the process and configure standard descriptors. If out_file is specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */ @@ -167,23 +177,27 @@ void init_forkserver(char **argv) { setsid(); if (!getenv("AFL_DEBUG_CHILD_OUTPUT")) { + dup2(dev_null_fd, 1); dup2(dev_null_fd, 2); + } if (out_file) { + dup2(dev_null_fd, 0); + } else { + dup2(out_fd, 0); close(out_fd); + } /* Set up control and status pipes, close the unneeded original fds. */ - if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) - PFATAL("dup2() failed"); - if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) - PFATAL("dup2() failed"); + if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) PFATAL("dup2() failed"); + if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) PFATAL("dup2() failed"); close(ctl_pipe[0]); close(ctl_pipe[1]); @@ -198,8 +212,7 @@ void init_forkserver(char **argv) { /* This should improve performance a bit, since it stops the linker from doing extra work post-fork(). */ - if (!getenv("LD_BIND_LAZY")) - setenv("LD_BIND_NOW", "1", 0); + if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0); /* Set sane defaults for ASAN if nothing else specified. */ @@ -228,6 +241,7 @@ void init_forkserver(char **argv) { *(u32 *)trace_bits = EXEC_FAIL_SIG; exit(0); + } /* PARENT PROCESS */ @@ -243,8 +257,10 @@ void init_forkserver(char **argv) { /* Wait for the fork server to come up, but don't wait too long. */ if (exec_tmout) { + it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000); it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000; + } setitimer(ITIMER_REAL, &it, NULL); @@ -260,22 +276,24 @@ void init_forkserver(char **argv) { Otherwise, try to figure out what went wrong. */ if (rlen == 4) { + OKF("All right - fork server is up."); return; + } if (child_timed_out) FATAL("Timeout while initializing fork server (adjusting -t may help)"); - if (waitpid(forksrv_pid, &status, 0) <= 0) - PFATAL("waitpid() failed"); + if (waitpid(forksrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed"); if (WIFSIGNALED(status)) { if (mem_limit && mem_limit < 500 && uses_asan) { - SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, " - "before receiving any input\n" + SAYF("\n" cLRD "[-] " cRST + "Whoops, the target binary crashed suddenly, " + "before receiving any input\n" " from the fuzzer! 
Since it seems to be built with ASAN and you " "have a\n" " restrictive memory limit configured, this is expected; please " @@ -285,8 +303,9 @@ void init_forkserver(char **argv) { } else if (!mem_limit) { - SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, " - "before receiving any input\n" + SAYF("\n" cLRD "[-] " cRST + "Whoops, the target binary crashed suddenly, " + "before receiving any input\n" " from the fuzzer! There are several probable explanations:\n\n" " - The binary is just buggy and explodes entirely on its own. " @@ -303,8 +322,9 @@ void init_forkserver(char **argv) { } else { - SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, " - "before receiving any input\n" + SAYF("\n" cLRD "[-] " cRST + "Whoops, the target binary crashed suddenly, " + "before receiving any input\n" " from the fuzzer! There are several probable explanations:\n\n" " - The current memory limit (%s) is too restrictive, causing " @@ -315,7 +335,8 @@ void init_forkserver(char **argv) { "way confirm\n" " this diagnosis would be:\n\n" - MSG_ULIMIT_USAGE " /path/to/fuzzed_app )\n\n" + MSG_ULIMIT_USAGE + " /path/to/fuzzed_app )\n\n" " Tip: you can use http://jwilk.net/software/recidivm to " "quickly\n" @@ -334,9 +355,11 @@ void init_forkserver(char **argv) { " fail, poke <afl-users@googlegroups.com> for troubleshooting " "tips.\n", forkserver_DMS(mem_limit << 20), mem_limit - 1); + } FATAL("Fork server crashed with signal %d", WTERMSIG(status)); + } if (*(u32 *)trace_bits == EXEC_FAIL_SIG) @@ -344,8 +367,9 @@ void init_forkserver(char **argv) { if (mem_limit && mem_limit < 500 && uses_asan) { - SAYF("\n" cLRD "[-] " cRST "Hmm, looks like the target binary terminated " - "before we could complete a\n" + SAYF("\n" cLRD "[-] " cRST + "Hmm, looks like the target binary terminated " + "before we could complete a\n" " handshake with the injected code. Since it seems to be built " "with ASAN and\n" " you have a restrictive memory limit configured, this is " @@ -355,8 +379,9 @@ void init_forkserver(char **argv) { } else if (!mem_limit) { - SAYF("\n" cLRD "[-] " cRST "Hmm, looks like the target binary terminated " - "before we could complete a\n" + SAYF("\n" cLRD "[-] " cRST + "Hmm, looks like the target binary terminated " + "before we could complete a\n" " handshake with the injected code. Perhaps there is a horrible " "bug in the\n" " fuzzer. Poke <afl-users@googlegroups.com> for troubleshooting " @@ -365,8 +390,9 @@ void init_forkserver(char **argv) { } else { SAYF( - "\n" cLRD "[-] " cRST "Hmm, looks like the target binary terminated " - "before we could complete a\n" + "\n" cLRD "[-] " cRST + "Hmm, looks like the target binary terminated " + "before we could complete a\n" " handshake with the injected code. There are %s probable " "explanations:\n\n" @@ -377,7 +403,8 @@ void init_forkserver(char **argv) { "option. 
A\n" " simple way to confirm the diagnosis may be:\n\n" - MSG_ULIMIT_USAGE " /path/to/fuzzed_app )\n\n" + MSG_ULIMIT_USAGE + " /path/to/fuzzed_app )\n\n" " Tip: you can use http://jwilk.net/software/recidivm to quickly\n" " estimate the required amount of virtual memory for the " @@ -394,8 +421,10 @@ void init_forkserver(char **argv) { " reached before the program terminates.\n\n" : "", forkserver_DMS(mem_limit << 20), mem_limit - 1); + } FATAL("Fork server handshake failed"); + } diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c index 1a77dc13..be187fff 100644 --- a/src/afl-fuzz-bitmap.c +++ b/src/afl-fuzz-bitmap.c @@ -46,7 +46,6 @@ void write_bitmap(void) { } - /* Read bitmap from file. This is for the -B option again. */ void read_bitmap(u8* fname) { @@ -61,10 +60,9 @@ void read_bitmap(u8* fname) { } - /* Check if the current execution path brings anything new to the table. Update virgin bits to reflect the finds. Returns 1 if the only change is - the hit-count for a particular tuple; 2 if there are new tuples seen. + the hit-count for a particular tuple; 2 if there are new tuples seen. Updates the map, so subsequent calls will always return 0. This function is called after every exec() on a fairly large buffer, so @@ -75,20 +73,20 @@ u8 has_new_bits(u8* virgin_map) { #ifdef __x86_64__ u64* current = (u64*)trace_bits; - u64* virgin = (u64*)virgin_map; + u64* virgin = (u64*)virgin_map; - u32 i = (MAP_SIZE >> 3); + u32 i = (MAP_SIZE >> 3); #else u32* current = (u32*)trace_bits; - u32* virgin = (u32*)virgin_map; + u32* virgin = (u32*)virgin_map; - u32 i = (MAP_SIZE >> 2); + u32 i = (MAP_SIZE >> 2); #endif /* ^__x86_64__ */ - u8 ret = 0; + u8 ret = 0; while (i--) { @@ -111,14 +109,18 @@ u8 has_new_bits(u8* virgin_map) { if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) || (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) || (cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) || - (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) ret = 2; - else ret = 1; + (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) + ret = 2; + else + ret = 1; #else if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) || - (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) ret = 2; - else ret = 1; + (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) + ret = 2; + else + ret = 1; #endif /* ^__x86_64__ */ @@ -139,14 +141,13 @@ u8 has_new_bits(u8* virgin_map) { } - /* Count the number of bits set in the provided bitmap. Used for the status screen several times every second, does not have to be fast. */ u32 count_bits(u8* mem) { u32* ptr = (u32*)mem; - u32 i = (MAP_SIZE >> 2); + u32 i = (MAP_SIZE >> 2); u32 ret = 0; while (i--) { @@ -157,8 +158,10 @@ u32 count_bits(u8* mem) { data. */ if (v == 0xffffffff) { + ret += 32; continue; + } v -= ((v >> 1) & 0x55555555); @@ -171,8 +174,7 @@ u32 count_bits(u8* mem) { } - -#define FF(_b) (0xff << ((_b) << 3)) +#define FF(_b) (0xff << ((_b) << 3)) /* Count the number of bytes set in the bitmap. Called fairly sporadically, mostly to update the status screen or calibrate and examine confirmed @@ -181,7 +183,7 @@ u32 count_bits(u8* mem) { u32 count_bytes(u8* mem) { u32* ptr = (u32*)mem; - u32 i = (MAP_SIZE >> 2); + u32 i = (MAP_SIZE >> 2); u32 ret = 0; while (i--) { @@ -200,14 +202,13 @@ u32 count_bytes(u8* mem) { } - /* Count the number of non-255 bytes set in the bitmap. Used strictly for the status screen, several calls per second or so. 
*/ u32 count_non_255_bytes(u8* mem) { u32* ptr = (u32*)mem; - u32 i = (MAP_SIZE >> 2); + u32 i = (MAP_SIZE >> 2); u32 ret = 0; while (i--) { @@ -229,16 +230,14 @@ u32 count_non_255_bytes(u8* mem) { } - /* Destructively simplify trace by eliminating hit count information and replacing it with 0x80 or 0x01 depending on whether the tuple is hit or not. Called on every new crash or timeout, should be reasonably fast. */ -const u8 simplify_lookup[256] = { +const u8 simplify_lookup[256] = { - [0] = 1, - [1 ... 255] = 128 + [0] = 1, [1 ... 255] = 128 }; @@ -265,7 +264,9 @@ void simplify_trace(u64* mem) { mem8[6] = simplify_lookup[mem8[6]]; mem8[7] = simplify_lookup[mem8[7]]; - } else *mem = 0x0101010101010101ULL; + } else + + *mem = 0x0101010101010101ULL; ++mem; @@ -292,50 +293,49 @@ void simplify_trace(u32* mem) { mem8[2] = simplify_lookup[mem8[2]]; mem8[3] = simplify_lookup[mem8[3]]; - } else *mem = 0x01010101; + } else + + *mem = 0x01010101; ++mem; + } } #endif /* ^__x86_64__ */ - /* Destructively classify execution counts in a trace. This is used as a preprocessing step for any newly acquired traces. Called on every exec, must be fast. */ static const u8 count_class_lookup8[256] = { - [0] = 0, - [1] = 1, - [2] = 2, - [3] = 4, - [4 ... 7] = 8, - [8 ... 15] = 16, - [16 ... 31] = 32, - [32 ... 127] = 64, - [128 ... 255] = 128 + [0] = 0, + [1] = 1, + [2] = 2, + [3] = 4, + [4 ... 7] = 8, + [8 ... 15] = 16, + [16 ... 31] = 32, + [32 ... 127] = 64, + [128 ... 255] = 128 }; static u16 count_class_lookup16[65536]; - void init_count_class16(void) { u32 b1, b2; - for (b1 = 0; b1 < 256; b1++) + for (b1 = 0; b1 < 256; b1++) for (b2 = 0; b2 < 256; b2++) - count_class_lookup16[(b1 << 8) + b2] = - (count_class_lookup8[b1] << 8) | - count_class_lookup8[b2]; + count_class_lookup16[(b1 << 8) + b2] = + (count_class_lookup8[b1] << 8) | count_class_lookup8[b2]; } - #ifdef __x86_64__ void classify_counts(u64* mem) { @@ -390,7 +390,6 @@ void classify_counts(u32* mem) { #endif /* ^__x86_64__ */ - /* Compact trace bytes into a smaller bitmap. We effectively just drop the count information here. This is called only sporadically, for some new paths. */ @@ -408,7 +407,6 @@ void minimize_bits(u8* dst, u8* src) { } - #ifndef SIMPLE_FILES /* Construct a file name for a new test case, capturing the operation @@ -428,8 +426,7 @@ u8* describe_op(u8 hnb) { sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - start_time); - if (splicing_with >= 0) - sprintf(ret + strlen(ret), "+%06d", splicing_with); + if (splicing_with >= 0) sprintf(ret + strlen(ret), "+%06d", splicing_with); sprintf(ret + strlen(ret), ",op:%s", stage_short); @@ -438,11 +435,12 @@ u8* describe_op(u8 hnb) { sprintf(ret + strlen(ret), ",pos:%d", stage_cur_byte); if (stage_val_type != STAGE_VAL_NONE) - sprintf(ret + strlen(ret), ",val:%s%+d", - (stage_val_type == STAGE_VAL_BE) ? "be:" : "", - stage_cur_val); + sprintf(ret + strlen(ret), ",val:%s%+d", + (stage_val_type == STAGE_VAL_BE) ? 
"be:" : "", stage_cur_val); - } else sprintf(ret + strlen(ret), ",rep:%d", stage_cur_val); + } else + + sprintf(ret + strlen(ret), ",rep:%d", stage_cur_val); } @@ -454,13 +452,12 @@ u8* describe_op(u8 hnb) { #endif /* !SIMPLE_FILES */ - /* Write a message accompanying the crash directory :-) */ static void write_crash_readme(void) { - u8* fn = alloc_printf("%s/crashes/README.txt", out_dir); - s32 fd; + u8* fn = alloc_printf("%s/crashes/README.txt", out_dir); + s32 fd; FILE* f; fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); @@ -473,32 +470,38 @@ static void write_crash_readme(void) { f = fdopen(fd, "w"); if (!f) { + close(fd); return; + } - fprintf(f, "Command line used to find this crash:\n\n" + fprintf( + f, + "Command line used to find this crash:\n\n" - "%s\n\n" + "%s\n\n" - "If you can't reproduce a bug outside of afl-fuzz, be sure to set the same\n" - "memory limit. The limit used for this fuzzing session was %s.\n\n" + "If you can't reproduce a bug outside of afl-fuzz, be sure to set the " + "same\n" + "memory limit. The limit used for this fuzzing session was %s.\n\n" - "Need a tool to minimize test cases before investigating the crashes or sending\n" - "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n" + "Need a tool to minimize test cases before investigating the crashes or " + "sending\n" + "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n" - "Found any cool bugs in open-source tools using afl-fuzz? If yes, please drop\n" - "an mail at <afl-users@googlegroups.com> once the issues are fixed\n\n" + "Found any cool bugs in open-source tools using afl-fuzz? If yes, please " + "drop\n" + "an mail at <afl-users@googlegroups.com> once the issues are fixed\n\n" - " https://github.com/vanhauser-thc/AFLplusplus\n\n", + " https://github.com/vanhauser-thc/AFLplusplus\n\n", - orig_cmdline, DMS(mem_limit << 20)); /* ignore errors */ + orig_cmdline, DMS(mem_limit << 20)); /* ignore errors */ fclose(f); } - /* Check if the result of an execve() during routine fuzzing is interesting, save or queue the input test case for further analysis if so. Returns 1 if entry is saved, 0 otherwise. */ @@ -507,7 +510,7 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { if (len == 0) return 0; - u8 *fn = ""; + u8* fn = ""; u8 hnb; s32 fd; u8 keeping = 0, res; @@ -517,8 +520,8 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { struct queue_entry* q = queue; while (q) { - if (q->exec_cksum == cksum) - q->n_fuzz = q->n_fuzz + 1; + + if (q->exec_cksum == cksum) q->n_fuzz = q->n_fuzz + 1; q = q->next; @@ -530,9 +533,11 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { future fuzzing, etc. 
*/ if (!(hnb = has_new_bits(virgin_bits))) { + if (crash_mode) ++total_crashes; return 0; - } + + } #ifndef SIMPLE_FILES @@ -548,8 +553,10 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { add_to_queue(fn, len, 0); if (hnb == 2) { + queue_top->has_new_cov = 1; ++queued_with_cov; + } queue_top->exec_cksum = cksum; @@ -559,8 +566,7 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { res = calibrate_case(argv, queue_top, mem, queue_cycle - 1, 0); - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); if (fd < 0) PFATAL("Unable to create '%s'", fn); @@ -620,13 +626,12 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { #ifndef SIMPLE_FILES - fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir, - unique_hangs, describe_op(0)); + fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir, unique_hangs, + describe_op(0)); #else - fn = alloc_printf("%s/hangs/id_%06llu", out_dir, - unique_hangs); + fn = alloc_printf("%s/hangs/id_%06llu", out_dir, unique_hangs); #endif /* ^!SIMPLE_FILES */ @@ -638,7 +643,7 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { case FAULT_CRASH: -keep_as_crash: + keep_as_crash: /* This is handled in a manner roughly similar to timeouts, except for slightly different limits and no need to re-run test diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c index 1f52181d..f43c86f4 100644 --- a/src/afl-fuzz-extras.c +++ b/src/afl-fuzz-extras.c @@ -22,32 +22,32 @@ #include "afl-fuzz.h" - /* Helper function for load_extras. */ static int compare_extras_len(const void* p1, const void* p2) { - struct extra_data *e1 = (struct extra_data*)p1, - *e2 = (struct extra_data*)p2; + + struct extra_data *e1 = (struct extra_data*)p1, *e2 = (struct extra_data*)p2; return e1->len - e2->len; + } static int compare_extras_use_d(const void* p1, const void* p2) { - struct extra_data *e1 = (struct extra_data*)p1, - *e2 = (struct extra_data*)p2; + + struct extra_data *e1 = (struct extra_data*)p1, *e2 = (struct extra_data*)p2; return e2->hit_cnt - e1->hit_cnt; -} +} /* Read extras from a file, sort by size. */ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { FILE* f; - u8 buf[MAX_LINE]; - u8 *lptr; - u32 cur_line = 0; + u8 buf[MAX_LINE]; + u8* lptr; + u32 cur_line = 0; f = fopen(fname, "r"); @@ -62,10 +62,12 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { /* Trim on left and right. */ - while (isspace(*lptr)) ++lptr; + while (isspace(*lptr)) + ++lptr; rptr = lptr + strlen(lptr) - 1; - while (rptr >= lptr && isspace(*rptr)) --rptr; + while (rptr >= lptr && isspace(*rptr)) + --rptr; ++rptr; *rptr = 0; @@ -84,7 +86,8 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { /* Skip alphanumerics and dashes (label). */ - while (isalnum(*lptr) || *lptr == '_') ++lptr; + while (isalnum(*lptr) || *lptr == '_') + ++lptr; /* If @number follows, parse that. */ @@ -92,13 +95,15 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { ++lptr; if (atoi(lptr) > dict_level) continue; - while (isdigit(*lptr)) ++lptr; + while (isdigit(*lptr)) + ++lptr; } /* Skip whitespace and = signs. */ - while (isspace(*lptr) || *lptr == '=') ++lptr; + while (isspace(*lptr) || *lptr == '=') + ++lptr; /* Consume opening '"'. 
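
For reference, the grammar load_extras_file accepts, as visible in this hunk, is one token per line: an alphanumeric/underscore label, an optional @level gate, an equals sign, and a double-quoted value supporting \\, \", and \xNN escapes. A hypothetical dictionary illustrating it (tokens tagged @2 are skipped unless the dictionary was loaded with a level of 2 or more, i.e. a name ending in @2):

    header_png@2 = "\x89PNG\x0d\x0a"
    keyword_iend = "IEND"
    quoted_bits  = "say \"hi\" with a backslash: \\"
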
*/ @@ -112,8 +117,8 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { /* Okay, let's allocate memory and copy data between "...", handling \xNN escaping, \\, and \". */ - extras = ck_realloc_block(extras, (extras_cnt + 1) * - sizeof(struct extra_data)); + extras = + ck_realloc_block(extras, (extras_cnt + 1) * sizeof(struct extra_data)); wptr = extras[extras_cnt].data = ck_alloc(rptr - lptr); @@ -132,27 +137,25 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { ++lptr; if (*lptr == '\\' || *lptr == '"') { + *(wptr++) = *(lptr++); klen++; break; + } if (*lptr != 'x' || !isxdigit(lptr[1]) || !isxdigit(lptr[2])) FATAL("Invalid escaping (not \\xNN) in line %u.", cur_line); - *(wptr++) = - ((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) | - (strchr(hexdigits, tolower(lptr[2])) - hexdigits); + *(wptr++) = ((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) | + (strchr(hexdigits, tolower(lptr[2])) - hexdigits); lptr += 3; ++klen; break; - default: - - *(wptr++) = *(lptr++); - ++klen; + default: *(wptr++) = *(lptr++); ++klen; } @@ -161,8 +164,8 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { extras[extras_cnt].len = klen; if (extras[extras_cnt].len > MAX_DICT_FILE) - FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line, - DMS(klen), DMS(MAX_DICT_FILE)); + FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line, DMS(klen), + DMS(MAX_DICT_FILE)); if (*min_len > klen) *min_len = klen; if (*max_len < klen) *max_len = klen; @@ -175,15 +178,14 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { } - /* Read extras from the extras directory and sort them by size. */ void load_extras(u8* dir) { - DIR* d; + DIR* d; struct dirent* de; - u32 min_len = MAX_DICT_FILE, max_len = 0, dict_level = 0; - u8* x; + u32 min_len = MAX_DICT_FILE, max_len = 0, dict_level = 0; + u8* x; /* If the name ends with @, extract level and continue. */ @@ -201,8 +203,10 @@ void load_extras(u8* dir) { if (!d) { if (errno == ENOTDIR) { + load_extras_file(dir, &min_len, &max_len, dict_level); goto check_and_sort; + } PFATAL("Unable to open '%s'", dir); @@ -214,11 +218,10 @@ void load_extras(u8* dir) { while ((de = readdir(d))) { struct stat st; - u8* fn = alloc_printf("%s/%s", dir, de->d_name); - s32 fd; + u8* fn = alloc_printf("%s/%s", dir, de->d_name); + s32 fd; - if (lstat(fn, &st) || access(fn, R_OK)) - PFATAL("Unable to access '%s'", fn); + if (lstat(fn, &st) || access(fn, R_OK)) PFATAL("Unable to access '%s'", fn); /* This also takes care of . and .. 
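
The \xNN decoding above avoids strtol entirely: the offset of each digit inside the hexdigits string is its numeric value. The same trick, isolated:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static const char hexdigits[] = "0123456789abcdef";

    static unsigned char decode_hex_pair(const char *p) {
      /* p points at the two digits after "\x"; caller validated isxdigit(). */
      return ((strchr(hexdigits, tolower((unsigned char)p[0])) - hexdigits) << 4) |
              (strchr(hexdigits, tolower((unsigned char)p[1])) - hexdigits);
    }

    int main(void) {
      printf("0x%02x\n", decode_hex_pair("89")); /* 0x89 */
      printf("0x%02x\n", decode_hex_pair("0A")); /* 0x0a */
      return 0;
    }
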
*/ if (!S_ISREG(st.st_mode) || !st.st_size) { @@ -229,17 +232,17 @@ void load_extras(u8* dir) { } if (st.st_size > MAX_DICT_FILE) - FATAL("Extra '%s' is too big (%s, limit is %s)", fn, - DMS(st.st_size), DMS(MAX_DICT_FILE)); + FATAL("Extra '%s' is too big (%s, limit is %s)", fn, DMS(st.st_size), + DMS(MAX_DICT_FILE)); if (min_len > st.st_size) min_len = st.st_size; if (max_len < st.st_size) max_len = st.st_size; - extras = ck_realloc_block(extras, (extras_cnt + 1) * - sizeof(struct extra_data)); + extras = + ck_realloc_block(extras, (extras_cnt + 1) * sizeof(struct extra_data)); extras[extras_cnt].data = ck_alloc(st.st_size); - extras[extras_cnt].len = st.st_size; + extras[extras_cnt].len = st.st_size; fd = open(fn, O_RDONLY); @@ -262,8 +265,8 @@ check_and_sort: qsort(extras, extras_cnt, sizeof(struct extra_data), compare_extras_len); - OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt, - DMS(min_len), DMS(max_len)); + OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt, DMS(min_len), + DMS(max_len)); if (max_len > 32) WARNF("Some tokens are relatively large (%s) - consider trimming.", @@ -275,18 +278,16 @@ check_and_sort: } - - /* Helper function for maybe_add_auto() */ static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) { - while (len--) if (tolower(*(m1++)) ^ tolower(*(m2++))) return 1; + while (len--) + if (tolower(*(m1++)) ^ tolower(*(m2++))) return 1; return 0; } - /* Maybe add automatic extra. */ void maybe_add_auto(u8* mem, u32 len) { @@ -310,9 +311,10 @@ void maybe_add_auto(u8* mem, u32 len) { i = sizeof(interesting_16) >> 1; - while (i--) + while (i--) if (*((u16*)mem) == interesting_16[i] || - *((u16*)mem) == SWAP16(interesting_16[i])) return; + *((u16*)mem) == SWAP16(interesting_16[i])) + return; } @@ -320,9 +322,10 @@ void maybe_add_auto(u8* mem, u32 len) { i = sizeof(interesting_32) >> 2; - while (i--) + while (i--) if (*((u32*)mem) == interesting_32[i] || - *((u32*)mem) == SWAP32(interesting_32[i])) return; + *((u32*)mem) == SWAP32(interesting_32[i])) + return; } @@ -358,22 +361,21 @@ void maybe_add_auto(u8* mem, u32 len) { if (a_extras_cnt < MAX_AUTO_EXTRAS) { - a_extras = ck_realloc_block(a_extras, (a_extras_cnt + 1) * - sizeof(struct extra_data)); + a_extras = ck_realloc_block(a_extras, + (a_extras_cnt + 1) * sizeof(struct extra_data)); a_extras[a_extras_cnt].data = ck_memdup(mem, len); - a_extras[a_extras_cnt].len = len; + a_extras[a_extras_cnt].len = len; ++a_extras_cnt; } else { - i = MAX_AUTO_EXTRAS / 2 + - UR((MAX_AUTO_EXTRAS + 1) / 2); + i = MAX_AUTO_EXTRAS / 2 + UR((MAX_AUTO_EXTRAS + 1) / 2); ck_free(a_extras[i].data); - a_extras[i].data = ck_memdup(mem, len); - a_extras[i].len = len; + a_extras[i].data = ck_memdup(mem, len); + a_extras[i].len = len; a_extras[i].hit_cnt = 0; } @@ -387,12 +389,11 @@ sort_a_extras: /* Then, sort the top USE_AUTO_EXTRAS entries by size. */ - qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt), - sizeof(struct extra_data), compare_extras_len); + qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt), sizeof(struct extra_data), + compare_extras_len); } - /* Save automatically generated extras. */ void save_auto(void) { @@ -420,7 +421,6 @@ void save_auto(void) { } - /* Load automatically generated extras. 
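
When the auto-extras table is full, maybe_add_auto above evicts a random entry from the upper half of the array only; the descending use-count sort (compare_extras_use_d, defined earlier in this file) keeps the most productive tokens at the lower indices, so they survive eviction. The victim selection, isolated (MAX_AUTO_EXTRAS is shrunk for the demo and rand() stands in for AFL's UR()):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_AUTO_EXTRAS 10 /* the real value lives in config.h */

    static int pick_victim(void) {
      return MAX_AUTO_EXTRAS / 2 + rand() % ((MAX_AUTO_EXTRAS + 1) / 2);
    }

    int main(void) {
      for (int i = 0; i < 5; i++)
        printf("victim slot: %d\n", pick_victim()); /* always in [5, 10) */
      return 0;
    }
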
*/ void load_auto(void) { @@ -458,24 +458,25 @@ void load_auto(void) { } - if (i) OKF("Loaded %u auto-discovered dictionary tokens.", i); - else OKF("No auto-generated dictionary tokens to reuse."); + if (i) + OKF("Loaded %u auto-discovered dictionary tokens.", i); + else + OKF("No auto-generated dictionary tokens to reuse."); } - /* Destroy extras. */ void destroy_extras(void) { u32 i; - for (i = 0; i < extras_cnt; ++i) + for (i = 0; i < extras_cnt; ++i) ck_free(extras[i].data); ck_free(extras); - for (i = 0; i < a_extras_cnt; ++i) + for (i = 0; i < a_extras_cnt; ++i) ck_free(a_extras[i].data); ck_free(a_extras); diff --git a/src/afl-fuzz-globals.c b/src/afl-fuzz-globals.c index e28c3099..8fded173 100644 --- a/src/afl-fuzz-globals.c +++ b/src/afl-fuzz-globals.c @@ -25,27 +25,13 @@ /* MOpt: Lots of globals, but mostly for the status UI and other things where it really makes no sense to haul them around as function parameters. */ -u64 limit_time_puppet, - orig_hit_cnt_puppet, - last_limit_time_start, - tmp_pilot_time, - total_pacemaker_time, - total_puppet_find, - temp_puppet_find, - most_time_key, - most_time, - most_execs_key, - most_execs, - old_hit_count; - -s32 SPLICE_CYCLES_puppet, - limit_time_sig, - key_puppet, - key_module; - -double w_init = 0.9, - w_end = 0.3, - w_now; +u64 limit_time_puppet, orig_hit_cnt_puppet, last_limit_time_start, + tmp_pilot_time, total_pacemaker_time, total_puppet_find, temp_puppet_find, + most_time_key, most_time, most_execs_key, most_execs, old_hit_count; + +s32 SPLICE_CYCLES_puppet, limit_time_sig, key_puppet, key_module; + +double w_init = 0.9, w_end = 0.3, w_now; s32 g_now; s32 g_max = 5000; @@ -53,15 +39,13 @@ s32 g_max = 5000; u64 tmp_core_time; s32 swarm_now; -double x_now[swarm_num][operator_num], - L_best[swarm_num][operator_num], - eff_best[swarm_num][operator_num], - G_best[operator_num], - v_now[swarm_num][operator_num], - probability_now[swarm_num][operator_num], - swarm_fitness[swarm_num]; +double x_now[swarm_num][operator_num], L_best[swarm_num][operator_num], + eff_best[swarm_num][operator_num], G_best[operator_num], + v_now[swarm_num][operator_num], probability_now[swarm_num][operator_num], + swarm_fitness[swarm_num]; -u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per fuzz stage */ +u64 stage_finds_puppet[swarm_num] + [operator_num], /* Patterns found per fuzz stage */ stage_finds_puppet_v2[swarm_num][operator_num], stage_cycles_puppet_v2[swarm_num][operator_num], stage_cycles_puppet_v3[swarm_num][operator_num], @@ -71,207 +55,197 @@ u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per core_operator_finds_puppet_v2[operator_num], core_operator_cycles_puppet[operator_num], core_operator_cycles_puppet_v2[operator_num], - core_operator_cycles_puppet_v3[operator_num]; /* Execs per fuzz stage */ + core_operator_cycles_puppet_v3[operator_num]; /* Execs per fuzz stage */ double period_pilot_tmp = 5000.0; -s32 key_lv; - -u8 *in_dir, /* Input directory with test cases */ - *out_dir, /* Working & output directory */ - *tmp_dir , /* Temporary directory for input */ - *sync_dir, /* Synchronization directory */ - *sync_id, /* Fuzzer ID */ - *power_name, /* Power schedule name */ - *use_banner, /* Display banner */ - *in_bitmap, /* Input bitmap */ - *file_extension, /* File extension */ - *orig_cmdline; /* Original command line */ -u8 *doc_path, /* Path to documentation dir */ - *target_path, /* Path to target binary */ - *out_file; /* File to fuzz, if any */ - -u32 exec_tmout = EXEC_TIMEOUT; /* Configurable exec 
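
The w_init, w_end and w_now globals consolidated above parameterize MOpt's particle-swarm scheduling: the inertia weight is annealed from w_init (0.9) down to w_end (0.3) as the iteration counter g_now approaches g_max (5000). A sketch of the linear schedule these defaults suggest - the authoritative update lives in the MOpt code in afl-fuzz-one.c:

/* Linear PSO inertia decay: returns w_init at g_now == 0 and w_end at
   g_now == g_max, clamping past the final round. */
static double pso_inertia(double w_init, double w_end, int g_now, int g_max) {

  if (g_now > g_max) g_now = g_max;
  return (w_init - w_end) * (g_max - g_now) / g_max + w_end;

}
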
timeout (ms) */ -u32 hang_tmout = EXEC_TIMEOUT; /* Timeout used for hang det (ms) */ - -u64 mem_limit = MEM_LIMIT; /* Memory cap for child (MB) */ - -u8 cal_cycles = CAL_CYCLES, /* Calibration cycles defaults */ - cal_cycles_long = CAL_CYCLES_LONG, - debug, /* Debug mode */ - python_only; /* Python-only mode */ - -u32 stats_update_freq = 1; /* Stats update frequency (execs) */ - -char *power_names[POWER_SCHEDULES_NUM] = { - "explore", - "fast", - "coe", - "lin", - "quad", - "exploit" -}; - -u8 schedule = EXPLORE; /* Power schedule (default: EXPLORE)*/ +s32 key_lv; + +u8 *in_dir, /* Input directory with test cases */ + *out_dir, /* Working & output directory */ + *tmp_dir, /* Temporary directory for input */ + *sync_dir, /* Synchronization directory */ + *sync_id, /* Fuzzer ID */ + *power_name, /* Power schedule name */ + *use_banner, /* Display banner */ + *in_bitmap, /* Input bitmap */ + *file_extension, /* File extension */ + *orig_cmdline; /* Original command line */ +u8 *doc_path, /* Path to documentation dir */ + *target_path, /* Path to target binary */ + *out_file; /* File to fuzz, if any */ + +u32 exec_tmout = EXEC_TIMEOUT; /* Configurable exec timeout (ms) */ +u32 hang_tmout = EXEC_TIMEOUT; /* Timeout used for hang det (ms) */ + +u64 mem_limit = MEM_LIMIT; /* Memory cap for child (MB) */ + +u8 cal_cycles = CAL_CYCLES, /* Calibration cycles defaults */ + cal_cycles_long = CAL_CYCLES_LONG, debug, /* Debug mode */ + python_only; /* Python-only mode */ + +u32 stats_update_freq = 1; /* Stats update frequency (execs) */ + +char *power_names[POWER_SCHEDULES_NUM] = {"explore", "fast", "coe", + "lin", "quad", "exploit"}; + +u8 schedule = EXPLORE; /* Power schedule (default: EXPLORE)*/ u8 havoc_max_mult = HAVOC_MAX_MULT; -u8 skip_deterministic, /* Skip deterministic stages? */ - force_deterministic, /* Force deterministic stages? */ - use_splicing, /* Recombine input files? */ - dumb_mode, /* Run in non-instrumented mode? */ - score_changed, /* Scoring for favorites changed? */ - kill_signal, /* Signal that killed the child */ - resuming_fuzz, /* Resuming an older fuzzing job? */ - timeout_given, /* Specific timeout given? */ - not_on_tty, /* stdout is not a tty */ - term_too_small, /* terminal dimensions too small */ - no_forkserver, /* Disable forkserver? */ - crash_mode, /* Crash mode! Yeah! */ - in_place_resume, /* Attempt in-place resume? */ - auto_changed, /* Auto-generated tokens changed? */ - no_cpu_meter_red, /* Feng shui on the status screen */ - no_arith, /* Skip most arithmetic ops */ - shuffle_queue, /* Shuffle input queue? */ - bitmap_changed = 1, /* Time to update bitmap? */ - qemu_mode, /* Running in QEMU mode? */ - unicorn_mode, /* Running in Unicorn mode? */ - skip_requested, /* Skip request, via SIGUSR1 */ - run_over10m, /* Run time over 10 minutes? */ - persistent_mode, /* Running in persistent mode? */ - deferred_mode, /* Deferred forkserver mode? */ - fixed_seed, /* do not reseed */ - fast_cal, /* Try to calibrate faster? */ - uses_asan; /* Target uses ASAN? */ - -s32 out_fd, /* Persistent fd for out_file */ +u8 skip_deterministic, /* Skip deterministic stages? */ + force_deterministic, /* Force deterministic stages? */ + use_splicing, /* Recombine input files? */ + dumb_mode, /* Run in non-instrumented mode? */ + score_changed, /* Scoring for favorites changed? */ + kill_signal, /* Signal that killed the child */ + resuming_fuzz, /* Resuming an older fuzzing job? */ + timeout_given, /* Specific timeout given? 
*/ + not_on_tty, /* stdout is not a tty */ + term_too_small, /* terminal dimensions too small */ + no_forkserver, /* Disable forkserver? */ + crash_mode, /* Crash mode! Yeah! */ + in_place_resume, /* Attempt in-place resume? */ + auto_changed, /* Auto-generated tokens changed? */ + no_cpu_meter_red, /* Feng shui on the status screen */ + no_arith, /* Skip most arithmetic ops */ + shuffle_queue, /* Shuffle input queue? */ + bitmap_changed = 1, /* Time to update bitmap? */ + qemu_mode, /* Running in QEMU mode? */ + unicorn_mode, /* Running in Unicorn mode? */ + skip_requested, /* Skip request, via SIGUSR1 */ + run_over10m, /* Run time over 10 minutes? */ + persistent_mode, /* Running in persistent mode? */ + deferred_mode, /* Deferred forkserver mode? */ + fixed_seed, /* do not reseed */ + fast_cal, /* Try to calibrate faster? */ + uses_asan; /* Target uses ASAN? */ + +s32 out_fd, /* Persistent fd for out_file */ #ifndef HAVE_ARC4RANDOM - dev_urandom_fd = -1, /* Persistent fd for /dev/urandom */ + dev_urandom_fd = -1, /* Persistent fd for /dev/urandom */ #endif - dev_null_fd = -1, /* Persistent fd for /dev/null */ - fsrv_ctl_fd, /* Fork server control pipe (write) */ - fsrv_st_fd; /* Fork server status pipe (read) */ - - s32 forksrv_pid, /* PID of the fork server */ - child_pid = -1, /* PID of the fuzzed program */ - out_dir_fd = -1; /* FD of the lock file */ - - u8* trace_bits; /* SHM with instrumentation bitmap */ - -u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */ - virgin_tmout[MAP_SIZE], /* Bits we haven't seen in tmouts */ - virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */ - -u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */ - -volatile u8 stop_soon, /* Ctrl-C pressed? */ - clear_screen = 1, /* Window resized? */ - child_timed_out; /* Traced process timed out? 
*/ - -u32 queued_paths, /* Total number of queued testcases */ - queued_variable, /* Testcases with variable behavior */ - queued_at_start, /* Total number of initial inputs */ - queued_discovered, /* Items discovered during this run */ - queued_imported, /* Items imported via -S */ - queued_favored, /* Paths deemed favorable */ - queued_with_cov, /* Paths with new coverage bytes */ - pending_not_fuzzed, /* Queued but not done yet */ - pending_favored, /* Pending favored paths */ - cur_skipped_paths, /* Abandoned inputs in cur cycle */ - cur_depth, /* Current path depth */ - max_depth, /* Max path depth */ - useless_at_start, /* Number of useless starting paths */ - var_byte_count, /* Bitmap bytes with var behavior */ - current_entry, /* Current queue entry ID */ - havoc_div = 1; /* Cycle count divisor for havoc */ - -u64 total_crashes, /* Total number of crashes */ - unique_crashes, /* Crashes with unique signatures */ - total_tmouts, /* Total number of timeouts */ - unique_tmouts, /* Timeouts with unique signatures */ - unique_hangs, /* Hangs with unique signatures */ - total_execs, /* Total execve() calls */ - slowest_exec_ms, /* Slowest testcase non hang in ms */ - start_time, /* Unix start time (ms) */ - last_path_time, /* Time for most recent path (ms) */ - last_crash_time, /* Time for most recent crash (ms) */ - last_hang_time, /* Time for most recent hang (ms) */ - last_crash_execs, /* Exec counter at last crash */ - queue_cycle, /* Queue round counter */ - cycles_wo_finds, /* Cycles without any new paths */ - trim_execs, /* Execs done to trim input files */ - bytes_trim_in, /* Bytes coming into the trimmer */ - bytes_trim_out, /* Bytes coming outa the trimmer */ - blocks_eff_total, /* Blocks subject to effector maps */ - blocks_eff_select; /* Blocks selected as fuzzable */ - -u32 subseq_tmouts; /* Number of timeouts in a row */ - -u8 *stage_name = "init", /* Name of the current fuzz stage */ - *stage_short, /* Short stage name */ - *syncing_party; /* Currently syncing with... */ - -s32 stage_cur, stage_max; /* Stage progression */ -s32 splicing_with = -1; /* Splicing with which test case? */ - -u32 master_id, master_max; /* Master instance job splitting */ - -u32 syncing_case; /* Syncing with case #... */ - -s32 stage_cur_byte, /* Byte offset of current stage op */ - stage_cur_val; /* Value used for stage op */ - -u8 stage_val_type; /* Value type (STAGE_VAL_*) */ - -u64 stage_finds[32], /* Patterns found per fuzz stage */ - stage_cycles[32]; /* Execs per fuzz stage */ + dev_null_fd = -1, /* Persistent fd for /dev/null */ + fsrv_ctl_fd, /* Fork server control pipe (write) */ + fsrv_st_fd; /* Fork server status pipe (read) */ + +s32 forksrv_pid, /* PID of the fork server */ + child_pid = -1, /* PID of the fuzzed program */ + out_dir_fd = -1; /* FD of the lock file */ + +u8 *trace_bits; /* SHM with instrumentation bitmap */ + +u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */ + virgin_tmout[MAP_SIZE], /* Bits we haven't seen in tmouts */ + virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */ + +u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */ + +volatile u8 stop_soon, /* Ctrl-C pressed? */ + clear_screen = 1, /* Window resized? */ + child_timed_out; /* Traced process timed out? 
*/ + +u32 queued_paths, /* Total number of queued testcases */ + queued_variable, /* Testcases with variable behavior */ + queued_at_start, /* Total number of initial inputs */ + queued_discovered, /* Items discovered during this run */ + queued_imported, /* Items imported via -S */ + queued_favored, /* Paths deemed favorable */ + queued_with_cov, /* Paths with new coverage bytes */ + pending_not_fuzzed, /* Queued but not done yet */ + pending_favored, /* Pending favored paths */ + cur_skipped_paths, /* Abandoned inputs in cur cycle */ + cur_depth, /* Current path depth */ + max_depth, /* Max path depth */ + useless_at_start, /* Number of useless starting paths */ + var_byte_count, /* Bitmap bytes with var behavior */ + current_entry, /* Current queue entry ID */ + havoc_div = 1; /* Cycle count divisor for havoc */ + +u64 total_crashes, /* Total number of crashes */ + unique_crashes, /* Crashes with unique signatures */ + total_tmouts, /* Total number of timeouts */ + unique_tmouts, /* Timeouts with unique signatures */ + unique_hangs, /* Hangs with unique signatures */ + total_execs, /* Total execve() calls */ + slowest_exec_ms, /* Slowest testcase non hang in ms */ + start_time, /* Unix start time (ms) */ + last_path_time, /* Time for most recent path (ms) */ + last_crash_time, /* Time for most recent crash (ms) */ + last_hang_time, /* Time for most recent hang (ms) */ + last_crash_execs, /* Exec counter at last crash */ + queue_cycle, /* Queue round counter */ + cycles_wo_finds, /* Cycles without any new paths */ + trim_execs, /* Execs done to trim input files */ + bytes_trim_in, /* Bytes coming into the trimmer */ + bytes_trim_out, /* Bytes coming outa the trimmer */ + blocks_eff_total, /* Blocks subject to effector maps */ + blocks_eff_select; /* Blocks selected as fuzzable */ + +u32 subseq_tmouts; /* Number of timeouts in a row */ + +u8 *stage_name = "init", /* Name of the current fuzz stage */ + *stage_short, /* Short stage name */ + *syncing_party; /* Currently syncing with... */ + +s32 stage_cur, stage_max; /* Stage progression */ +s32 splicing_with = -1; /* Splicing with which test case? */ + +u32 master_id, master_max; /* Master instance job splitting */ + +u32 syncing_case; /* Syncing with case #... 
*/ + +s32 stage_cur_byte, /* Byte offset of current stage op */ + stage_cur_val; /* Value used for stage op */ + +u8 stage_val_type; /* Value type (STAGE_VAL_*) */ + +u64 stage_finds[32], /* Patterns found per fuzz stage */ + stage_cycles[32]; /* Execs per fuzz stage */ #ifndef HAVE_ARC4RANDOM -u32 rand_cnt; /* Random number counter */ +u32 rand_cnt; /* Random number counter */ #endif -u64 total_cal_us, /* Total calibration time (us) */ - total_cal_cycles; /* Total calibration cycles */ +u64 total_cal_us, /* Total calibration time (us) */ + total_cal_cycles; /* Total calibration cycles */ -u64 total_bitmap_size, /* Total bit count for all bitmaps */ - total_bitmap_entries; /* Number of bitmaps counted */ +u64 total_bitmap_size, /* Total bit count for all bitmaps */ + total_bitmap_entries; /* Number of bitmaps counted */ -s32 cpu_core_count; /* CPU core count */ +s32 cpu_core_count; /* CPU core count */ #ifdef HAVE_AFFINITY -s32 cpu_aff = -1; /* Selected CPU core */ +s32 cpu_aff = -1; /* Selected CPU core */ #endif /* HAVE_AFFINITY */ -FILE* plot_file; /* Gnuplot output file */ +FILE *plot_file; /* Gnuplot output file */ +struct queue_entry *queue, /* Fuzzing queue (linked list) */ + *queue_cur, /* Current offset within the queue */ + *queue_top, /* Top of the list */ + *q_prev100; /* Previous 100 marker */ +struct queue_entry *top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */ -struct queue_entry *queue, /* Fuzzing queue (linked list) */ - *queue_cur, /* Current offset within the queue */ - *queue_top, /* Top of the list */ - *q_prev100; /* Previous 100 marker */ +struct extra_data *extras; /* Extra tokens to fuzz with */ +u32 extras_cnt; /* Total number of tokens read */ -struct queue_entry* - top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */ +struct extra_data *a_extras; /* Automatically selected extras */ +u32 a_extras_cnt; /* Total number of tokens available */ -struct extra_data* extras; /* Extra tokens to fuzz with */ -u32 extras_cnt; /* Total number of tokens read */ - -struct extra_data* a_extras; /* Automatically selected extras */ -u32 a_extras_cnt; /* Total number of tokens available */ - -u8* (*post_handler)(u8* buf, u32* len); +u8 *(*post_handler)(u8 *buf, u32 *len); /* hooks for the custom mutator function */ -size_t (*custom_mutator)(u8 *data, size_t size, u8* mutated_out, size_t max_size, unsigned int seed); +size_t (*custom_mutator)(u8 *data, size_t size, u8 *mutated_out, + size_t max_size, unsigned int seed); size_t (*pre_save_handler)(u8 *data, size_t size, u8 **new_data); - /* Interesting values, as per config.h */ -s8 interesting_8[] = { INTERESTING_8 }; -s16 interesting_16[] = { INTERESTING_8, INTERESTING_16 }; -s32 interesting_32[] = { INTERESTING_8, INTERESTING_16, INTERESTING_32 }; +s8 interesting_8[] = {INTERESTING_8}; +s16 interesting_16[] = {INTERESTING_8, INTERESTING_16}; +s32 interesting_32[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32}; /* Python stuff */ #ifdef USE_PYTHON diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index f66db74c..8a3ee6fa 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -22,7 +22,6 @@ #include "afl-fuzz.h" - #ifdef HAVE_AFFINITY /* Build a list of processes bound to specific cores. 
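
The function pointers above fix the custom-mutator ABI: setup_custom_mutator() (just below, in afl-fuzz-init.c) dlopen()s the library named by AFL_CUSTOM_MUTATOR_LIBRARY and resolves the symbol afl_custom_mutator against this signature. A minimal conforming library - the one-byte-flip logic is purely illustrative:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Build with: cc -shared -fPIC -o mymut.so mymut.c
   Run with:   AFL_CUSTOM_MUTATOR_LIBRARY=./mymut.so afl-fuzz ...
   Copies the input and flips one seed-selected byte; a real mutator
   would apply format-aware transformations instead. */
size_t afl_custom_mutator(uint8_t* data, size_t size, uint8_t* mutated_out,
                          size_t max_size, unsigned int seed) {

  size_t n = size < max_size ? size : max_size;

  memcpy(mutated_out, data, n);
  if (n) mutated_out[seed % n] ^= 0x41;

  return n;                                   /* bytes written */

}
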
Returns -1 if nothing @@ -30,11 +29,11 @@ void bind_to_free_cpu(void) { - DIR* d; + DIR* d; struct dirent* de; - cpu_set_t c; + cpu_set_t c; - u8 cpu_used[4096] = { 0 }; + u8 cpu_used[4096] = {0}; u32 i; if (cpu_core_count < 2) return; @@ -69,18 +68,20 @@ void bind_to_free_cpu(void) { while ((de = readdir(d))) { - u8* fn; + u8* fn; FILE* f; - u8 tmp[MAX_LINE]; - u8 has_vmsize = 0; + u8 tmp[MAX_LINE]; + u8 has_vmsize = 0; if (!isdigit(de->d_name[0])) continue; fn = alloc_printf("/proc/%s/status", de->d_name); if (!(f = fopen(fn, "r"))) { + ck_free(fn); continue; + } while (fgets(tmp, MAX_LINE, f)) { @@ -91,10 +92,9 @@ void bind_to_free_cpu(void) { if (!strncmp(tmp, "VmSize:\t", 8)) has_vmsize = 1; - if (!strncmp(tmp, "Cpus_allowed_list:\t", 19) && - !strchr(tmp, '-') && !strchr(tmp, ',') && - sscanf(tmp + 19, "%u", &hval) == 1 && hval < sizeof(cpu_used) && - has_vmsize) { + if (!strncmp(tmp, "Cpus_allowed_list:\t", 19) && !strchr(tmp, '-') && + !strchr(tmp, ',') && sscanf(tmp + 19, "%u", &hval) == 1 && + hval < sizeof(cpu_used) && has_vmsize) { cpu_used[hval] = 1; break; @@ -110,14 +110,17 @@ void bind_to_free_cpu(void) { closedir(d); - for (i = 0; i < cpu_core_count; ++i) if (!cpu_used[i]) break; + for (i = 0; i < cpu_core_count; ++i) + if (!cpu_used[i]) break; if (i == cpu_core_count) { SAYF("\n" cLRD "[-] " cRST "Uh-oh, looks like all %d CPU cores on your system are allocated to\n" - " other instances of afl-fuzz (or similar CPU-locked tasks). Starting\n" - " another fuzzer on this machine is probably a bad plan, but if you are\n" + " other instances of afl-fuzz (or similar CPU-locked tasks). " + "Starting\n" + " another fuzzer on this machine is probably a bad plan, but if " + "you are\n" " absolutely sure, you can set AFL_NO_AFFINITY and try again.\n", cpu_core_count); @@ -132,8 +135,7 @@ void bind_to_free_cpu(void) { CPU_ZERO(&c); CPU_SET(i, &c); - if (sched_setaffinity(0, sizeof(c), &c)) - PFATAL("sched_setaffinity failed"); + if (sched_setaffinity(0, sizeof(c), &c)) PFATAL("sched_setaffinity failed"); } @@ -144,8 +146,8 @@ void bind_to_free_cpu(void) { void setup_post(void) { void* dh; - u8* fn = getenv("AFL_POST_LIBRARY"); - u32 tlen = 6; + u8* fn = getenv("AFL_POST_LIBRARY"); + u32 tlen = 6; if (!fn) return; @@ -166,8 +168,9 @@ void setup_post(void) { } void setup_custom_mutator(void) { + void* dh; - u8* fn = getenv("AFL_CUSTOM_MUTATOR_LIBRARY"); + u8* fn = getenv("AFL_CUSTOM_MUTATOR_LIBRARY"); if (!fn) return; @@ -180,11 +183,11 @@ void setup_custom_mutator(void) { if (!custom_mutator) FATAL("Symbol 'afl_custom_mutator' not found."); pre_save_handler = dlsym(dh, "afl_pre_save_handler"); -// if (!pre_save_handler) WARNF("Symbol 'afl_pre_save_handler' not found."); + // if (!pre_save_handler) WARNF("Symbol 'afl_pre_save_handler' not found."); OKF("Custom mutator installed successfully."); -} +} /* Shuffle an array of pointers. Might be slightly biased. */ @@ -194,8 +197,8 @@ static void shuffle_ptrs(void** ptrs, u32 cnt) { for (i = 0; i < cnt - 2; ++i) { - u32 j = i + UR(cnt - i); - void *s = ptrs[i]; + u32 j = i + UR(cnt - i); + void* s = ptrs[i]; ptrs[i] = ptrs[j]; ptrs[j] = s; @@ -208,15 +211,18 @@ static void shuffle_ptrs(void** ptrs, u32 cnt) { void read_testcases(void) { - struct dirent **nl; - s32 nl_cnt; - u32 i; - u8* fn1; + struct dirent** nl; + s32 nl_cnt; + u32 i; + u8* fn1; /* Auto-detect non-in-place resumption attempts. 
*/ fn1 = alloc_printf("%s/queue", in_dir); - if (!access(fn1, F_OK)) in_dir = fn1; else ck_free(fn1); + if (!access(fn1, F_OK)) + in_dir = fn1; + else + ck_free(fn1); ACTF("Scanning '%s'...", in_dir); @@ -231,9 +237,12 @@ void read_testcases(void) { if (errno == ENOENT || errno == ENOTDIR) SAYF("\n" cLRD "[-] " cRST - "The input directory does not seem to be valid - try again. The fuzzer needs\n" - " one or more test case to start with - ideally, a small file under 1 kB\n" - " or so. The cases must be stored as regular files directly in the input\n" + "The input directory does not seem to be valid - try again. The " + "fuzzer needs\n" + " one or more test case to start with - ideally, a small file " + "under 1 kB\n" + " or so. The cases must be stored as regular files directly in " + "the input\n" " directory.\n"); PFATAL("Unable to open '%s'", in_dir); @@ -252,12 +261,13 @@ void read_testcases(void) { struct stat st; u8* fn2 = alloc_printf("%s/%s", in_dir, nl[i]->d_name); - u8* dfn = alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name); + u8* dfn = + alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name); + + u8 passed_det = 0; - u8 passed_det = 0; + free(nl[i]); /* not tracked */ - free(nl[i]); /* not tracked */ - if (lstat(fn2, &st) || access(fn2, R_OK)) PFATAL("Unable to access '%s'", fn2); @@ -271,9 +281,9 @@ void read_testcases(void) { } - if (st.st_size > MAX_FILE) - FATAL("Test case '%s' is too big (%s, limit is %s)", fn2, - DMS(st.st_size), DMS(MAX_FILE)); + if (st.st_size > MAX_FILE) + FATAL("Test case '%s' is too big (%s, limit is %s)", fn2, DMS(st.st_size), + DMS(MAX_FILE)); /* Check for metadata that indicates that deterministic fuzzing is complete for this entry. We don't want to repeat deterministic @@ -287,14 +297,17 @@ void read_testcases(void) { } - free(nl); /* not tracked */ + free(nl); /* not tracked */ if (!queued_paths) { SAYF("\n" cLRD "[-] " cRST - "Looks like there are no valid test cases in the input directory! The fuzzer\n" - " needs one or more test case to start with - ideally, a small file under\n" - " 1 kB or so. The cases must be stored as regular files directly in the\n" + "Looks like there are no valid test cases in the input directory! The " + "fuzzer\n" + " needs one or more test case to start with - ideally, a small " + "file under\n" + " 1 kB or so. The cases must be stored as regular files directly " + "in the\n" " input directory.\n"); FATAL("No usable test cases in '%s'", in_dir); @@ -306,7 +319,6 @@ void read_testcases(void) { } - /* Examine map coverage. Called once, for first test case. */ static void check_map_coverage(void) { @@ -322,15 +334,14 @@ static void check_map_coverage(void) { } - /* Perform dry run of all test cases to confirm that the app is working as expected. This is done only for the initial inputs, and only once. */ void perform_dry_run(char** argv) { struct queue_entry* q = queue; - u32 cal_failures = 0; - u8* skip_crashes = getenv("AFL_SKIP_CRASHES"); + u32 cal_failures = 0; + u8* skip_crashes = getenv("AFL_SKIP_CRASHES"); while (q) { @@ -358,7 +369,7 @@ void perform_dry_run(char** argv) { if (stop_soon) return; if (res == crash_mode || res == FAULT_NOBITS) - SAYF(cGRA " len = %u, map size = %u, exec speed = %llu us\n" cRST, + SAYF(cGRA " len = %u, map size = %u, exec speed = %llu us\n" cRST, q->len, q->bitmap_size, q->exec_us); switch (res) { @@ -380,90 +391,119 @@ void perform_dry_run(char** argv) { out. 
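
In the FAULT_TMOUT branch below, timeout_given > 1 encodes a -t value that ended in '+' (e.g. '-t 1000+'), which downgrades a timing-out seed from fatal to skippable. A hypothetical sketch of parsing that suffix - afl-fuzz actually handles it inline in its getopt loop:

#include <stdlib.h>

/* Parse "1000" or "1000+"; *skip_unruly is set when the '+' suffix
   asks the fuzzer to skip rather than abort on timing-out seeds. */
static unsigned parse_timeout_opt(const char* arg, int* skip_unruly) {

  char* end;
  unsigned long ms = strtoul(arg, &end, 10);

  *skip_unruly = (*end == '+');
  return (unsigned)ms;

}
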
*/ if (timeout_given > 1) { + WARNF("Test case results in a timeout (skipping)"); q->cal_failed = CAL_CHANCES; ++cal_failures; break; + } SAYF("\n" cLRD "[-] " cRST - "The program took more than %u ms to process one of the initial test cases.\n" - " Usually, the right thing to do is to relax the -t option - or to delete it\n" - " altogether and allow the fuzzer to auto-calibrate. That said, if you know\n" - " what you are doing and want to simply skip the unruly test cases, append\n" - " '+' at the end of the value passed to -t ('-t %u+').\n", exec_tmout, - exec_tmout); + "The program took more than %u ms to process one of the initial " + "test cases.\n" + " Usually, the right thing to do is to relax the -t option - " + "or to delete it\n" + " altogether and allow the fuzzer to auto-calibrate. That " + "said, if you know\n" + " what you are doing and want to simply skip the unruly test " + "cases, append\n" + " '+' at the end of the value passed to -t ('-t %u+').\n", + exec_tmout, exec_tmout); FATAL("Test case '%s' results in a timeout", fn); } else { SAYF("\n" cLRD "[-] " cRST - "The program took more than %u ms to process one of the initial test cases.\n" - " This is bad news; raising the limit with the -t option is possible, but\n" + "The program took more than %u ms to process one of the initial " + "test cases.\n" + " This is bad news; raising the limit with the -t option is " + "possible, but\n" " will probably make the fuzzing process extremely slow.\n\n" - " If this test case is just a fluke, the other option is to just avoid it\n" - " altogether, and find one that is less of a CPU hog.\n", exec_tmout); + " If this test case is just a fluke, the other option is to " + "just avoid it\n" + " altogether, and find one that is less of a CPU hog.\n", + exec_tmout); FATAL("Test case '%s' results in a timeout", fn); } - case FAULT_CRASH: + case FAULT_CRASH: if (crash_mode) break; if (skip_crashes) { + WARNF("Test case results in a crash (skipping)"); q->cal_failed = CAL_CHANCES; ++cal_failures; break; + } if (mem_limit) { SAYF("\n" cLRD "[-] " cRST - "Oops, the program crashed with one of the test cases provided. There are\n" + "Oops, the program crashed with one of the test cases provided. " + "There are\n" " several possible explanations:\n\n" - " - The test case causes known crashes under normal working conditions. If\n" - " so, please remove it. The fuzzer should be seeded with interesting\n" + " - The test case causes known crashes under normal working " + "conditions. If\n" + " so, please remove it. The fuzzer should be seeded with " + "interesting\n" " inputs - but not ones that cause an outright crash.\n\n" - " - The current memory limit (%s) is too low for this program, causing\n" - " it to die due to OOM when parsing valid files. To fix this, try\n" - " bumping it up with the -m setting in the command line. If in doubt,\n" + " - The current memory limit (%s) is too low for this " + "program, causing\n" + " it to die due to OOM when parsing valid files. To fix " + "this, try\n" + " bumping it up with the -m setting in the command line. " + "If in doubt,\n" " try something along the lines of:\n\n" - MSG_ULIMIT_USAGE " /path/to/binary [...] <testcase )\n\n" + MSG_ULIMIT_USAGE + " /path/to/binary [...] <testcase )\n\n" - " Tip: you can use http://jwilk.net/software/recidivm to quickly\n" - " estimate the required amount of virtual memory for the binary. 
Also,\n" + " Tip: you can use http://jwilk.net/software/recidivm to " + "quickly\n" + " estimate the required amount of virtual memory for the " + "binary. Also,\n" " if you are using ASAN, see %s/notes_for_asan.txt.\n\n" MSG_FORK_ON_APPLE - " - Least likely, there is a horrible bug in the fuzzer. If other options\n" - " fail, poke <afl-users@googlegroups.com> for troubleshooting tips.\n", + " - Least likely, there is a horrible bug in the fuzzer. If " + "other options\n" + " fail, poke <afl-users@googlegroups.com> for " + "troubleshooting tips.\n", DMS(mem_limit << 20), mem_limit - 1, doc_path); } else { SAYF("\n" cLRD "[-] " cRST - "Oops, the program crashed with one of the test cases provided. There are\n" + "Oops, the program crashed with one of the test cases provided. " + "There are\n" " several possible explanations:\n\n" - " - The test case causes known crashes under normal working conditions. If\n" - " so, please remove it. The fuzzer should be seeded with interesting\n" + " - The test case causes known crashes under normal working " + "conditions. If\n" + " so, please remove it. The fuzzer should be seeded with " + "interesting\n" " inputs - but not ones that cause an outright crash.\n\n" MSG_FORK_ON_APPLE - " - Least likely, there is a horrible bug in the fuzzer. If other options\n" - " fail, poke <afl-users@googlegroups.com> for troubleshooting tips.\n"); + " - Least likely, there is a horrible bug in the fuzzer. If " + "other options\n" + " fail, poke <afl-users@googlegroups.com> for " + "troubleshooting tips.\n"); } + #undef MSG_ULIMIT_USAGE #undef MSG_FORK_ON_APPLE @@ -473,11 +513,9 @@ void perform_dry_run(char** argv) { FATAL("Unable to execute target application ('%s')", argv[0]); - case FAULT_NOINST: + case FAULT_NOINST: FATAL("No instrumentation detected"); - FATAL("No instrumentation detected"); - - case FAULT_NOBITS: + case FAULT_NOBITS: ++useless_at_start; @@ -513,7 +551,6 @@ void perform_dry_run(char** argv) { } - /* Helper function: link() if possible, copy otherwise. */ static void link_or_copy(u8* old_path, u8* new_path) { @@ -532,7 +569,7 @@ static void link_or_copy(u8* old_path, u8* new_path) { tmp = ck_alloc(64 * 1024); - while ((i = read(sfd, tmp, 64 * 1024)) > 0) + while ((i = read(sfd, tmp, 64 * 1024)) > 0) ck_write(dfd, tmp, i, new_path); if (i < 0) PFATAL("read() failed"); @@ -543,23 +580,25 @@ static void link_or_copy(u8* old_path, u8* new_path) { } - /* Create hard links for input test cases in the output directory, choosing good names and pivoting accordingly. */ void pivot_inputs(void) { struct queue_entry* q = queue; - u32 id = 0; + u32 id = 0; ACTF("Creating hard links for all input files..."); while (q) { - u8 *nfn, *rsl = strrchr(q->fname, '/'); + u8 *nfn, *rsl = strrchr(q->fname, '/'); u32 orig_id; - if (!rsl) rsl = q->fname; else ++rsl; + if (!rsl) + rsl = q->fname; + else + ++rsl; /* If the original file name conforms to the syntax and the recorded ID matches the one we'd assign, just use the original file name. 
@@ -582,7 +621,8 @@ void pivot_inputs(void) { if (src_str && sscanf(src_str + 1, "%06u", &src_id) == 1) { struct queue_entry* s = queue; - while (src_id-- && s) s = s->next; + while (src_id-- && s) + s = s->next; if (s) q->depth = s->depth + 1; if (max_depth < q->depth) max_depth = q->depth; @@ -598,7 +638,10 @@ void pivot_inputs(void) { u8* use_name = strstr(rsl, ",orig:"); - if (use_name) use_name += 6; else use_name = rsl; + if (use_name) + use_name += 6; + else + use_name = rsl; nfn = alloc_printf("%s/queue/id:%06u,orig:%s", out_dir, id, use_name); #else @@ -628,29 +671,31 @@ void pivot_inputs(void) { } - /* When resuming, try to find the queue position to start from. This makes sense only when resuming, and when we can find the original fuzzer_stats. */ u32 find_start_position(void) { - static u8 tmp[4096]; /* Ought to be enough for anybody. */ + static u8 tmp[4096]; /* Ought to be enough for anybody. */ - u8 *fn, *off; + u8 *fn, *off; s32 fd, i; u32 ret; if (!resuming_fuzz) return 0; - if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir); - else fn = alloc_printf("%s/../fuzzer_stats", in_dir); + if (in_place_resume) + fn = alloc_printf("%s/fuzzer_stats", out_dir); + else + fn = alloc_printf("%s/../fuzzer_stats", in_dir); fd = open(fn, O_RDONLY); ck_free(fn); if (fd < 0) return 0; - i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */ + i = read(fd, tmp, sizeof(tmp) - 1); + (void)i; /* Ignore errors */ close(fd); off = strstr(tmp, "cur_path : "); @@ -662,30 +707,32 @@ u32 find_start_position(void) { } - /* The same, but for timeouts. The idea is that when resuming sessions without -t given, we don't want to keep auto-scaling the timeout over and over again to prevent it from growing due to random flukes. */ void find_timeout(void) { - static u8 tmp[4096]; /* Ought to be enough for anybody. */ + static u8 tmp[4096]; /* Ought to be enough for anybody. */ - u8 *fn, *off; + u8 *fn, *off; s32 fd, i; u32 ret; if (!resuming_fuzz) return; - if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir); - else fn = alloc_printf("%s/../fuzzer_stats", in_dir); + if (in_place_resume) + fn = alloc_printf("%s/fuzzer_stats", out_dir); + else + fn = alloc_printf("%s/../fuzzer_stats", in_dir); fd = open(fn, O_RDONLY); ck_free(fn); if (fd < 0) return; - i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */ + i = read(fd, tmp, sizeof(tmp) - 1); + (void)i; /* Ignore errors */ close(fd); off = strstr(tmp, "exec_timeout : "); @@ -699,14 +746,12 @@ void find_timeout(void) { } - - /* A helper function for maybe_delete_out_dir(), deleting all prefixed files in a directory. */ static u8 delete_files(u8* path, u8* prefix) { - DIR* d; + DIR* d; struct dirent* d_ent; d = opendir(path); @@ -715,8 +760,8 @@ static u8 delete_files(u8* path, u8* prefix) { while ((d_ent = readdir(d))) { - if (d_ent->d_name[0] != '.' && (!prefix || - !strncmp(d_ent->d_name, prefix, strlen(prefix)))) { + if (d_ent->d_name[0] != '.' && + (!prefix || !strncmp(d_ent->d_name, prefix, strlen(prefix)))) { u8* fname = alloc_printf("%s/%s", path, d_ent->d_name); if (unlink(fname)) PFATAL("Unable to delete '%s'", fname); @@ -732,14 +777,13 @@ static u8 delete_files(u8* path, u8* prefix) { } - /* Get the number of runnable processes, with some simple smoothing. 
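
Both resume helpers above (find_start_position() and find_timeout()) scan a slurped copy of fuzzer_stats for a fixed "key : value" line and read the integer that follows. The shared pattern as a small helper (stat_u32 is an illustrative name):

#include <stdio.h>
#include <string.h>

/* Look up "<key> ... : <number>" in the in-memory fuzzer_stats copy.
   Returns 1 and stores the number via *out, or 0 if key is missing. */
static int stat_u32(const char* buf, const char* key, unsigned* out) {

  const char* off = strstr(buf, key);

  if (!off) return 0;
  return sscanf(off + strlen(key), " : %u", out) == 1;

}

/* e.g. stat_u32(tmp, "cur_path", &ret) to recover the queue position. */
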
*/ double get_runnable_processes(void) { static double res; -#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) +#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) /* I don't see any portable sysctl or so that would quickly give us the number of runnable processes; the 1-minute load average can be a @@ -762,10 +806,11 @@ double get_runnable_processes(void) { while (fgets(tmp, sizeof(tmp), f)) { if (!strncmp(tmp, "procs_running ", 14) || - !strncmp(tmp, "procs_blocked ", 14)) val += atoi(tmp + 14); + !strncmp(tmp, "procs_blocked ", 14)) + val += atoi(tmp + 14); } - + fclose(f); if (!res) { @@ -785,7 +830,6 @@ double get_runnable_processes(void) { } - /* Delete the temporary directory used for in-place session resume. */ void nuke_resume_dir(void) { @@ -824,14 +868,13 @@ dir_cleanup_failed: } - /* Delete fuzzer output directory if we recognize it as ours, if the fuzzer is not currently running, and if the last run time isn't too great. */ void maybe_delete_out_dir(void) { FILE* f; - u8 *fn = alloc_printf("%s/fuzzer_stats", out_dir); + u8* fn = alloc_printf("%s/fuzzer_stats", out_dir); /* See if the output directory is locked. If yes, bail out. If not, create a lock that will persist for the lifetime of the process @@ -845,7 +888,8 @@ void maybe_delete_out_dir(void) { if (flock(out_dir_fd, LOCK_EX | LOCK_NB) && errno == EWOULDBLOCK) { SAYF("\n" cLRD "[-] " cRST - "Looks like the job output directory is being actively used by another\n" + "Looks like the job output directory is being actively used by " + "another\n" " instance of afl-fuzz. You will need to choose a different %s\n" " or stop the other process first.\n", sync_id ? "fuzzer ID" : "output location"); @@ -862,8 +906,10 @@ void maybe_delete_out_dir(void) { u64 start_time2, last_update; - if (fscanf(f, "start_time : %llu\n" - "last_update : %llu\n", &start_time2, &last_update) != 2) + if (fscanf(f, + "start_time : %llu\n" + "last_update : %llu\n", + &start_time2, &last_update) != 2) FATAL("Malformed data in '%s'", fn); fclose(f); @@ -873,16 +919,22 @@ void maybe_delete_out_dir(void) { if (!in_place_resume && last_update - start_time2 > OUTPUT_GRACE * 60) { SAYF("\n" cLRD "[-] " cRST - "The job output directory already exists and contains the results of more\n" - " than %d minutes worth of fuzzing. To avoid data loss, afl-fuzz will *NOT*\n" + "The job output directory already exists and contains the results " + "of more\n" + " than %d minutes worth of fuzzing. To avoid data loss, afl-fuzz " + "will *NOT*\n" " automatically delete this data for you.\n\n" - " If you wish to start a new session, remove or rename the directory manually,\n" - " or specify a different output location for this job. To resume the old\n" - " session, put '-' as the input directory in the command line ('-i -') and\n" - " try again.\n", OUTPUT_GRACE); + " If you wish to start a new session, remove or rename the " + "directory manually,\n" + " or specify a different output location for this job. 
To resume " + "the old\n" + " session, put '-' as the input directory in the command line " + "('-i -') and\n" + " try again.\n", + OUTPUT_GRACE); - FATAL("At-risk data found in '%s'", out_dir); + FATAL("At-risk data found in '%s'", out_dir); } @@ -902,7 +954,7 @@ void maybe_delete_out_dir(void) { in_dir = alloc_printf("%s/_resume", out_dir); - rename(orig_q, in_dir); /* Ignore errors */ + rename(orig_q, in_dir); /* Ignore errors */ OKF("Output directory exists, will attempt session resume."); @@ -961,7 +1013,7 @@ void maybe_delete_out_dir(void) { if (!in_place_resume) { fn = alloc_printf("%s/crashes/README.txt", out_dir); - unlink(fn); /* Ignore errors */ + unlink(fn); /* Ignore errors */ ck_free(fn); } @@ -973,7 +1025,7 @@ void maybe_delete_out_dir(void) { if (in_place_resume && rmdir(fn)) { - time_t cur_t = time(0); + time_t cur_t = time(0); struct tm* t = localtime(&cur_t); #ifndef SIMPLE_FILES @@ -984,13 +1036,13 @@ void maybe_delete_out_dir(void) { #else - u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, - t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, - t->tm_hour, t->tm_min, t->tm_sec); + u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, t->tm_year + 1900, + t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, + t->tm_sec); #endif /* ^!SIMPLE_FILES */ - rename(fn, nfn); /* Ignore errors. */ + rename(fn, nfn); /* Ignore errors. */ ck_free(nfn); } @@ -1004,7 +1056,7 @@ void maybe_delete_out_dir(void) { if (in_place_resume && rmdir(fn)) { - time_t cur_t = time(0); + time_t cur_t = time(0); struct tm* t = localtime(&cur_t); #ifndef SIMPLE_FILES @@ -1015,13 +1067,13 @@ void maybe_delete_out_dir(void) { #else - u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, - t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, - t->tm_hour, t->tm_min, t->tm_sec); + u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, t->tm_year + 1900, + t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, + t->tm_sec); #endif /* ^!SIMPLE_FILES */ - rename(fn, nfn); /* Ignore errors. */ + rename(fn, nfn); /* Ignore errors. */ ck_free(nfn); } @@ -1032,9 +1084,13 @@ void maybe_delete_out_dir(void) { /* And now, for some finishing touches. */ if (file_extension) { + fn = alloc_printf("%s/.cur_input.%s", out_dir, file_extension); + } else { + fn = alloc_printf("%s/.cur_input", out_dir); + } if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; @@ -1045,9 +1101,11 @@ void maybe_delete_out_dir(void) { ck_free(fn); if (!in_place_resume) { - fn = alloc_printf("%s/fuzzer_stats", out_dir); + + fn = alloc_printf("%s/fuzzer_stats", out_dir); if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; ck_free(fn); + } fn = alloc_printf("%s/plot_data", out_dir); @@ -1067,19 +1125,22 @@ void maybe_delete_out_dir(void) { dir_cleanup_failed: SAYF("\n" cLRD "[-] " cRST - "Whoops, the fuzzer tried to reuse your output directory, but bumped into\n" - " some files that shouldn't be there or that couldn't be removed - so it\n" + "Whoops, the fuzzer tried to reuse your output directory, but bumped " + "into\n" + " some files that shouldn't be there or that couldn't be removed - " + "so it\n" " decided to abort! This happened while processing this path:\n\n" " %s\n\n" - " Please examine and manually delete the files, or specify a different\n" - " output location for the tool.\n", fn); + " Please examine and manually delete the files, or specify a " + "different\n" + " output location for the tool.\n", + fn); FATAL("Output directory cleanup failed"); } - /* Prepare output directories and fds. 
*/ void setup_dirs_fds(void) { @@ -1090,7 +1151,7 @@ void setup_dirs_fds(void) { ACTF("Setting up output directories..."); if (sync_id && mkdir(sync_dir, 0700) && errno != EEXIST) - PFATAL("Unable to create '%s'", sync_dir); + PFATAL("Unable to create '%s'", sync_dir); if (mkdir(out_dir, 0700)) { @@ -1197,14 +1258,16 @@ void setup_dirs_fds(void) { plot_file = fdopen(fd, "w"); if (!plot_file) PFATAL("fdopen() failed"); - fprintf(plot_file, "# unix_time, cycles_done, cur_path, paths_total, " - "pending_total, pending_favs, map_size, unique_crashes, " - "unique_hangs, max_depth, execs_per_sec\n"); - /* ignore errors */ + fprintf(plot_file, + "# unix_time, cycles_done, cur_path, paths_total, " + "pending_total, pending_favs, map_size, unique_crashes, " + "unique_hangs, max_depth, execs_per_sec\n"); + /* ignore errors */ } void setup_cmdline_file(char** argv) { + u8* tmp; s32 fd; u32 i = 0; @@ -1221,13 +1284,15 @@ void setup_cmdline_file(char** argv) { if (!cmdline_file) PFATAL("fdopen() failed"); while (argv[i]) { + fprintf(cmdline_file, "%s\n", argv[i]); ++i; + } fclose(cmdline_file); -} +} /* Setup the output file for fuzzed data, if not using -f. */ @@ -1235,12 +1300,16 @@ void setup_stdio_file(void) { u8* fn; if (file_extension) { + fn = alloc_printf("%s/.cur_input.%s", out_dir, file_extension); + } else { + fn = alloc_printf("%s/.cur_input", out_dir); + } - unlink(fn); /* Ignore errors */ + unlink(fn); /* Ignore errors */ out_fd = open(fn, O_RDWR | O_CREAT | O_EXCL, 0600); @@ -1250,32 +1319,34 @@ void setup_stdio_file(void) { } - /* Make sure that core dumps don't go to a program. */ void check_crash_handling(void) { #ifdef __APPLE__ - /* Yuck! There appears to be no simple C API to query for the state of + /* Yuck! There appears to be no simple C API to query for the state of loaded daemons on MacOS X, and I'm a bit hesitant to do something more sophisticated, such as disabling crash reporting via Mach ports, until I get a box to test the code. So, for now, we check for crash reporting the awful way. */ - - if (system("launchctl list 2>/dev/null | grep -q '\\.ReportCrash$'")) return; - SAYF("\n" cLRD "[-] " cRST - "Whoops, your system is configured to forward crash notifications to an\n" - " external crash reporting utility. This will cause issues due to the\n" - " extended delay between the fuzzed binary malfunctioning and this fact\n" - " being relayed to the fuzzer via the standard waitpid() API.\n\n" - " To avoid having crashes misinterpreted as timeouts, please run the\n" - " following commands:\n\n" + if (system("launchctl list 2>/dev/null | grep -q '\\.ReportCrash$'")) return; - " SL=/System/Library; PL=com.apple.ReportCrash\n" - " launchctl unload -w ${SL}/LaunchAgents/${PL}.plist\n" - " sudo launchctl unload -w ${SL}/LaunchDaemons/${PL}.Root.plist\n"); + SAYF( + "\n" cLRD "[-] " cRST + "Whoops, your system is configured to forward crash notifications to an\n" + " external crash reporting utility. 
This will cause issues due to " + "the\n" + " extended delay between the fuzzed binary malfunctioning and this " + "fact\n" + " being relayed to the fuzzer via the standard waitpid() API.\n\n" + " To avoid having crashes misinterpreted as timeouts, please run the\n" + " following commands:\n\n" + + " SL=/System/Library; PL=com.apple.ReportCrash\n" + " launchctl unload -w ${SL}/LaunchAgents/${PL}.plist\n" + " sudo launchctl unload -w ${SL}/LaunchDaemons/${PL}.Root.plist\n"); if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES")) FATAL("Crash reporter detected"); @@ -1283,10 +1354,10 @@ void check_crash_handling(void) { #else /* This is Linux specific, but I don't think there's anything equivalent on - *BSD, so we can just let it slide for now. */ + *BSD, so we can just let it slide for now. */ s32 fd = open("/proc/sys/kernel/core_pattern", O_RDONLY); - u8 fchar; + u8 fchar; if (fd < 0) return; @@ -1294,54 +1365,68 @@ void check_crash_handling(void) { if (read(fd, &fchar, 1) == 1 && fchar == '|') { - SAYF("\n" cLRD "[-] " cRST - "Hmm, your system is configured to send core dump notifications to an\n" - " external utility. This will cause issues: there will be an extended delay\n" - " between stumbling upon a crash and having this information relayed to the\n" - " fuzzer via the standard waitpid() API.\n\n" + SAYF( + "\n" cLRD "[-] " cRST + "Hmm, your system is configured to send core dump notifications to an\n" + " external utility. This will cause issues: there will be an " + "extended delay\n" + " between stumbling upon a crash and having this information " + "relayed to the\n" + " fuzzer via the standard waitpid() API.\n\n" - " To avoid having crashes misinterpreted as timeouts, please log in as root\n" - " and temporarily modify /proc/sys/kernel/core_pattern, like so:\n\n" + " To avoid having crashes misinterpreted as timeouts, please log in " + "as root\n" + " and temporarily modify /proc/sys/kernel/core_pattern, like so:\n\n" - " echo core >/proc/sys/kernel/core_pattern\n"); + " echo core >/proc/sys/kernel/core_pattern\n"); if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES")) FATAL("Pipe at the beginning of 'core_pattern'"); } - + close(fd); #endif /* ^__APPLE__ */ } - /* Check CPU governor. 
*/ void check_cpu_governor(void) { + #ifdef __linux__ FILE* f; - u8 tmp[128]; - u64 min = 0, max = 0; + u8 tmp[128]; + u64 min = 0, max = 0; if (getenv("AFL_SKIP_CPUFREQ")) return; if (cpu_aff > 0) - snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpu", cpu_aff, "/cpufreq/scaling_governor"); + snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpu", cpu_aff, + "/cpufreq/scaling_governor"); else - snprintf(tmp, sizeof(tmp), "%s", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"); + snprintf(tmp, sizeof(tmp), "%s", + "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"); f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "r"); if (!f) { + if (cpu_aff > 0) - snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpufreq/policy", cpu_aff, "/scaling_governor"); + snprintf(tmp, sizeof(tmp), "%s%d%s", + "/sys/devices/system/cpu/cpufreq/policy", cpu_aff, + "/scaling_governor"); else - snprintf(tmp, sizeof(tmp), "%s", "/sys/devices/system/cpu/cpufreq/policy0/scaling_governor"); + snprintf(tmp, sizeof(tmp), "%s", + "/sys/devices/system/cpu/cpufreq/policy0/scaling_governor"); f = fopen(tmp, "r"); + } + if (!f) { + WARNF("Could not check CPU scaling governor"); return; + } ACTF("Checking CPU scaling governor..."); @@ -1355,71 +1440,79 @@ void check_cpu_governor(void) { f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq", "r"); if (f) { + if (fscanf(f, "%llu", &min) != 1) min = 0; fclose(f); + } f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq", "r"); if (f) { + if (fscanf(f, "%llu", &max) != 1) max = 0; fclose(f); + } if (min == max) return; SAYF("\n" cLRD "[-] " cRST "Whoops, your system uses on-demand CPU frequency scaling, adjusted\n" - " between %llu and %llu MHz. Unfortunately, the scaling algorithm in the\n" - " kernel is imperfect and can miss the short-lived processes spawned by\n" + " between %llu and %llu MHz. Unfortunately, the scaling algorithm in " + "the\n" + " kernel is imperfect and can miss the short-lived processes spawned " + "by\n" " afl-fuzz. To keep things moving, run these commands as root:\n\n" " cd /sys/devices/system/cpu\n" " echo performance | tee cpu*/cpufreq/scaling_governor\n\n" - " You can later go back to the original state by replacing 'performance' with\n" - " 'ondemand'. If you don't want to change the settings, set AFL_SKIP_CPUFREQ\n" - " to make afl-fuzz skip this check - but expect some performance drop.\n", + " You can later go back to the original state by replacing " + "'performance' with\n" + " 'ondemand'. If you don't want to change the settings, set " + "AFL_SKIP_CPUFREQ\n" + " to make afl-fuzz skip this check - but expect some performance " + "drop.\n", min / 1024, max / 1024); FATAL("Suboptimal CPU scaling governor"); #endif -} +} /* Count the number of logical CPU cores. */ void get_core_count(void) { -#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) +#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) size_t s = sizeof(cpu_core_count); /* On *BSD systems, we can just use a sysctl to get the number of CPUs. 
*/ -#ifdef __APPLE__ +# ifdef __APPLE__ - if (sysctlbyname("hw.logicalcpu", &cpu_core_count, &s, NULL, 0) < 0) - return; + if (sysctlbyname("hw.logicalcpu", &cpu_core_count, &s, NULL, 0) < 0) return; -#else +# else - int s_name[2] = { CTL_HW, HW_NCPU }; + int s_name[2] = {CTL_HW, HW_NCPU}; if (sysctl(s_name, 2, &cpu_core_count, &s, NULL, 0) < 0) return; -#endif /* ^__APPLE__ */ +# endif /* ^__APPLE__ */ #else -#ifdef HAVE_AFFINITY +# ifdef HAVE_AFFINITY cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN); -#else +# else FILE* f = fopen("/proc/stat", "r"); - u8 tmp[1024]; + u8 tmp[1024]; if (!f) return; @@ -1428,7 +1521,7 @@ void get_core_count(void) { fclose(f); -#endif /* ^HAVE_AFFINITY */ +# endif /* ^HAVE_AFFINITY */ #endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */ @@ -1438,7 +1531,7 @@ void get_core_count(void) { cur_runnable = (u32)get_runnable_processes(); -#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) +#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) /* Add ourselves, since the 1-minute average doesn't include that yet. */ @@ -1447,8 +1540,8 @@ void get_core_count(void) { #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ OKF("You have %d CPU core%s and %u runnable tasks (utilization: %0.0f%%).", - cpu_core_count, cpu_core_count > 1 ? "s" : "", - cur_runnable, cur_runnable * 100.0 / cpu_core_count); + cpu_core_count, cpu_core_count > 1 ? "s" : "", cur_runnable, + cur_runnable * 100.0 / cpu_core_count); if (cpu_core_count > 1) { @@ -1459,7 +1552,7 @@ void get_core_count(void) { } else if (cur_runnable + 1 <= cpu_core_count) { OKF("Try parallel jobs - see %s/parallel_fuzzing.txt.", doc_path); - + } } @@ -1473,21 +1566,18 @@ void get_core_count(void) { } - /* Validate and fix up out_dir and sync_dir when using -S. */ void fix_up_sync(void) { u8* x = sync_id; - if (dumb_mode) - FATAL("-S / -M and -n are mutually exclusive"); + if (dumb_mode) FATAL("-S / -M and -n are mutually exclusive"); if (skip_deterministic) { - if (force_deterministic) - FATAL("use -S instead of -M -d"); - //else + if (force_deterministic) FATAL("use -S instead of -M -d"); + // else // FATAL("-S already implies -d"); } @@ -1506,26 +1596,29 @@ void fix_up_sync(void) { x = alloc_printf("%s/%s", out_dir, sync_id); sync_dir = out_dir; - out_dir = x; + out_dir = x; if (!force_deterministic) { + skip_deterministic = 1; use_splicing = 1; + } } - /* Handle screen resize (SIGWINCH). */ static void handle_resize(int sig) { + clear_screen = 1; -} +} /* Check ASAN options. */ void check_asan_opts(void) { + u8* x = getenv("ASAN_OPTIONS"); if (x) { @@ -1543,29 +1636,27 @@ void check_asan_opts(void) { if (x) { if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR))) - FATAL("Custom MSAN_OPTIONS set without exit_code=" - STRINGIFY(MSAN_ERROR) " - please fix!"); + FATAL("Custom MSAN_OPTIONS set without exit_code=" STRINGIFY( + MSAN_ERROR) " - please fix!"); if (!strstr(x, "symbolize=0")) FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!"); } -} - +} /* Handle stop signal (Ctrl-C, etc). */ static void handle_stop_sig(int sig) { - stop_soon = 1; + stop_soon = 1; if (child_pid > 0) kill(child_pid, SIGKILL); if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL); } - /* Handle skip request (SIGUSR1). */ static void handle_skipreq(int sig) { @@ -1574,14 +1665,13 @@ static void handle_skipreq(int sig) { } - /* Do a PATH search and find target binary to see that it exists and isn't a shell script - a common and painful mistake. 
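
get_core_count(), reflowed earlier in this hunk, takes three platform paths; with HAVE_AFFINITY (and in practice on modern BSDs and macOS as well) it reduces to a single sysconf() call. The common case, with the /proc/stat fallback omitted:

#include <unistd.h>

/* Logical CPUs currently online; covers the HAVE_AFFINITY branch above. */
static long count_cores(void) {

  long n = sysconf(_SC_NPROCESSORS_ONLN);
  return n > 0 ? n : 1;                       /* defensive default */

}
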
We also check for a valid ELF header and for evidence of AFL instrumentation. */ void check_binary(u8* fname) { - u8* env_path = 0; + u8* env_path = 0; struct stat st; s32 fd; @@ -1609,7 +1699,9 @@ void check_binary(u8* fname) { memcpy(cur_elem, env_path, delim - env_path); ++delim; - } else cur_elem = ck_strdup(env_path); + } else + + cur_elem = ck_strdup(env_path); env_path = delim; @@ -1621,7 +1713,8 @@ void check_binary(u8* fname) { ck_free(cur_elem); if (!stat(target_path, &st) && S_ISREG(st.st_mode) && - (st.st_mode & 0111) && (f_len = st.st_size) >= 4) break; + (st.st_mode & 0111) && (f_len = st.st_size) >= 4) + break; ck_free(target_path); target_path = 0; @@ -1638,7 +1731,7 @@ void check_binary(u8* fname) { if ((!strncmp(target_path, "/tmp/", 5) && !strchr(target_path + 5, '/')) || (!strncmp(target_path, "/var/tmp/", 9) && !strchr(target_path + 9, '/'))) - FATAL("Please don't keep binaries in /tmp or /var/tmp"); + FATAL("Please don't keep binaries in /tmp or /var/tmp"); fd = open(target_path, O_RDONLY); @@ -1653,13 +1746,19 @@ void check_binary(u8* fname) { if (f_data[0] == '#' && f_data[1] == '!') { SAYF("\n" cLRD "[-] " cRST - "Oops, the target binary looks like a shell script. Some build systems will\n" - " sometimes generate shell stubs for dynamically linked programs; try static\n" - " library mode (./configure --disable-shared) if that's the case.\n\n" - - " Another possible cause is that you are actually trying to use a shell\n" - " wrapper around the fuzzed component. Invoking shell can slow down the\n" - " fuzzing process by a factor of 20x or more; it's best to write the wrapper\n" + "Oops, the target binary looks like a shell script. Some build " + "systems will\n" + " sometimes generate shell stubs for dynamically linked programs; " + "try static\n" + " library mode (./configure --disable-shared) if that's the " + "case.\n\n" + + " Another possible cause is that you are actually trying to use a " + "shell\n" + " wrapper around the fuzzed component. Invoking shell can slow " + "down the\n" + " fuzzing process by a factor of 20x or more; it's best to write " + "the wrapper\n" " in a compiled language instead.\n"); FATAL("Program '%s' is a shell script", target_path); @@ -1673,28 +1772,35 @@ void check_binary(u8* fname) { #else -#if !defined(__arm__) && !defined(__arm64__) +# if !defined(__arm__) && !defined(__arm64__) if (f_data[0] != 0xCF || f_data[1] != 0xFA || f_data[2] != 0xED) FATAL("Program '%s' is not a 64-bit Mach-O binary", target_path); -#endif +# endif #endif /* ^!__APPLE__ */ if (!qemu_mode && !unicorn_mode && !dumb_mode && !memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) { - SAYF("\n" cLRD "[-] " cRST - "Looks like the target binary is not instrumented! The fuzzer depends on\n" - " compile-time instrumentation to isolate interesting test cases while\n" - " mutating the input data. For more information, and for tips on how to\n" - " instrument binaries, please see %s/README.\n\n" - - " When source code is not available, you may be able to leverage QEMU\n" - " mode support. Consult the README for tips on how to enable this.\n" - - " (It is also possible to use afl-fuzz as a traditional, \"dumb\" fuzzer.\n" - " For that, you can use the -n option - but expect much worse results.)\n", - doc_path); + SAYF( + "\n" cLRD "[-] " cRST + "Looks like the target binary is not instrumented! The fuzzer depends " + "on\n" + " compile-time instrumentation to isolate interesting test cases " + "while\n" + " mutating the input data. 
For more information, and for tips on " + "how to\n" + " instrument binaries, please see %s/README.\n\n" + + " When source code is not available, you may be able to leverage " + "QEMU\n" + " mode support. Consult the README for tips on how to enable this.\n" + + " (It is also possible to use afl-fuzz as a traditional, \"dumb\" " + "fuzzer.\n" + " For that, you can use the -n option - but expect much worse " + "results.)\n", + doc_path); FATAL("No instrumentation detected"); @@ -1704,8 +1810,10 @@ void check_binary(u8* fname) { memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) { SAYF("\n" cLRD "[-] " cRST - "This program appears to be instrumented with afl-gcc, but is being run in\n" - " QEMU or Unicorn mode (-Q or -U). This is probably not what you want -\n" + "This program appears to be instrumented with afl-gcc, but is being " + "run in\n" + " QEMU or Unicorn mode (-Q or -U). This is probably not what you " + "want -\n" " this setup will be slow and offer no practical benefits.\n"); FATAL("Instrumentation found in -Q or -U mode"); @@ -1713,7 +1821,8 @@ void check_binary(u8* fname) { } if (memmem(f_data, f_len, "libasan.so", 10) || - memmem(f_data, f_len, "__msan_init", 11)) uses_asan = 1; + memmem(f_data, f_len, "__msan_init", 11)) + uses_asan = 1; /* Detect persistent & deferred init signatures in the binary. */ @@ -1745,7 +1854,6 @@ void check_binary(u8* fname) { } - /* Trim and possibly create a banner for the run. */ void fix_up_banner(u8* name) { @@ -1759,7 +1867,10 @@ void fix_up_banner(u8* name) { } else { u8* trim = strrchr(name, '/'); - if (!trim) use_banner = name; else use_banner = trim + 1; + if (!trim) + use_banner = name; + else + use_banner = trim + 1; } @@ -1775,7 +1886,6 @@ void fix_up_banner(u8* name) { } - /* Check if we're on TTY. */ void check_if_tty(void) { @@ -1783,24 +1893,29 @@ void check_if_tty(void) { struct winsize ws; if (getenv("AFL_NO_UI")) { + OKF("Disabling the UI because AFL_NO_UI is set."); not_on_tty = 1; return; + } if (ioctl(1, TIOCGWINSZ, &ws)) { if (errno == ENOTTY) { - OKF("Looks like we're not running on a tty, so I'll be a bit less verbose."); + + OKF("Looks like we're not running on a tty, so I'll be a bit less " + "verbose."); not_on_tty = 1; + } return; + } } - /* Set up signal handlers. More complicated that needs to be, because libc on Solaris doesn't resume interrupted reads(), sets SA_RESETHAND when you call siginterrupt(), and does other stupid things. */ @@ -1809,8 +1924,8 @@ void setup_signal_handlers(void) { struct sigaction sa; - sa.sa_handler = NULL; - sa.sa_flags = SA_RESTART; + sa.sa_handler = NULL; + sa.sa_flags = SA_RESTART; sa.sa_sigaction = NULL; sigemptyset(&sa.sa_mask); @@ -1845,13 +1960,12 @@ void setup_signal_handlers(void) { } - /* Rewrite argv for QEMU. 
*/ char** get_qemu_argv(u8* own_loc, char** argv, int argc) { char** new_argv = ck_alloc(sizeof(char*) * (argc + 4)); - u8 *tmp, *cp, *rsl, *own_copy; + u8 * tmp, *cp, *rsl, *own_copy; memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc); @@ -1866,8 +1980,7 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) { cp = alloc_printf("%s/afl-qemu-trace", tmp); - if (access(cp, X_OK)) - FATAL("Unable to find '%s'", tmp); + if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp); target_path = new_argv[0] = cp; return new_argv; @@ -1891,7 +2004,9 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) { } - } else ck_free(own_copy); + } else + + ck_free(own_copy); if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) { @@ -1901,14 +2016,20 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) { } SAYF("\n" cLRD "[-] " cRST - "Oops, unable to find the 'afl-qemu-trace' binary. The binary must be built\n" - "    separately by following the instructions in qemu_mode/README.qemu. If you\n" - "    already have the binary installed, you may need to specify AFL_PATH in the\n" + "Oops, unable to find the 'afl-qemu-trace' binary. The binary must be " "built\n" + "    separately by following the instructions in qemu_mode/README.qemu. " "If you\n" + "    already have the binary installed, you may need to specify " "AFL_PATH in the\n" "    environment.\n\n" - "    Of course, even without QEMU, afl-fuzz can still work with binaries that are\n" - "    instrumented at compile time with afl-gcc. It is also possible to use it as a\n" - "    traditional \"dumb\" fuzzer by specifying '-n' in the command line.\n"); + "    Of course, even without QEMU, afl-fuzz can still work with " "binaries that are\n" + "    instrumented at compile time with afl-gcc. It is also possible to " "use it as a\n" + "    traditional \"dumb\" fuzzer by specifying '-n' in the command " "line.\n"); FATAL("Failed to locate 'afl-qemu-trace'."); @@ -1923,7 +2044,7 @@ void save_cmdline(u32 argc, char** argv) { for (i = 0; i < argc; ++i) len += strlen(argv[i]) + 1; - + buf = orig_cmdline = ck_alloc(len); for (i = 0; i < argc; ++i) { diff --git a/src/afl-fuzz-misc.c b/src/afl-fuzz-misc.c index 69ff2f6b..eb0cc187 100644 --- a/src/afl-fuzz-misc.c +++ b/src/afl-fuzz-misc.c @@ -33,11 +33,16 @@ u8* DI(u64 val) { cur = (cur + 1) % 12; -#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \ - if (val < (_divisor) * (_limit_mult)) { \ +#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) \ + do { \ + \ + if (val < (_divisor) * (_limit_mult)) { \ + \ sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \ - return tmp[cur]; \ - } \ + return tmp[cur]; \ + \ + } \ + \ } while (0) /* 0-9999 */ @@ -79,8 +84,7 @@ u8* DI(u64 val) { } - -/* Describe float. Similar to the above, except with a single +/* Describe float. Similar to the above, except with a single static buffer. */ u8* DF(double val) { @@ -88,20 +92,23 @@ u8* DF(double val) { static u8 tmp[16]; if (val < 99.995) { + sprintf(tmp, "%0.02f", val); return tmp; + } if (val < 999.95) { + sprintf(tmp, "%0.01f", val); return tmp; + } return DI((u64)val); } - /* Describe integer as memory size. */ u8* DMS(u64 val) { @@ -152,14 +159,13 @@ u8* DMS(u64 val) { } - /* Describe time delta. Returns one static buffer, 34 chars or less. 
*/ u8* DTD(u64 cur_ms, u64 event_ms) { static u8 tmp[64]; - u64 delta; - s32 t_d, t_h, t_m, t_s; + u64 delta; + s32 t_d, t_h, t_m, t_s; if (!event_ms) return "none seen yet"; @@ -174,3 +180,4 @@ u8* DTD(u64 cur_ms, u64 event_ms) { return tmp; } + diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 59370c3d..1b7abedd 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -28,22 +28,31 @@ int select_algorithm(void) { int i_puppet, j_puppet; - double sele = ((double)(UR(10000))*0.0001); + double sele = ((double)(UR(10000)) * 0.0001); j_puppet = 0; for (i_puppet = 0; i_puppet < operator_num; ++i_puppet) { - if (unlikely(i_puppet == 0)) { - if (sele < probability_now[swarm_now][i_puppet]) - break; - } else { - if (sele < probability_now[swarm_now][i_puppet]) { - j_puppet =1; - break; - } + + if (unlikely(i_puppet == 0)) { + + if (sele < probability_now[swarm_now][i_puppet]) break; + + } else { + + if (sele < probability_now[swarm_now][i_puppet]) { + + j_puppet = 1; + break; + } + + } + } - if (j_puppet ==1 && sele < probability_now[swarm_now][i_puppet-1]) + + if (j_puppet == 1 && sele < probability_now[swarm_now][i_puppet - 1]) FATAL("error select_algorithm"); return i_puppet; + } /* Helper to choose random block len for block operations in fuzz_one(). @@ -58,27 +67,29 @@ static u32 choose_block_len(u32 limit) { switch (UR(rlim)) { - case 0: min_value = 1; - max_value = HAVOC_BLK_SMALL; - break; + case 0: + min_value = 1; + max_value = HAVOC_BLK_SMALL; + break; - case 1: min_value = HAVOC_BLK_SMALL; - max_value = HAVOC_BLK_MEDIUM; - break; + case 1: + min_value = HAVOC_BLK_SMALL; + max_value = HAVOC_BLK_MEDIUM; + break; - default: + default: - if (UR(10)) { + if (UR(10)) { - min_value = HAVOC_BLK_MEDIUM; - max_value = HAVOC_BLK_LARGE; + min_value = HAVOC_BLK_MEDIUM; + max_value = HAVOC_BLK_LARGE; - } else { + } else { - min_value = HAVOC_BLK_LARGE; - max_value = HAVOC_BLK_XL; + min_value = HAVOC_BLK_LARGE; + max_value = HAVOC_BLK_XL; - } + } } @@ -88,7 +99,6 @@ static u32 choose_block_len(u32 limit) { } - /* Helper function to see if a particular change (xor_val = old ^ new) could be a product of deterministic bit flips with the lengths and stepovers attempted by afl-fuzz. This is used to avoid dupes in some of the @@ -104,7 +114,12 @@ static u8 could_be_bitflip(u32 xor_val) { /* Shift left until first bit set. */ - while (!(xor_val & 1)) { ++sh; xor_val >>= 1; } + while (!(xor_val & 1)) { + + ++sh; + xor_val >>= 1; + + } /* 1-, 2-, and 4-bit patterns are OK anywhere. */ @@ -115,14 +130,12 @@ static u8 could_be_bitflip(u32 xor_val) { if (sh & 7) return 0; - if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff) - return 1; + if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff) return 1; return 0; } - /* Helper function to see if a particular value is reachable through arithmetic operations. Used for similar purposes. 
*/ @@ -136,10 +149,15 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { for (i = 0; i < blen; ++i) { - u8 a = old_val >> (8 * i), - b = new_val >> (8 * i); + u8 a = old_val >> (8 * i), b = new_val >> (8 * i); + + if (a != b) { + + ++diffs; + ov = a; + nv = b; - if (a != b) { ++diffs; ov = a; nv = b; } + } } @@ -147,8 +165,7 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { if (diffs == 1) { - if ((u8)(ov - nv) <= ARITH_MAX || - (u8)(nv - ov) <= ARITH_MAX) return 1; + if ((u8)(ov - nv) <= ARITH_MAX || (u8)(nv - ov) <= ARITH_MAX) return 1; } @@ -160,10 +177,15 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { for (i = 0; i < blen / 2; ++i) { - u16 a = old_val >> (16 * i), - b = new_val >> (16 * i); + u16 a = old_val >> (16 * i), b = new_val >> (16 * i); + + if (a != b) { - if (a != b) { ++diffs; ov = a; nv = b; } + ++diffs; + ov = a; + nv = b; + + } } @@ -171,13 +193,12 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { if (diffs == 1) { - if ((u16)(ov - nv) <= ARITH_MAX || - (u16)(nv - ov) <= ARITH_MAX) return 1; + if ((u16)(ov - nv) <= ARITH_MAX || (u16)(nv - ov) <= ARITH_MAX) return 1; - ov = SWAP16(ov); nv = SWAP16(nv); + ov = SWAP16(ov); + nv = SWAP16(nv); - if ((u16)(ov - nv) <= ARITH_MAX || - (u16)(nv - ov) <= ARITH_MAX) return 1; + if ((u16)(ov - nv) <= ARITH_MAX || (u16)(nv - ov) <= ARITH_MAX) return 1; } @@ -186,13 +207,15 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { if (blen == 4) { if ((u32)(old_val - new_val) <= ARITH_MAX || - (u32)(new_val - old_val) <= ARITH_MAX) return 1; + (u32)(new_val - old_val) <= ARITH_MAX) + return 1; new_val = SWAP32(new_val); old_val = SWAP32(old_val); if ((u32)(old_val - new_val) <= ARITH_MAX || - (u32)(new_val - old_val) <= ARITH_MAX) return 1; + (u32)(new_val - old_val) <= ARITH_MAX) + return 1; } @@ -200,8 +223,7 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { } - -/* Last but not least, a similar helper to see if insertion of an +/* Last but not least, a similar helper to see if insertion of an interesting integer is redundant given the insertions done for shorter blen. The last param (check_le) is set if the caller already executed LE insertion for current blen and wants to see @@ -220,8 +242,8 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) { for (j = 0; j < sizeof(interesting_8); ++j) { - u32 tval = (old_val & ~(0xff << (i * 8))) | - (((u8)interesting_8[j]) << (i * 8)); + u32 tval = + (old_val & ~(0xff << (i * 8))) | (((u8)interesting_8[j]) << (i * 8)); if (new_val == tval) return 1; @@ -274,11 +296,10 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) { } - #ifndef IGNORE_FINDS -/* Helper function to compare buffers; returns first and last differing offset. We - use this to find reasonable locations for splicing two files. */ +/* Helper function to compare buffers; returns first and last differing offset. + We use this to find reasonable locations for splicing two files. 
*/ static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) { @@ -313,11 +334,11 @@ static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) { u8 fuzz_one_original(char** argv) { s32 len, fd, temp_len, i, j; - u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; - u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt; + u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; + u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt; u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; - u8 ret_val = 1, doing_det = 0; + u8 ret_val = 1, doing_det = 0; u8 a_collect[MAX_AUTO_EXTRA]; u32 a_len = 0; @@ -337,8 +358,10 @@ u8 fuzz_one_original(char** argv) { possibly skip to them at the expense of already-fuzzed or non-favored cases. */ - if (((queue_cur->was_fuzzed > 0 || queue_cur->fuzz_level > 0) || !queue_cur->favored) && - UR(100) < SKIP_TO_NEW_PROB) return 1; + if (((queue_cur->was_fuzzed > 0 || queue_cur->fuzz_level > 0) || + !queue_cur->favored) && + UR(100) < SKIP_TO_NEW_PROB) + return 1; } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { @@ -346,7 +369,8 @@ u8 fuzz_one_original(char** argv) { The odds of skipping stuff are higher for already-fuzzed inputs and lower for never-fuzzed entries. */ - if (queue_cycle > 1 && (queue_cur->fuzz_level == 0 || queue_cur->was_fuzzed)) { + if (queue_cycle > 1 && + (queue_cur->fuzz_level == 0 || queue_cur->was_fuzzed)) { if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; @@ -361,9 +385,11 @@ u8 fuzz_one_original(char** argv) { #endif /* ^IGNORE_FINDS */ if (not_on_tty) { + ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", current_entry, queued_paths, unique_crashes); fflush(stdout); + } /* Map the test case into memory. */ @@ -376,7 +402,8 @@ u8 fuzz_one_original(char** argv) { orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s' with len %d", queue_cur->fname, len); + if (orig_in == MAP_FAILED) + PFATAL("Unable to mmap '%s' with len %d", queue_cur->fname, len); close(fd); @@ -402,14 +429,15 @@ u8 fuzz_one_original(char** argv) { res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); } if (stop_soon || res != crash_mode) { + ++cur_skipped_paths; goto abandon_entry; + } } @@ -422,12 +450,13 @@ u8 fuzz_one_original(char** argv) { u8 res = trim_case(argv, queue_cur, in_buf); - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); if (stop_soon) { + ++cur_skipped_paths; goto abandon_entry; + } /* Don't retry trimming, even if it failed. 
*/ @@ -449,49 +478,56 @@ u8 fuzz_one_original(char** argv) { if (perf_score == 0) goto abandon_entry; if (custom_mutator) { + stage_short = "custom"; stage_name = "custom mutator"; stage_max = len << 3; stage_val_type = STAGE_VAL_NONE; - const u32 max_seed_size = 4096*4096; - u8* mutated_buf = ck_alloc(max_seed_size); + const u32 max_seed_size = 4096 * 4096; + u8* mutated_buf = ck_alloc(max_seed_size); orig_hit_cnt = queued_paths + unique_crashes; - for (stage_cur = 0 ; stage_cur < stage_max ; ++stage_cur) { - size_t orig_size = (size_t) len; - size_t mutated_size = custom_mutator(out_buf, orig_size, mutated_buf, max_seed_size, UR(UINT32_MAX)); + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + size_t orig_size = (size_t)len; + size_t mutated_size = custom_mutator(out_buf, orig_size, mutated_buf, + max_seed_size, UR(UINT32_MAX)); if (mutated_size > 0) { + out_buf = ck_realloc(out_buf, mutated_size); memcpy(out_buf, mutated_buf, mutated_size); - if (common_fuzz_stuff(argv, out_buf, (u32) mutated_size)) { + if (common_fuzz_stuff(argv, out_buf, (u32)mutated_size)) { + goto abandon_entry; + } + } + } ck_free(mutated_buf); new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_CUSTOM_MUTATOR] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_CUSTOM_MUTATOR] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_CUSTOM_MUTATOR] += stage_max; goto abandon_entry; - } + } /* Skip right away if -d is given, if it has not been chosen sufficiently often to warrant the expensive deterministic stage (fuzz_level), or if it has gone through deterministic testing in earlier, resumed runs (passed_det). */ - if (skip_deterministic - || ((!queue_cur->passed_det) - && perf_score < ( - queue_cur->depth * 30 <= havoc_max_mult * 100 - ? queue_cur->depth * 30 - : havoc_max_mult * 100)) - || queue_cur->passed_det) + if (skip_deterministic || + ((!queue_cur->passed_det) && + perf_score < (queue_cur->depth * 30 <= havoc_max_mult * 100 + ? queue_cur->depth * 30 + : havoc_max_mult * 100)) || + queue_cur->passed_det) #ifdef USE_PYTHON goto python_stage; #else @@ -514,17 +550,20 @@ u8 fuzz_one_original(char** argv) { * SIMPLE BITFLIP (+dictionary construction) * *********************************************/ -#define FLIP_BIT(_ar, _b) do { \ - u8* _arf = (u8*)(_ar); \ - u32 _bf = (_b); \ - _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ +#define FLIP_BIT(_ar, _b) \ + do { \ + \ + u8* _arf = (u8*)(_ar); \ + u32 _bf = (_b); \ + _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \ + \ } while (0) /* Single walking bit. */ stage_short = "flip1"; - stage_max = len << 3; - stage_name = "bitflip 1/1"; + stage_max = len << 3; + stage_name = "bitflip 1/1"; stage_val_type = STAGE_VAL_NONE; @@ -556,7 +595,7 @@ u8 fuzz_one_original(char** argv) { We do this here, rather than as a separate stage, because it's a nice way to keep the operation approximately "free" (i.e., no extra execs). - + Empirically, performing the check when flipping the least significant bit is advantageous, compared to doing it at the time of more disruptive changes, where the program flow may be affected in more violent ways. 
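To make the token-harvesting trick in the comment above concrete, here is a minimal, self-contained C sketch of the same idea; it is illustrative scaffolding, not afl-fuzz's actual code. toy_path_cksum(), MIN_TOK, and MAX_TOK are hypothetical stand-ins for running the target and hashing trace_bits (hash32() in afl-fuzz) and for MIN_AUTO_EXTRA/MAX_AUTO_EXTRA; only the run-collection logic mirrors the bitflip 1/1 stage shown in this hunk.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MIN_TOK 3  /* hypothetical stand-ins for MIN_AUTO_EXTRA / MAX_AUTO_EXTRA */
#define MAX_TOK 32

/* Toy stand-in for "execute the target and checksum the coverage map":
   the imaginary parser takes one distinctive path while the "IHDR" magic
   is intact and another once any of its bytes is corrupted. */
static uint32_t toy_path_cksum(const uint8_t* buf, size_t len) {

  uint32_t h = 2166136261u;
  h = (h * 16777619u) ^ (len >= 12 && !memcmp(buf + 8, "IHDR", 4));
  return h;

}

int main(void) {

  uint8_t buf[] = "xxxxxxxxIHDRxxxxxxxx";
  size_t  len = sizeof(buf) - 1;

  uint32_t base = toy_path_cksum(buf, len);  /* checksum of the pristine input */
  uint32_t prev = base;
  uint8_t  tok[MAX_TOK];
  size_t   tok_len = 0;

  for (size_t i = 0; i < len; ++i) {

    buf[i] ^= 0x01;                           /* flip the least significant bit */
    uint32_t cur = toy_path_cksum(buf, len);  /* afl-fuzz hashes trace_bits     */
    buf[i] ^= 0x01;                           /* restore the byte               */

    if (cur != prev) {                        /* path changed: flush the run    */

      if (tok_len >= MIN_TOK) printf("candidate token: %.*s\n", (int)tok_len, tok);
      tok_len = 0;
      prev = cur;

    }

    /* Extend the run only if this flip actually changed the path, so that
       no-op bytes never contribute to a token. */
    if (cur != base && tok_len < MAX_TOK) tok[tok_len++] = buf[i];

  }

  if (tok_len >= MIN_TOK) printf("candidate token: %.*s\n", (int)tok_len, tok);
  return 0;

}

Compiled as C99, this prints "candidate token: IHDR": the leading and trailing filler bytes leave the checksum untouched, while all four magic bytes funnel into the same changed path, which is exactly the signature that makes afl-fuzz hand the collected string to maybe_add_auto().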
@@ -602,7 +641,7 @@ u8 fuzz_one_original(char** argv) { if (cksum != queue_cur->exec_cksum) { - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; ++a_len; } @@ -613,14 +652,14 @@ u8 fuzz_one_original(char** argv) { new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP1] += stage_max; /* Two walking bits. */ - stage_name = "bitflip 2/1"; + stage_name = "bitflip 2/1"; stage_short = "flip2"; - stage_max = (len << 3) - 1; + stage_max = (len << 3) - 1; orig_hit_cnt = new_hit_cnt; @@ -640,14 +679,14 @@ u8 fuzz_one_original(char** argv) { new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP2] += stage_max; /* Four walking bits. */ - stage_name = "bitflip 4/1"; + stage_name = "bitflip 4/1"; stage_short = "flip4"; - stage_max = (len << 3) - 3; + stage_max = (len << 3) - 3; orig_hit_cnt = new_hit_cnt; @@ -671,7 +710,7 @@ u8 fuzz_one_original(char** argv) { new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP4] += stage_max; /* Effector map setup. These macros calculate: @@ -682,27 +721,29 @@ u8 fuzz_one_original(char** argv) { */ -#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) -#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) -#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) -#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) +#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) +#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) +#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) +#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1) /* Initialize effector map for the next step (see comments below). Always flag first and last byte as doing something. */ - eff_map = ck_alloc(EFF_ALEN(len)); + eff_map = ck_alloc(EFF_ALEN(len)); eff_map[0] = 1; if (EFF_APOS(len - 1) != 0) { + eff_map[EFF_APOS(len - 1)] = 1; ++eff_cnt; + } /* Walking byte. */ - stage_name = "bitflip 8/8"; + stage_name = "bitflip 8/8"; stage_short = "flip8"; - stage_max = len; + stage_max = len; orig_hit_cnt = new_hit_cnt; @@ -732,8 +773,10 @@ u8 fuzz_one_original(char** argv) { cksum = ~queue_cur->exec_cksum; if (cksum != queue_cur->exec_cksum) { + eff_map[EFF_APOS(stage_cur)] = 1; ++eff_cnt; + } } @@ -763,17 +806,17 @@ u8 fuzz_one_original(char** argv) { new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP8] += stage_max; /* Two walking bytes. */ if (len < 2) goto skip_bitflip; - stage_name = "bitflip 16/8"; + stage_name = "bitflip 16/8"; stage_short = "flip16"; - stage_cur = 0; - stage_max = len - 1; + stage_cur = 0; + stage_max = len - 1; orig_hit_cnt = new_hit_cnt; @@ -782,8 +825,10 @@ u8 fuzz_one_original(char** argv) { /* Let's consult the effector map... 
*/ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + --stage_max; continue; + } stage_cur_byte = i; @@ -795,22 +840,21 @@ u8 fuzz_one_original(char** argv) { *(u16*)(out_buf + i) ^= 0xFFFF; - } new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP16] += stage_max; if (len < 4) goto skip_bitflip; /* Four walking bytes. */ - stage_name = "bitflip 32/8"; + stage_name = "bitflip 32/8"; stage_short = "flip32"; - stage_cur = 0; - stage_max = len - 3; + stage_cur = 0; + stage_max = len - 3; orig_hit_cnt = new_hit_cnt; @@ -819,8 +863,10 @@ u8 fuzz_one_original(char** argv) { /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + --stage_max; continue; + } stage_cur_byte = i; @@ -836,7 +882,7 @@ u8 fuzz_one_original(char** argv) { new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP32] += stage_max; skip_bitflip: @@ -849,10 +895,10 @@ skip_bitflip: /* 8-bit arithmetics. */ - stage_name = "arith 8/8"; + stage_name = "arith 8/8"; stage_short = "arith8"; - stage_cur = 0; - stage_max = 2 * len * ARITH_MAX; + stage_cur = 0; + stage_max = 2 * len * ARITH_MAX; stage_val_type = STAGE_VAL_LE; @@ -865,8 +911,10 @@ skip_bitflip: /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)]) { + stage_max -= 2 * ARITH_MAX; continue; + } stage_cur_byte = i; @@ -886,9 +934,11 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; - r = orig ^ (orig - j); + r = orig ^ (orig - j); if (!could_be_bitflip(r)) { @@ -898,7 +948,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; out_buf[i] = orig; @@ -908,17 +960,17 @@ skip_bitflip: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_ARITH8] += stage_max; /* 16-bit arithmetics, both endians. */ if (len < 2) goto skip_arith; - stage_name = "arith 16/8"; + stage_name = "arith 16/8"; stage_short = "arith16"; - stage_cur = 0; - stage_max = 4 * (len - 1) * ARITH_MAX; + stage_cur = 0; + stage_max = 4 * (len - 1) * ARITH_MAX; orig_hit_cnt = new_hit_cnt; @@ -929,25 +981,26 @@ skip_bitflip: /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + stage_max -= 4 * ARITH_MAX; continue; + } stage_cur_byte = i; for (j = 1; j <= ARITH_MAX; ++j) { - u16 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), + u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), r3 = orig ^ SWAP16(SWAP16(orig) + j), r4 = orig ^ SWAP16(SWAP16(orig) - j); /* Try little endian addition and subtraction first. Do it only - if the operation would affect more than one byte (hence the + if the operation would affect more than one byte (hence the & 0xff overflow checks) and if it couldn't be a product of a bitflip. 
*/ - stage_val_type = STAGE_VAL_LE; + stage_val_type = STAGE_VAL_LE; if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { @@ -956,8 +1009,10 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - - } else --stage_max; + + } else + + --stage_max; if ((orig & 0xff) < j && !could_be_bitflip(r2)) { @@ -967,13 +1022,14 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; /* Big endian comes next. Same deal. */ stage_val_type = STAGE_VAL_BE; - if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { stage_cur_val = j; @@ -982,7 +1038,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; if ((orig >> 8) < j && !could_be_bitflip(r4)) { @@ -992,7 +1050,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; *(u16*)(out_buf + i) = orig; @@ -1002,17 +1062,17 @@ skip_bitflip: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_ARITH16] += stage_max; /* 32-bit arithmetics, both endians. */ if (len < 4) goto skip_arith; - stage_name = "arith 32/8"; + stage_name = "arith 32/8"; stage_short = "arith32"; - stage_cur = 0; - stage_max = 4 * (len - 3) * ARITH_MAX; + stage_cur = 0; + stage_max = 4 * (len - 3) * ARITH_MAX; orig_hit_cnt = new_hit_cnt; @@ -1024,16 +1084,17 @@ skip_bitflip: if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + stage_max -= 4 * ARITH_MAX; continue; + } stage_cur_byte = i; for (j = 1; j <= ARITH_MAX; ++j) { - u32 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), + u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), r3 = orig ^ SWAP32(SWAP32(orig) + j), r4 = orig ^ SWAP32(SWAP32(orig) - j); @@ -1050,7 +1111,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { @@ -1060,7 +1123,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; /* Big endian next. */ @@ -1074,7 +1139,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { @@ -1084,7 +1151,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; *(u32*)(out_buf + i) = orig; @@ -1094,7 +1163,7 @@ skip_bitflip: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_ARITH32] += stage_max; skip_arith: @@ -1103,10 +1172,10 @@ skip_arith: * INTERESTING VALUES * **********************/ - stage_name = "interest 8/8"; + stage_name = "interest 8/8"; stage_short = "int8"; - stage_cur = 0; - stage_max = len * sizeof(interesting_8); + stage_cur = 0; + stage_max = len * sizeof(interesting_8); stage_val_type = STAGE_VAL_LE; @@ -1121,8 +1190,10 @@ skip_arith: /* Let's consult the effector map... 
*/ if (!eff_map[EFF_APOS(i)]) { + stage_max -= sizeof(interesting_8); continue; + } stage_cur_byte = i; @@ -1133,8 +1204,10 @@ skip_arith: if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || could_be_arith(orig, (u8)interesting_8[j], 1)) { + --stage_max; continue; + } stage_cur_val = interesting_8[j]; @@ -1151,17 +1224,17 @@ skip_arith: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_INTEREST8] += stage_max; /* Setting 16-bit integers, both endians. */ if (no_arith || len < 2) goto skip_interest; - stage_name = "interest 16/8"; + stage_name = "interest 16/8"; stage_short = "int16"; - stage_cur = 0; - stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); + stage_cur = 0; + stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); orig_hit_cnt = new_hit_cnt; @@ -1172,8 +1245,10 @@ skip_arith: /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + stage_max -= sizeof(interesting_16); continue; + } stage_cur_byte = i; @@ -1196,7 +1271,9 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && @@ -1209,7 +1286,9 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; } @@ -1219,17 +1298,17 @@ skip_arith: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_INTEREST16] += stage_max; if (len < 4) goto skip_interest; /* Setting 32-bit integers, both endians. */ - stage_name = "interest 32/8"; + stage_name = "interest 32/8"; stage_short = "int32"; - stage_cur = 0; - stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); + stage_cur = 0; + stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); orig_hit_cnt = new_hit_cnt; @@ -1241,8 +1320,10 @@ skip_arith: if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + stage_max -= sizeof(interesting_32) >> 1; continue; + } stage_cur_byte = i; @@ -1265,7 +1346,9 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && @@ -1278,7 +1361,9 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; } @@ -1288,7 +1373,7 @@ skip_arith: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_INTEREST32] += stage_max; skip_interest: @@ -1301,10 +1386,10 @@ skip_interest: /* Overwrite with user-supplied extras. 
*/ - stage_name = "user extras (over)"; + stage_name = "user extras (over)"; stage_short = "ext_UO"; - stage_cur = 0; - stage_max = extras_cnt * len; + stage_cur = 0; + stage_max = extras_cnt * len; stage_val_type = STAGE_VAL_NONE; @@ -1354,15 +1439,15 @@ skip_interest: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_EXTRAS_UO] += stage_max; /* Insertion of user-supplied extras. */ - stage_name = "user extras (insert)"; + stage_name = "user extras (insert)"; stage_short = "ext_UI"; - stage_cur = 0; - stage_max = extras_cnt * len; + stage_cur = 0; + stage_max = extras_cnt * len; orig_hit_cnt = new_hit_cnt; @@ -1375,8 +1460,10 @@ skip_interest: for (j = 0; j < extras_cnt; ++j) { if (len + extras[j].len > MAX_FILE) { - --stage_max; + + --stage_max; continue; + } /* Insert token */ @@ -1386,8 +1473,10 @@ skip_interest: memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { + ck_free(ex_tmp); goto abandon_entry; + } ++stage_cur; @@ -1403,17 +1492,17 @@ skip_interest: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_EXTRAS_UI] += stage_max; skip_user_extras: if (!a_extras_cnt) goto skip_extras; - stage_name = "auto extras (over)"; + stage_name = "auto extras (over)"; stage_short = "ext_AO"; - stage_cur = 0; - stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; + stage_cur = 0; + stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; stage_val_type = STAGE_VAL_NONE; @@ -1431,7 +1520,8 @@ skip_user_extras: if (a_extras[j].len > len - i || !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { + !memchr(eff_map + EFF_APOS(i), 1, + EFF_SPAN_ALEN(i, a_extras[j].len))) { --stage_max; continue; @@ -1454,7 +1544,7 @@ skip_user_extras: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_EXTRAS_AO] += stage_max; skip_extras: @@ -1473,36 +1563,51 @@ python_stage: if (!py_module) goto havoc_stage; - stage_name = "python"; + stage_name = "python"; stage_short = "python"; - stage_max = HAVOC_CYCLES * perf_score / havoc_div / 100; + stage_max = HAVOC_CYCLES * perf_score / havoc_div / 100; if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; orig_hit_cnt = queued_paths + unique_crashes; - char* retbuf = NULL; + char* retbuf = NULL; size_t retlen = 0; for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + struct queue_entry* target; - u32 tid; - u8* new_buf; + u32 tid; + u8* new_buf; -retry_external_pick: + retry_external_pick: /* Pick a random other queue entry for passing to external API */ - do { tid = UR(queued_paths); } while (tid == current_entry && queued_paths > 1); + do { + + tid = UR(queued_paths); + + } while (tid == current_entry && queued_paths > 1); target = queue; - while (tid >= 100) { target = target->next_100; tid -= 100; } - while (tid--) target = target->next; + while (tid >= 100) { + + target = target->next_100; + tid -= 100; + + } + + while (tid--) + target = target->next; /* Make sure that the target has a reasonable length. 
*/ - while (target && (target->len < 2 || target == queue_cur) && queued_paths > 1) { + while (target && (target->len < 2 || target == queue_cur) && + queued_paths > 1) { + target = target->next; ++splicing_with; + } if (!target) goto retry_external_pick; @@ -1519,12 +1624,14 @@ retry_external_pick: ck_free(new_buf); if (retbuf) { - if (!retlen) - goto abandon_entry; + + if (!retlen) goto abandon_entry; if (common_fuzz_stuff(argv, retbuf, retlen)) { + free(retbuf); goto abandon_entry; + } /* Reset retbuf/retlen */ @@ -1536,26 +1643,35 @@ retry_external_pick: permitting. */ if (queued_paths != havoc_queued) { + if (perf_score <= havoc_max_mult * 100) { - stage_max *= 2; + + stage_max *= 2; perf_score *= 2; + } havoc_queued = queued_paths; + } + } + } new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_PYTHON] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_PYTHON] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_PYTHON] += stage_max; if (python_only) { + /* Skip other stages */ ret_val = 0; goto abandon_entry; + } + #endif /**************** @@ -1571,10 +1687,10 @@ havoc_stage: if (!splice_cycle) { - stage_name = "havoc"; + stage_name = "havoc"; stage_short = "havoc"; - stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; + stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score / + havoc_div / 100; } else { @@ -1583,9 +1699,9 @@ havoc_stage: perf_score = orig_perf; sprintf(tmp, "splice %u", splice_cycle); - stage_name = tmp; + stage_name = tmp; stage_short = "splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; + stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; } @@ -1605,7 +1721,7 @@ havoc_stage: u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); stage_cur_val = use_stacking; - + for (i = 0; i < use_stacking; ++i) { switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) { @@ -1617,7 +1733,7 @@ havoc_stage: FLIP_BIT(out_buf, UR(temp_len << 3)); break; - case 1: + case 1: /* Set byte to interesting value. 
*/ @@ -1633,12 +1749,12 @@ havoc_stage: if (UR(2)) { *(u16*)(out_buf + UR(temp_len - 1)) = - interesting_16[UR(sizeof(interesting_16) >> 1)]; + interesting_16[UR(sizeof(interesting_16) >> 1)]; } else { - *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16( - interesting_16[UR(sizeof(interesting_16) >> 1)]); + *(u16*)(out_buf + UR(temp_len - 1)) = + SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]); } @@ -1651,14 +1767,14 @@ havoc_stage: if (temp_len < 4) break; if (UR(2)) { - + *(u32*)(out_buf + UR(temp_len - 3)) = - interesting_32[UR(sizeof(interesting_32) >> 2)]; + interesting_32[UR(sizeof(interesting_32) >> 2)]; } else { - *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32( - interesting_32[UR(sizeof(interesting_32) >> 2)]); + *(u32*)(out_buf + UR(temp_len - 3)) = + SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]); } @@ -1696,7 +1812,7 @@ havoc_stage: u16 num = 1 + UR(ARITH_MAX); *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); + SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); } @@ -1720,7 +1836,7 @@ havoc_stage: u16 num = 1 + UR(ARITH_MAX); *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); + SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); } @@ -1744,7 +1860,7 @@ havoc_stage: u32 num = 1 + UR(ARITH_MAX); *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); + SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); } @@ -1768,7 +1884,7 @@ havoc_stage: u32 num = 1 + UR(ARITH_MAX); *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); + SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); } @@ -1785,28 +1901,28 @@ havoc_stage: case 11 ... 12: { - /* Delete bytes. We're making this a bit more likely - than insertion (the next option) in hopes of keeping - files reasonably small. */ + /* Delete bytes. We're making this a bit more likely + than insertion (the next option) in hopes of keeping + files reasonably small. */ - u32 del_from, del_len; + u32 del_from, del_len; - if (temp_len < 2) break; + if (temp_len < 2) break; - /* Don't delete too much. */ + /* Don't delete too much. */ - del_len = choose_block_len(temp_len - 1); + del_len = choose_block_len(temp_len - 1); - del_from = UR(temp_len - del_len + 1); + del_from = UR(temp_len - del_len + 1); - memmove(out_buf + del_from, out_buf + del_from + del_len, - temp_len - del_from - del_len); + memmove(out_buf + del_from, out_buf + del_from + del_len, + temp_len - del_from - del_len); - temp_len -= del_len; + temp_len -= del_len; - break; + break; - } + } case 13: @@ -1820,7 +1936,7 @@ havoc_stage: if (actually_clone) { - clone_len = choose_block_len(temp_len); + clone_len = choose_block_len(temp_len); clone_from = UR(temp_len - clone_len + 1); } else { @@ -1830,7 +1946,7 @@ havoc_stage: } - clone_to = UR(temp_len); + clone_to = UR(temp_len); new_buf = ck_alloc_nozero(temp_len + clone_len); @@ -1860,128 +1976,129 @@ havoc_stage: case 14: { - /* Overwrite bytes with a randomly selected chunk (75%) or fixed - bytes (25%). */ + /* Overwrite bytes with a randomly selected chunk (75%) or fixed + bytes (25%). 
*/ - u32 copy_from, copy_to, copy_len; + u32 copy_from, copy_to, copy_len; - if (temp_len < 2) break; + if (temp_len < 2) break; - copy_len = choose_block_len(temp_len - 1); + copy_len = choose_block_len(temp_len - 1); - copy_from = UR(temp_len - copy_len + 1); - copy_to = UR(temp_len - copy_len + 1); + copy_from = UR(temp_len - copy_len + 1); + copy_to = UR(temp_len - copy_len + 1); - if (UR(4)) { + if (UR(4)) { - if (copy_from != copy_to) - memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + if (copy_from != copy_to) + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); - } else memset(out_buf + copy_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); + } else - break; + memset(out_buf + copy_to, UR(2) ? UR(256) : out_buf[UR(temp_len)], + copy_len); - } + break; + + } - /* Values 15 and 16 can be selected only if there are any extras - present in the dictionaries. */ + /* Values 15 and 16 can be selected only if there are any extras + present in the dictionaries. */ case 15: { - /* Overwrite bytes with an extra. */ + /* Overwrite bytes with an extra. */ - if (!extras_cnt || (a_extras_cnt && UR(2))) { + if (!extras_cnt || (a_extras_cnt && UR(2))) { - /* No user-specified extras or odds in our favor. Let's use an - auto-detected one. */ + /* No user-specified extras or odds in our favor. Let's use an + auto-detected one. */ - u32 use_extra = UR(a_extras_cnt); - u32 extra_len = a_extras[use_extra].len; - u32 insert_at; + u32 use_extra = UR(a_extras_cnt); + u32 extra_len = a_extras[use_extra].len; + u32 insert_at; - if (extra_len > temp_len) break; + if (extra_len > temp_len) break; - insert_at = UR(temp_len - extra_len + 1); - memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len); + insert_at = UR(temp_len - extra_len + 1); + memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len); - } else { + } else { - /* No auto extras or odds in our favor. Use the dictionary. */ + /* No auto extras or odds in our favor. Use the dictionary. */ - u32 use_extra = UR(extras_cnt); - u32 extra_len = extras[use_extra].len; - u32 insert_at; + u32 use_extra = UR(extras_cnt); + u32 extra_len = extras[use_extra].len; + u32 insert_at; - if (extra_len > temp_len) break; + if (extra_len > temp_len) break; - insert_at = UR(temp_len - extra_len + 1); - memcpy(out_buf + insert_at, extras[use_extra].data, extra_len); + insert_at = UR(temp_len - extra_len + 1); + memcpy(out_buf + insert_at, extras[use_extra].data, extra_len); - } + } - break; + break; - } + } case 16: { - u32 use_extra, extra_len, insert_at = UR(temp_len + 1); - u8* new_buf; + u32 use_extra, extra_len, insert_at = UR(temp_len + 1); + u8* new_buf; - /* Insert an extra. Do the same dice-rolling stuff as for the - previous case. */ + /* Insert an extra. Do the same dice-rolling stuff as for the + previous case. 
*/ - if (!extras_cnt || (a_extras_cnt && UR(2))) { + if (!extras_cnt || (a_extras_cnt && UR(2))) { - use_extra = UR(a_extras_cnt); - extra_len = a_extras[use_extra].len; + use_extra = UR(a_extras_cnt); + extra_len = a_extras[use_extra].len; - if (temp_len + extra_len >= MAX_FILE) break; + if (temp_len + extra_len >= MAX_FILE) break; - new_buf = ck_alloc_nozero(temp_len + extra_len); + new_buf = ck_alloc_nozero(temp_len + extra_len); - /* Head */ - memcpy(new_buf, out_buf, insert_at); + /* Head */ + memcpy(new_buf, out_buf, insert_at); - /* Inserted part */ - memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len); + /* Inserted part */ + memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len); - } else { + } else { - use_extra = UR(extras_cnt); - extra_len = extras[use_extra].len; + use_extra = UR(extras_cnt); + extra_len = extras[use_extra].len; - if (temp_len + extra_len >= MAX_FILE) break; + if (temp_len + extra_len >= MAX_FILE) break; - new_buf = ck_alloc_nozero(temp_len + extra_len); + new_buf = ck_alloc_nozero(temp_len + extra_len); - /* Head */ - memcpy(new_buf, out_buf, insert_at); + /* Head */ + memcpy(new_buf, out_buf, insert_at); - /* Inserted part */ - memcpy(new_buf + insert_at, extras[use_extra].data, extra_len); + /* Inserted part */ + memcpy(new_buf + insert_at, extras[use_extra].data, extra_len); - } + } - /* Tail */ - memcpy(new_buf + insert_at + extra_len, out_buf + insert_at, - temp_len - insert_at); + /* Tail */ + memcpy(new_buf + insert_at + extra_len, out_buf + insert_at, + temp_len - insert_at); - ck_free(out_buf); - out_buf = new_buf; - temp_len += extra_len; + ck_free(out_buf); + out_buf = new_buf; + temp_len += extra_len; - break; + break; - } + } } } - if (common_fuzz_stuff(argv, out_buf, temp_len)) - goto abandon_entry; + if (common_fuzz_stuff(argv, out_buf, temp_len)) goto abandon_entry; /* out_buf might have been mangled a bit, so let's restore it to its original size and shape. */ @@ -1996,8 +2113,10 @@ havoc_stage: if (queued_paths != havoc_queued) { if (perf_score <= havoc_max_mult * 100) { - stage_max *= 2; + + stage_max *= 2; perf_score *= 2; + } havoc_queued = queued_paths; @@ -2009,11 +2128,15 @@ havoc_stage: new_hit_cnt = queued_paths + unique_crashes; if (!splice_cycle) { - stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; + + stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_HAVOC] += stage_max; + } else { - stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; + + stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_SPLICE] += stage_max; + } #ifndef IGNORE_FINDS @@ -2029,38 +2152,53 @@ havoc_stage: retry_splicing: - if (use_splicing && splice_cycle++ < SPLICE_CYCLES && - queued_paths > 1 && queue_cur->len > 1) { + if (use_splicing && splice_cycle++ < SPLICE_CYCLES && queued_paths > 1 && + queue_cur->len > 1) { struct queue_entry* target; - u32 tid, split_at; - u8* new_buf; - s32 f_diff, l_diff; + u32 tid, split_at; + u8* new_buf; + s32 f_diff, l_diff; /* First of all, if we've modified in_buf for havoc, let's clean that up... */ if (in_buf != orig_in) { + ck_free(in_buf); in_buf = orig_in; len = queue_cur->len; + } /* Pick a random queue entry and seek to it. Don't splice with yourself. 
*/ - do { tid = UR(queued_paths); } while (tid == current_entry); + do { + + tid = UR(queued_paths); + + } while (tid == current_entry); splicing_with = tid; target = queue; - while (tid >= 100) { target = target->next_100; tid -= 100; } - while (tid--) target = target->next; + while (tid >= 100) { + + target = target->next_100; + tid -= 100; + + } + + while (tid--) + target = target->next; /* Make sure that the target has a reasonable length. */ while (target && (target->len < 2 || target == queue_cur)) { + target = target->next; ++splicing_with; + } if (!target) goto retry_splicing; @@ -2084,8 +2222,10 @@ retry_splicing: locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { + ck_free(new_buf); goto retry_splicing; + } /* Split somewhere between the first and last differing byte. */ @@ -2102,11 +2242,11 @@ retry_splicing: out_buf = ck_alloc_nozero(len); memcpy(out_buf, in_buf, len); -#ifdef USE_PYTHON +# ifdef USE_PYTHON goto python_stage; -#else +# else goto havoc_stage; -#endif +# endif } @@ -2121,10 +2261,13 @@ abandon_entry: /* Update pending_not_fuzzed count if we made it through the calibration cycle and have not seen this entry before. */ - if (!stop_soon && !queue_cur->cal_failed && (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) { + if (!stop_soon && !queue_cur->cal_failed && + (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) { + --pending_not_fuzzed; queue_cur->was_fuzzed = 1; if (queue_cur->favored) --pending_favored; + } ++queue_cur->fuzz_level; @@ -2144,3576 +2287,3738 @@ abandon_entry: /* MOpt mode */ u8 pilot_fuzzing(char** argv) { - s32 len, fd, temp_len, i, j; - u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; - u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv; - u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; + s32 len, fd, temp_len, i, j; + u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; + u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv; + u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; - u8 ret_val = 1, doing_det = 0; + u8 ret_val = 1, doing_det = 0; - u8 a_collect[MAX_AUTO_EXTRA]; - u32 a_len = 0; + u8 a_collect[MAX_AUTO_EXTRA]; + u32 a_len = 0; #ifdef IGNORE_FINDS - /* In IGNORE_FINDS mode, skip any entries that weren't in the - initial data set. */ + /* In IGNORE_FINDS mode, skip any entries that weren't in the + initial data set. */ - if (queue_cur->depth > 1) return 1; + if (queue_cur->depth > 1) return 1; #else - if (pending_favored) { + if (pending_favored) { - /* If we have any favored, non-fuzzed new arrivals in the queue, - possibly skip to them at the expense of already-fuzzed or non-favored - cases. */ + /* If we have any favored, non-fuzzed new arrivals in the queue, + possibly skip to them at the expense of already-fuzzed or non-favored + cases. */ - if ((queue_cur->was_fuzzed || !queue_cur->favored) && - UR(100) < SKIP_TO_NEW_PROB) return 1; + if ((queue_cur->was_fuzzed || !queue_cur->favored) && + UR(100) < SKIP_TO_NEW_PROB) + return 1; - } - else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { + } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { - /* Otherwise, still possibly skip non-favored cases, albeit less often. - The odds of skipping stuff are higher for already-fuzzed inputs and - lower for never-fuzzed entries. */ + /* Otherwise, still possibly skip non-favored cases, albeit less often. 
+ The odds of skipping stuff are higher for already-fuzzed inputs and + lower for never-fuzzed entries. */ - if (queue_cycle > 1 && !queue_cur->was_fuzzed) { + if (queue_cycle > 1 && !queue_cur->was_fuzzed) { - if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; + if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; - } - else { + } else { - if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; + if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; - } + } - } + } #endif /* ^IGNORE_FINDS */ - if (not_on_tty) { - ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", - current_entry, queued_paths, unique_crashes); - fflush(stdout); - } + if (not_on_tty) { - /* Map the test case into memory. */ + ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", + current_entry, queued_paths, unique_crashes); + fflush(stdout); - fd = open(queue_cur->fname, O_RDONLY); + } - if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); + /* Map the test case into memory. */ - len = queue_cur->len; + fd = open(queue_cur->fname, O_RDONLY); - orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); - if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); + len = queue_cur->len; - close(fd); + orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every - single byte anyway, so it wouldn't give us any performance or memory usage - benefits. */ + if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); - out_buf = ck_alloc_nozero(len); + close(fd); - subseq_tmouts = 0; + /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every + single byte anyway, so it wouldn't give us any performance or memory usage + benefits. */ - cur_depth = queue_cur->depth; + out_buf = ck_alloc_nozero(len); - /******************************************* - * CALIBRATION (only if failed earlier on) * - *******************************************/ + subseq_tmouts = 0; - if (queue_cur->cal_failed) { + cur_depth = queue_cur->depth; - u8 res = FAULT_TMOUT; + /******************************************* + * CALIBRATION (only if failed earlier on) * + *******************************************/ - if (queue_cur->cal_failed < CAL_CHANCES) { + if (queue_cur->cal_failed) { - res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); + u8 res = FAULT_TMOUT; - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + if (queue_cur->cal_failed < CAL_CHANCES) { - } + res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); - if (stop_soon || res != crash_mode) { - ++cur_skipped_paths; - goto abandon_entry; - } + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); - } + } - /************ - * TRIMMING * - ************/ + if (stop_soon || res != crash_mode) { - if (!dumb_mode && !queue_cur->trim_done) { + ++cur_skipped_paths; + goto abandon_entry; - u8 res = trim_case(argv, queue_cur, in_buf); + } - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + } - if (stop_soon) { - ++cur_skipped_paths; - goto abandon_entry; - } + /************ + * TRIMMING * + ************/ - /* Don't retry trimming, even if it failed. 
*/ + if (!dumb_mode && !queue_cur->trim_done) { - queue_cur->trim_done = 1; + u8 res = trim_case(argv, queue_cur, in_buf); + + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); + + if (stop_soon) { - len = queue_cur->len; + ++cur_skipped_paths; + goto abandon_entry; + + } + + /* Don't retry trimming, even if it failed. */ - } + queue_cur->trim_done = 1; - memcpy(out_buf, in_buf, len); + len = queue_cur->len; - /********************* - * PERFORMANCE SCORE * - *********************/ + } - orig_perf = perf_score = calculate_score(queue_cur); + memcpy(out_buf, in_buf, len); - /* Skip right away if -d is given, if we have done deterministic fuzzing on - this entry ourselves (was_fuzzed), or if it has gone through deterministic - testing in earlier, resumed runs (passed_det). */ + /********************* + * PERFORMANCE SCORE * + *********************/ - if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) - goto havoc_stage; + orig_perf = perf_score = calculate_score(queue_cur); - /* Skip deterministic fuzzing if exec path checksum puts this out of scope - for this master instance. */ + /* Skip right away if -d is given, if we have done deterministic fuzzing on + this entry ourselves (was_fuzzed), or if it has gone through deterministic + testing in earlier, resumed runs (passed_det). */ - if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) - goto havoc_stage; + if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) + goto havoc_stage; + /* Skip deterministic fuzzing if exec path checksum puts this out of scope + for this master instance. */ - cur_ms_lv = get_cur_time(); - if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) || - (last_crash_time != 0 && cur_ms_lv - last_crash_time < limit_time_puppet) || last_path_time == 0))) - { - key_puppet = 1; - goto pacemaker_fuzzing; - } + if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) + goto havoc_stage; - doing_det = 1; + cur_ms_lv = get_cur_time(); + if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) || + (last_crash_time != 0 && + cur_ms_lv - last_crash_time < limit_time_puppet) || + last_path_time == 0))) { - /********************************************* - * SIMPLE BITFLIP (+dictionary construction) * - *********************************************/ + key_puppet = 1; + goto pacemaker_fuzzing; -#define FLIP_BIT(_ar, _b) do { \ - u8* _arf = (u8*)(_ar); \ - u32 _bf = (_b); \ - _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ + } + + doing_det = 1; + + /********************************************* + * SIMPLE BITFLIP (+dictionary construction) * + *********************************************/ + +#define FLIP_BIT(_ar, _b) \ + do { \ + \ + u8* _arf = (u8*)(_ar); \ + u32 _bf = (_b); \ + _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \ + \ } while (0) - /* Single walking bit. */ + /* Single walking bit. 
*/ - stage_short = "flip1"; - stage_max = len << 3; - stage_name = "bitflip 1/1"; + stage_short = "flip1"; + stage_max = len << 3; + stage_name = "bitflip 1/1"; + stage_val_type = STAGE_VAL_NONE; + orig_hit_cnt = queued_paths + unique_crashes; + prev_cksum = queue_cur->exec_cksum; - stage_val_type = STAGE_VAL_NONE; + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - orig_hit_cnt = queued_paths + unique_crashes; + stage_cur_byte = stage_cur >> 3; - prev_cksum = queue_cur->exec_cksum; + FLIP_BIT(out_buf, stage_cur); - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur_byte = stage_cur >> 3; + FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur); + /* While flipping the least significant bit in every byte, pull off an extra + trick to detect possible syntax tokens. In essence, the idea is that if + you have a binary blob like this: - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + xxxxxxxxIHDRxxxxxxxx - FLIP_BIT(out_buf, stage_cur); + ...and changing the leading and trailing bytes causes variable or no + changes in program flow, but touching any character in the "IHDR" string + always produces the same, distinctive path, it's highly likely that + "IHDR" is an atomically-checked magic value of special significance to + the fuzzed format. - /* While flipping the least significant bit in every byte, pull of an extra - trick to detect possible syntax tokens. In essence, the idea is that if - you have a binary blob like this: + We do this here, rather than as a separate stage, because it's a nice + way to keep the operation approximately "free" (i.e., no extra execs). - xxxxxxxxIHDRxxxxxxxx + Empirically, performing the check when flipping the least significant bit + is advantageous, compared to doing it at the time of more disruptive + changes, where the program flow may be affected in more violent ways. - ...and changing the leading and trailing bytes causes variable or no - changes in program flow, but touching any character in the "IHDR" string - always produces the same, distinctive path, it's highly likely that - "IHDR" is an atomically-checked magic value of special significance to - the fuzzed format. + The caveat is that we won't generate dictionaries in the -d mode or -S + mode - but that's probably a fair trade-off. - We do this here, rather than as a separate stage, because it's a nice - way to keep the operation approximately "free" (i.e., no extra execs). + This won't work particularly well with paths that exhibit variable + behavior, but fails gracefully, so we'll carry out the checks anyway. - Empirically, performing the check when flipping the least significant bit - is advantageous, compared to doing it at the time of more disruptive - changes, where the program flow may be affected in more violent ways. + */ - The caveat is that we won't generate dictionaries in the -d mode or -S - mode - but that's probably a fair trade-off. + if (!dumb_mode && (stage_cur & 7) == 7) { - This won't work particularly well with paths that exhibit variable - behavior, but fails gracefully, so we'll carry out the checks anyway. + u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - */ + if (stage_cur == stage_max - 1 && cksum == prev_cksum) { - if (!dumb_mode && (stage_cur & 7) == 7) { + /* If at end of file and we are still collecting a string, grab the + final character and force output. 
*/ - u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + ++a_len; - if (stage_cur == stage_max - 1 && cksum == prev_cksum) { + if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) + maybe_add_auto(a_collect, a_len); - /* If at end of file and we are still collecting a string, grab the - final character and force output. */ + } else if (cksum != prev_cksum) { - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; + /* Otherwise, if the checksum has changed, see if we have something + worthwhile queued up, and collect that if the answer is yes. */ - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); + if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) + maybe_add_auto(a_collect, a_len); - } - else if (cksum != prev_cksum) { + a_len = 0; + prev_cksum = cksum; + + } + + /* Continue collecting string, but only if the bit flip actually made + any difference - we don't want no-op tokens. */ + + if (cksum != queue_cur->exec_cksum) { + + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + ++a_len; + + } + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP1] += stage_max; + + /* Two walking bits. */ + + stage_name = "bitflip 2/1"; + stage_short = "flip2"; + stage_max = (len << 3) - 1; + + orig_hit_cnt = new_hit_cnt; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + stage_cur_byte = stage_cur >> 3; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); - /* Otherwise, if the checksum has changed, see if we have something - worthwhile queued up, and collect that if the answer is yes. */ + } - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); + new_hit_cnt = queued_paths + unique_crashes; - a_len = 0; - prev_cksum = cksum; + stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP2] += stage_max; - } + /* Four walking bits. */ - /* Continue collecting string, but only if the bit flip actually made - any difference - we don't want no-op tokens. */ + stage_name = "bitflip 4/1"; + stage_short = "flip4"; + stage_max = (len << 3) - 3; - if (cksum != queue_cur->exec_cksum) { + orig_hit_cnt = new_hit_cnt; - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + stage_cur_byte = stage_cur >> 3; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); - } + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - } + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); - } + } - new_hit_cnt = queued_paths + unique_crashes; + new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP1] += stage_max; + stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP4] += stage_max; - /* Two walking bits. */ + /* Effector map setup. 
These macros calculate: - stage_name = "bitflip 2/1"; - stage_short = "flip2"; - stage_max = (len << 3) - 1; + EFF_APOS - position of a particular file offset in the map. + EFF_ALEN - length of a map with a particular number of bytes. + EFF_SPAN_ALEN - map span for a sequence of bytes. - orig_hit_cnt = new_hit_cnt; + */ - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { +#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) +#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) +#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) +#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1) - stage_cur_byte = stage_cur >> 3; + /* Initialize effector map for the next step (see comments below). Always + flag first and last byte as doing something. */ - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); + eff_map = ck_alloc(EFF_ALEN(len)); + eff_map[0] = 1; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + if (EFF_APOS(len - 1) != 0) { - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); + eff_map[EFF_APOS(len - 1)] = 1; + ++eff_cnt; - } + } - new_hit_cnt = queued_paths + unique_crashes; + /* Walking byte. */ - stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP2] += stage_max; + stage_name = "bitflip 8/8"; + stage_short = "flip8"; + stage_max = len; + orig_hit_cnt = new_hit_cnt; + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - /* Four walking bits. */ + stage_cur_byte = stage_cur; - stage_name = "bitflip 4/1"; - stage_short = "flip4"; - stage_max = (len << 3) - 3; + out_buf[stage_cur] ^= 0xFF; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + /* We also use this stage to pull off a simple trick: we identify + bytes that seem to have no effect on the current execution path + even when fully flipped - and we skip them during more expensive + deterministic stages, such as arithmetics or known ints. */ + if (!eff_map[EFF_APOS(stage_cur)]) { + u32 cksum; - orig_hit_cnt = new_hit_cnt; + /* If in dumb mode or if the file is very short, just flag everything + without wasting time on checksums. */ - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + if (!dumb_mode && len >= EFF_MIN_LEN) + cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + else + cksum = ~queue_cur->exec_cksum; - stage_cur_byte = stage_cur >> 3; + if (cksum != queue_cur->exec_cksum) { - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); + eff_map[EFF_APOS(stage_cur)] = 1; + ++eff_cnt; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + } - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); + } - } + out_buf[stage_cur] ^= 0xFF; - new_hit_cnt = queued_paths + unique_crashes; + } - stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP4] += stage_max; + /* If the effector map is more than EFF_MAX_PERC dense, just flag the + whole thing as worth fuzzing, since we wouldn't be saving much time + anyway. */ + if (eff_cnt != EFF_ALEN(len) && + eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { + memset(eff_map, 1, EFF_ALEN(len)); + blocks_eff_select += EFF_ALEN(len); - /* Effector map setup. These macros calculate: + } else { - EFF_APOS - position of a particular file offset in the map. - EFF_ALEN - length of a map with a particular number of bytes. - EFF_SPAN_ALEN - map span for a sequence of bytes. 
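With EFF_MAP_SCALE2 set to 3 in config.h, the effector map keeps one byte per 8-byte bucket of the input, and the macros are just that bucket arithmetic. A quick numeric check:

#include <stdio.h>

#define EFF_MAP_SCALE2 3 /* config.h: 2^3 input bytes per effector bucket */
#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1)

int main(void) {
  printf("%d\n", EFF_APOS(17));        /* 2: offset 17 sits in bucket 2      */
  printf("%d\n", EFF_ALEN(17));        /* 3: a 17-byte file needs 3 buckets  */
  printf("%d\n", EFF_SPAN_ALEN(6, 4)); /* 2: bytes 6..9 straddle buckets 0-1 */
  return 0;
}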
+ blocks_eff_select += eff_cnt; - */ + } -#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) -#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) -#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) -#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) + blocks_eff_total += EFF_ALEN(len); - /* Initialize effector map for the next step (see comments below). Always - flag first and last byte as doing something. */ + new_hit_cnt = queued_paths + unique_crashes; - eff_map = ck_alloc(EFF_ALEN(len)); - eff_map[0] = 1; + stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP8] += stage_max; - if (EFF_APOS(len - 1) != 0) { - eff_map[EFF_APOS(len - 1)] = 1; - ++eff_cnt; - } + /* Two walking bytes. */ - /* Walking byte. */ + if (len < 2) goto skip_bitflip; - stage_name = "bitflip 8/8"; - stage_short = "flip8"; - stage_max = len; + stage_name = "bitflip 16/8"; + stage_short = "flip16"; + stage_cur = 0; + stage_max = len - 1; + orig_hit_cnt = new_hit_cnt; + for (i = 0; i < len - 1; ++i) { - orig_hit_cnt = new_hit_cnt; + /* Let's consult the effector map... */ - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_cur_byte = stage_cur; + --stage_max; + continue; - out_buf[stage_cur] ^= 0xFF; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + stage_cur_byte = i; - /* We also use this stage to pull off a simple trick: we identify - bytes that seem to have no effect on the current execution path - even when fully flipped - and we skip them during more expensive - deterministic stages, such as arithmetics or known ints. */ + *(u16*)(out_buf + i) ^= 0xFFFF; - if (!eff_map[EFF_APOS(stage_cur)]) { + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - u32 cksum; + *(u16*)(out_buf + i) ^= 0xFFFF; - /* If in dumb mode or if the file is very short, just flag everything - without wasting time on checksums. */ + } - if (!dumb_mode && len >= EFF_MIN_LEN) - cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - else - cksum = ~queue_cur->exec_cksum; + new_hit_cnt = queued_paths + unique_crashes; - if (cksum != queue_cur->exec_cksum) { - eff_map[EFF_APOS(stage_cur)] = 1; - ++eff_cnt; - } + stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP16] += stage_max; - } + if (len < 4) goto skip_bitflip; - out_buf[stage_cur] ^= 0xFF; + /* Four walking bytes. */ - } + stage_name = "bitflip 32/8"; + stage_short = "flip32"; + stage_cur = 0; + stage_max = len - 3; - /* If the effector map is more than EFF_MAX_PERC dense, just flag the - whole thing as worth fuzzing, since we wouldn't be saving much time - anyway. */ + orig_hit_cnt = new_hit_cnt; - if (eff_cnt != EFF_ALEN(len) && - eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { + for (i = 0; i < len - 3; ++i) { - memset(eff_map, 1, EFF_ALEN(len)); + /* Let's consult the effector map... 
*/ + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - blocks_eff_select += EFF_ALEN(len); + --stage_max; + continue; - } - else { + } - blocks_eff_select += eff_cnt; + stage_cur_byte = i; - } + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; - blocks_eff_total += EFF_ALEN(len); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - new_hit_cnt = queued_paths + unique_crashes; + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; - stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP8] += stage_max; + } + new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP32] += stage_max; +skip_bitflip: + if (no_arith) goto skip_arith; - /* Two walking bytes. */ + /********************** + * ARITHMETIC INC/DEC * + **********************/ - if (len < 2) goto skip_bitflip; + /* 8-bit arithmetics. */ - stage_name = "bitflip 16/8"; - stage_short = "flip16"; - stage_cur = 0; - stage_max = len - 1; + stage_name = "arith 8/8"; + stage_short = "arith8"; + stage_cur = 0; + stage_max = 2 * len * ARITH_MAX; + stage_val_type = STAGE_VAL_LE; + orig_hit_cnt = new_hit_cnt; - orig_hit_cnt = new_hit_cnt; + for (i = 0; i < len; ++i) { - for (i = 0; i < len - 1; ++i) { + u8 orig = out_buf[i]; - /* Let's consult the effector map... */ + /* Let's consult the effector map... */ - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - --stage_max; - continue; - } + if (!eff_map[EFF_APOS(i)]) { - stage_cur_byte = i; + stage_max -= 2 * ARITH_MAX; + continue; - *(u16*)(out_buf + i) ^= 0xFFFF; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_cur_byte = i; - *(u16*)(out_buf + i) ^= 0xFFFF; + for (j = 1; j <= ARITH_MAX; ++j) { + u8 r = orig ^ (orig + j); - } + /* Do arithmetic operations only if the result couldn't be a product + of a bitflip. */ - new_hit_cnt = queued_paths + unique_crashes; + if (!could_be_bitflip(r)) { - stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP16] += stage_max; + stage_cur_val = j; + out_buf[i] = orig + j; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + } else + --stage_max; - if (len < 4) goto skip_bitflip; + r = orig ^ (orig - j); - /* Four walking bytes. */ + if (!could_be_bitflip(r)) { - stage_name = "bitflip 32/8"; - stage_short = "flip32"; - stage_cur = 0; - stage_max = len - 3; + stage_cur_val = -j; + out_buf[i] = orig - j; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + } else - orig_hit_cnt = new_hit_cnt; + --stage_max; - for (i = 0; i < len - 3; ++i) { + out_buf[i] = orig; - /* Let's consult the effector map... */ - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - --stage_max; - continue; - } + } - stage_cur_byte = i; + } - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + new_hit_cnt = queued_paths + unique_crashes; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH8] += stage_max; - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + /* 16-bit arithmetics, both endians. 
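could_be_bitflip(), used here to prune the arithmetic stage, asks whether old ^ new could already have been produced by the walking 1-, 2- and 4-bit flips (possible at any offset) or the 8/16/32-bit flips (only at byte-aligned shifts). The helper in afl-fuzz amounts to:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;
typedef uint32_t u32;

static u8 could_be_bitflip(u32 xor_val) {
  u32 sh = 0;
  if (!xor_val) return 1;                          /* a no-op change      */
  while (!(xor_val & 1)) { ++sh; xor_val >>= 1; }  /* drop trailing zeros */
  /* 1-, 2-, and 4-bit patterns are OK anywhere. */
  if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;
  /* 8-, 16-, and 32-bit patterns only at byte-aligned positions. */
  if (sh & 7) return 0;
  if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff) return 1;
  return 0;
}

int main(void) {
  printf("%d\n", could_be_bitflip(0x01 ^ 0x03)); /* 1: one walking-bit flip */
  printf("%d\n", could_be_bitflip(0x00 ^ 0x05)); /* 0: arith must try this  */
  return 0;
}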
*/ - } + if (len < 2) goto skip_arith; - new_hit_cnt = queued_paths + unique_crashes; + stage_name = "arith 16/8"; + stage_short = "arith16"; + stage_cur = 0; + stage_max = 4 * (len - 1) * ARITH_MAX; - stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP32] += stage_max; + orig_hit_cnt = new_hit_cnt; + for (i = 0; i < len - 1; ++i) { + u16 orig = *(u16*)(out_buf + i); + /* Let's consult the effector map... */ + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + stage_max -= 4 * ARITH_MAX; + continue; - skip_bitflip: + } - if (no_arith) goto skip_arith; + stage_cur_byte = i; - /********************** - * ARITHMETIC INC/DEC * - **********************/ + for (j = 1; j <= ARITH_MAX; ++j) { - /* 8-bit arithmetics. */ + u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), + r3 = orig ^ SWAP16(SWAP16(orig) + j), + r4 = orig ^ SWAP16(SWAP16(orig) - j); - stage_name = "arith 8/8"; - stage_short = "arith8"; - stage_cur = 0; - stage_max = 2 * len * ARITH_MAX; + /* Try little endian addition and subtraction first. Do it only + if the operation would affect more than one byte (hence the + & 0xff overflow checks) and if it couldn't be a product of + a bitflip. */ + stage_val_type = STAGE_VAL_LE; + if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { + stage_cur_val = j; + *(u16*)(out_buf + i) = orig + j; - stage_val_type = STAGE_VAL_LE; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - orig_hit_cnt = new_hit_cnt; + } else - for (i = 0; i < len; ++i) { + --stage_max; - u8 orig = out_buf[i]; + if ((orig & 0xff) < j && !could_be_bitflip(r2)) { - /* Let's consult the effector map... */ + stage_cur_val = -j; + *(u16*)(out_buf + i) = orig - j; - if (!eff_map[EFF_APOS(i)]) { - stage_max -= 2 * ARITH_MAX; - continue; - } + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_cur_byte = i; + } else - for (j = 1; j <= ARITH_MAX; ++j) { + --stage_max; - u8 r = orig ^ (orig + j); + /* Big endian comes next. Same deal. */ - /* Do arithmetic operations only if the result couldn't be a product - of a bitflip. */ + stage_val_type = STAGE_VAL_BE; - if (!could_be_bitflip(r)) { + if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { - stage_cur_val = j; - out_buf[i] = orig + j; + stage_cur_val = j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - } else --stage_max; + } else - r = orig ^ (orig - j); + --stage_max; - if (!could_be_bitflip(r)) { + if ((orig >> 8) < j && !could_be_bitflip(r4)) { - stage_cur_val = -j; - out_buf[i] = orig - j; + stage_cur_val = -j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - } else --stage_max; + } else - out_buf[i] = orig; + --stage_max; - } + *(u16*)(out_buf + i) = orig; - } + } - new_hit_cnt = queued_paths + unique_crashes; + } - stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH8] += stage_max; + new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH16] += stage_max; + /* 32-bit arithmetics, both endians. 
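The guards in this hunk are carry/borrow checks that only admit operations whose effect crosses a byte boundary: (orig & 0xff) + j > 0xff means the little-endian addition carries out of the low byte, (orig & 0xff) < j means the subtraction borrows from the high byte, and the big-endian variants run the same tests on orig >> 8. Anything confined to one byte was already covered by arith 8/8. The SWAP16 dance applies the same +/- j to the byte-swapped value, i.e. big-endian arithmetic on a little-endian host; a small demo with swap16() as a portable stand-in for the SWAP16 macro:

#include <stdint.h>
#include <stdio.h>

static uint16_t swap16(uint16_t x) { return (uint16_t)((x << 8) | (x >> 8)); }

int main(void) {
  uint16_t orig = 0x00ff; /* low byte 0xff: adding 1 carries into byte 1 */
  printf("%d\n", (orig & 0xff) + 1 > 0xff);  /* 1: multi-byte effect, try it */

  uint16_t le = (uint16_t)(orig + 1);
  uint16_t be = swap16((uint16_t)(swap16(orig) + 1));
  printf("0x%04x 0x%04x\n", le, be);         /* 0x0100 vs 0x01ff */
  return 0;
}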
*/ + if (len < 4) goto skip_arith; + stage_name = "arith 32/8"; + stage_short = "arith32"; + stage_cur = 0; + stage_max = 4 * (len - 3) * ARITH_MAX; - /* 16-bit arithmetics, both endians. */ + orig_hit_cnt = new_hit_cnt; - if (len < 2) goto skip_arith; + for (i = 0; i < len - 3; ++i) { - stage_name = "arith 16/8"; - stage_short = "arith16"; - stage_cur = 0; - stage_max = 4 * (len - 1) * ARITH_MAX; + u32 orig = *(u32*)(out_buf + i); + /* Let's consult the effector map... */ + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + stage_max -= 4 * ARITH_MAX; + continue; - orig_hit_cnt = new_hit_cnt; + } - for (i = 0; i < len - 1; ++i) { + stage_cur_byte = i; - u16 orig = *(u16*)(out_buf + i); + for (j = 1; j <= ARITH_MAX; ++j) { - /* Let's consult the effector map... */ + u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), + r3 = orig ^ SWAP32(SWAP32(orig) + j), + r4 = orig ^ SWAP32(SWAP32(orig) - j); - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } + /* Little endian first. Same deal as with 16-bit: we only want to + try if the operation would have effect on more than two bytes. */ - stage_cur_byte = i; + stage_val_type = STAGE_VAL_LE; - for (j = 1; j <= ARITH_MAX; ++j) { + if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { - u16 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP16(SWAP16(orig) + j), - r4 = orig ^ SWAP16(SWAP16(orig) - j); + stage_cur_val = j; + *(u32*)(out_buf + i) = orig + j; - /* Try little endian addition and subtraction first. Do it only - if the operation would affect more than one byte (hence the - & 0xff overflow checks) and if it couldn't be a product of - a bitflip. */ + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_val_type = STAGE_VAL_LE; + } else - if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { + --stage_max; - stage_cur_val = j; - *(u16*)(out_buf + i) = orig + j; + if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_cur_val = -j; + *(u32*)(out_buf + i) = orig - j; - } else --stage_max; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + stage_cur++; - if ((orig & 0xff) < j && !could_be_bitflip(r2)) { + } else - stage_cur_val = -j; - *(u16*)(out_buf + i) = orig - j; + --stage_max; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + /* Big endian next. */ - } else --stage_max; + stage_val_type = STAGE_VAL_BE; - /* Big endian comes next. Same deal. 
*/ + if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { - stage_val_type = STAGE_VAL_BE; + stage_cur_val = j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { + } else - stage_cur_val = j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); + --stage_max; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { - } else --stage_max; + stage_cur_val = -j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); - if ((orig >> 8) < j && !could_be_bitflip(r4)) { + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_cur_val = -j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); + } else - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + --stage_max; - } else --stage_max; + *(u32*)(out_buf + i) = orig; - *(u16*)(out_buf + i) = orig; + } - } + } - } + new_hit_cnt = queued_paths + unique_crashes; - new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH32] += stage_max; - stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH16] += stage_max; +skip_arith: + /********************** + * INTERESTING VALUES * + **********************/ + stage_name = "interest 8/8"; + stage_short = "int8"; + stage_cur = 0; + stage_max = len * sizeof(interesting_8); + stage_val_type = STAGE_VAL_LE; - /* 32-bit arithmetics, both endians. */ + orig_hit_cnt = new_hit_cnt; - if (len < 4) goto skip_arith; + /* Setting 8-bit integers. */ - stage_name = "arith 32/8"; - stage_short = "arith32"; - stage_cur = 0; - stage_max = 4 * (len - 3) * ARITH_MAX; + for (i = 0; i < len; ++i) { + u8 orig = out_buf[i]; + /* Let's consult the effector map... */ - orig_hit_cnt = new_hit_cnt; + if (!eff_map[EFF_APOS(i)]) { - for (i = 0; i < len - 3; ++i) { + stage_max -= sizeof(interesting_8); + continue; - u32 orig = *(u32*)(out_buf + i); + } - /* Let's consult the effector map... */ + stage_cur_byte = i; - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } + for (j = 0; j < sizeof(interesting_8); ++j) { - stage_cur_byte = i; + /* Skip if the value could be a product of bitflips or arithmetics. */ - for (j = 1; j <= ARITH_MAX; ++j) { + if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || + could_be_arith(orig, (u8)interesting_8[j], 1)) { - u32 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP32(SWAP32(orig) + j), - r4 = orig ^ SWAP32(SWAP32(orig) - j); + --stage_max; + continue; - /* Little endian first. Same deal as with 16-bit: we only want to - try if the operation would have effect on more than two bytes. 
*/ + } - stage_val_type = STAGE_VAL_LE; + stage_cur_val = interesting_8[j]; + out_buf[i] = interesting_8[j]; - if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur_val = j; - *(u32*)(out_buf + i) = orig + j; + out_buf[i] = orig; + ++stage_cur; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } - } else --stage_max; + } - if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { + new_hit_cnt = queued_paths + unique_crashes; - stage_cur_val = -j; - *(u32*)(out_buf + i) = orig - j; + stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST8] += stage_max; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + /* Setting 16-bit integers, both endians. */ - } else --stage_max; + if (no_arith || len < 2) goto skip_interest; - /* Big endian next. */ + stage_name = "interest 16/8"; + stage_short = "int16"; + stage_cur = 0; + stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); - stage_val_type = STAGE_VAL_BE; + orig_hit_cnt = new_hit_cnt; - if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { + for (i = 0; i < len - 1; ++i) { - stage_cur_val = j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); + u16 orig = *(u16*)(out_buf + i); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + /* Let's consult the effector map... */ - } else --stage_max; + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { + stage_max -= sizeof(interesting_16); + continue; - stage_cur_val = -j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_cur_byte = i; - } else --stage_max; + for (j = 0; j < sizeof(interesting_16) / 2; ++j) { - *(u32*)(out_buf + i) = orig; + stage_cur_val = interesting_16[j]; - } + /* Skip if this could be a product of a bitflip, arithmetics, + or single-byte interesting value insertion. */ - } + if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && + !could_be_arith(orig, (u16)interesting_16[j], 2) && + !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { - new_hit_cnt = queued_paths + unique_crashes; + stage_val_type = STAGE_VAL_LE; - stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH32] += stage_max; + *(u16*)(out_buf + i) = interesting_16[j]; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + } else + --stage_max; - skip_arith: + if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && + !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && + !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && + !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { - /********************** - * INTERESTING VALUES * - **********************/ + stage_val_type = STAGE_VAL_BE; - stage_name = "interest 8/8"; - stage_short = "int8"; - stage_cur = 0; - stage_max = len * sizeof(interesting_8); + *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + } else + --stage_max; - stage_val_type = STAGE_VAL_LE; + } - orig_hit_cnt = new_hit_cnt; + *(u16*)(out_buf + i) = orig; - /* Setting 8-bit integers. 
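The interesting_8/16/32 tables driving these stages are the INTERESTING_* presets from config.h - boundary values chosen to trip off-by-one, sign and overflow bugs - with each wider table chaining the narrower lists, which is why interesting_8 holds 9 entries while interesting_16 holds 19 and interesting_32 holds 27:

#include <stdint.h>
#include <stdio.h>

#define INTERESTING_8 -128, -1, 0, 1, 16, 32, 64, 100, 127
#define INTERESTING_16 -32768, -129, 128, 255, 256, 512, 1000, 1024, 4096, 32767
#define INTERESTING_32 \
  -2147483648LL, -100663046, -32769, 32768, 65535, 65536, 100663045, 2147483647

static int8_t  interesting_8[]  = { INTERESTING_8 };
static int16_t interesting_16[] = { INTERESTING_8, INTERESTING_16 };
static int32_t interesting_32[] = { INTERESTING_8, INTERESTING_16, INTERESTING_32 };

int main(void) {
  printf("%zu %zu %zu\n", sizeof(interesting_8), sizeof(interesting_16) / 2,
         sizeof(interesting_32) / 4); /* 9 19 27 */
  return 0;
}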
*/ + } - for (i = 0; i < len; ++i) { + new_hit_cnt = queued_paths + unique_crashes; - u8 orig = out_buf[i]; + stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST16] += stage_max; - /* Let's consult the effector map... */ + if (len < 4) goto skip_interest; - if (!eff_map[EFF_APOS(i)]) { - stage_max -= sizeof(interesting_8); - continue; - } + /* Setting 32-bit integers, both endians. */ - stage_cur_byte = i; + stage_name = "interest 32/8"; + stage_short = "int32"; + stage_cur = 0; + stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); - for (j = 0; j < sizeof(interesting_8); ++j) { + orig_hit_cnt = new_hit_cnt; - /* Skip if the value could be a product of bitflips or arithmetics. */ + for (i = 0; i < len - 3; ++i) { - if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || - could_be_arith(orig, (u8)interesting_8[j], 1)) { - --stage_max; - continue; - } + u32 orig = *(u32*)(out_buf + i); - stage_cur_val = interesting_8[j]; - out_buf[i] = interesting_8[j]; + /* Let's consult the effector map... */ - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - out_buf[i] = orig; - ++stage_cur; + stage_max -= sizeof(interesting_32) >> 1; + continue; - } + } - } + stage_cur_byte = i; - new_hit_cnt = queued_paths + unique_crashes; + for (j = 0; j < sizeof(interesting_32) / 4; ++j) { - stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST8] += stage_max; + stage_cur_val = interesting_32[j]; + /* Skip if this could be a product of a bitflip, arithmetics, + or word interesting value insertion. */ + if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && + !could_be_arith(orig, interesting_32[j], 4) && + !could_be_interest(orig, interesting_32[j], 4, 0)) { + stage_val_type = STAGE_VAL_LE; - /* Setting 16-bit integers, both endians. */ + *(u32*)(out_buf + i) = interesting_32[j]; - if (no_arith || len < 2) goto skip_interest; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_name = "interest 16/8"; - stage_short = "int16"; - stage_cur = 0; - stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); + } else + --stage_max; + if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && + !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && + !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && + !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { - orig_hit_cnt = new_hit_cnt; + stage_val_type = STAGE_VAL_BE; - for (i = 0; i < len - 1; ++i) { + *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - u16 orig = *(u16*)(out_buf + i); + } else - /* Let's consult the effector map... */ + --stage_max; - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= sizeof(interesting_16); - continue; - } + } - stage_cur_byte = i; + *(u32*)(out_buf + i) = orig; - for (j = 0; j < sizeof(interesting_16) / 2; ++j) { + } - stage_cur_val = interesting_16[j]; + new_hit_cnt = queued_paths + unique_crashes; - /* Skip if this could be a product of a bitflip, arithmetics, - or single-byte interesting value insertion. 
*/ + stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST32] += stage_max; - if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && - !could_be_arith(orig, (u16)interesting_16[j], 2) && - !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { +skip_interest: - stage_val_type = STAGE_VAL_LE; + /******************** + * DICTIONARY STUFF * + ********************/ - *(u16*)(out_buf + i) = interesting_16[j]; + if (!extras_cnt) goto skip_user_extras; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + /* Overwrite with user-supplied extras. */ - } else --stage_max; + stage_name = "user extras (over)"; + stage_short = "ext_UO"; + stage_cur = 0; + stage_max = extras_cnt * len; - if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && - !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && - !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && - !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { + stage_val_type = STAGE_VAL_NONE; - stage_val_type = STAGE_VAL_BE; + orig_hit_cnt = new_hit_cnt; - *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + for (i = 0; i < len; ++i) { - } else --stage_max; + u32 last_len = 0; - } + stage_cur_byte = i; - *(u16*)(out_buf + i) = orig; + /* Extras are sorted by size, from smallest to largest. This means + that we don't have to worry about restoring the buffer in + between writes at a particular offset determined by the outer + loop. */ - } + for (j = 0; j < extras_cnt; ++j) { - new_hit_cnt = queued_paths + unique_crashes; + /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also + skip them if there's no room to insert the payload, if the token + is redundant, or if its entire span has no bytes set in the effector + map. */ - stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST16] += stage_max; + if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || + extras[j].len > len - i || + !memcmp(extras[j].data, out_buf + i, extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { + --stage_max; + continue; + } + last_len = extras[j].len; + memcpy(out_buf + i, extras[j].data, last_len); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - if (len < 4) goto skip_interest; + ++stage_cur; - /* Setting 32-bit integers, both endians. */ + } - stage_name = "interest 32/8"; - stage_short = "int32"; - stage_cur = 0; - stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); + } - orig_hit_cnt = new_hit_cnt; + new_hit_cnt = queued_paths + unique_crashes; - for (i = 0; i < len - 3; ++i) { + stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UO] += stage_max; - u32 orig = *(u32*)(out_buf + i); + /* Insertion of user-supplied extras. */ - /* Let's consult the effector map... 
*/ + stage_name = "user extras (insert)"; + stage_short = "ext_UI"; + stage_cur = 0; + stage_max = extras_cnt * len; - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= sizeof(interesting_32) >> 1; - continue; - } + orig_hit_cnt = new_hit_cnt; - stage_cur_byte = i; + ex_tmp = ck_alloc(len + MAX_DICT_FILE); - for (j = 0; j < sizeof(interesting_32) / 4; ++j) { + for (i = 0; i <= len; ++i) { - stage_cur_val = interesting_32[j]; + stage_cur_byte = i; - /* Skip if this could be a product of a bitflip, arithmetics, - or word interesting value insertion. */ + for (j = 0; j < extras_cnt; ++j) { - if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && - !could_be_arith(orig, interesting_32[j], 4) && - !could_be_interest(orig, interesting_32[j], 4, 0)) { + if (len + extras[j].len > MAX_FILE) { - stage_val_type = STAGE_VAL_LE; + --stage_max; + continue; - *(u32*)(out_buf + i) = interesting_32[j]; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + /* Insert token */ + memcpy(ex_tmp + i, extras[j].data, extras[j].len); - } else --stage_max; + /* Copy tail */ + memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); - if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && - !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && - !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && - !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { + if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { - stage_val_type = STAGE_VAL_BE; + ck_free(ex_tmp); + goto abandon_entry; - *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } - } else --stage_max; + ++stage_cur; - } + } - *(u32*)(out_buf + i) = orig; + /* Copy head */ + ex_tmp[i] = out_buf[i]; - } + } - new_hit_cnt = queued_paths + unique_crashes; + ck_free(ex_tmp); - stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST32] += stage_max; + new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UI] += stage_max; +skip_user_extras: + if (!a_extras_cnt) goto skip_extras; + stage_name = "auto extras (over)"; + stage_short = "ext_AO"; + stage_cur = 0; + stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; - skip_interest: + stage_val_type = STAGE_VAL_NONE; - /******************** - * DICTIONARY STUFF * - ********************/ + orig_hit_cnt = new_hit_cnt; - if (!extras_cnt) goto skip_user_extras; + for (i = 0; i < len; ++i) { - /* Overwrite with user-supplied extras. */ + u32 last_len = 0; - stage_name = "user extras (over)"; - stage_short = "ext_UO"; - stage_cur = 0; - stage_max = extras_cnt * len; + stage_cur_byte = i; + for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { + /* See the comment in the earlier code; extras are sorted by size. */ + if (a_extras[j].len > len - i || + !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, + EFF_SPAN_ALEN(i, a_extras[j].len))) { - stage_val_type = STAGE_VAL_NONE; + --stage_max; + continue; - orig_hit_cnt = new_hit_cnt; + } - for (i = 0; i < len; ++i) { + last_len = a_extras[j].len; + memcpy(out_buf + i, a_extras[j].data, last_len); - u32 last_len = 0; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur_byte = i; + ++stage_cur; - /* Extras are sorted by size, from smallest to largest. 
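MAX_DET_EXTRAS (200 in config.h) caps the deterministic dictionary work: once the user supplies more extras than that, each token is tried at a given position only with probability MAX_DET_EXTRAS/extras_cnt, so the expected number of tokens tried per position stays roughly constant. A sketch of the skip test - skip_this_extra() is a hypothetical wrapper, and UR() here uses rand() in place of afl-fuzz's own PRNG:

#include <stdint.h>
#include <stdlib.h>

#define MAX_DET_EXTRAS 200 /* config.h */

static uint32_t UR(uint32_t limit) { return (uint32_t)rand() % limit; }

int skip_this_extra(uint32_t extras_cnt, uint32_t token_len,
                    uint32_t room_left) {
  /* With 1000 extras, each survives with p = 200/1000: ~200 tried on
     average, exactly as if the dictionary had MAX_DET_EXTRAS entries. */
  if (extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS)
    return 1;
  if (token_len > room_left) return 1; /* token would overrun the buffer */
  return 0;
}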
This means - that we don't have to worry about restoring the buffer in - between writes at a particular offset determined by the outer - loop. */ + } - for (j = 0; j < extras_cnt; ++j) { + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); - /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also - skip them if there's no room to insert the payload, if the token - is redundant, or if its entire span has no bytes set in the effector - map. */ + } - if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || - extras[j].len > len - i || - !memcmp(extras[j].data, out_buf + i, extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { + new_hit_cnt = queued_paths + unique_crashes; - --stage_max; - continue; + stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_AO] += stage_max; - } +skip_extras: - last_len = extras[j].len; - memcpy(out_buf + i, extras[j].data, last_len); + /* If we made this to here without jumping to havoc_stage or abandon_entry, + we're properly done with deterministic steps and can mark it as such + in the .state/ directory. */ - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + if (!queue_cur->passed_det) mark_as_det_done(queue_cur); - ++stage_cur; + /**************** + * RANDOM HAVOC * + ****************/ - } +havoc_stage: +pacemaker_fuzzing: - /* Restore all the clobbered memory. */ - memcpy(out_buf + i, in_buf + i, last_len); + stage_cur_byte = -1; - } + /* The havoc stage mutation code is also invoked when splicing files; if the + splice_cycle variable is set, generate different descriptions and such. */ - new_hit_cnt = queued_paths + unique_crashes; + if (!splice_cycle) { - stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UO] += stage_max; + stage_name = "MOpt-havoc"; + stage_short = "MOpt_havoc"; + stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score / + havoc_div / 100; - /* Insertion of user-supplied extras. */ + } else { - stage_name = "user extras (insert)"; - stage_short = "ext_UI"; - stage_cur = 0; - stage_max = extras_cnt * len; + static u8 tmp[32]; + perf_score = orig_perf; + sprintf(tmp, "MOpt-splice %u", splice_cycle); + stage_name = tmp; + stage_short = "MOpt_splice"; + stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; + } - orig_hit_cnt = new_hit_cnt; + s32 temp_len_puppet; + cur_ms_lv = get_cur_time(); - ex_tmp = ck_alloc(len + MAX_DICT_FILE); + { - for (i = 0; i <= len; ++i) { + if (key_puppet == 1) { - stage_cur_byte = i; + if (unlikely(orig_hit_cnt_puppet == 0)) { - for (j = 0; j < extras_cnt; ++j) { + orig_hit_cnt_puppet = queued_paths + unique_crashes; + last_limit_time_start = get_cur_time(); + SPLICE_CYCLES_puppet = + (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + + SPLICE_CYCLES_puppet_low); - if (len + extras[j].len > MAX_FILE) { - --stage_max; - continue; - } + } - /* Insert token */ - memcpy(ex_tmp + i, extras[j].data, extras[j].len); + } - /* Copy tail */ - memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); + { - if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { - ck_free(ex_tmp); - goto abandon_entry; - } +#ifndef IGNORE_FINDS + havoc_stage_puppet: +#endif - ++stage_cur; + stage_cur_byte = -1; - } + /* The havoc stage mutation code is also invoked when splicing files; if + the splice_cycle variable is set, generate different descriptions and + such. 
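The havoc budget above is just perf_score scaled by the config.h constants (HAVOC_CYCLES_INIT 1024 for the pass that follows the deterministic stages, HAVOC_CYCLES 256 afterwards) and clamped from below by HAVOC_MIN. Worked out with a hypothetical havoc_budget() helper:

#include <stdio.h>

#define HAVOC_CYCLES_INIT 1024 /* config.h */
#define HAVOC_CYCLES 256
#define HAVOC_MIN 16

static int havoc_budget(int doing_det, int perf_score, int havoc_div) {
  int stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
                  perf_score / havoc_div / 100;
  return stage_max < HAVOC_MIN ? HAVOC_MIN : stage_max;
}

int main(void) {
  printf("%d\n", havoc_budget(0, 100, 1)); /* average seed: 256 rounds   */
  printf("%d\n", havoc_budget(0, 10, 4));  /* slow seed: 6, raised to 16 */
  return 0;
}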
*/
-    /* Copy head */
-    ex_tmp[i] = out_buf[i];
+      if (!splice_cycle) {
-  }
+        stage_name = "MOpt-havoc";
+        stage_short = "MOpt_havoc";
+        stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+                    perf_score / havoc_div / 100;
-  ck_free(ex_tmp);
+      } else {
-  new_hit_cnt = queued_paths + unique_crashes;
+        static u8 tmp[32];
+        perf_score = orig_perf;
+        sprintf(tmp, "MOpt-splice %u", splice_cycle);
+        stage_name = tmp;
+        stage_short = "MOpt_splice";
+        stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
-  stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_EXTRAS_UI] += stage_max;
+      }
- skip_user_extras:
+      if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
-  if (!a_extras_cnt) goto skip_extras;
+      temp_len = len;
-  stage_name = "auto extras (over)";
-  stage_short = "ext_AO";
-  stage_cur = 0;
-  stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
+      orig_hit_cnt = queued_paths + unique_crashes;
+      havoc_queued = queued_paths;
-  stage_val_type = STAGE_VAL_NONE;
+      for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-  orig_hit_cnt = new_hit_cnt;
+        u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
-  for (i = 0; i < len; ++i) {
+        stage_cur_val = use_stacking;
-    u32 last_len = 0;
+        for (i = 0; i < operator_num; ++i) {
-    stage_cur_byte = i;
+          stage_cycles_puppet_v3[swarm_now][i] =
+              stage_cycles_puppet_v2[swarm_now][i];
-    for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
+        }
-      /* See the comment in the earlier code; extras are sorted by size. */
+        for (i = 0; i < use_stacking; ++i) {
-      if (a_extras[j].len > len - i ||
-          !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
-          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) {
+          switch (select_algorithm()) {
-        --stage_max;
-        continue;
+            case 0:
+              /* Flip a single bit somewhere. Spooky! */
+              FLIP_BIT(out_buf, UR(temp_len << 3));
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP1] += 1;
+              break;
-      }
+            case 1:
+              if (temp_len < 2) break;
+              temp_len_puppet = UR(temp_len << 3);
+              FLIP_BIT(out_buf, temp_len_puppet);
+              FLIP_BIT(out_buf, temp_len_puppet + 1);
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP2] += 1;
+              break;
-      last_len = a_extras[j].len;
-      memcpy(out_buf + i, a_extras[j].data, last_len);
+            case 2:
+              if (temp_len < 2) break;
+              temp_len_puppet = UR(temp_len << 3);
+              FLIP_BIT(out_buf, temp_len_puppet);
+              FLIP_BIT(out_buf, temp_len_puppet + 1);
+              FLIP_BIT(out_buf, temp_len_puppet + 2);
+              FLIP_BIT(out_buf, temp_len_puppet + 3);
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP4] += 1;
+              break;
-      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+            case 3:
+              if (temp_len < 4) break;
+              out_buf[UR(temp_len)] ^= 0xFF;
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP8] += 1;
+              break;
-      ++stage_cur;
+            case 4:
+              if (temp_len < 8) break;
+              *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF;
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP16] += 1;
+              break;
-    }
+            case 5:
+              if (temp_len < 8) break;
+              *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF;
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP32] += 1;
+              break;
-    /* Restore all the clobbered memory. */
-    memcpy(out_buf + i, in_buf + i, last_len);
+            case 6:
+              out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
+              out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
+              stage_cycles_puppet_v2[swarm_now][STAGE_ARITH8] += 1;
+              break;
-  }
+            case 7:
+              /* Randomly subtract from word, random endian.
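use_stacking picks how many mutations get layered onto one havoc round: with HAVOC_STACK_POW2 at its config.h default of 7, the expression 1 << (1 + UR(7)) yields 2, 4, 8, ..., 128, each with probability 1/7, so small surgical edits dominate while the occasional heavy mangling still happens:

#include <stdio.h>
#include <stdlib.h>

#define HAVOC_STACK_POW2 7 /* config.h */

static unsigned UR(unsigned limit) { return (unsigned)rand() % limit; }

int main(void) {
  for (int i = 0; i < 8; ++i)
    printf("%u ", 1u << (1 + UR(HAVOC_STACK_POW2))); /* powers of two, 2..128 */
  printf("\n");
  return 0;
}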
*/ + if (temp_len < 8) break; + if (UR(2)) { - new_hit_cnt = queued_paths + unique_crashes; + u32 pos = UR(temp_len - 1); + *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_AO] += stage_max; + } else { - skip_extras: + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); - /* If we made this to here without jumping to havoc_stage or abandon_entry, - we're properly done with deterministic steps and can mark it as such - in the .state/ directory. */ + } - if (!queue_cur->passed_det) mark_as_det_done(queue_cur); + /* Randomly add to word, random endian. */ + if (UR(2)) { - /**************** - * RANDOM HAVOC * - ****************/ + u32 pos = UR(temp_len - 1); + *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); - havoc_stage: - pacemaker_fuzzing: + } else { + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); - stage_cur_byte = -1; + } - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. */ + stage_cycles_puppet_v2[swarm_now][STAGE_ARITH16] += 1; + break; - if (!splice_cycle) { + case 8: + /* Randomly subtract from dword, random endian. */ + if (temp_len < 8) break; + if (UR(2)) { - stage_name = "MOpt-havoc"; - stage_short = "MOpt_havoc"; - stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; + u32 pos = UR(temp_len - 3); + *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } - else { + } else { - static u8 tmp[32]; + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); - perf_score = orig_perf; + } - sprintf(tmp, "MOpt-splice %u", splice_cycle); - stage_name = tmp; - stage_short = "MOpt_splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; + /* Randomly add to dword, random endian. */ + // if (temp_len < 4) break; + if (UR(2)) { - } + u32 pos = UR(temp_len - 3); + *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); - s32 temp_len_puppet; - cur_ms_lv = get_cur_time(); + } else { - { + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); + } - if (key_puppet == 1) - { - if (unlikely(orig_hit_cnt_puppet == 0)) - { - orig_hit_cnt_puppet = queued_paths + unique_crashes; - last_limit_time_start = get_cur_time(); - SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low); - } - } + stage_cycles_puppet_v2[swarm_now][STAGE_ARITH32] += 1; + break; + case 9: + /* Set byte to interesting value. */ + if (temp_len < 4) break; + out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; + stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST8] += 1; + break; - { -#ifndef IGNORE_FINDS - havoc_stage_puppet: -#endif + case 10: + /* Set word to interesting value, randomly choosing endian. */ + if (temp_len < 8) break; + if (UR(2)) { - stage_cur_byte = -1; + *(u16*)(out_buf + UR(temp_len - 1)) = + interesting_16[UR(sizeof(interesting_16) >> 1)]; - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. 
*/ + } else { - if (!splice_cycle) { + *(u16*)(out_buf + UR(temp_len - 1)) = + SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]); - stage_name = "MOpt avoc"; - stage_short = "MOpt_havoc"; - stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; + } - } - else { - static u8 tmp[32]; - perf_score = orig_perf; - sprintf(tmp, "MOpt splice %u", splice_cycle); - stage_name = tmp; - stage_short = "MOpt_splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - } + stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST16] += 1; + break; + case 11: + /* Set dword to interesting value, randomly choosing endian. */ + if (temp_len < 8) break; - if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; - - temp_len = len; - - orig_hit_cnt = queued_paths + unique_crashes; - - havoc_queued = queued_paths; - - - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); - - stage_cur_val = use_stacking; - - - for (i = 0; i < operator_num; ++i) - { - stage_cycles_puppet_v3[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i]; - } - - - for (i = 0; i < use_stacking; ++i) { - - switch (select_algorithm()) { - - case 0: - /* Flip a single bit somewhere. Spooky! */ - FLIP_BIT(out_buf, UR(temp_len << 3)); - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP1] += 1; - break; - - - case 1: - if (temp_len < 2) break; - temp_len_puppet = UR(temp_len << 3); - FLIP_BIT(out_buf, temp_len_puppet); - FLIP_BIT(out_buf, temp_len_puppet + 1); - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP2] += 1; - break; - - case 2: - if (temp_len < 2) break; - temp_len_puppet = UR(temp_len << 3); - FLIP_BIT(out_buf, temp_len_puppet); - FLIP_BIT(out_buf, temp_len_puppet + 1); - FLIP_BIT(out_buf, temp_len_puppet + 2); - FLIP_BIT(out_buf, temp_len_puppet + 3); - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP4] += 1; - break; - - case 3: - if (temp_len < 4) break; - out_buf[UR(temp_len)] ^= 0xFF; - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP8] += 1; - break; - - case 4: - if (temp_len < 8) break; - *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF; - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP16] += 1; - break; - - case 5: - if (temp_len < 8) break; - *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF; - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP32] += 1; - break; - - case 6: - out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); - out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); - stage_cycles_puppet_v2[swarm_now][STAGE_ARITH8] += 1; - break; - - case 7: - /* Randomly subtract from word, random endian. */ - if (temp_len < 8) break; - if (UR(2)) { - u32 pos = UR(temp_len - 1); - *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } - else { - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); - } - /* Randomly add to word, random endian. */ - if (UR(2)) { - u32 pos = UR(temp_len - 1); - *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } - else { - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); - } - stage_cycles_puppet_v2[swarm_now][STAGE_ARITH16] += 1; - break; - - - case 8: - /* Randomly subtract from dword, random endian. 
*/ - if (temp_len < 8) break; - if (UR(2)) { - u32 pos = UR(temp_len - 3); - *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } - else { - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); - } - /* Randomly add to dword, random endian. */ - //if (temp_len < 4) break; - if (UR(2)) { - u32 pos = UR(temp_len - 3); - *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } - else { - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); - } - stage_cycles_puppet_v2[swarm_now][STAGE_ARITH32] += 1; - break; - - - case 9: - /* Set byte to interesting value. */ - if (temp_len < 4) break; - out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; - stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST8] += 1; - break; - - case 10: - /* Set word to interesting value, randomly choosing endian. */ - if (temp_len < 8) break; - if (UR(2)) { - *(u16*)(out_buf + UR(temp_len - 1)) = - interesting_16[UR(sizeof(interesting_16) >> 1)]; - } - else { - *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16( - interesting_16[UR(sizeof(interesting_16) >> 1)]); - } - stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST16] += 1; - break; - - - case 11: - /* Set dword to interesting value, randomly choosing endian. */ - - if (temp_len < 8) break; - - if (UR(2)) { - *(u32*)(out_buf + UR(temp_len - 3)) = - interesting_32[UR(sizeof(interesting_32) >> 2)]; - } - else { - *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32( - interesting_32[UR(sizeof(interesting_32) >> 2)]); - } - stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST32] += 1; - break; + if (UR(2)) { + *(u32*)(out_buf + UR(temp_len - 3)) = + interesting_32[UR(sizeof(interesting_32) >> 2)]; - case 12: + } else { - /* Just set a random byte to a random value. Because, - why not. We use XOR with 1-255 to eliminate the - possibility of a no-op. */ + *(u32*)(out_buf + UR(temp_len - 3)) = + SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]); - out_buf[UR(temp_len)] ^= 1 + UR(255); - stage_cycles_puppet_v2[swarm_now][STAGE_RANDOMBYTE] += 1; - break; + } + stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST32] += 1; + break; + case 12: - case 13: { + /* Just set a random byte to a random value. Because, + why not. We use XOR with 1-255 to eliminate the + possibility of a no-op. */ - /* Delete bytes. We're making this a bit more likely - than insertion (the next option) in hopes of keeping - files reasonably small. */ + out_buf[UR(temp_len)] ^= 1 + UR(255); + stage_cycles_puppet_v2[swarm_now][STAGE_RANDOMBYTE] += 1; + break; - u32 del_from, del_len; + case 13: { - if (temp_len < 2) break; + /* Delete bytes. We're making this a bit more likely + than insertion (the next option) in hopes of keeping + files reasonably small. */ - /* Don't delete too much. */ + u32 del_from, del_len; - del_len = choose_block_len(temp_len - 1); + if (temp_len < 2) break; - del_from = UR(temp_len - del_len + 1); + /* Don't delete too much. 
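Case 14 is the only mutation here that grows the buffer: it builds a new allocation out of head, inserted block and tail, where the inserted block is either a clone of existing bytes (75%) or a run of one constant byte (25%). A sketch of that assembly - insert_block() is a hypothetical name, malloc() stands in for ck_alloc_nozero(), and the constant fill is fixed at 0x41 where AFL picks a random or existing byte:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

uint8_t* insert_block(uint8_t* buf, uint32_t* len, uint32_t clone_from,
                      uint32_t clone_to, uint32_t clone_len,
                      int actually_clone) {
  uint8_t* new_buf = malloc(*len + clone_len);

  memcpy(new_buf, buf, clone_to);                            /* head     */
  if (actually_clone)
    memcpy(new_buf + clone_to, buf + clone_from, clone_len); /* clone    */
  else
    memset(new_buf + clone_to, 0x41, clone_len);             /* constant */
  memcpy(new_buf + clone_to + clone_len, buf + clone_to,
         *len - clone_to);                                   /* tail     */

  free(buf);
  *len += clone_len;
  return new_buf;
}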
*/ - memmove(out_buf + del_from, out_buf + del_from + del_len, - temp_len - del_from - del_len); + del_len = choose_block_len(temp_len - 1); - temp_len -= del_len; - stage_cycles_puppet_v2[swarm_now][STAGE_DELETEBYTE] += 1; - break; + del_from = UR(temp_len - del_len + 1); - } + memmove(out_buf + del_from, out_buf + del_from + del_len, + temp_len - del_from - del_len); - case 14: + temp_len -= del_len; + stage_cycles_puppet_v2[swarm_now][STAGE_DELETEBYTE] += 1; + break; - if (temp_len + HAVOC_BLK_XL < MAX_FILE) { + } - /* Clone bytes (75%) or insert a block of constant bytes (25%). */ + case 14: - u8 actually_clone = UR(4); - u32 clone_from, clone_to, clone_len; - u8* new_buf; + if (temp_len + HAVOC_BLK_XL < MAX_FILE) { - if (actually_clone) { + /* Clone bytes (75%) or insert a block of constant bytes (25%). + */ - clone_len = choose_block_len(temp_len); - clone_from = UR(temp_len - clone_len + 1); + u8 actually_clone = UR(4); + u32 clone_from, clone_to, clone_len; + u8* new_buf; - } - else { + if (actually_clone) { - clone_len = choose_block_len(HAVOC_BLK_XL); - clone_from = 0; + clone_len = choose_block_len(temp_len); + clone_from = UR(temp_len - clone_len + 1); - } + } else { - clone_to = UR(temp_len); + clone_len = choose_block_len(HAVOC_BLK_XL); + clone_from = 0; - new_buf = ck_alloc_nozero(temp_len + clone_len); + } - /* Head */ + clone_to = UR(temp_len); - memcpy(new_buf, out_buf, clone_to); + new_buf = ck_alloc_nozero(temp_len + clone_len); - /* Inserted part */ + /* Head */ - if (actually_clone) - memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); - else - memset(new_buf + clone_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); + memcpy(new_buf, out_buf, clone_to); - /* Tail */ - memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, - temp_len - clone_to); + /* Inserted part */ - ck_free(out_buf); - out_buf = new_buf; - temp_len += clone_len; - stage_cycles_puppet_v2[swarm_now][STAGE_Clone75] += 1; - } + if (actually_clone) + memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); + else + memset(new_buf + clone_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); - break; + /* Tail */ + memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, + temp_len - clone_to); - case 15: { + ck_free(out_buf); + out_buf = new_buf; + temp_len += clone_len; + stage_cycles_puppet_v2[swarm_now][STAGE_Clone75] += 1; - /* Overwrite bytes with a randomly selected chunk (75%) or fixed - bytes (25%). */ + } - u32 copy_from, copy_to, copy_len; + break; - if (temp_len < 2) break; + case 15: { - copy_len = choose_block_len(temp_len - 1); + /* Overwrite bytes with a randomly selected chunk (75%) or fixed + bytes (25%). */ - copy_from = UR(temp_len - copy_len + 1); - copy_to = UR(temp_len - copy_len + 1); + u32 copy_from, copy_to, copy_len; - if (UR(4)) { + if (temp_len < 2) break; - if (copy_from != copy_to) - memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + copy_len = choose_block_len(temp_len - 1); - } - else memset(out_buf + copy_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); - stage_cycles_puppet_v2[swarm_now][STAGE_OverWrite75] += 1; - break; + copy_from = UR(temp_len - copy_len + 1); + copy_to = UR(temp_len - copy_len + 1); - } + if (UR(4)) { + if (copy_from != copy_to) + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); - } + } else - } + memset(out_buf + copy_to, + UR(2) ? 
UR(256) : out_buf[UR(temp_len)], copy_len); + stage_cycles_puppet_v2[swarm_now][STAGE_OverWrite75] += 1; + break; + } - tmp_pilot_time += 1; + } + } + tmp_pilot_time += 1; + u64 temp_total_found = queued_paths + unique_crashes; - u64 temp_total_found = queued_paths + unique_crashes; + if (common_fuzz_stuff(argv, out_buf, temp_len)) + goto abandon_entry_puppet; + /* out_buf might have been mangled a bit, so let's restore it to its + original size and shape. */ + if (temp_len < len) out_buf = ck_realloc(out_buf, len); + temp_len = len; + memcpy(out_buf, in_buf, len); + /* If we're finding new stuff, let's run for a bit longer, limits + permitting. */ - if (common_fuzz_stuff(argv, out_buf, temp_len)) - goto abandon_entry_puppet; + if (queued_paths != havoc_queued) { - /* out_buf might have been mangled a bit, so let's restore it to its - original size and shape. */ + if (perf_score <= havoc_max_mult * 100) { - if (temp_len < len) out_buf = ck_realloc(out_buf, len); - temp_len = len; - memcpy(out_buf, in_buf, len); + stage_max *= 2; + perf_score *= 2; - /* If we're finding new stuff, let's run for a bit longer, limits - permitting. */ + } - if (queued_paths != havoc_queued) { + havoc_queued = queued_paths; - if (perf_score <= havoc_max_mult * 100) { - stage_max *= 2; - perf_score *= 2; - } + } - havoc_queued = queued_paths; + if (unlikely(queued_paths + unique_crashes > temp_total_found)) { - } + u64 temp_temp_puppet = + queued_paths + unique_crashes - temp_total_found; + total_puppet_find = total_puppet_find + temp_temp_puppet; + for (i = 0; i < 16; ++i) { - if (unlikely(queued_paths + unique_crashes > temp_total_found)) - { - u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found; - total_puppet_find = total_puppet_find + temp_temp_puppet; - for (i = 0; i < 16; ++i) - { - if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet_v3[swarm_now][i]) - stage_finds_puppet_v2[swarm_now][i] += temp_temp_puppet; - } - } + if (stage_cycles_puppet_v2[swarm_now][i] > + stage_cycles_puppet_v3[swarm_now][i]) + stage_finds_puppet_v2[swarm_now][i] += temp_temp_puppet; - } - new_hit_cnt = queued_paths + unique_crashes; + } - if (!splice_cycle) { - stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_HAVOC] += stage_max; - } else { - stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_SPLICE] += stage_max; } + } + + new_hit_cnt = queued_paths + unique_crashes; + + if (!splice_cycle) { + + stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_HAVOC] += stage_max; + + } else { + + stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_SPLICE] += stage_max; + + } + #ifndef IGNORE_FINDS - /************ - * SPLICING * - ************/ + /************ + * SPLICING * + ************/ + retry_splicing_puppet: - retry_splicing_puppet: + if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet && + queued_paths > 1 && queue_cur->len > 1) { - if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet && - queued_paths > 1 && queue_cur->len > 1) { + struct queue_entry* target; + u32 tid, split_at; + u8* new_buf; + s32 f_diff, l_diff; - struct queue_entry* target; - u32 tid, split_at; - u8* new_buf; - s32 f_diff, l_diff; + /* First of all, if we've modified in_buf for havoc, let's clean that + up... */ - /* First of all, if we've modified in_buf for havoc, let's clean that - up... 
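The "run for a bit longer" block doubles both the remaining budget and perf_score whenever the queue grows, and the doubling is permitted only while perf_score <= havoc_max_mult * 100 (HAVOC_MAX_MULT is 16 in config.h), so the extension is geometric but firmly bounded - a score-100 seed can stretch a 256-round pass at most five times:

#include <stdio.h>

#define HAVOC_MAX_MULT 16 /* config.h */

int main(void) {
  int perf_score = 100, stage_max = 256, doublings = 0;
  while (perf_score <= HAVOC_MAX_MULT * 100) {
    stage_max *= 2;
    perf_score *= 2;
    ++doublings;
  }
  printf("%d doublings, at most %d rounds\n", doublings, stage_max);
  return 0;
}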
*/
+      if (in_buf != orig_in) {
-    if (in_buf != orig_in) {
-      ck_free(in_buf);
-      in_buf = orig_in;
-      len = queue_cur->len;
-    }
+        ck_free(in_buf);
+        in_buf = orig_in;
+        len = queue_cur->len;
-    /* Pick a random queue entry and seek to it. Don't splice with yourself. */
+      }
+
+      /* Pick a random queue entry and seek to it. Don't splice with yourself.
+       */
+
+      do {
-    do { tid = UR(queued_paths); } while (tid == current_entry);
+        tid = UR(queued_paths);
-    splicing_with = tid;
-    target = queue;
+      } while (tid == current_entry);
-    while (tid >= 100) { target = target->next_100; tid -= 100; }
-    while (tid--) target = target->next;
+      splicing_with = tid;
+      target = queue;
-    /* Make sure that the target has a reasonable length. */
+      while (tid >= 100) {
-    while (target && (target->len < 2 || target == queue_cur)) {
+        target = target->next_100;
+        tid -= 100;
-      target = target->next;
-      ++splicing_with;
-    }
+      }
+
+      while (tid--)
+        target = target->next;
-    if (!target) goto retry_splicing_puppet;
+      /* Make sure that the target has a reasonable length. */
-    /* Read the testcase into a new buffer. */
+      while (target && (target->len < 2 || target == queue_cur)) {
-    fd = open(target->fname, O_RDONLY);
+        target = target->next;
+        ++splicing_with;
-    if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
+      }
-    new_buf = ck_alloc_nozero(target->len);
+      if (!target) goto retry_splicing_puppet;
-    ck_read(fd, new_buf, target->len, target->fname);
+      /* Read the testcase into a new buffer. */
-    close(fd);
+      fd = open(target->fname, O_RDONLY);
-    /* Find a suitable splicing location, somewhere between the first and
-       the last differing byte. Bail out if the difference is just a single
-       byte or so. */
+      if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
-    locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
+      new_buf = ck_alloc_nozero(target->len);
-    if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
-      ck_free(new_buf);
-      goto retry_splicing_puppet;
-    }
+      ck_read(fd, new_buf, target->len, target->fname);
-    /* Split somewhere between the first and last differing byte. */
+      close(fd);
-    split_at = f_diff + UR(l_diff - f_diff);
+      /* Find a suitable splicing location, somewhere between the first and
+         the last differing byte. Bail out if the difference is just a single
+         byte or so. */
-    /* Do the thing. */
+      locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
-    len = target->len;
-    memcpy(new_buf, in_buf, split_at);
-    in_buf = new_buf;
-    ck_free(out_buf);
-    out_buf = ck_alloc_nozero(len);
-    memcpy(out_buf, in_buf, len);
-    goto havoc_stage_puppet;
+      if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
+
+        ck_free(new_buf);
+        goto retry_splicing_puppet;
+
+      }
+
+      /* Split somewhere between the first and last differing byte. */
+
+      split_at = f_diff + UR(l_diff - f_diff);
+
+      /* Do the thing. */
+
+      len = target->len;
+      memcpy(new_buf, in_buf, split_at);
+      in_buf = new_buf;
+      ck_free(out_buf);
+      out_buf = ck_alloc_nozero(len);
+      memcpy(out_buf, in_buf, len);
+      goto havoc_stage_puppet;
-    }
+
+    }

#endif /* !IGNORE_FINDS */

-    ret_val = 0;
+    ret_val = 0;
+
+  abandon_entry:
+  abandon_entry_puppet:
+
+    if (splice_cycle >= SPLICE_CYCLES_puppet)
+      SPLICE_CYCLES_puppet =
+          (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+           SPLICE_CYCLES_puppet_low);
+
+    splicing_with = -1;
+
+    /* Update pending_not_fuzzed count if we made it through the calibration
+       cycle and have not seen this entry before.
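locate_diffs() feeds the splice: it reports the first and last byte offsets where the two parents disagree (or -1 if they are identical), and the split point is then drawn uniformly from [first, last). The helper in afl-fuzz amounts to:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;
typedef uint32_t u32;
typedef int32_t s32;

static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {
  s32 f_loc = -1, l_loc = -1;
  for (u32 pos = 0; pos < len; ++pos) {
    if (*(ptr1++) != *(ptr2++)) {
      if (f_loc == -1) f_loc = pos;
      l_loc = pos;
    }
  }
  *first = f_loc;
  *last = l_loc;
}

int main(void) {
  u8  a[] = "AAAA", b[] = "ABCA";
  s32 f, l;
  locate_diffs(a, b, 4, &f, &l);
  printf("%d %d\n", f, l); /* 1 2: the splice point falls in [1, 2) */
  return 0;
}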
*/ - abandon_entry: - abandon_entry_puppet: + // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) { - if (splice_cycle >= SPLICE_CYCLES_puppet) - SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low); + // queue_cur->was_fuzzed = 1; + // --pending_not_fuzzed; + // if (queue_cur->favored) --pending_favored; + // } + munmap(orig_in, queue_cur->len); - splicing_with = -1; + if (in_buf != orig_in) ck_free(in_buf); + ck_free(out_buf); + ck_free(eff_map); - /* Update pending_not_fuzzed count if we made it through the calibration - cycle and have not seen this entry before. */ + if (key_puppet == 1) { - // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) { - // queue_cur->was_fuzzed = 1; - // --pending_not_fuzzed; - // if (queue_cur->favored) --pending_favored; - // } + if (unlikely(queued_paths + unique_crashes > + ((queued_paths + unique_crashes) * limit_time_bound + + orig_hit_cnt_puppet))) { - munmap(orig_in, queue_cur->len); + key_puppet = 0; + cur_ms_lv = get_cur_time(); + new_hit_cnt = queued_paths + unique_crashes; + orig_hit_cnt_puppet = 0; + last_limit_time_start = 0; - if (in_buf != orig_in) ck_free(in_buf); - ck_free(out_buf); - ck_free(eff_map); + } + + } + if (unlikely(tmp_pilot_time > period_pilot)) { - if (key_puppet == 1) { - if (unlikely(queued_paths + unique_crashes > ((queued_paths + unique_crashes)*limit_time_bound + orig_hit_cnt_puppet))) { - key_puppet = 0; - cur_ms_lv = get_cur_time(); - new_hit_cnt = queued_paths + unique_crashes; - orig_hit_cnt_puppet = 0; - last_limit_time_start = 0; - } - } + total_pacemaker_time += tmp_pilot_time; + new_hit_cnt = queued_paths + unique_crashes; + swarm_fitness[swarm_now] = + (double)(total_puppet_find - temp_puppet_find) / + ((double)(tmp_pilot_time) / period_pilot_tmp); + tmp_pilot_time = 0; + temp_puppet_find = total_puppet_find; + u64 temp_stage_finds_puppet = 0; + for (i = 0; i < operator_num; ++i) { - if (unlikely(tmp_pilot_time > period_pilot)) { - total_pacemaker_time += tmp_pilot_time; - new_hit_cnt = queued_paths + unique_crashes; - swarm_fitness[swarm_now] = (double)(total_puppet_find - temp_puppet_find) / ((double)(tmp_pilot_time)/ period_pilot_tmp); - tmp_pilot_time = 0; - temp_puppet_find = total_puppet_find; + double temp_eff = 0.0; - u64 temp_stage_finds_puppet = 0; - for (i = 0; i < operator_num; ++i) { - double temp_eff = 0.0; + if (stage_cycles_puppet_v2[swarm_now][i] > + stage_cycles_puppet[swarm_now][i]) + temp_eff = (double)(stage_finds_puppet_v2[swarm_now][i] - + stage_finds_puppet[swarm_now][i]) / + (double)(stage_cycles_puppet_v2[swarm_now][i] - + stage_cycles_puppet[swarm_now][i]); - if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet[swarm_now][i]) - temp_eff = (double)(stage_finds_puppet_v2[swarm_now][i] - stage_finds_puppet[swarm_now][i]) / - (double)(stage_cycles_puppet_v2[swarm_now][i] - stage_cycles_puppet[swarm_now][i]); + if (eff_best[swarm_now][i] < temp_eff) { - if (eff_best[swarm_now][i] < temp_eff) { - eff_best[swarm_now][i] = temp_eff; - L_best[swarm_now][i] = x_now[swarm_now][i]; - } + eff_best[swarm_now][i] = temp_eff; + L_best[swarm_now][i] = x_now[swarm_now][i]; - stage_finds_puppet[swarm_now][i] = stage_finds_puppet_v2[swarm_now][i]; - stage_cycles_puppet[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i]; - temp_stage_finds_puppet += stage_finds_puppet[swarm_now][i]; - } + } + + stage_finds_puppet[swarm_now][i] = + stage_finds_puppet_v2[swarm_now][i]; + stage_cycles_puppet[swarm_now][i] 
= + stage_cycles_puppet_v2[swarm_now][i]; + temp_stage_finds_puppet += stage_finds_puppet[swarm_now][i]; + + } - swarm_now = swarm_now + 1; - if (swarm_now == swarm_num) { - key_module = 1; - for (i = 0; i < operator_num; ++i) { - core_operator_cycles_puppet_v2[i] = core_operator_cycles_puppet[i]; - core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet[i]; - core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i]; - } + swarm_now = swarm_now + 1; + if (swarm_now == swarm_num) { - double swarm_eff = 0.0; - swarm_now = 0; - for (i = 0; i < swarm_num; ++i) { - if (swarm_fitness[i] > swarm_eff) { - swarm_eff = swarm_fitness[i]; - swarm_now = i; - } - } - if (swarm_now <0 || swarm_now > swarm_num - 1) - PFATAL("swarm_now error number %d", swarm_now); + key_module = 1; + for (i = 0; i < operator_num; ++i) { - } - } - return ret_val; - } - } + core_operator_cycles_puppet_v2[i] = core_operator_cycles_puppet[i]; + core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet[i]; + core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i]; + } + + double swarm_eff = 0.0; + swarm_now = 0; + for (i = 0; i < swarm_num; ++i) { + + if (swarm_fitness[i] > swarm_eff) { + + swarm_eff = swarm_fitness[i]; + swarm_now = i; + + } + + } + + if (swarm_now < 0 || swarm_now > swarm_num - 1) + PFATAL("swarm_now error number %d", swarm_now); + + } + + } + + return ret_val; + + } + + } #undef FLIP_BIT } - u8 core_fuzzing(char** argv) { - int i; - if (swarm_num == 1) { - key_module = 2; - return 0; - } + int i; + if (swarm_num == 1) { - s32 len, fd, temp_len, j; - u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; - u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv; - u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; + key_module = 2; + return 0; - u8 ret_val = 1, doing_det = 0; + } + + s32 len, fd, temp_len, j; + u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; + u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv; + u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; - u8 a_collect[MAX_AUTO_EXTRA]; - u32 a_len = 0; + u8 ret_val = 1, doing_det = 0; + + u8 a_collect[MAX_AUTO_EXTRA]; + u32 a_len = 0; #ifdef IGNORE_FINDS - /* In IGNORE_FINDS mode, skip any entries that weren't in the - initial data set. */ + /* In IGNORE_FINDS mode, skip any entries that weren't in the + initial data set. */ - if (queue_cur->depth > 1) return 1; + if (queue_cur->depth > 1) return 1; #else - if (pending_favored) { + if (pending_favored) { - /* If we have any favored, non-fuzzed new arrivals in the queue, - possibly skip to them at the expense of already-fuzzed or non-favored - cases. */ + /* If we have any favored, non-fuzzed new arrivals in the queue, + possibly skip to them at the expense of already-fuzzed or non-favored + cases. */ - if ((queue_cur->was_fuzzed || !queue_cur->favored) && - UR(100) < SKIP_TO_NEW_PROB) return 1; + if ((queue_cur->was_fuzzed || !queue_cur->favored) && + UR(100) < SKIP_TO_NEW_PROB) + return 1; - } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { + } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { - /* Otherwise, still possibly skip non-favored cases, albeit less often. - The odds of skipping stuff are higher for already-fuzzed inputs and - lower for never-fuzzed entries. */ + /* Otherwise, still possibly skip non-favored cases, albeit less often. + The odds of skipping stuff are higher for already-fuzzed inputs and + lower for never-fuzzed entries. 
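
The bookkeeping in the hunks above reduces to one score per swarm: swarm_fitness[swarm_now] = (total_puppet_find - temp_puppet_find) / (tmp_pilot_time / period_pilot_tmp), i.e. new paths and crashes per unit of pilot time; once every swarm has been measured, the fittest one becomes the next swarm_now. A compact sketch of that selection step, with the swarm count and fitness values invented for illustration:

    #include <stdio.h>

    #define SWARM_NUM 5               /* stand-in for swarm_num */

    int main(void) {
      /* finds / (time / period) per swarm -- invented sample data */
      double swarm_fitness[SWARM_NUM] = {0.8, 2.4, 1.1, 0.3, 1.9};

      double swarm_eff = 0.0;
      int    swarm_now = 0;

      /* Same argmax loop as the pilot module's end-of-period code. */
      for (int i = 0; i < SWARM_NUM; ++i) {
        if (swarm_fitness[i] > swarm_eff) {
          swarm_eff = swarm_fitness[i];
          swarm_now = i;
        }
      }

      printf("fittest swarm: %d (fitness %.2f)\n", swarm_now, swarm_eff);
      return 0;
    }
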
*/ - if (queue_cycle > 1 && !queue_cur->was_fuzzed) { + if (queue_cycle > 1 && !queue_cur->was_fuzzed) { - if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; + if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; - } else { + } else { - if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; + if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; - } + } - } + } #endif /* ^IGNORE_FINDS */ - if (not_on_tty) { - ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", - current_entry, queued_paths, unique_crashes); - fflush(stdout); - } + if (not_on_tty) { + + ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", + current_entry, queued_paths, unique_crashes); + fflush(stdout); + + } + + /* Map the test case into memory. */ + + fd = open(queue_cur->fname, O_RDONLY); + + if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); + + len = queue_cur->len; + + orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - /* Map the test case into memory. */ + if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); - fd = open(queue_cur->fname, O_RDONLY); + close(fd); - if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); + /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every + single byte anyway, so it wouldn't give us any performance or memory usage + benefits. */ - len = queue_cur->len; + out_buf = ck_alloc_nozero(len); - orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + subseq_tmouts = 0; - if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); + cur_depth = queue_cur->depth; - close(fd); + /******************************************* + * CALIBRATION (only if failed earlier on) * + *******************************************/ - /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every - single byte anyway, so it wouldn't give us any performance or memory usage - benefits. */ + if (queue_cur->cal_failed) { - out_buf = ck_alloc_nozero(len); + u8 res = FAULT_TMOUT; - subseq_tmouts = 0; + if (queue_cur->cal_failed < CAL_CHANCES) { - cur_depth = queue_cur->depth; + res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); - /******************************************* - * CALIBRATION (only if failed earlier on) * - *******************************************/ + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); - if (queue_cur->cal_failed) { + } - u8 res = FAULT_TMOUT; + if (stop_soon || res != crash_mode) { - if (queue_cur->cal_failed < CAL_CHANCES) { + ++cur_skipped_paths; + goto abandon_entry; - res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); + } - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + } - } + /************ + * TRIMMING * + ************/ - if (stop_soon || res != crash_mode) { - ++cur_skipped_paths; - goto abandon_entry; - } + if (!dumb_mode && !queue_cur->trim_done) { - } + u8 res = trim_case(argv, queue_cur, in_buf); - /************ - * TRIMMING * - ************/ + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); - if (!dumb_mode && !queue_cur->trim_done) { + if (stop_soon) { - u8 res = trim_case(argv, queue_cur, in_buf); + ++cur_skipped_paths; + goto abandon_entry; - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + } - if (stop_soon) { - ++cur_skipped_paths; - goto abandon_entry; - } + /* Don't retry trimming, even if it failed. */ - /* Don't retry trimming, even if it failed. 
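
One detail worth noting in the mapping code above: the test case is mapped MAP_PRIVATE with PROT_READ | PROT_WRITE, so later stages can mutate in_buf in place while copy-on-write keeps the on-disk queue file intact, and the munmap() at abandon_entry releases it. A self-contained sketch of the same pattern (the file name is hypothetical):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void) {
      const char* fname = "testcase.bin";   /* hypothetical queue entry */
      int fd = open(fname, O_RDONLY);
      if (fd < 0) { perror("open"); return 1; }

      struct stat st;
      if (fstat(fd, &st) < 0 || st.st_size == 0) { close(fd); return 1; }
      size_t len = st.st_size;

      /* Private writable mapping of a read-only fd: writes land on our
         copy-on-write pages, never on the underlying file. */
      unsigned char* in_buf =
          mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
      if (in_buf == MAP_FAILED) { perror("mmap"); close(fd); return 1; }
      close(fd);                 /* the mapping survives the close() */

      in_buf[0] ^= 0xFF;         /* mutate freely; the file is untouched */
      printf("first byte now 0x%02x\n", in_buf[0]);

      munmap(in_buf, len);
      return 0;
    }
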
*/ + queue_cur->trim_done = 1; - queue_cur->trim_done = 1; + len = queue_cur->len; - len = queue_cur->len; + } - } + memcpy(out_buf, in_buf, len); - memcpy(out_buf, in_buf, len); + /********************* + * PERFORMANCE SCORE * + *********************/ - /********************* - * PERFORMANCE SCORE * - *********************/ + orig_perf = perf_score = calculate_score(queue_cur); - orig_perf = perf_score = calculate_score(queue_cur); + /* Skip right away if -d is given, if we have done deterministic fuzzing on + this entry ourselves (was_fuzzed), or if it has gone through deterministic + testing in earlier, resumed runs (passed_det). */ - /* Skip right away if -d is given, if we have done deterministic fuzzing on - this entry ourselves (was_fuzzed), or if it has gone through deterministic - testing in earlier, resumed runs (passed_det). */ + if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) + goto havoc_stage; - if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) - goto havoc_stage; + /* Skip deterministic fuzzing if exec path checksum puts this out of scope + for this master instance. */ - /* Skip deterministic fuzzing if exec path checksum puts this out of scope - for this master instance. */ + if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) + goto havoc_stage; - if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) - goto havoc_stage; + cur_ms_lv = get_cur_time(); + if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) || + (last_crash_time != 0 && + cur_ms_lv - last_crash_time < limit_time_puppet) || + last_path_time == 0))) { + key_puppet = 1; + goto pacemaker_fuzzing; - cur_ms_lv = get_cur_time(); - if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) || - (last_crash_time != 0 && cur_ms_lv - last_crash_time < limit_time_puppet) || last_path_time == 0))) - { - key_puppet = 1; - goto pacemaker_fuzzing; - } + } - doing_det = 1; + doing_det = 1; - /********************************************* - * SIMPLE BITFLIP (+dictionary construction) * - *********************************************/ + /********************************************* + * SIMPLE BITFLIP (+dictionary construction) * + *********************************************/ -#define FLIP_BIT(_ar, _b) do { \ - u8* _arf = (u8*)(_ar); \ - u32 _bf = (_b); \ - _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ +#define FLIP_BIT(_ar, _b) \ + do { \ + \ + u8* _arf = (u8*)(_ar); \ + u32 _bf = (_b); \ + _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \ + \ } while (0) - /* Single walking bit. */ + /* Single walking bit. 
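
The reformatted FLIP_BIT macro above is behavior-preserving: bit _b lives in byte _b >> 3, and 128 >> (_b & 7) selects it MSB-first within that byte. A small demonstration of the indexing, with the macro body carried over from the hunk and an invented buffer:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint8_t  u8;
    typedef uint32_t u32;

    #define FLIP_BIT(_ar, _b)                   \
      do {                                      \
                                                \
        u8* _arf = (u8*)(_ar);                  \
        u32 _bf = (_b);                         \
        _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \
                                                \
      } while (0)

    int main(void) {
      u8 buf[2] = {0x00, 0x00};

      FLIP_BIT(buf, 0);  /* bit 0 -> byte 0, mask 0x80 (MSB-first)     */
      FLIP_BIT(buf, 9);  /* bit 9 -> byte 1, mask 0x40                 */
      printf("%02x %02x\n", buf[0], buf[1]);   /* prints: 80 40        */

      FLIP_BIT(buf, 9);  /* XOR is its own inverse: second flip undoes */
      printf("%02x %02x\n", buf[0], buf[1]);   /* prints: 80 00        */
      return 0;
    }
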
*/ - stage_short = "flip1"; - stage_max = len << 3; - stage_name = "bitflip 1/1"; + stage_short = "flip1"; + stage_max = len << 3; + stage_name = "bitflip 1/1"; - stage_val_type = STAGE_VAL_NONE; + stage_val_type = STAGE_VAL_NONE; - orig_hit_cnt = queued_paths + unique_crashes; + orig_hit_cnt = queued_paths + unique_crashes; - prev_cksum = queue_cur->exec_cksum; + prev_cksum = queue_cur->exec_cksum; - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - stage_cur_byte = stage_cur >> 3; + stage_cur_byte = stage_cur >> 3; - FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur); - /* While flipping the least significant bit in every byte, pull of an extra - trick to detect possible syntax tokens. In essence, the idea is that if - you have a binary blob like this: + /* While flipping the least significant bit in every byte, pull of an extra + trick to detect possible syntax tokens. In essence, the idea is that if + you have a binary blob like this: - xxxxxxxxIHDRxxxxxxxx + xxxxxxxxIHDRxxxxxxxx - ...and changing the leading and trailing bytes causes variable or no - changes in program flow, but touching any character in the "IHDR" string - always produces the same, distinctive path, it's highly likely that - "IHDR" is an atomically-checked magic value of special significance to - the fuzzed format. + ...and changing the leading and trailing bytes causes variable or no + changes in program flow, but touching any character in the "IHDR" string + always produces the same, distinctive path, it's highly likely that + "IHDR" is an atomically-checked magic value of special significance to + the fuzzed format. - We do this here, rather than as a separate stage, because it's a nice - way to keep the operation approximately "free" (i.e., no extra execs). + We do this here, rather than as a separate stage, because it's a nice + way to keep the operation approximately "free" (i.e., no extra execs). - Empirically, performing the check when flipping the least significant bit - is advantageous, compared to doing it at the time of more disruptive - changes, where the program flow may be affected in more violent ways. + Empirically, performing the check when flipping the least significant bit + is advantageous, compared to doing it at the time of more disruptive + changes, where the program flow may be affected in more violent ways. - The caveat is that we won't generate dictionaries in the -d mode or -S - mode - but that's probably a fair trade-off. + The caveat is that we won't generate dictionaries in the -d mode or -S + mode - but that's probably a fair trade-off. - This won't work particularly well with paths that exhibit variable - behavior, but fails gracefully, so we'll carry out the checks anyway. + This won't work particularly well with paths that exhibit variable + behavior, but fails gracefully, so we'll carry out the checks anyway. 
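
The "IHDR" heuristic described in the comment above boils down to: while walking the least-significant-bit flips, group consecutive bytes that all steer execution to the same distinctive path checksum into one candidate token. A toy model of that grouping, with path checksums faked by a lookup table instead of real executions, printf() in place of maybe_add_auto(), and the real loop's extra requirement (that the flip changed behavior at all) left out:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN_AUTO_EXTRA 3    /* stand-ins for the config.h limits */
    #define MAX_AUTO_EXTRA 32

    int main(void) {
      /* A blob with an atomically-checked magic value in the middle. */
      const char buf[] = "xxxxIHDRxxxx";

      /* Faked per-byte path checksums: flipping any byte of "IHDR"
         lands on the same distinctive path (7). */
      const uint32_t cksum[] = {1, 2, 3, 4, 7, 7, 7, 7, 5, 6, 8, 9};

      char     a_collect[MAX_AUTO_EXTRA];
      uint32_t a_len = 0, prev = 0;

      for (uint32_t i = 0; i < sizeof(buf) - 1; i++) {
        if (cksum[i] != prev) {
          /* Checksum changed: flush whatever token we collected. */
          if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
            printf("candidate token: %.*s\n", (int)a_len, a_collect);
          a_len = 0;
          prev  = cksum[i];
        }
        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len++] = buf[i];
      }
      if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
        printf("candidate token: %.*s\n", (int)a_len, a_collect);
      return 0;      /* prints: candidate token: IHDR */
    }
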
- */ + */ - if (!dumb_mode && (stage_cur & 7) == 7) { + if (!dumb_mode && (stage_cur & 7) == 7) { - u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - if (stage_cur == stage_max - 1 && cksum == prev_cksum) { + if (stage_cur == stage_max - 1 && cksum == prev_cksum) { - /* If at end of file and we are still collecting a string, grab the - final character and force output. */ + /* If at end of file and we are still collecting a string, grab the + final character and force output. */ - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + ++a_len; - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); + if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) + maybe_add_auto(a_collect, a_len); - } - else if (cksum != prev_cksum) { + } else if (cksum != prev_cksum) { - /* Otherwise, if the checksum has changed, see if we have something - worthwhile queued up, and collect that if the answer is yes. */ + /* Otherwise, if the checksum has changed, see if we have something + worthwhile queued up, and collect that if the answer is yes. */ - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); + if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) + maybe_add_auto(a_collect, a_len); - a_len = 0; - prev_cksum = cksum; + a_len = 0; + prev_cksum = cksum; - } + } - /* Continue collecting string, but only if the bit flip actually made - any difference - we don't want no-op tokens. */ + /* Continue collecting string, but only if the bit flip actually made + any difference - we don't want no-op tokens. */ - if (cksum != queue_cur->exec_cksum) { + if (cksum != queue_cur->exec_cksum) { - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + ++a_len; - } + } - } + } - } + } - new_hit_cnt = queued_paths + unique_crashes; + new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP1] += stage_max; + stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP1] += stage_max; + /* Two walking bits. */ + stage_name = "bitflip 2/1"; + stage_short = "flip2"; + stage_max = (len << 3) - 1; - /* Two walking bits. */ + orig_hit_cnt = new_hit_cnt; - stage_name = "bitflip 2/1"; - stage_short = "flip2"; - stage_max = (len << 3) - 1; + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - orig_hit_cnt = new_hit_cnt; + stage_cur_byte = stage_cur >> 3; - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); - stage_cur_byte = stage_cur >> 3; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + } - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); + new_hit_cnt = queued_paths + unique_crashes; - } + stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP2] += stage_max; - new_hit_cnt = queued_paths + unique_crashes; + /* Four walking bits. 
*/ - stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP2] += stage_max; + stage_name = "bitflip 4/1"; + stage_short = "flip4"; + stage_max = (len << 3) - 3; + orig_hit_cnt = new_hit_cnt; - /* Four walking bits. */ + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - stage_name = "bitflip 4/1"; - stage_short = "flip4"; - stage_max = (len << 3) - 3; + stage_cur_byte = stage_cur >> 3; + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); - orig_hit_cnt = new_hit_cnt; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); - stage_cur_byte = stage_cur >> 3; + } - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); + new_hit_cnt = queued_paths + unique_crashes; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP4] += stage_max; - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); + /* Effector map setup. These macros calculate: - } + EFF_APOS - position of a particular file offset in the map. + EFF_ALEN - length of a map with a particular number of bytes. + EFF_SPAN_ALEN - map span for a sequence of bytes. - new_hit_cnt = queued_paths + unique_crashes; + */ - stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP4] += stage_max; +#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) +#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) +#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) +#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1) + /* Initialize effector map for the next step (see comments below). Always + flag first and last byte as doing something. */ - /* Effector map setup. These macros calculate: + eff_map = ck_alloc(EFF_ALEN(len)); + eff_map[0] = 1; - EFF_APOS - position of a particular file offset in the map. - EFF_ALEN - length of a map with a particular number of bytes. - EFF_SPAN_ALEN - map span for a sequence of bytes. + if (EFF_APOS(len - 1) != 0) { - */ + eff_map[EFF_APOS(len - 1)] = 1; + ++eff_cnt; -#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) -#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) -#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) -#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) + } - /* Initialize effector map for the next step (see comments below). Always - flag first and last byte as doing something. */ + /* Walking byte. */ - eff_map = ck_alloc(EFF_ALEN(len)); - eff_map[0] = 1; + stage_name = "bitflip 8/8"; + stage_short = "flip8"; + stage_max = len; - if (EFF_APOS(len - 1) != 0) { - eff_map[EFF_APOS(len - 1)] = 1; - ++eff_cnt; - } + orig_hit_cnt = new_hit_cnt; - /* Walking byte. 
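
With EFF_MAP_SCALE2 at its config.h default of 3 (one effector bucket per eight input bytes), the macros above behave as the assertions below spell out:

    #include <assert.h>
    #include <stdio.h>

    #define EFF_MAP_SCALE2 3          /* config.h default */

    #define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
    #define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
    #define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
    #define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1)

    int main(void) {
      assert(EFF_APOS(0) == 0);       /* offsets 0..7 share bucket 0     */
      assert(EFF_APOS(7) == 0);
      assert(EFF_APOS(8) == 1);       /* offset 8 opens bucket 1         */

      assert(EFF_ALEN(16) == 2);      /* 16 bytes fill exactly 2 buckets */
      assert(EFF_ALEN(17) == 3);      /* a partial bucket still counts   */

      /* A 4-byte token written at offset 6 straddles buckets 0 and 1. */
      assert(EFF_SPAN_ALEN(6, 4) == 2);

      puts("all effector-map examples hold");
      return 0;
    }
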
*/ + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - stage_name = "bitflip 8/8"; - stage_short = "flip8"; - stage_max = len; + stage_cur_byte = stage_cur; + out_buf[stage_cur] ^= 0xFF; - orig_hit_cnt = new_hit_cnt; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + /* We also use this stage to pull off a simple trick: we identify + bytes that seem to have no effect on the current execution path + even when fully flipped - and we skip them during more expensive + deterministic stages, such as arithmetics or known ints. */ - stage_cur_byte = stage_cur; + if (!eff_map[EFF_APOS(stage_cur)]) { - out_buf[stage_cur] ^= 0xFF; + u32 cksum; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + /* If in dumb mode or if the file is very short, just flag everything + without wasting time on checksums. */ - /* We also use this stage to pull off a simple trick: we identify - bytes that seem to have no effect on the current execution path - even when fully flipped - and we skip them during more expensive - deterministic stages, such as arithmetics or known ints. */ + if (!dumb_mode && len >= EFF_MIN_LEN) + cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + else + cksum = ~queue_cur->exec_cksum; - if (!eff_map[EFF_APOS(stage_cur)]) { + if (cksum != queue_cur->exec_cksum) { - u32 cksum; + eff_map[EFF_APOS(stage_cur)] = 1; + ++eff_cnt; - /* If in dumb mode or if the file is very short, just flag everything - without wasting time on checksums. */ + } - if (!dumb_mode && len >= EFF_MIN_LEN) - cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - else - cksum = ~queue_cur->exec_cksum; + } - if (cksum != queue_cur->exec_cksum) { - eff_map[EFF_APOS(stage_cur)] = 1; - ++eff_cnt; - } + out_buf[stage_cur] ^= 0xFF; - } + } - out_buf[stage_cur] ^= 0xFF; + /* If the effector map is more than EFF_MAX_PERC dense, just flag the + whole thing as worth fuzzing, since we wouldn't be saving much time + anyway. */ - } + if (eff_cnt != EFF_ALEN(len) && + eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { - /* If the effector map is more than EFF_MAX_PERC dense, just flag the - whole thing as worth fuzzing, since we wouldn't be saving much time - anyway. */ + memset(eff_map, 1, EFF_ALEN(len)); - if (eff_cnt != EFF_ALEN(len) && - eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { + blocks_eff_select += EFF_ALEN(len); - memset(eff_map, 1, EFF_ALEN(len)); + } else { - blocks_eff_select += EFF_ALEN(len); + blocks_eff_select += eff_cnt; - } - else { + } - blocks_eff_select += eff_cnt; + blocks_eff_total += EFF_ALEN(len); - } + new_hit_cnt = queued_paths + unique_crashes; - blocks_eff_total += EFF_ALEN(len); + stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP8] += stage_max; - new_hit_cnt = queued_paths + unique_crashes; + /* Two walking bytes. */ - stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP8] += stage_max; + if (len < 2) goto skip_bitflip; + stage_name = "bitflip 16/8"; + stage_short = "flip16"; + stage_cur = 0; + stage_max = len - 1; + orig_hit_cnt = new_hit_cnt; - /* Two walking bytes. */ + for (i = 0; i < len - 1; ++i) { - if (len < 2) goto skip_bitflip; + /* Let's consult the effector map... 
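
The density check above is a plain cost/benefit cutoff: once more than EFF_MAX_PERC percent of the buckets are flagged effective, the gaps are too small to be worth honoring, so the whole map is flagged. A sketch of just that decision, with EFF_MAX_PERC at its config.h default of 90 and invented counts:

    #include <stdio.h>
    #include <string.h>

    #define EFF_MAX_PERC 90           /* config.h default */

    int main(void) {
      unsigned char eff_map[100];
      unsigned eff_cnt = 93, alen = sizeof(eff_map);  /* invented */

      memset(eff_map, 0, alen);

      if (eff_cnt != alen && eff_cnt * 100 / alen > EFF_MAX_PERC) {
        /* 93% dense: the deterministic stages would skip only 7 buckets
           anyway, so flag everything and stop consulting the gaps. */
        memset(eff_map, 1, alen);
        printf("map >%d%% dense, flagged all %u buckets\n",
               EFF_MAX_PERC, alen);
      } else {
        printf("kept sparse map, %u/%u buckets live\n", eff_cnt, alen);
      }
      return 0;
    }
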
*/ - stage_name = "bitflip 16/8"; - stage_short = "flip16"; - stage_cur = 0; - stage_max = len - 1; + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + --stage_max; + continue; - orig_hit_cnt = new_hit_cnt; + } - for (i = 0; i < len - 1; ++i) { + stage_cur_byte = i; - /* Let's consult the effector map... */ + *(u16*)(out_buf + i) ^= 0xFFFF; - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - --stage_max; - continue; - } + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_cur_byte = i; + *(u16*)(out_buf + i) ^= 0xFFFF; - *(u16*)(out_buf + i) ^= 0xFFFF; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + new_hit_cnt = queued_paths + unique_crashes; - *(u16*)(out_buf + i) ^= 0xFFFF; + stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP16] += stage_max; + if (len < 4) goto skip_bitflip; - } + /* Four walking bytes. */ - new_hit_cnt = queued_paths + unique_crashes; + stage_name = "bitflip 32/8"; + stage_short = "flip32"; + stage_cur = 0; + stage_max = len - 3; - stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP16] += stage_max; + orig_hit_cnt = new_hit_cnt; + for (i = 0; i < len - 3; ++i) { + /* Let's consult the effector map... */ + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - if (len < 4) goto skip_bitflip; + --stage_max; + continue; - /* Four walking bytes. */ + } - stage_name = "bitflip 32/8"; - stage_short = "flip32"; - stage_cur = 0; - stage_max = len - 3; + stage_cur_byte = i; + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + + } + + new_hit_cnt = queued_paths + unique_crashes; - orig_hit_cnt = new_hit_cnt; + stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP32] += stage_max; - for (i = 0; i < len - 3; ++i) { +skip_bitflip: - /* Let's consult the effector map... */ - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - --stage_max; - continue; - } + if (no_arith) goto skip_arith; - stage_cur_byte = i; + /********************** + * ARITHMETIC INC/DEC * + **********************/ - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + /* 8-bit arithmetics. */ - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_name = "arith 8/8"; + stage_short = "arith8"; + stage_cur = 0; + stage_max = 2 * len * ARITH_MAX; - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + stage_val_type = STAGE_VAL_LE; - } + orig_hit_cnt = new_hit_cnt; - new_hit_cnt = queued_paths + unique_crashes; + for (i = 0; i < len; ++i) { - stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP32] += stage_max; + u8 orig = out_buf[i]; + /* Let's consult the effector map... */ + if (!eff_map[EFF_APOS(i)]) { + stage_max -= 2 * ARITH_MAX; + continue; - skip_bitflip: + } - if (no_arith) goto skip_arith; + stage_cur_byte = i; - /********************** - * ARITHMETIC INC/DEC * - **********************/ + for (j = 1; j <= ARITH_MAX; ++j) { - /* 8-bit arithmetics. */ + u8 r = orig ^ (orig + j); - stage_name = "arith 8/8"; - stage_short = "arith8"; - stage_cur = 0; - stage_max = 2 * len * ARITH_MAX; + /* Do arithmetic operations only if the result couldn't be a product + of a bitflip. 
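
could_be_bitflip() is the dedup filter the comment above refers to: given old ^ new, it asks whether one of the earlier walking-flip stages would already have produced exactly this change, making the arithmetic attempt redundant. A sketch of the check, written from the upstream logic as best recalled, so treat the details as illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Could xor_val (old ^ new) have come from the walking 1/2/4-bit or
       8/16/32-bit flip stages? */
    static int could_be_bitflip(uint32_t xor_val) {
      uint32_t sh = 0;
      if (!xor_val) return 1;
      while (!(xor_val & 1)) { ++sh; xor_val >>= 1; } /* strip low zeros */
      if (xor_val == 1 || xor_val == 3 || xor_val == 15)
        return 1;                     /* 1-, 2-, 4-bit flips fit anywhere */
      if (sh & 7) return 0;           /* wider flips start on byte edges  */
      return xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff;
    }

    int main(void) {
      uint8_t orig = 0x40;

      /* 0x40 + 64 = 0x80; 0x40 ^ 0x80 = 0xc0, a 2-bit pattern the flip
         stage already tried, so arith skips it. */
      printf("+64 redundant: %d\n",
             could_be_bitflip(orig ^ (uint8_t)(orig + 64)));  /* 1 */

      /* 0x40 + 5 = 0x45; the XOR pattern 0x05 matches no walking flip. */
      printf("+5  redundant: %d\n",
             could_be_bitflip(orig ^ (uint8_t)(orig + 5)));   /* 0 */
      return 0;
    }
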
*/ + if (!could_be_bitflip(r)) { - stage_val_type = STAGE_VAL_LE; + stage_cur_val = j; + out_buf[i] = orig + j; - orig_hit_cnt = new_hit_cnt; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - for (i = 0; i < len; ++i) { + } else - u8 orig = out_buf[i]; + --stage_max; - /* Let's consult the effector map... */ + r = orig ^ (orig - j); - if (!eff_map[EFF_APOS(i)]) { - stage_max -= 2 * ARITH_MAX; - continue; - } + if (!could_be_bitflip(r)) { - stage_cur_byte = i; + stage_cur_val = -j; + out_buf[i] = orig - j; - for (j = 1; j <= ARITH_MAX; ++j) { + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - u8 r = orig ^ (orig + j); + } else - /* Do arithmetic operations only if the result couldn't be a product - of a bitflip. */ + --stage_max; - if (!could_be_bitflip(r)) { + out_buf[i] = orig; - stage_cur_val = j; - out_buf[i] = orig + j; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } - } else --stage_max; + new_hit_cnt = queued_paths + unique_crashes; - r = orig ^ (orig - j); + stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH8] += stage_max; - if (!could_be_bitflip(r)) { + /* 16-bit arithmetics, both endians. */ - stage_cur_val = -j; - out_buf[i] = orig - j; + if (len < 2) goto skip_arith; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_name = "arith 16/8"; + stage_short = "arith16"; + stage_cur = 0; + stage_max = 4 * (len - 1) * ARITH_MAX; - } else --stage_max; + orig_hit_cnt = new_hit_cnt; - out_buf[i] = orig; + for (i = 0; i < len - 1; ++i) { - } + u16 orig = *(u16*)(out_buf + i); - } + /* Let's consult the effector map... */ - new_hit_cnt = queued_paths + unique_crashes; + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH8] += stage_max; + stage_max -= 4 * ARITH_MAX; + continue; + } + stage_cur_byte = i; + for (j = 1; j <= ARITH_MAX; ++j) { - /* 16-bit arithmetics, both endians. */ + u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), + r3 = orig ^ SWAP16(SWAP16(orig) + j), + r4 = orig ^ SWAP16(SWAP16(orig) - j); - if (len < 2) goto skip_arith; + /* Try little endian addition and subtraction first. Do it only + if the operation would affect more than one byte (hence the + & 0xff overflow checks) and if it couldn't be a product of + a bitflip. */ - stage_name = "arith 16/8"; - stage_short = "arith16"; - stage_cur = 0; - stage_max = 4 * (len - 1) * ARITH_MAX; + stage_val_type = STAGE_VAL_LE; + if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { - orig_hit_cnt = new_hit_cnt; + stage_cur_val = j; + *(u16*)(out_buf + i) = orig + j; - for (i = 0; i < len - 1; ++i) { + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - u16 orig = *(u16*)(out_buf + i); + } else - /* Let's consult the effector map... */ + --stage_max; - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } + if ((orig & 0xff) < j && !could_be_bitflip(r2)) { - stage_cur_byte = i; + stage_cur_val = -j; + *(u16*)(out_buf + i) = orig - j; - for (j = 1; j <= ARITH_MAX; ++j) { + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - u16 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP16(SWAP16(orig) + j), - r4 = orig ^ SWAP16(SWAP16(orig) - j); + } else - /* Try little endian addition and subtraction first. 
Do it only - if the operation would affect more than one byte (hence the - & 0xff overflow checks) and if it couldn't be a product of - a bitflip. */ + --stage_max; - stage_val_type = STAGE_VAL_LE; + /* Big endian comes next. Same deal. */ - if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { + stage_val_type = STAGE_VAL_BE; - stage_cur_val = j; - *(u16*)(out_buf + i) = orig + j; + if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_cur_val = j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); - } else --stage_max; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - if ((orig & 0xff) < j && !could_be_bitflip(r2)) { + } else - stage_cur_val = -j; - *(u16*)(out_buf + i) = orig - j; + --stage_max; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + if ((orig >> 8) < j && !could_be_bitflip(r4)) { - } else --stage_max; + stage_cur_val = -j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); - /* Big endian comes next. Same deal. */ + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_val_type = STAGE_VAL_BE; + } else + --stage_max; - if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { + *(u16*)(out_buf + i) = orig; - stage_cur_val = j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } - } else --stage_max; + new_hit_cnt = queued_paths + unique_crashes; - if ((orig >> 8) < j && !could_be_bitflip(r4)) { + stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH16] += stage_max; - stage_cur_val = -j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); + /* 32-bit arithmetics, both endians. */ - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + if (len < 4) goto skip_arith; - } else --stage_max; + stage_name = "arith 32/8"; + stage_short = "arith32"; + stage_cur = 0; + stage_max = 4 * (len - 3) * ARITH_MAX; - *(u16*)(out_buf + i) = orig; + orig_hit_cnt = new_hit_cnt; - } + for (i = 0; i < len - 3; ++i) { - } + u32 orig = *(u32*)(out_buf + i); - new_hit_cnt = queued_paths + unique_crashes; + /* Let's consult the effector map... */ - stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH16] += stage_max; + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + stage_max -= 4 * ARITH_MAX; + continue; + } - /* 32-bit arithmetics, both endians. */ + stage_cur_byte = i; - if (len < 4) goto skip_arith; + for (j = 1; j <= ARITH_MAX; ++j) { - stage_name = "arith 32/8"; - stage_short = "arith32"; - stage_cur = 0; - stage_max = 4 * (len - 3) * ARITH_MAX; + u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), + r3 = orig ^ SWAP32(SWAP32(orig) + j), + r4 = orig ^ SWAP32(SWAP32(orig) - j); - orig_hit_cnt = new_hit_cnt; + /* Little endian first. Same deal as with 16-bit: we only want to + try if the operation would have effect on more than two bytes. */ - for (i = 0; i < len - 3; ++i) { + stage_val_type = STAGE_VAL_LE; - u32 orig = *(u32*)(out_buf + i); + if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { - /* Let's consult the effector map... 
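
The (orig & 0xff) + j > 0xff guards scattered through the 16-bit stage above all encode one idea: a 16-bit add or subtract is only worth executing when it carries or borrows across the low byte, because a change confined to a single byte was already covered by arith 8/8. A worked illustration with invented values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint16_t a = 0x01fe, b = 0x0110, j = 4;

      /* 0x01fe + 4 = 0x0202: the low byte overflows, so the result is
         unreachable by any single-byte mutation -> try it. */
      printf("0x%04x + %u carries: %d\n", a, j, (a & 0xff) + j > 0xff);

      /* 0x0110 + 4 = 0x0114: only the low byte moves; arith 8/8 already
         produced exactly this, so the 16-bit stage skips it. */
      printf("0x%04x + %u carries: %d\n", b, j, (b & 0xff) + j > 0xff);

      /* The subtraction twin is (orig & 0xff) < j: a borrow out of the
         low byte. */
      printf("0x%04x - %u borrows: %d\n", b, j, (b & 0xff) < j);
      return 0;
    }
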
*/ + stage_cur_val = j; + *(u32*)(out_buf + i) = orig + j; - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_cur_byte = i; + } else - for (j = 1; j <= ARITH_MAX; ++j) { + --stage_max; - u32 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP32(SWAP32(orig) + j), - r4 = orig ^ SWAP32(SWAP32(orig) - j); + if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { - /* Little endian first. Same deal as with 16-bit: we only want to - try if the operation would have effect on more than two bytes. */ + stage_cur_val = -j; + *(u32*)(out_buf + i) = orig - j; - stage_val_type = STAGE_VAL_LE; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { + } else - stage_cur_val = j; - *(u32*)(out_buf + i) = orig + j; + --stage_max; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + /* Big endian next. */ - } else --stage_max; + stage_val_type = STAGE_VAL_BE; - if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { + if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { - stage_cur_val = -j; - *(u32*)(out_buf + i) = orig - j; + stage_cur_val = j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - } else --stage_max; + } else - /* Big endian next. */ + --stage_max; - stage_val_type = STAGE_VAL_BE; + if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { - if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { + stage_cur_val = -j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); - stage_cur_val = j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } else - } else --stage_max; + --stage_max; - if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { + *(u32*)(out_buf + i) = orig; - stage_cur_val = -j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } - } else --stage_max; + new_hit_cnt = queued_paths + unique_crashes; - *(u32*)(out_buf + i) = orig; + stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH32] += stage_max; - } +skip_arith: - } + /********************** + * INTERESTING VALUES * + **********************/ - new_hit_cnt = queued_paths + unique_crashes; + stage_name = "interest 8/8"; + stage_short = "int8"; + stage_cur = 0; + stage_max = len * sizeof(interesting_8); - stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH32] += stage_max; + stage_val_type = STAGE_VAL_LE; + orig_hit_cnt = new_hit_cnt; + /* Setting 8-bit integers. */ - skip_arith: + for (i = 0; i < len; ++i) { - /********************** - * INTERESTING VALUES * - **********************/ + u8 orig = out_buf[i]; - stage_name = "interest 8/8"; - stage_short = "int8"; - stage_cur = 0; - stage_max = len * sizeof(interesting_8); + /* Let's consult the effector map... 
*/ + if (!eff_map[EFF_APOS(i)]) { + stage_max -= sizeof(interesting_8); + continue; - stage_val_type = STAGE_VAL_LE; + } - orig_hit_cnt = new_hit_cnt; + stage_cur_byte = i; - /* Setting 8-bit integers. */ + for (j = 0; j < sizeof(interesting_8); ++j) { - for (i = 0; i < len; ++i) { + /* Skip if the value could be a product of bitflips or arithmetics. */ - u8 orig = out_buf[i]; + if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || + could_be_arith(orig, (u8)interesting_8[j], 1)) { - /* Let's consult the effector map... */ + --stage_max; + continue; - if (!eff_map[EFF_APOS(i)]) { - stage_max -= sizeof(interesting_8); - continue; - } + } - stage_cur_byte = i; + stage_cur_val = interesting_8[j]; + out_buf[i] = interesting_8[j]; - for (j = 0; j < sizeof(interesting_8); ++j) { + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - /* Skip if the value could be a product of bitflips or arithmetics. */ + out_buf[i] = orig; + ++stage_cur; - if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || - could_be_arith(orig, (u8)interesting_8[j], 1)) { - --stage_max; - continue; - } + } - stage_cur_val = interesting_8[j]; - out_buf[i] = interesting_8[j]; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + new_hit_cnt = queued_paths + unique_crashes; - out_buf[i] = orig; - ++stage_cur; + stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST8] += stage_max; - } + /* Setting 16-bit integers, both endians. */ - } + if (no_arith || len < 2) goto skip_interest; - new_hit_cnt = queued_paths + unique_crashes; + stage_name = "interest 16/8"; + stage_short = "int16"; + stage_cur = 0; + stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); - stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST8] += stage_max; + orig_hit_cnt = new_hit_cnt; + for (i = 0; i < len - 1; ++i) { + u16 orig = *(u16*)(out_buf + i); - /* Setting 16-bit integers, both endians. */ + /* Let's consult the effector map... */ - if (no_arith || len < 2) goto skip_interest; + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_name = "interest 16/8"; - stage_short = "int16"; - stage_cur = 0; - stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); + stage_max -= sizeof(interesting_16); + continue; + } - orig_hit_cnt = new_hit_cnt; + stage_cur_byte = i; - for (i = 0; i < len - 1; ++i) { + for (j = 0; j < sizeof(interesting_16) / 2; ++j) { - u16 orig = *(u16*)(out_buf + i); + stage_cur_val = interesting_16[j]; - /* Let's consult the effector map... */ + /* Skip if this could be a product of a bitflip, arithmetics, + or single-byte interesting value insertion. */ - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= sizeof(interesting_16); - continue; - } + if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && + !could_be_arith(orig, (u16)interesting_16[j], 2) && + !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { - stage_cur_byte = i; + stage_val_type = STAGE_VAL_LE; - for (j = 0; j < sizeof(interesting_16) / 2; ++j) { + *(u16*)(out_buf + i) = interesting_16[j]; - stage_cur_val = interesting_16[j]; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - /* Skip if this could be a product of a bitflip, arithmetics, - or single-byte interesting value insertion. 
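
A subtlety in the 16-bit interesting-value pass above: every candidate is written twice, once little endian and once byte-swapped, but the swapped attempt is gated on (u16)interesting_16[j] != SWAP16(interesting_16[j]), since writing identical bytes twice buys nothing. A small sketch, with SWAP16 re-defined the usual way and an illustrative candidate list:

    #include <stdint.h>
    #include <stdio.h>

    #define SWAP16(x) \
      ((uint16_t)((((uint16_t)(x)) << 8) | (((uint16_t)(x)) >> 8)))

    int main(void) {
      int16_t vals[] = {-1, 256, 512, -32768};  /* illustrative picks */

      for (unsigned i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
        uint16_t v = (uint16_t)vals[i];
        if (v != SWAP16(v))
          printf("0x%04x: also try swapped form 0x%04x\n", v, SWAP16(v));
        else
          printf("0x%04x: palindrome, big-endian pass skipped\n", v);
      }
      return 0;
    }
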
*/ + } else - if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && - !could_be_arith(orig, (u16)interesting_16[j], 2) && - !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { + --stage_max; - stage_val_type = STAGE_VAL_LE; + if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && + !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && + !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && + !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { - *(u16*)(out_buf + i) = interesting_16[j]; + stage_val_type = STAGE_VAL_BE; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - } else --stage_max; + } else - if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && - !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && - !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && - !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { + --stage_max; - stage_val_type = STAGE_VAL_BE; + } - *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + *(u16*)(out_buf + i) = orig; - } else --stage_max; + } - } + new_hit_cnt = queued_paths + unique_crashes; - *(u16*)(out_buf + i) = orig; + stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST16] += stage_max; - } + if (len < 4) goto skip_interest; - new_hit_cnt = queued_paths + unique_crashes; + /* Setting 32-bit integers, both endians. */ - stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST16] += stage_max; + stage_name = "interest 32/8"; + stage_short = "int32"; + stage_cur = 0; + stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); + orig_hit_cnt = new_hit_cnt; + for (i = 0; i < len - 3; ++i) { + u32 orig = *(u32*)(out_buf + i); - if (len < 4) goto skip_interest; + /* Let's consult the effector map... */ - /* Setting 32-bit integers, both endians. */ + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_name = "interest 32/8"; - stage_short = "int32"; - stage_cur = 0; - stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); + stage_max -= sizeof(interesting_32) >> 1; + continue; + } - orig_hit_cnt = new_hit_cnt; + stage_cur_byte = i; - for (i = 0; i < len - 3; ++i) { + for (j = 0; j < sizeof(interesting_32) / 4; ++j) { - u32 orig = *(u32*)(out_buf + i); + stage_cur_val = interesting_32[j]; - /* Let's consult the effector map... */ + /* Skip if this could be a product of a bitflip, arithmetics, + or word interesting value insertion. */ - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= sizeof(interesting_32) >> 1; - continue; - } + if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && + !could_be_arith(orig, interesting_32[j], 4) && + !could_be_interest(orig, interesting_32[j], 4, 0)) { - stage_cur_byte = i; + stage_val_type = STAGE_VAL_LE; - for (j = 0; j < sizeof(interesting_32) / 4; ++j) { + *(u32*)(out_buf + i) = interesting_32[j]; - stage_cur_val = interesting_32[j]; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - /* Skip if this could be a product of a bitflip, arithmetics, - or word interesting value insertion. 
*/ + } else - if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && - !could_be_arith(orig, interesting_32[j], 4) && - !could_be_interest(orig, interesting_32[j], 4, 0)) { + --stage_max; - stage_val_type = STAGE_VAL_LE; + if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && + !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && + !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && + !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { - *(u32*)(out_buf + i) = interesting_32[j]; + stage_val_type = STAGE_VAL_BE; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - } else --stage_max; + } else - if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && - !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && - !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && - !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { + --stage_max; - stage_val_type = STAGE_VAL_BE; + } - *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + *(u32*)(out_buf + i) = orig; - } else --stage_max; + } - } + new_hit_cnt = queued_paths + unique_crashes; - *(u32*)(out_buf + i) = orig; + stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST32] += stage_max; - } +skip_interest: - new_hit_cnt = queued_paths + unique_crashes; + /******************** + * DICTIONARY STUFF * + ********************/ - stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST32] += stage_max; + if (!extras_cnt) goto skip_user_extras; + /* Overwrite with user-supplied extras. */ + stage_name = "user extras (over)"; + stage_short = "ext_UO"; + stage_cur = 0; + stage_max = extras_cnt * len; - skip_interest: + stage_val_type = STAGE_VAL_NONE; - /******************** - * DICTIONARY STUFF * - ********************/ + orig_hit_cnt = new_hit_cnt; - if (!extras_cnt) goto skip_user_extras; + for (i = 0; i < len; ++i) { - /* Overwrite with user-supplied extras. */ + u32 last_len = 0; - stage_name = "user extras (over)"; - stage_short = "ext_UO"; - stage_cur = 0; - stage_max = extras_cnt * len; + stage_cur_byte = i; + /* Extras are sorted by size, from smallest to largest. This means + that we don't have to worry about restoring the buffer in + between writes at a particular offset determined by the outer + loop. */ - stage_val_type = STAGE_VAL_NONE; + for (j = 0; j < extras_cnt; ++j) { - orig_hit_cnt = new_hit_cnt; + /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also + skip them if there's no room to insert the payload, if the token + is redundant, or if its entire span has no bytes set in the effector + map. */ - for (i = 0; i < len; ++i) { + if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || + extras[j].len > len - i || + !memcmp(extras[j].data, out_buf + i, extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { - u32 last_len = 0; + --stage_max; + continue; - stage_cur_byte = i; + } - /* Extras are sorted by size, from smallest to largest. This means - that we don't have to worry about restoring the buffer in - between writes at a particular offset determined by the outer - loop. 
*/ + last_len = extras[j].len; + memcpy(out_buf + i, extras[j].data, last_len); - for (j = 0; j < extras_cnt; ++j) { + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also - skip them if there's no room to insert the payload, if the token - is redundant, or if its entire span has no bytes set in the effector - map. */ + ++stage_cur; - if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || - extras[j].len > len - i || - !memcmp(extras[j].data, out_buf + i, extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { + } - --stage_max; - continue; + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); - } + } - last_len = extras[j].len; - memcpy(out_buf + i, extras[j].data, last_len); + new_hit_cnt = queued_paths + unique_crashes; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UO] += stage_max; - ++stage_cur; + /* Insertion of user-supplied extras. */ - } + stage_name = "user extras (insert)"; + stage_short = "ext_UI"; + stage_cur = 0; + stage_max = extras_cnt * len; - /* Restore all the clobbered memory. */ - memcpy(out_buf + i, in_buf + i, last_len); + orig_hit_cnt = new_hit_cnt; - } + ex_tmp = ck_alloc(len + MAX_DICT_FILE); - new_hit_cnt = queued_paths + unique_crashes; + for (i = 0; i <= len; ++i) { - stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UO] += stage_max; + stage_cur_byte = i; - /* Insertion of user-supplied extras. */ + for (j = 0; j < extras_cnt; ++j) { - stage_name = "user extras (insert)"; - stage_short = "ext_UI"; - stage_cur = 0; - stage_max = extras_cnt * len; + if (len + extras[j].len > MAX_FILE) { + --stage_max; + continue; + } + /* Insert token */ + memcpy(ex_tmp + i, extras[j].data, extras[j].len); - orig_hit_cnt = new_hit_cnt; + /* Copy tail */ + memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); - ex_tmp = ck_alloc(len + MAX_DICT_FILE); + if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { - for (i = 0; i <= len; ++i) { + ck_free(ex_tmp); + goto abandon_entry; - stage_cur_byte = i; + } - for (j = 0; j < extras_cnt; ++j) { + ++stage_cur; - if (len + extras[j].len > MAX_FILE) { - --stage_max; - continue; - } + } - /* Insert token */ - memcpy(ex_tmp + i, extras[j].data, extras[j].len); + /* Copy head */ + ex_tmp[i] = out_buf[i]; - /* Copy tail */ - memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); + } - if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { - ck_free(ex_tmp); - goto abandon_entry; - } + ck_free(ex_tmp); - ++stage_cur; + new_hit_cnt = queued_paths + unique_crashes; - } + stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UI] += stage_max; - /* Copy head */ - ex_tmp[i] = out_buf[i]; +skip_user_extras: - } + if (!a_extras_cnt) goto skip_extras; - ck_free(ex_tmp); + stage_name = "auto extras (over)"; + stage_short = "ext_AO"; + stage_cur = 0; + stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; - new_hit_cnt = queued_paths + unique_crashes; + stage_val_type = STAGE_VAL_NONE; - stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UI] += stage_max; + orig_hit_cnt = new_hit_cnt; - skip_user_extras: + for (i = 0; i < len; ++i) { - if (!a_extras_cnt) goto skip_extras; + u32 last_len = 0; - stage_name = "auto extras (over)"; - stage_short = "ext_AO"; - 
stage_cur = 0; - stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; + stage_cur_byte = i; + for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { - stage_val_type = STAGE_VAL_NONE; + /* See the comment in the earlier code; extras are sorted by size. */ - orig_hit_cnt = new_hit_cnt; + if (a_extras[j].len > len - i || + !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, + EFF_SPAN_ALEN(i, a_extras[j].len))) { - for (i = 0; i < len; ++i) { + --stage_max; + continue; - u32 last_len = 0; + } - stage_cur_byte = i; + last_len = a_extras[j].len; + memcpy(out_buf + i, a_extras[j].data, last_len); - for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - /* See the comment in the earlier code; extras are sorted by size. */ + ++stage_cur; - if (a_extras[j].len > len - i || - !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { + } - --stage_max; - continue; + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); - } + } - last_len = a_extras[j].len; - memcpy(out_buf + i, a_extras[j].data, last_len); + new_hit_cnt = queued_paths + unique_crashes; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_AO] += stage_max; - ++stage_cur; +skip_extras: - } + /* If we made this to here without jumping to havoc_stage or abandon_entry, + we're properly done with deterministic steps and can mark it as such + in the .state/ directory. */ - /* Restore all the clobbered memory. */ - memcpy(out_buf + i, in_buf + i, last_len); + if (!queue_cur->passed_det) mark_as_det_done(queue_cur); - } + /**************** + * RANDOM HAVOC * + ****************/ - new_hit_cnt = queued_paths + unique_crashes; +havoc_stage: +pacemaker_fuzzing: - stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_AO] += stage_max; + stage_cur_byte = -1; - skip_extras: + /* The havoc stage mutation code is also invoked when splicing files; if the + splice_cycle variable is set, generate different descriptions and such. */ - /* If we made this to here without jumping to havoc_stage or abandon_entry, - we're properly done with deterministic steps and can mark it as such - in the .state/ directory. */ + if (!splice_cycle) { - if (!queue_cur->passed_det) mark_as_det_done(queue_cur); + stage_name = "MOpt-havoc"; + stage_short = "MOpt_havoc"; + stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score / + havoc_div / 100; - /**************** - * RANDOM HAVOC * - ****************/ + } else { - havoc_stage: - pacemaker_fuzzing: + static u8 tmp[32]; + perf_score = orig_perf; - stage_cur_byte = -1; + sprintf(tmp, "MOpt-core-splice %u", splice_cycle); + stage_name = tmp; + stage_short = "MOpt_core_splice"; + stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. */ + } - if (!splice_cycle) { + s32 temp_len_puppet; + cur_ms_lv = get_cur_time(); - stage_name = "MOpt-havoc"; - stage_short = "MOpt_havoc"; - stage_max = (doing_det ? 
HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; + // for (; swarm_now < swarm_num; ++swarm_now) + { - } else { + if (key_puppet == 1) { - static u8 tmp[32]; + if (unlikely(orig_hit_cnt_puppet == 0)) { - perf_score = orig_perf; + orig_hit_cnt_puppet = queued_paths + unique_crashes; + last_limit_time_start = get_cur_time(); + SPLICE_CYCLES_puppet = + (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + + SPLICE_CYCLES_puppet_low); - sprintf(tmp, "MOpt-core-splice %u", splice_cycle); - stage_name = tmp; - stage_short = "MOpt_core_splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; + } - } + } - s32 temp_len_puppet; - cur_ms_lv = get_cur_time(); + { - //for (; swarm_now < swarm_num; ++swarm_now) - { - if (key_puppet == 1) { - if (unlikely(orig_hit_cnt_puppet == 0)) { - orig_hit_cnt_puppet = queued_paths + unique_crashes; - last_limit_time_start = get_cur_time(); - SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low); - } - } - { #ifndef IGNORE_FINDS - havoc_stage_puppet: + havoc_stage_puppet: #endif - stage_cur_byte = -1; - - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. */ - - if (!splice_cycle) { - stage_name = "MOpt core avoc"; - stage_short = "MOpt_core_havoc"; - stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; - } else { - static u8 tmp[32]; - perf_score = orig_perf; - sprintf(tmp, "MOpt core splice %u", splice_cycle); - stage_name = tmp; - stage_short = "MOpt_core_splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - } - - if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; - temp_len = len; - orig_hit_cnt = queued_paths + unique_crashes; - havoc_queued = queued_paths; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); - stage_cur_val = use_stacking; - - for (i = 0; i < operator_num; ++i) { - core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet_v2[i]; - } - - for (i = 0; i < use_stacking; ++i) { - - switch (select_algorithm()) { - - case 0: - /* Flip a single bit somewhere. Spooky! */ - FLIP_BIT(out_buf, UR(temp_len << 3)); - core_operator_cycles_puppet_v2[STAGE_FLIP1] += 1; - break; - - - case 1: - if (temp_len < 2) break; - temp_len_puppet = UR(temp_len << 3); - FLIP_BIT(out_buf, temp_len_puppet); - FLIP_BIT(out_buf, temp_len_puppet + 1); - core_operator_cycles_puppet_v2[STAGE_FLIP2] += 1; - break; - - case 2: - if (temp_len < 2) break; - temp_len_puppet = UR(temp_len << 3); - FLIP_BIT(out_buf, temp_len_puppet); - FLIP_BIT(out_buf, temp_len_puppet + 1); - FLIP_BIT(out_buf, temp_len_puppet + 2); - FLIP_BIT(out_buf, temp_len_puppet + 3); - core_operator_cycles_puppet_v2[STAGE_FLIP4] += 1; - break; - - case 3: - if (temp_len < 4) break; - out_buf[UR(temp_len)] ^= 0xFF; - core_operator_cycles_puppet_v2[STAGE_FLIP8] += 1; - break; - - case 4: - if (temp_len < 8) break; - *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF; - core_operator_cycles_puppet_v2[STAGE_FLIP16] += 1; - break; - - case 5: - if (temp_len < 8) break; - *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF; - core_operator_cycles_puppet_v2[STAGE_FLIP32] += 1; - break; - - case 6: - out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); - out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); - core_operator_cycles_puppet_v2[STAGE_ARITH8] += 1; - break; - - case 7: - /* Randomly subtract from word, random endian. 
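
Cases 0 through 2 lean on the FLIP_BIT() helper that this function #undefs on its way out. For reference, a sketch matching the macro's usual definition in afl-fuzz, which treats the buffer as a bit array with the most significant bit of each byte first (u8/u32 are AFL's typedefs from types.h):

    #define FLIP_BIT(_ar, _b)                     \
      do {                                        \
                                                  \
        u8* _arf = (u8*)(_ar);                    \
        u32 _bf = (_b);                           \
        _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
                                                  \
      } while (0)

Hence UR(temp_len << 3) in the cases above: temp_len bytes expose temp_len * 8 flippable bit positions.
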
*/ - if (temp_len < 8) break; - if (UR(2)) { - u32 pos = UR(temp_len - 1); - *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } else { - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); - } - /* Randomly add to word, random endian. */ - if (UR(2)) { - u32 pos = UR(temp_len - 1); - *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } else { - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); - } - core_operator_cycles_puppet_v2[STAGE_ARITH16] += 1; - break; - - - case 8: - /* Randomly subtract from dword, random endian. */ - if (temp_len < 8) break; - if (UR(2)) { - u32 pos = UR(temp_len - 3); - *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } else { - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); - } - /* Randomly add to dword, random endian. */ - if (UR(2)) { - u32 pos = UR(temp_len - 3); - *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } else { - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); - } - core_operator_cycles_puppet_v2[STAGE_ARITH32] += 1; - break; - - - case 9: - /* Set byte to interesting value. */ - if (temp_len < 4) break; - out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; - core_operator_cycles_puppet_v2[STAGE_INTEREST8] += 1; - break; - - case 10: - /* Set word to interesting value, randomly choosing endian. */ - if (temp_len < 8) break; - if (UR(2)) { - *(u16*)(out_buf + UR(temp_len - 1)) = - interesting_16[UR(sizeof(interesting_16) >> 1)]; - } else { - *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16( - interesting_16[UR(sizeof(interesting_16) >> 1)]); - } - core_operator_cycles_puppet_v2[STAGE_INTEREST16] += 1; - break; - - - case 11: - /* Set dword to interesting value, randomly choosing endian. */ - - if (temp_len < 8) break; - - if (UR(2)) { - *(u32*)(out_buf + UR(temp_len - 3)) = - interesting_32[UR(sizeof(interesting_32) >> 2)]; - } else { - *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32( - interesting_32[UR(sizeof(interesting_32) >> 2)]); - } - core_operator_cycles_puppet_v2[STAGE_INTEREST32] += 1; - break; + stage_cur_byte = -1; + + /* The havoc stage mutation code is also invoked when splicing files; if + the splice_cycle variable is set, generate different descriptions and + such. */ + + if (!splice_cycle) { + + stage_name = "MOpt core avoc"; + stage_short = "MOpt_core_havoc"; + stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * + perf_score / havoc_div / 100; + + } else { + + static u8 tmp[32]; + perf_score = orig_perf; + sprintf(tmp, "MOpt core splice %u", splice_cycle); + stage_name = tmp; + stage_short = "MOpt_core_splice"; + stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; + + } + + if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; + temp_len = len; + orig_hit_cnt = queued_paths + unique_crashes; + havoc_queued = queued_paths; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); + stage_cur_val = use_stacking; + + for (i = 0; i < operator_num; ++i) { + + core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet_v2[i]; + + } + + for (i = 0; i < use_stacking; ++i) { + + switch (select_algorithm()) { + + case 0: + /* Flip a single bit somewhere. Spooky! 
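
Cases 7 and 8 deliberately run the arithmetic in both byte orders: the bare += / -= mutates the value as the host sees it (little-endian on AFL's usual x86 targets), while the SWAP16(SWAP16(x) ± num) form mutates the big-endian reading. A worked example, assuming a little-endian host and SWAP16() being the plain 16-bit byte swap from AFL's headers:

    /* Buffer bytes { 0x12, 0x34 } at pos, num = 1.

       Little-endian path:  *(u16*)(out_buf + pos) += 1
         reads 0x3412, writes 0x3413       -> bytes { 0x13, 0x34 }

       Big-endian path:     SWAP16(SWAP16(*(u16*)(out_buf + pos)) + 1)
         SWAP16(0x3412) = 0x1234           (value read as big-endian)
         0x1234 + 1     = 0x1235
         SWAP16(0x1235) = 0x3512, stored   -> bytes { 0x12, 0x35 } */

Either way exactly one logical 16-bit integer is incremented; which byte carries the change depends on the endianness that was picked.
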
*/ + FLIP_BIT(out_buf, UR(temp_len << 3)); + core_operator_cycles_puppet_v2[STAGE_FLIP1] += 1; + break; + + case 1: + if (temp_len < 2) break; + temp_len_puppet = UR(temp_len << 3); + FLIP_BIT(out_buf, temp_len_puppet); + FLIP_BIT(out_buf, temp_len_puppet + 1); + core_operator_cycles_puppet_v2[STAGE_FLIP2] += 1; + break; + + case 2: + if (temp_len < 2) break; + temp_len_puppet = UR(temp_len << 3); + FLIP_BIT(out_buf, temp_len_puppet); + FLIP_BIT(out_buf, temp_len_puppet + 1); + FLIP_BIT(out_buf, temp_len_puppet + 2); + FLIP_BIT(out_buf, temp_len_puppet + 3); + core_operator_cycles_puppet_v2[STAGE_FLIP4] += 1; + break; + + case 3: + if (temp_len < 4) break; + out_buf[UR(temp_len)] ^= 0xFF; + core_operator_cycles_puppet_v2[STAGE_FLIP8] += 1; + break; + + case 4: + if (temp_len < 8) break; + *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF; + core_operator_cycles_puppet_v2[STAGE_FLIP16] += 1; + break; + + case 5: + if (temp_len < 8) break; + *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF; + core_operator_cycles_puppet_v2[STAGE_FLIP32] += 1; + break; + + case 6: + out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); + out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); + core_operator_cycles_puppet_v2[STAGE_ARITH8] += 1; + break; + + case 7: + /* Randomly subtract from word, random endian. */ + if (temp_len < 8) break; + if (UR(2)) { + + u32 pos = UR(temp_len - 1); + *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); + + } else { + + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); + } - case 12: + /* Randomly add to word, random endian. */ + if (UR(2)) { - /* Just set a random byte to a random value. Because, - why not. We use XOR with 1-255 to eliminate the - possibility of a no-op. */ + u32 pos = UR(temp_len - 1); + *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); - out_buf[UR(temp_len)] ^= 1 + UR(255); - core_operator_cycles_puppet_v2[STAGE_RANDOMBYTE] += 1; - break; + } else { + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); - case 13: { + } - /* Delete bytes. We're making this a bit more likely - than insertion (the next option) in hopes of keeping - files reasonably small. */ + core_operator_cycles_puppet_v2[STAGE_ARITH16] += 1; + break; - u32 del_from, del_len; + case 8: + /* Randomly subtract from dword, random endian. */ + if (temp_len < 8) break; + if (UR(2)) { - if (temp_len < 2) break; + u32 pos = UR(temp_len - 3); + *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - /* Don't delete too much. */ + } else { - del_len = choose_block_len(temp_len - 1); + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); - del_from = UR(temp_len - del_len + 1); + } - memmove(out_buf + del_from, out_buf + del_from + del_len, - temp_len - del_from - del_len); + /* Randomly add to dword, random endian. */ + if (UR(2)) { - temp_len -= del_len; - core_operator_cycles_puppet_v2[STAGE_DELETEBYTE] += 1; - break; + u32 pos = UR(temp_len - 3); + *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } + } else { - case 14: + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); - if (temp_len + HAVOC_BLK_XL < MAX_FILE) { + } - /* Clone bytes (75%) or insert a block of constant bytes (25%). 
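
select_algorithm() is MOpt's operator chooser. It is defined elsewhere in this file, but the idea is roulette-wheel selection over the cumulative distribution that pso_updating() (further down) leaves in probability_now[swarm_now][]. A minimal sketch of that idea, with hypothetical local names:

    /* probability_now[][] is cumulative and ends near 1.0 (pso_updating()
       aborts if the last entry drifts outside [0.99, 1.01]). Draw r in
       [0, 1) and return the first operator whose cumulative mass covers it. */

    static int roulette_pick(const double* cum_prob, int n_ops, double r) {

      for (int i = 0; i < n_ops; ++i)
        if (r < cum_prob[i]) return i;

      return n_ops - 1;  /* floating-point slack at the top end */

    }

Operators that recently produced new paths are given more probability mass, so they come up more often in this switch.
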
*/ + core_operator_cycles_puppet_v2[STAGE_ARITH32] += 1; + break; - u8 actually_clone = UR(4); - u32 clone_from, clone_to, clone_len; - u8* new_buf; + case 9: + /* Set byte to interesting value. */ + if (temp_len < 4) break; + out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; + core_operator_cycles_puppet_v2[STAGE_INTEREST8] += 1; + break; - if (actually_clone) { + case 10: + /* Set word to interesting value, randomly choosing endian. */ + if (temp_len < 8) break; + if (UR(2)) { - clone_len = choose_block_len(temp_len); - clone_from = UR(temp_len - clone_len + 1); + *(u16*)(out_buf + UR(temp_len - 1)) = + interesting_16[UR(sizeof(interesting_16) >> 1)]; - } else { + } else { - clone_len = choose_block_len(HAVOC_BLK_XL); - clone_from = 0; + *(u16*)(out_buf + UR(temp_len - 1)) = + SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]); - } + } - clone_to = UR(temp_len); + core_operator_cycles_puppet_v2[STAGE_INTEREST16] += 1; + break; - new_buf = ck_alloc_nozero(temp_len + clone_len); + case 11: + /* Set dword to interesting value, randomly choosing endian. */ - /* Head */ + if (temp_len < 8) break; - memcpy(new_buf, out_buf, clone_to); + if (UR(2)) { - /* Inserted part */ + *(u32*)(out_buf + UR(temp_len - 3)) = + interesting_32[UR(sizeof(interesting_32) >> 2)]; - if (actually_clone) - memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); - else - memset(new_buf + clone_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); + } else { - /* Tail */ - memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, - temp_len - clone_to); + *(u32*)(out_buf + UR(temp_len - 3)) = + SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]); - ck_free(out_buf); - out_buf = new_buf; - temp_len += clone_len; - core_operator_cycles_puppet_v2[STAGE_Clone75] += 1; - } + } - break; + core_operator_cycles_puppet_v2[STAGE_INTEREST32] += 1; + break; - case 15: { + case 12: - /* Overwrite bytes with a randomly selected chunk (75%) or fixed - bytes (25%). */ + /* Just set a random byte to a random value. Because, + why not. We use XOR with 1-255 to eliminate the + possibility of a no-op. */ - u32 copy_from, copy_to, copy_len; + out_buf[UR(temp_len)] ^= 1 + UR(255); + core_operator_cycles_puppet_v2[STAGE_RANDOMBYTE] += 1; + break; - if (temp_len < 2) break; + case 13: { - copy_len = choose_block_len(temp_len - 1); + /* Delete bytes. We're making this a bit more likely + than insertion (the next option) in hopes of keeping + files reasonably small. */ - copy_from = UR(temp_len - copy_len + 1); - copy_to = UR(temp_len - copy_len + 1); + u32 del_from, del_len; - if (UR(4)) { + if (temp_len < 2) break; - if (copy_from != copy_to) - memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + /* Don't delete too much. */ - } - else memset(out_buf + copy_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); - core_operator_cycles_puppet_v2[STAGE_OverWrite75] += 1; - break; + del_len = choose_block_len(temp_len - 1); - } + del_from = UR(temp_len - del_len + 1); + memmove(out_buf + del_from, out_buf + del_from + del_len, + temp_len - del_from - del_len); - } + temp_len -= del_len; + core_operator_cycles_puppet_v2[STAGE_DELETEBYTE] += 1; + break; - } + } + + case 14: + + if (temp_len + HAVOC_BLK_XL < MAX_FILE) { - tmp_core_time += 1; + /* Clone bytes (75%) or insert a block of constant bytes (25%). 
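
Cases 13 through 15 size their blocks with choose_block_len(), defined with the original havoc code. Roughly, it picks a size bracket first and a length within it second, so small blocks dominate; a simplified sketch of that shape (HAVOC_BLK_* come from config.h, and the queue_cycle/run_over10m gating of the larger brackets is elided here):

    static u32 choose_block_len_sketch(u32 limit) {

      u32 min_value, max_value;

      switch (UR(3)) {

        case 0:  min_value = 1;               max_value = HAVOC_BLK_SMALL;  break;
        case 1:  min_value = HAVOC_BLK_SMALL; max_value = HAVOC_BLK_MEDIUM; break;
        default:
          if (UR(10)) { min_value = HAVOC_BLK_MEDIUM; max_value = HAVOC_BLK_LARGE; }
          else        { min_value = HAVOC_BLK_LARGE;  max_value = HAVOC_BLK_XL;    }

      }

      if (min_value >= limit) min_value = 1;
      return min_value + UR(MIN(max_value, limit) - min_value + 1);

    }
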
+ */ - u64 temp_total_found = queued_paths + unique_crashes; + u8 actually_clone = UR(4); + u32 clone_from, clone_to, clone_len; + u8* new_buf; - if (common_fuzz_stuff(argv, out_buf, temp_len)) - goto abandon_entry_puppet; + if (actually_clone) { - /* out_buf might have been mangled a bit, so let's restore it to its - original size and shape. */ + clone_len = choose_block_len(temp_len); + clone_from = UR(temp_len - clone_len + 1); - if (temp_len < len) out_buf = ck_realloc(out_buf, len); - temp_len = len; - memcpy(out_buf, in_buf, len); + } else { - /* If we're finding new stuff, let's run for a bit longer, limits - permitting. */ + clone_len = choose_block_len(HAVOC_BLK_XL); + clone_from = 0; - if (queued_paths != havoc_queued) { + } - if (perf_score <= havoc_max_mult * 100) { - stage_max *= 2; - perf_score *= 2; - } + clone_to = UR(temp_len); - havoc_queued = queued_paths; + new_buf = ck_alloc_nozero(temp_len + clone_len); - } + /* Head */ - if (unlikely(queued_paths + unique_crashes > temp_total_found)) - { - u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found; - total_puppet_find = total_puppet_find + temp_temp_puppet; - for (i = 0; i < 16; ++i) - { - if (core_operator_cycles_puppet_v2[i] > core_operator_cycles_puppet_v3[i]) - core_operator_finds_puppet_v2[i] += temp_temp_puppet; - } - } + memcpy(new_buf, out_buf, clone_to); - } + /* Inserted part */ - new_hit_cnt = queued_paths + unique_crashes; + if (actually_clone) + memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); + else + memset(new_buf + clone_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); + /* Tail */ + memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, + temp_len - clone_to); + + ck_free(out_buf); + out_buf = new_buf; + temp_len += clone_len; + core_operator_cycles_puppet_v2[STAGE_Clone75] += 1; + + } + + break; + + case 15: { + + /* Overwrite bytes with a randomly selected chunk (75%) or fixed + bytes (25%). */ + + u32 copy_from, copy_to, copy_len; + + if (temp_len < 2) break; + + copy_len = choose_block_len(temp_len - 1); + + copy_from = UR(temp_len - copy_len + 1); + copy_to = UR(temp_len - copy_len + 1); + + if (UR(4)) { + + if (copy_from != copy_to) + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + + } else + + memset(out_buf + copy_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); + core_operator_cycles_puppet_v2[STAGE_OverWrite75] += 1; + break; + + } + + } + + } + + tmp_core_time += 1; + + u64 temp_total_found = queued_paths + unique_crashes; + + if (common_fuzz_stuff(argv, out_buf, temp_len)) + goto abandon_entry_puppet; + + /* out_buf might have been mangled a bit, so let's restore it to its + original size and shape. */ + + if (temp_len < len) out_buf = ck_realloc(out_buf, len); + temp_len = len; + memcpy(out_buf, in_buf, len); + + /* If we're finding new stuff, let's run for a bit longer, limits + permitting. 
*/ + + if (queued_paths != havoc_queued) { + + if (perf_score <= havoc_max_mult * 100) { + + stage_max *= 2; + perf_score *= 2; + + } + + havoc_queued = queued_paths; + + } + + if (unlikely(queued_paths + unique_crashes > temp_total_found)) { + + u64 temp_temp_puppet = + queued_paths + unique_crashes - temp_total_found; + total_puppet_find = total_puppet_find + temp_temp_puppet; + for (i = 0; i < 16; ++i) { + + if (core_operator_cycles_puppet_v2[i] > + core_operator_cycles_puppet_v3[i]) + core_operator_finds_puppet_v2[i] += temp_temp_puppet; + + } + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; #ifndef IGNORE_FINDS - /************ - * SPLICING * - ************/ + /************ + * SPLICING * + ************/ + + retry_splicing_puppet: + if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet && + queued_paths > 1 && queue_cur->len > 1) { - retry_splicing_puppet: + struct queue_entry* target; + u32 tid, split_at; + u8* new_buf; + s32 f_diff, l_diff; + /* First of all, if we've modified in_buf for havoc, let's clean that + up... */ + if (in_buf != orig_in) { + + ck_free(in_buf); + in_buf = orig_in; + len = queue_cur->len; + + } - if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet && - queued_paths > 1 && queue_cur->len > 1) { + /* Pick a random queue entry and seek to it. Don't splice with yourself. + */ - struct queue_entry* target; - u32 tid, split_at; - u8* new_buf; - s32 f_diff, l_diff; + do { - /* First of all, if we've modified in_buf for havoc, let's clean that - up... */ + tid = UR(queued_paths); + + } while (tid == current_entry); + + splicing_with = tid; + target = queue; + + while (tid >= 100) { + + target = target->next_100; + tid -= 100; + + } - if (in_buf != orig_in) { - ck_free(in_buf); - in_buf = orig_in; - len = queue_cur->len; - } + while (tid--) + target = target->next; - /* Pick a random queue entry and seek to it. Don't splice with yourself. */ + /* Make sure that the target has a reasonable length. */ - do { tid = UR(queued_paths); } while (tid == current_entry); + while (target && (target->len < 2 || target == queue_cur)) { - splicing_with = tid; - target = queue; + target = target->next; + ++splicing_with; - while (tid >= 100) { target = target->next_100; tid -= 100; } - while (tid--) target = target->next; + } - /* Make sure that the target has a reasonable length. */ + if (!target) goto retry_splicing_puppet; - while (target && (target->len < 2 || target == queue_cur)) { - target = target->next; - ++splicing_with; - } + /* Read the testcase into a new buffer. */ - if (!target) goto retry_splicing_puppet; + fd = open(target->fname, O_RDONLY); - /* Read the testcase into a new buffer. */ + if (fd < 0) PFATAL("Unable to open '%s'", target->fname); - fd = open(target->fname, O_RDONLY); + new_buf = ck_alloc_nozero(target->len); - if (fd < 0) PFATAL("Unable to open '%s'", target->fname); + ck_read(fd, new_buf, target->len, target->fname); - new_buf = ck_alloc_nozero(target->len); + close(fd); - ck_read(fd, new_buf, target->len, target->fname); + /* Find a suitable splicin g location, somewhere between the first and + the last differing byte. Bail out if the difference is just a single + byte or so. */ - close(fd); + locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); - /* Find a suitable splicin g location, somewhere between the first and - the last differing byte. Bail out if the difference is just a single - byte or so. 
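
locate_diffs() reports the first and last byte offsets at which the two splice parents differ, or -1 when they never do; the splice is only worthwhile when those straddle more than a byte or two, hence the f_diff/l_diff sanity check that follows. Its usual afl-fuzz definition is essentially:

    static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {

      s32 f_loc = -1, l_loc = -1;
      u32 pos;

      for (pos = 0; pos < len; ++pos) {

        if (*(ptr1++) != *(ptr2++)) {

          if (f_loc == -1) f_loc = pos;
          l_loc = pos;

        }

      }

      *first = f_loc;
      *last = l_loc;

    }
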
*/ + if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { - locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); + ck_free(new_buf); + goto retry_splicing_puppet; - if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { - ck_free(new_buf); - goto retry_splicing_puppet; - } + } - /* Split somewhere between the first and last differing byte. */ + /* Split somewhere between the first and last differing byte. */ - split_at = f_diff + UR(l_diff - f_diff); + split_at = f_diff + UR(l_diff - f_diff); - /* Do the thing. */ + /* Do the thing. */ - len = target->len; - memcpy(new_buf, in_buf, split_at); - in_buf = new_buf; - ck_free(out_buf); - out_buf = ck_alloc_nozero(len); - memcpy(out_buf, in_buf, len); + len = target->len; + memcpy(new_buf, in_buf, split_at); + in_buf = new_buf; + ck_free(out_buf); + out_buf = ck_alloc_nozero(len); + memcpy(out_buf, in_buf, len); - goto havoc_stage_puppet; + goto havoc_stage_puppet; - } + } #endif /* !IGNORE_FINDS */ - ret_val = 0; - abandon_entry: - abandon_entry_puppet: + ret_val = 0; + abandon_entry: + abandon_entry_puppet: - if (splice_cycle >= SPLICE_CYCLES_puppet) - SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low); + if (splice_cycle >= SPLICE_CYCLES_puppet) + SPLICE_CYCLES_puppet = + (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + + SPLICE_CYCLES_puppet_low); + splicing_with = -1; - splicing_with = -1; + munmap(orig_in, queue_cur->len); + if (in_buf != orig_in) ck_free(in_buf); + ck_free(out_buf); + ck_free(eff_map); - munmap(orig_in, queue_cur->len); + if (key_puppet == 1) { - if (in_buf != orig_in) ck_free(in_buf); - ck_free(out_buf); - ck_free(eff_map); + if (unlikely(queued_paths + unique_crashes > + ((queued_paths + unique_crashes) * limit_time_bound + + orig_hit_cnt_puppet))) { + key_puppet = 0; + cur_ms_lv = get_cur_time(); + new_hit_cnt = queued_paths + unique_crashes; + orig_hit_cnt_puppet = 0; + last_limit_time_start = 0; - if (key_puppet == 1) - { - if (unlikely(queued_paths + unique_crashes > ((queued_paths + unique_crashes)*limit_time_bound + orig_hit_cnt_puppet))) - { - key_puppet = 0; - cur_ms_lv = get_cur_time(); - new_hit_cnt = queued_paths + unique_crashes; - orig_hit_cnt_puppet = 0; - last_limit_time_start = 0; - } - } + } + } + + if (unlikely(tmp_core_time > period_core)) { - if (unlikely(tmp_core_time > period_core)) - { - total_pacemaker_time += tmp_core_time; - tmp_core_time = 0; - temp_puppet_find = total_puppet_find; - new_hit_cnt = queued_paths + unique_crashes; + total_pacemaker_time += tmp_core_time; + tmp_core_time = 0; + temp_puppet_find = total_puppet_find; + new_hit_cnt = queued_paths + unique_crashes; - u64 temp_stage_finds_puppet = 0; - for (i = 0; i < operator_num; ++i) - { + u64 temp_stage_finds_puppet = 0; + for (i = 0; i < operator_num; ++i) { - core_operator_finds_puppet[i] = core_operator_finds_puppet_v2[i]; - core_operator_cycles_puppet[i] = core_operator_cycles_puppet_v2[i]; - temp_stage_finds_puppet += core_operator_finds_puppet[i]; - } + core_operator_finds_puppet[i] = core_operator_finds_puppet_v2[i]; + core_operator_cycles_puppet[i] = core_operator_cycles_puppet_v2[i]; + temp_stage_finds_puppet += core_operator_finds_puppet[i]; - key_module = 2; + } - old_hit_count = new_hit_cnt; - } - return ret_val; - } - } + key_module = 2; + + old_hit_count = new_hit_cnt; + + } + + return ret_val; + + } + } #undef FLIP_BIT } - void pso_updating(void) { - g_now += 1; - if (g_now > g_max) g_now = 0; - w_now = (w_init - w_end)*(g_max - 
g_now) / (g_max)+w_end; - int tmp_swarm, i, j; - u64 temp_operator_finds_puppet = 0; - for (i = 0; i < operator_num; ++i) - { - operator_finds_puppet[i] = core_operator_finds_puppet[i]; - - for (j = 0; j < swarm_num; ++j) - { - operator_finds_puppet[i] = operator_finds_puppet[i] + stage_finds_puppet[j][i]; - } - temp_operator_finds_puppet = temp_operator_finds_puppet + operator_finds_puppet[i]; - } - - for (i = 0; i < operator_num; ++i) - { - if (operator_finds_puppet[i]) - G_best[i] = (double)((double)(operator_finds_puppet[i]) / (double)(temp_operator_finds_puppet)); - } - - for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) - { - double x_temp = 0.0; - for (i = 0; i < operator_num; ++i) - { - probability_now[tmp_swarm][i] = 0.0; - v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]); - x_now[tmp_swarm][i] += v_now[tmp_swarm][i]; - if (x_now[tmp_swarm][i] > v_max) - x_now[tmp_swarm][i] = v_max; - else if (x_now[tmp_swarm][i] < v_min) - x_now[tmp_swarm][i] = v_min; - x_temp += x_now[tmp_swarm][i]; - } - - for (i = 0; i < operator_num; ++i) - { - x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp; - if (likely(i != 0)) - probability_now[tmp_swarm][i] = probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i]; - else - probability_now[tmp_swarm][i] = x_now[tmp_swarm][i]; - } - if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || probability_now[tmp_swarm][operator_num - 1] > 1.01) FATAL("ERROR probability"); - } - swarm_now = 0; - key_module = 0; -} + g_now += 1; + if (g_now > g_max) g_now = 0; + w_now = (w_init - w_end) * (g_max - g_now) / (g_max) + w_end; + int tmp_swarm, i, j; + u64 temp_operator_finds_puppet = 0; + for (i = 0; i < operator_num; ++i) { + + operator_finds_puppet[i] = core_operator_finds_puppet[i]; + + for (j = 0; j < swarm_num; ++j) { + + operator_finds_puppet[i] = + operator_finds_puppet[i] + stage_finds_puppet[j][i]; + + } + + temp_operator_finds_puppet = + temp_operator_finds_puppet + operator_finds_puppet[i]; + + } + + for (i = 0; i < operator_num; ++i) { + + if (operator_finds_puppet[i]) + G_best[i] = (double)((double)(operator_finds_puppet[i]) / + (double)(temp_operator_finds_puppet)); + } + + for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) { + + double x_temp = 0.0; + for (i = 0; i < operator_num; ++i) { + + probability_now[tmp_swarm][i] = 0.0; + v_now[tmp_swarm][i] = + w_now * v_now[tmp_swarm][i] + + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + + RAND_C * (G_best[i] - x_now[tmp_swarm][i]); + x_now[tmp_swarm][i] += v_now[tmp_swarm][i]; + if (x_now[tmp_swarm][i] > v_max) + x_now[tmp_swarm][i] = v_max; + else if (x_now[tmp_swarm][i] < v_min) + x_now[tmp_swarm][i] = v_min; + x_temp += x_now[tmp_swarm][i]; + + } + + for (i = 0; i < operator_num; ++i) { + + x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp; + if (likely(i != 0)) + probability_now[tmp_swarm][i] = + probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i]; + else + probability_now[tmp_swarm][i] = x_now[tmp_swarm][i]; + + } + + if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || + probability_now[tmp_swarm][operator_num - 1] > 1.01) + FATAL("ERROR probability"); + + } + + swarm_now = 0; + key_module = 0; + +} /* larger change for MOpt implementation: the original fuzz_one was renamed to fuzz_one_original. 
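
pso_updating() above is a textbook particle-swarm step applied per swarm and per mutation operator: velocity is damped by the inertia weight w_now (which decays linearly from w_init to w_end as g_now approaches g_max), pulled toward the swarm's local best L_best and the global best G_best, and the resulting position is clamped before being renormalized into the cumulative probability_now[] table. One dimension of that update as a sketch, with the code's names passed in (rand_c stands in for the random learning-rate term RAND_C used there):

    /* The caller keeps v per swarm/operator (v_now[][]) and still has to
       renormalize x across all operators of the swarm afterwards. */

    static double pso_step(double* v, double x, double l_best, double g_best,
                           double w_now, double rand_c,
                           double v_min, double v_max) {

      *v = w_now * *v + rand_c * (l_best - x) + rand_c * (g_best - x);
      x += *v;
      if (x > v_max) x = v_max;
      else if (x < v_min) x = v_min;
      return x;

    }
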
All documentation references to fuzz_one therefore mean fuzz_one_original */ u8 fuzz_one(char** argv) { - int key_val_lv = 0; - if (limit_time_sig == 0) { - key_val_lv = fuzz_one_original(argv); - } else { - if (key_module == 0) - key_val_lv = pilot_fuzzing(argv); - else if (key_module == 1) - key_val_lv = core_fuzzing(argv); - else if (key_module == 2) - pso_updating(); - } - - return key_val_lv; + + int key_val_lv = 0; + if (limit_time_sig == 0) { + + key_val_lv = fuzz_one_original(argv); + + } else { + + if (key_module == 0) + key_val_lv = pilot_fuzzing(argv); + else if (key_module == 1) + key_val_lv = core_fuzzing(argv); + else if (key_module == 2) + pso_updating(); + + } + + return key_val_lv; + } diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c index ed158e6c..e22291b5 100644 --- a/src/afl-fuzz-python.c +++ b/src/afl-fuzz-python.c @@ -26,45 +26,62 @@ #ifdef USE_PYTHON int init_py() { + Py_Initialize(); u8* module_name = getenv("AFL_PYTHON_MODULE"); if (module_name) { + PyObject* py_name = PyString_FromString(module_name); py_module = PyImport_Import(py_name); Py_DECREF(py_name); if (py_module != NULL) { + u8 py_notrim = 0; py_functions[PY_FUNC_INIT] = PyObject_GetAttrString(py_module, "init"); py_functions[PY_FUNC_FUZZ] = PyObject_GetAttrString(py_module, "fuzz"); - py_functions[PY_FUNC_INIT_TRIM] = PyObject_GetAttrString(py_module, "init_trim"); - py_functions[PY_FUNC_POST_TRIM] = PyObject_GetAttrString(py_module, "post_trim"); + py_functions[PY_FUNC_INIT_TRIM] = + PyObject_GetAttrString(py_module, "init_trim"); + py_functions[PY_FUNC_POST_TRIM] = + PyObject_GetAttrString(py_module, "post_trim"); py_functions[PY_FUNC_TRIM] = PyObject_GetAttrString(py_module, "trim"); for (u8 py_idx = 0; py_idx < PY_FUNC_COUNT; ++py_idx) { + if (!py_functions[py_idx] || !PyCallable_Check(py_functions[py_idx])) { + if (py_idx >= PY_FUNC_INIT_TRIM && py_idx <= PY_FUNC_TRIM) { + // Implementing the trim API is optional for now - if (PyErr_Occurred()) - PyErr_Print(); + if (PyErr_Occurred()) PyErr_Print(); py_notrim = 1; + } else { - if (PyErr_Occurred()) - PyErr_Print(); - fprintf(stderr, "Cannot find/call function with index %d in external Python module.\n", py_idx); + + if (PyErr_Occurred()) PyErr_Print(); + fprintf(stderr, + "Cannot find/call function with index %d in external " + "Python module.\n", + py_idx); return 1; + } + } } if (py_notrim) { + py_functions[PY_FUNC_INIT_TRIM] = NULL; py_functions[PY_FUNC_POST_TRIM] = NULL; py_functions[PY_FUNC_TRIM] = NULL; - WARNF("Python module does not implement trim API, standard trimming will be used."); + WARNF( + "Python module does not implement trim API, standard trimming will " + "be used."); + } PyObject *py_args, *py_value; @@ -73,9 +90,11 @@ int init_py() { py_args = PyTuple_New(1); py_value = PyInt_FromLong(UR(0xFFFFFFFF)); if (!py_value) { + Py_DECREF(py_args); fprintf(stderr, "Cannot convert argument\n"); return 1; + } PyTuple_SetItem(py_args, 0, py_value); @@ -85,51 +104,68 @@ int init_py() { Py_DECREF(py_args); if (py_value == NULL) { + PyErr_Print(); - fprintf(stderr,"Call failed\n"); + fprintf(stderr, "Call failed\n"); return 1; + } + } else { + PyErr_Print(); fprintf(stderr, "Failed to load \"%s\"\n", module_name); return 1; + } + } return 0; + } void finalize_py() { + if (py_module != NULL) { + u32 i; for (i = 0; i < PY_FUNC_COUNT; ++i) Py_XDECREF(py_functions[i]); Py_DECREF(py_module); + } Py_Finalize(); + } -void fuzz_py(char* buf, size_t buflen, char* add_buf, size_t add_buflen, char** ret, size_t* retlen) { +void fuzz_py(char* 
buf, size_t buflen, char* add_buf, size_t add_buflen, + char** ret, size_t* retlen) { if (py_module != NULL) { + PyObject *py_args, *py_value; py_args = PyTuple_New(2); py_value = PyByteArray_FromStringAndSize(buf, buflen); if (!py_value) { + Py_DECREF(py_args); fprintf(stderr, "Cannot convert argument\n"); return; + } PyTuple_SetItem(py_args, 0, py_value); py_value = PyByteArray_FromStringAndSize(add_buf, add_buflen); if (!py_value) { + Py_DECREF(py_args); fprintf(stderr, "Cannot convert argument\n"); return; + } PyTuple_SetItem(py_args, 1, py_value); @@ -139,26 +175,35 @@ void fuzz_py(char* buf, size_t buflen, char* add_buf, size_t add_buflen, char** Py_DECREF(py_args); if (py_value != NULL) { + *retlen = PyByteArray_Size(py_value); *ret = malloc(*retlen); memcpy(*ret, PyByteArray_AsString(py_value), *retlen); Py_DECREF(py_value); + } else { + PyErr_Print(); - fprintf(stderr,"Call failed\n"); + fprintf(stderr, "Call failed\n"); return; + } + } + } u32 init_trim_py(char* buf, size_t buflen) { + PyObject *py_args, *py_value; py_args = PyTuple_New(1); py_value = PyByteArray_FromStringAndSize(buf, buflen); if (!py_value) { + Py_DECREF(py_args); FATAL("Failed to convert arguments"); + } PyTuple_SetItem(py_args, 0, py_value); @@ -167,24 +212,32 @@ u32 init_trim_py(char* buf, size_t buflen) { Py_DECREF(py_args); if (py_value != NULL) { + u32 retcnt = PyInt_AsLong(py_value); Py_DECREF(py_value); return retcnt; + } else { + PyErr_Print(); FATAL("Call failed"); + } + } u32 post_trim_py(char success) { + PyObject *py_args, *py_value; py_args = PyTuple_New(1); py_value = PyBool_FromLong(success); if (!py_value) { + Py_DECREF(py_args); FATAL("Failed to convert arguments"); + } PyTuple_SetItem(py_args, 0, py_value); @@ -193,16 +246,22 @@ u32 post_trim_py(char success) { Py_DECREF(py_args); if (py_value != NULL) { + u32 retcnt = PyInt_AsLong(py_value); Py_DECREF(py_value); return retcnt; + } else { + PyErr_Print(); FATAL("Call failed"); + } + } void trim_py(char** ret, size_t* retlen) { + PyObject *py_args, *py_value; py_args = PyTuple_New(0); @@ -210,14 +269,19 @@ void trim_py(char** ret, size_t* retlen) { Py_DECREF(py_args); if (py_value != NULL) { + *retlen = PyByteArray_Size(py_value); *ret = malloc(*retlen); memcpy(*ret, PyByteArray_AsString(py_value), *retlen); Py_DECREF(py_value); + } else { + PyErr_Print(); FATAL("Call failed"); + } + } u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) { @@ -237,20 +301,24 @@ u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) { stage_max = init_trim_py(in_buf, q->len); if (not_on_tty && debug) - SAYF("[Python Trimming] START: Max %d iterations, %u bytes", stage_max, q->len); + SAYF("[Python Trimming] START: Max %d iterations, %u bytes", stage_max, + q->len); + + while (stage_cur < stage_max) { - while(stage_cur < stage_max) { sprintf(tmp, "ptrim %s", DI(trim_exec)); u32 cksum; - char* retbuf = NULL; + char* retbuf = NULL; size_t retlen = 0; trim_py(&retbuf, &retlen); if (retlen > orig_len) - FATAL("Trimmed data returned by Python module is larger than original data"); + FATAL( + "Trimmed data returned by Python module is larger than original " + "data"); write_to_testcase(retbuf, retlen); @@ -280,17 +348,23 @@ u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) { stage_cur = post_trim_py(1); if (not_on_tty && debug) - SAYF("[Python Trimming] SUCCESS: %d/%d iterations (now at %u bytes)", stage_cur, stage_max, q->len); + SAYF("[Python Trimming] SUCCESS: %d/%d iterations (now at %u bytes)", + stage_cur, stage_max, 
q->len); + } else { + /* Tell the Python module that the trimming was unsuccessful */ stage_cur = post_trim_py(0); if (not_on_tty && debug) - SAYF("[Python Trimming] FAILURE: %d/%d iterations", stage_cur, stage_max); + SAYF("[Python Trimming] FAILURE: %d/%d iterations", stage_cur, + stage_max); + } - /* Since this can be slow, update the screen every now and then. */ + /* Since this can be slow, update the screen every now and then. */ + + if (!(trim_exec++ % stats_update_freq)) show_stats(); - if (!(trim_exec++ % stats_update_freq)) show_stats(); } if (not_on_tty && debug) @@ -303,7 +377,7 @@ u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) { s32 fd; - unlink(q->fname); /* ignore errors */ + unlink(q->fname); /* ignore errors */ fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600); @@ -317,8 +391,6 @@ u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) { } - - abort_trimming: bytes_trim_out += q->len; @@ -327,3 +399,4 @@ abort_trimming: } #endif /* USE_PYTHON */ + diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index c1547b48..22a9ccb0 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -43,7 +43,6 @@ void mark_as_det_done(struct queue_entry* q) { } - /* Mark as variable. Create symlinks if possible to make it easier to examine the files. */ @@ -69,7 +68,6 @@ void mark_as_variable(struct queue_entry* q) { } - /* Mark / unmark as redundant (edge-only). This is not used for restoring state, but may be useful for post-processing datasets. */ @@ -102,18 +100,17 @@ void mark_as_redundant(struct queue_entry* q, u8 state) { } - /* Append new test case to the queue. */ void add_to_queue(u8* fname, u32 len, u8 passed_det) { struct queue_entry* q = ck_alloc(sizeof(struct queue_entry)); - q->fname = fname; - q->len = len; - q->depth = cur_depth + 1; - q->passed_det = passed_det; - q->n_fuzz = 1; + q->fname = fname; + q->len = len; + q->depth = cur_depth + 1; + q->passed_det = passed_det; + q->n_fuzz = 1; if (q->depth > max_depth) max_depth = q->depth; @@ -122,7 +119,9 @@ void add_to_queue(u8* fname, u32 len, u8 passed_det) { queue_top->next = q; queue_top = q; - } else q_prev100 = queue = queue_top = q; + } else + + q_prev100 = queue = queue_top = q; ++queued_paths; ++pending_not_fuzzed; @@ -140,7 +139,6 @@ void add_to_queue(u8* fname, u32 len, u8 passed_det) { } - /* Destroy the entire queue. */ void destroy_queue(void) { @@ -159,7 +157,6 @@ void destroy_queue(void) { } - /* When we bump into a new path, we call this to see if the path appears more "favorable" than any of the existing ones. The purpose of the "favorables" is to have a minimal set of paths that trigger all the bits @@ -170,12 +167,11 @@ void destroy_queue(void) { for every byte in the bitmap. We win that slot if there is no previous contender, or if the contender has a more favorable speed x size factor. */ - void update_bitmap_score(struct queue_entry* q) { u32 i; u64 fav_factor = q->exec_us * q->len; - u64 fuzz_p2 = next_p2 (q->n_fuzz); + u64 fuzz_p2 = next_p2(q->n_fuzz); /* For every byte set in trace_bits[], see if there is a previous winner, and how it compares to us. */ @@ -184,47 +180,53 @@ void update_bitmap_score(struct queue_entry* q) { if (trace_bits[i]) { - if (top_rated[i]) { + if (top_rated[i]) { - /* Faster-executing or smaller test cases are favored. */ - u64 top_rated_fuzz_p2 = next_p2 (top_rated[i]->n_fuzz); - u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len; + /* Faster-executing or smaller test cases are favored. 
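
The fuzz_p2 comparison below implements AFLFast's preference for rarely exercised entries: n_fuzz is bucketed through next_p2() so the winner only changes when fuzz counts differ by a power-of-two bucket, and the classic exec_us * len product then breaks ties. next_p2() is the usual round-up-to-power-of-two helper:

    static u64 next_p2(u64 val) {

      u64 ret = 1;
      while (val > ret)
        ret <<= 1;

      return ret;  /* next_p2(5) == 8, next_p2(8) == 8 */

    }
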
*/ + u64 top_rated_fuzz_p2 = next_p2(top_rated[i]->n_fuzz); + u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len; - if (fuzz_p2 > top_rated_fuzz_p2) { - continue; - } else if (fuzz_p2 == top_rated_fuzz_p2) { - if (fav_factor > top_rated_fav_factor) - continue; - } + if (fuzz_p2 > top_rated_fuzz_p2) { - if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue; + continue; - /* Looks like we're going to win. Decrease ref count for the - previous winner, discard its trace_bits[] if necessary. */ + } else if (fuzz_p2 == top_rated_fuzz_p2) { - if (!--top_rated[i]->tc_ref) { - ck_free(top_rated[i]->trace_mini); - top_rated[i]->trace_mini = 0; - } + if (fav_factor > top_rated_fav_factor) continue; - } + } - /* Insert ourselves as the new winner. */ + if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue; - top_rated[i] = q; - ++q->tc_ref; + /* Looks like we're going to win. Decrease ref count for the + previous winner, discard its trace_bits[] if necessary. */ - if (!q->trace_mini) { - q->trace_mini = ck_alloc(MAP_SIZE >> 3); - minimize_bits(q->trace_mini, trace_bits); - } + if (!--top_rated[i]->tc_ref) { - score_changed = 1; + ck_free(top_rated[i]->trace_mini); + top_rated[i]->trace_mini = 0; - } + } -} + } + + /* Insert ourselves as the new winner. */ + + top_rated[i] = q; + ++q->tc_ref; + + if (!q->trace_mini) { + q->trace_mini = ck_alloc(MAP_SIZE >> 3); + minimize_bits(q->trace_mini, trace_bits); + + } + + score_changed = 1; + + } + +} /* The second part of the mechanism discussed above is a routine that goes over top_rated[] entries, and then sequentially grabs winners for @@ -235,8 +237,8 @@ void update_bitmap_score(struct queue_entry* q) { void cull_queue(void) { struct queue_entry* q; - static u8 temp_v[MAP_SIZE >> 3]; - u32 i; + static u8 temp_v[MAP_SIZE >> 3]; + u32 i; if (dumb_mode || !score_changed) return; @@ -244,14 +246,16 @@ void cull_queue(void) { memset(temp_v, 255, MAP_SIZE >> 3); - queued_favored = 0; + queued_favored = 0; pending_favored = 0; q = queue; while (q) { + q->favored = 0; q = q->next; + } /* Let's see if anything in the bitmap isn't captured in temp_v. @@ -264,27 +268,29 @@ void cull_queue(void) { /* Remove all bits belonging to the current entry from temp_v. */ - while (j--) + while (j--) if (top_rated[i]->trace_mini[j]) temp_v[j] &= ~top_rated[i]->trace_mini[j]; top_rated[i]->favored = 1; ++queued_favored; - if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed) ++pending_favored; + if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed) + ++pending_favored; } q = queue; while (q) { + mark_as_redundant(q, !q->favored); q = q->next; + } } - /* Calculate case desirability score to adjust the length of havoc fuzzing. A helper function for fuzz_one(). Maybe some of these constants should go into config.h. */ @@ -305,34 +311,51 @@ u32 calculate_score(struct queue_entry* q) { // Longer execution time means longer work on the input, the deeper in // coverage, the better the fuzzing, right? 
-mh - if (q->exec_us * 0.1 > avg_exec_us) perf_score = 10; - else if (q->exec_us * 0.25 > avg_exec_us) perf_score = 25; - else if (q->exec_us * 0.5 > avg_exec_us) perf_score = 50; - else if (q->exec_us * 0.75 > avg_exec_us) perf_score = 75; - else if (q->exec_us * 4 < avg_exec_us) perf_score = 300; - else if (q->exec_us * 3 < avg_exec_us) perf_score = 200; - else if (q->exec_us * 2 < avg_exec_us) perf_score = 150; + if (q->exec_us * 0.1 > avg_exec_us) + perf_score = 10; + else if (q->exec_us * 0.25 > avg_exec_us) + perf_score = 25; + else if (q->exec_us * 0.5 > avg_exec_us) + perf_score = 50; + else if (q->exec_us * 0.75 > avg_exec_us) + perf_score = 75; + else if (q->exec_us * 4 < avg_exec_us) + perf_score = 300; + else if (q->exec_us * 3 < avg_exec_us) + perf_score = 200; + else if (q->exec_us * 2 < avg_exec_us) + perf_score = 150; /* Adjust score based on bitmap size. The working theory is that better coverage translates to better targets. Multiplier from 0.25x to 3x. */ - if (q->bitmap_size * 0.3 > avg_bitmap_size) perf_score *= 3; - else if (q->bitmap_size * 0.5 > avg_bitmap_size) perf_score *= 2; - else if (q->bitmap_size * 0.75 > avg_bitmap_size) perf_score *= 1.5; - else if (q->bitmap_size * 3 < avg_bitmap_size) perf_score *= 0.25; - else if (q->bitmap_size * 2 < avg_bitmap_size) perf_score *= 0.5; - else if (q->bitmap_size * 1.5 < avg_bitmap_size) perf_score *= 0.75; + if (q->bitmap_size * 0.3 > avg_bitmap_size) + perf_score *= 3; + else if (q->bitmap_size * 0.5 > avg_bitmap_size) + perf_score *= 2; + else if (q->bitmap_size * 0.75 > avg_bitmap_size) + perf_score *= 1.5; + else if (q->bitmap_size * 3 < avg_bitmap_size) + perf_score *= 0.25; + else if (q->bitmap_size * 2 < avg_bitmap_size) + perf_score *= 0.5; + else if (q->bitmap_size * 1.5 < avg_bitmap_size) + perf_score *= 0.75; /* Adjust score based on handicap. Handicap is proportional to how late in the game we learned about this path. Latecomers are allowed to run for a bit longer until they catch up with the rest. */ if (q->handicap >= 4) { + perf_score *= 4; q->handicap -= 4; + } else if (q->handicap) { + perf_score *= 2; --q->handicap; + } /* Final adjustment based on input depth, under the assumption that fuzzing @@ -341,11 +364,11 @@ u32 calculate_score(struct queue_entry* q) { switch (q->depth) { - case 0 ... 3: break; - case 4 ... 7: perf_score *= 2; break; - case 8 ... 13: perf_score *= 3; break; + case 0 ... 3: break; + case 4 ... 7: perf_score *= 2; break; + case 8 ... 13: perf_score *= 3; break; case 14 ... 25: perf_score *= 4; break; - default: perf_score *= 5; + default: perf_score *= 5; } @@ -357,61 +380,69 @@ u32 calculate_score(struct queue_entry* q) { switch (schedule) { - case EXPLORE: - break; + case EXPLORE: break; - case EXPLOIT: - factor = MAX_FACTOR; - break; + case EXPLOIT: factor = MAX_FACTOR; break; case COE: fuzz_total = 0; n_paths = 0; - struct queue_entry *queue_it = queue; + struct queue_entry* queue_it = queue; while (queue_it) { + fuzz_total += queue_it->n_fuzz; - n_paths ++; + n_paths++; queue_it = queue_it->next; + } fuzz_mu = fuzz_total / n_paths; if (fuzz <= fuzz_mu) { + if (q->fuzz_level < 16) - factor = ((u32) (1 << q->fuzz_level)); + factor = ((u32)(1 << q->fuzz_level)); else factor = MAX_FACTOR; + } else { + factor = 0; + } + break; case FAST: if (q->fuzz_level < 16) { - factor = ((u32) (1 << q->fuzz_level)) / (fuzz == 0 ? 1 : fuzz); + + factor = ((u32)(1 << q->fuzz_level)) / (fuzz == 0 ? 1 : fuzz); + } else - factor = MAX_FACTOR / (fuzz == 0 ? 
1 : next_p2 (fuzz)); - break; - case LIN: - factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz); + factor = MAX_FACTOR / (fuzz == 0 ? 1 : next_p2(fuzz)); break; + case LIN: factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz); break; + case QUAD: factor = q->fuzz_level * q->fuzz_level / (fuzz == 0 ? 1 : fuzz); break; - default: - PFATAL ("Unknown Power Schedule"); + default: PFATAL("Unknown Power Schedule"); + } - if (factor > MAX_FACTOR) - factor = MAX_FACTOR; + + if (factor > MAX_FACTOR) factor = MAX_FACTOR; perf_score *= factor / POWER_BETA; // MOpt mode - if (limit_time_sig != 0 && max_depth - q->depth < 3) perf_score *= 2; - else if (perf_score < 1) perf_score = 1; // Add a lower bound to AFLFast's energy assignment strategies + if (limit_time_sig != 0 && max_depth - q->depth < 3) + perf_score *= 2; + else if (perf_score < 1) + perf_score = + 1; // Add a lower bound to AFLFast's energy assignment strategies /* Make sure that we don't go over limit. */ @@ -420,3 +451,4 @@ u32 calculate_score(struct queue_entry* q) { return perf_score; } + diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index c14ecc87..4093d991 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -28,8 +28,8 @@ u8 run_target(char** argv, u32 timeout) { static struct itimerval it; - static u32 prev_timed_out = 0; - static u64 exec_ms = 0; + static u32 prev_timed_out = 0; + static u64 exec_ms = 0; int status = 0; u32 tb4; @@ -45,7 +45,7 @@ u8 run_target(char** argv, u32 timeout) { /* If we're running in "dumb" mode, we can't rely on the fork server logic compiled into the target program, so we will just keep calling - execve(). There is a bit of code duplication between here and + execve(). There is a bit of code duplication between here and init_forkserver(), but c'est la vie. */ if (dumb_mode == 1 || no_forkserver) { @@ -64,11 +64,11 @@ u8 run_target(char** argv, u32 timeout) { #ifdef RLIMIT_AS - setrlimit(RLIMIT_AS, &r); /* Ignore errors */ + setrlimit(RLIMIT_AS, &r); /* Ignore errors */ #else - setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ + setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ #endif /* ^RLIMIT_AS */ @@ -76,7 +76,7 @@ u8 run_target(char** argv, u32 timeout) { r.rlim_max = r.rlim_cur = 0; - setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ + setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ /* Isolate the process and configure standard descriptors. If out_file is specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */ @@ -108,10 +108,12 @@ u8 run_target(char** argv, u32 timeout) { /* Set sane defaults for ASAN if nothing else specified. */ - setenv("ASAN_OPTIONS", "abort_on_error=1:" - "detect_leaks=0:" - "symbolize=0:" - "allocator_may_return_null=1", 0); + setenv("ASAN_OPTIONS", + "abort_on_error=1:" + "detect_leaks=0:" + "symbolize=0:" + "allocator_may_return_null=1", + 0); setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":" "symbolize=0:" @@ -152,7 +154,8 @@ u8 run_target(char** argv, u32 timeout) { } - /* Configure timeout, as requested by user, then wait for child to terminate. */ + /* Configure timeout, as requested by user, then wait for child to terminate. 
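
The exec_ms computation a few lines further down needs no second clock read: ITIMER_REAL was armed with the full timeout, so whatever getitimer() reports as remaining, subtracted from the timeout, is the child's wall-clock runtime. A small sketch of that arithmetic (elapsed_ms is a hypothetical helper):

    #include <sys/time.h>

    /* With timeout = 1000 ms and 742000 us remaining on ITIMER_REAL,
       the child ran for 1000 - 742 = 258 ms. */

    static unsigned long long elapsed_ms(unsigned long long timeout_ms,
                                         const struct itimerval* it) {

      return timeout_ms - ((unsigned long long)it->it_value.tv_sec * 1000 +
                           (unsigned long long)it->it_value.tv_usec / 1000);

    }
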
+ */ it.it_value.tv_sec = (timeout / 1000); it.it_value.tv_usec = (timeout % 1000) * 1000; @@ -179,9 +182,10 @@ u8 run_target(char** argv, u32 timeout) { } if (!WIFSTOPPED(status)) child_pid = 0; - + getitimer(ITIMER_REAL, &it); - exec_ms = (u64) timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000); + exec_ms = + (u64)timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000); if (slowest_exec_ms < exec_ms) slowest_exec_ms = exec_ms; it.it_value.tv_sec = 0; @@ -223,8 +227,10 @@ u8 run_target(char** argv, u32 timeout) { must use a special exit code. */ if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) { + kill_signal = 0; return FAULT_CRASH; + } if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG) @@ -234,7 +240,6 @@ u8 run_target(char** argv, u32 timeout) { } - /* Write modified data to file for testing. If out_file is set, the old file is unlinked and a new one is created. Otherwise, out_fd is rewound and truncated. */ @@ -245,20 +250,26 @@ void write_to_testcase(void* mem, u32 len) { if (out_file) { - unlink(out_file); /* Ignore errors. */ + unlink(out_file); /* Ignore errors. */ fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600); if (fd < 0) PFATAL("Unable to create '%s'", out_file); - } else lseek(fd, 0, SEEK_SET); + } else + + lseek(fd, 0, SEEK_SET); if (pre_save_handler) { - u8* new_data; + + u8* new_data; size_t new_size = pre_save_handler(mem, len, &new_data); ck_write(fd, new_data, new_size, out_file); + } else { + ck_write(fd, mem, len, out_file); + } if (!out_file) { @@ -266,10 +277,11 @@ void write_to_testcase(void* mem, u32 len) { if (ftruncate(fd, len)) PFATAL("ftruncate() failed"); lseek(fd, 0, SEEK_SET); - } else close(fd); + } else -} + close(fd); +} /* The same, but with an adjustable gap. Used for trimming. */ @@ -280,17 +292,19 @@ void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) { if (out_file) { - unlink(out_file); /* Ignore errors. */ + unlink(out_file); /* Ignore errors. */ fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600); if (fd < 0) PFATAL("Unable to create '%s'", out_file); - } else lseek(fd, 0, SEEK_SET); + } else + + lseek(fd, 0, SEEK_SET); if (skip_at) ck_write(fd, mem, skip_at, out_file); - u8 *memu8 = mem; + u8* memu8 = mem; if (tail_len) ck_write(fd, memu8 + skip_at + skip_len, tail_len, out_file); if (!out_file) { @@ -298,22 +312,23 @@ void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) { if (ftruncate(fd, len - skip_len)) PFATAL("ftruncate() failed"); lseek(fd, 0, SEEK_SET); - } else close(fd); + } else -} + close(fd); +} /* Calibrate a new test case. This is done when processing the input directory to warn about flaky or otherwise problematic test cases early on; and when new paths are discovered to detect variable behavior and so on. */ -u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, - u32 handicap, u8 from_queue) { +u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 handicap, + u8 from_queue) { static u8 first_trace[MAP_SIZE]; - u8 fault = 0, new_bits = 0, var_detected = 0, - first_run = (q->exec_cksum == 0); + u8 fault = 0, new_bits = 0, var_detected = 0, + first_run = (q->exec_cksum == 0); u64 start_us, stop_us; @@ -326,19 +341,18 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, to intermittent latency. 
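
The relaxed calibration timeout that follows takes whichever is larger of an absolute and a relative cushion. Assuming mainline AFL's config.h defaults of CAL_TMOUT_ADD = 50 and CAL_TMOUT_PERC = 125 (worth checking against this tree's config.h), the arithmetic works out as:

    /* exec_tmout = 1000 ms:
       MAX(1000 + 50, 1000 * 125 / 100) = MAX(1050, 1250) = 1250 ms. */

    u32 use_tmout = MAX(exec_tmout + CAL_TMOUT_ADD,
                        exec_tmout * CAL_TMOUT_PERC / 100);
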
*/ if (!from_queue || resuming_fuzz) - use_tmout = MAX(exec_tmout + CAL_TMOUT_ADD, - exec_tmout * CAL_TMOUT_PERC / 100); + use_tmout = + MAX(exec_tmout + CAL_TMOUT_ADD, exec_tmout * CAL_TMOUT_PERC / 100); ++q->cal_failed; stage_name = "calibration"; - stage_max = fast_cal ? 3 : CAL_CYCLES; + stage_max = fast_cal ? 3 : CAL_CYCLES; /* Make sure the forkserver is up before we do anything, and let's not count its spin-up time toward binary calibration. */ - if (dumb_mode != 1 && !no_forkserver && !forksrv_pid) - init_forkserver(argv); + if (dumb_mode != 1 && !no_forkserver && !forksrv_pid) init_forkserver(argv); if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE); @@ -360,8 +374,10 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, if (stop_soon || fault != crash_mode) goto abort_calibration; if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) { + fault = FAULT_NOINST; goto abort_calibration; + } cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); @@ -380,7 +396,7 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, if (!var_bytes[i] && first_trace[i] != trace_bits[i]) { var_bytes[i] = 1; - stage_max = CAL_CYCLES_LONG; + stage_max = CAL_CYCLES_LONG; } @@ -401,16 +417,16 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, stop_us = get_cur_time_us(); - total_cal_us += stop_us - start_us; + total_cal_us += stop_us - start_us; total_cal_cycles += stage_max; /* OK, let's collect some stats about the performance of this test case. This is used for fuzzing air time calculations in calculate_score(). */ - q->exec_us = (stop_us - start_us) / stage_max; + q->exec_us = (stop_us - start_us) / stage_max; q->bitmap_size = count_bytes(trace_bits); - q->handicap = handicap; - q->cal_failed = 0; + q->handicap = handicap; + q->cal_failed = 0; total_bitmap_size += q->bitmap_size; ++total_bitmap_entries; @@ -426,8 +442,10 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, abort_calibration: if (new_bits == 2 && !q->has_new_cov) { + q->has_new_cov = 1; ++queued_with_cov; + } /* Mark variable paths. */ @@ -437,15 +455,17 @@ abort_calibration: var_byte_count = count_bytes(var_bytes); if (!q->var_behavior) { + mark_as_variable(q); ++queued_variable; + } } stage_name = old_sn; - stage_cur = old_sc; - stage_max = old_sm; + stage_cur = old_sc; + stage_max = old_sm; if (!first_run) show_stats(); @@ -453,14 +473,13 @@ abort_calibration: } - /* Grab interesting test cases from other fuzzers. */ void sync_fuzzers(char** argv) { - DIR* sd; + DIR* sd; struct dirent* sd_ent; - u32 sync_cnt = 0; + u32 sync_cnt = 0; sd = opendir(sync_dir); if (!sd) PFATAL("Unable to open '%s'", sync_dir); @@ -468,16 +487,17 @@ void sync_fuzzers(char** argv) { stage_max = stage_cur = 0; cur_depth = 0; - /* Look at the entries created for every other fuzzer in the sync directory. */ + /* Look at the entries created for every other fuzzer in the sync directory. + */ while ((sd_ent = readdir(sd))) { static u8 stage_tmp[128]; - DIR* qd; + DIR* qd; struct dirent* qd_ent; - u8 *qd_path, *qd_synced_path; - u32 min_accept = 0, next_min_accept; + u8 * qd_path, *qd_synced_path; + u32 min_accept = 0, next_min_accept; s32 id_fd; @@ -490,8 +510,10 @@ void sync_fuzzers(char** argv) { qd_path = alloc_printf("%s/%s/queue", sync_dir, sd_ent->d_name); if (!(qd = opendir(qd_path))) { + ck_free(qd_path); continue; + } /* Retrieve the ID of the last seen test case. 
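
The entire cross-fuzzer protocol is one u32 per peer, kept in .synced/<other_fuzzer>: the lowest case ID not yet imported. A condensed sketch of the bookkeeping around that file, using the code's own variable names with error handling elided:

    u32 min_accept = 0, next_min_accept;

    /* Pick up where the last sync pass left off; a fresh file reads as 0. */
    if (read(id_fd, &min_accept, sizeof(u32)) > 0) lseek(id_fd, 0, SEEK_SET);
    next_min_accept = min_accept;

    /* For every queue file named CASE_PREFIX "%06u" ("id:000042" by default,
       "id_000042" with SIMPLE_FILES) at or above min_accept: run it and
       advance next_min_accept past the highest ID seen. */

    /* Persist the new high-water mark for the next pass. */
    ck_write(id_fd, &next_min_accept, sizeof(u32), qd_synced_path);
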
*/ @@ -502,35 +524,34 @@ void sync_fuzzers(char** argv) { if (id_fd < 0) PFATAL("Unable to create '%s'", qd_synced_path); - if (read(id_fd, &min_accept, sizeof(u32)) > 0) - lseek(id_fd, 0, SEEK_SET); + if (read(id_fd, &min_accept, sizeof(u32)) > 0) lseek(id_fd, 0, SEEK_SET); next_min_accept = min_accept; - /* Show stats */ + /* Show stats */ sprintf(stage_tmp, "sync %u", ++sync_cnt); stage_name = stage_tmp; - stage_cur = 0; - stage_max = 0; + stage_cur = 0; + stage_max = 0; - /* For every file queued by this fuzzer, parse ID and see if we have looked at - it before; exec a test case if not. */ + /* For every file queued by this fuzzer, parse ID and see if we have looked + at it before; exec a test case if not. */ while ((qd_ent = readdir(qd))) { - u8* path; - s32 fd; + u8* path; + s32 fd; struct stat st; if (qd_ent->d_name[0] == '.' || - sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 || - syncing_case < min_accept) continue; + sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 || + syncing_case < min_accept) + continue; /* OK, sounds like a new one. Let's give it a try. */ - if (syncing_case >= next_min_accept) - next_min_accept = syncing_case + 1; + if (syncing_case >= next_min_accept) next_min_accept = syncing_case + 1; path = alloc_printf("%s/%s", qd_path, qd_ent->d_name); @@ -539,8 +560,10 @@ void sync_fuzzers(char** argv) { fd = open(path, O_RDONLY); if (fd < 0) { - ck_free(path); - continue; + + ck_free(path); + continue; + } if (fstat(fd, &st)) PFATAL("fstat() failed"); @@ -584,14 +607,13 @@ void sync_fuzzers(char** argv) { closedir(qd); ck_free(qd_path); ck_free(qd_synced_path); - - } + + } closedir(sd); } - /* Trim all new test cases to save cycles when doing deterministic checks. The trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of file size, to keep the stage short and sweet. */ @@ -599,8 +621,7 @@ void sync_fuzzers(char** argv) { u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) { #ifdef USE_PYTHON - if (py_functions[PY_FUNC_TRIM]) - return trim_case_python(argv, q, in_buf); + if (py_functions[PY_FUNC_TRIM]) return trim_case_python(argv, q, in_buf); #endif static u8 tmp[64]; @@ -664,9 +685,9 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) { u32 move_tail = q->len - remove_pos - trim_avail; q->len -= trim_avail; - len_p2 = next_p2(q->len); + len_p2 = next_p2(q->len); - memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail, + memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail, move_tail); /* Let's save a clean trace, which will be needed by @@ -679,7 +700,9 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) { } - } else remove_pos += remove_len; + } else + + remove_pos += remove_len; /* Since this can be slow, update the screen every now and then. */ @@ -699,7 +722,7 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) { s32 fd; - unlink(q->fname); /* ignore errors */ + unlink(q->fname); /* ignore errors */ fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600); @@ -720,7 +743,6 @@ abort_trimming: } - /* Write a modified test case, run program, process results. Handle error conditions, returning 1 if it's time to bail out. This is a helper function for fuzz_one(). 
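
The power-of-two ladder that trim_case() above walks, between 1/16 and 1/1024 of the rounded-up file size, comes out as follows with mainline AFL's config.h constants (TRIM_START_STEPS = 16, TRIM_END_STEPS = 1024, TRIM_MIN_BYTES = 4, assuming this tree keeps the stock values):

    /* For q->len = 10000: len_p2 = next_p2(10000) = 16384, so chunks start
       at 16384 / 16 = 1024 bytes and halve down to 16384 / 1024 = 16 bytes. */

    u32 len_p2     = next_p2(q->len);
    u32 remove_len = MAX(len_p2 / TRIM_START_STEPS, TRIM_MIN_BYTES);

    while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, TRIM_MIN_BYTES)) {

      /* ... try removing remove_len-byte chunks at each offset ... */
      remove_len >>= 1;

    }
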
-
/* Trim all new test cases to save cycles when doing deterministic checks. The
   trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of
   file size, to keep the stage short and sweet. */

u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {

#ifdef USE_PYTHON
-  if (py_functions[PY_FUNC_TRIM])
-    return trim_case_python(argv, q, in_buf);
+  if (py_functions[PY_FUNC_TRIM]) return trim_case_python(argv, q, in_buf);
#endif

  static u8 tmp[64];

@@ -664,9 +685,9 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
        u32 move_tail = q->len - remove_pos - trim_avail;

        q->len -= trim_avail;
-        len_p2 = next_p2(q->len);
+        len_p2 = next_p2(q->len);

-        memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail,
+        memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail,
                move_tail);

        /* Let's save a clean trace, which will be needed by
@@ -679,7 +700,9 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {

      }

-    } else remove_pos += remove_len;
+    } else
+
+      remove_pos += remove_len;

    /* Since this can be slow, update the screen every now and then. */

@@ -699,7 +722,7 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
    s32 fd;

-    unlink(q->fname); /* ignore errors */
+    unlink(q->fname);                                     /* ignore errors */

    fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);

@@ -720,7 +743,6 @@ abort_trimming:

}
-
/* Write a modified test case, run program, process results. Handle
   error conditions, returning 1 if it's time to bail out. This is
   a helper function for fuzz_one(). */

@@ -745,20 +767,24 @@ u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) {
  if (fault == FAULT_TMOUT) {

    if (subseq_tmouts++ > TMOUT_LIMIT) {
+
      ++cur_skipped_paths;
      return 1;
+
    }

-  } else subseq_tmouts = 0;
+  } else
+
+    subseq_tmouts = 0;

  /* Users can hit us with SIGUSR1 to request the current input
     to be abandoned. */

  if (skip_requested) {

-    skip_requested = 0;
-    ++cur_skipped_paths;
-    return 1;
+    skip_requested = 0;
+    ++cur_skipped_paths;
+    return 1;

  }

diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
index 5dbd59ac..3614599d 100644
--- a/src/afl-fuzz-stats.c
+++ b/src/afl-fuzz-stats.c
@@ -26,11 +26,11 @@
void write_stats_file(double bitmap_cvg, double stability, double eps) {

-  static double last_bcvg, last_stab, last_eps;
+  static double        last_bcvg, last_stab, last_eps;
  static struct rusage usage;

-  u8* fn = alloc_printf("%s/fuzzer_stats", out_dir);
-  s32 fd;
+  u8*   fn = alloc_printf("%s/fuzzer_stats", out_dir);
+  s32   fd;
  FILE* f;

  fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);

@@ -47,66 +47,74 @@ void write_stats_file(double bitmap_cvg, double stability, double eps) {
     where exec/sec stats and such are not readily available. */

  if (!bitmap_cvg && !stability && !eps) {
+
    bitmap_cvg = last_bcvg;
-    stability = last_stab;
-    eps = last_eps;
+    stability  = last_stab;
+    eps        = last_eps;
+
  } else {
+
    last_bcvg = bitmap_cvg;
    last_stab = stability;
-    last_eps = eps;
+    last_eps  = eps;
+
  }

-  fprintf(f, "start_time        : %llu\n"
-             "last_update       : %llu\n"
-             "fuzzer_pid        : %d\n"
-             "cycles_done       : %llu\n"
-             "execs_done        : %llu\n"
-             "execs_per_sec     : %0.02f\n"
-             "paths_total       : %u\n"
-             "paths_favored     : %u\n"
-             "paths_found       : %u\n"
-             "paths_imported    : %u\n"
-             "max_depth         : %u\n"
-             "cur_path          : %u\n" /* Must match find_start_position() */
-             "pending_favs      : %u\n"
-             "pending_total     : %u\n"
-             "variable_paths    : %u\n"
-             "stability         : %0.02f%%\n"
-             "bitmap_cvg        : %0.02f%%\n"
-             "unique_crashes    : %llu\n"
-             "unique_hangs      : %llu\n"
-             "last_path         : %llu\n"
-             "last_crash        : %llu\n"
-             "last_hang         : %llu\n"
-             "execs_since_crash : %llu\n"
-             "exec_timeout      : %u\n"
-             "slowest_exec_ms   : %llu\n"
-             "peak_rss_mb       : %lu\n"
-             "afl_banner        : %s\n"
-             "afl_version       : " VERSION "\n"
-             "target_mode       : %s%s%s%s%s%s%s%s\n"
-             "command_line      : %s\n",
-             start_time / 1000, get_cur_time() / 1000, getpid(),
-             queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps,
-             queued_paths, queued_favored, queued_discovered, queued_imported,
-             max_depth, current_entry, pending_favored, pending_not_fuzzed,
-             queued_variable, stability, bitmap_cvg, unique_crashes,
-             unique_hangs, last_path_time / 1000, last_crash_time / 1000,
-             last_hang_time / 1000, total_execs - last_crash_execs,
-             exec_tmout, slowest_exec_ms, (unsigned long int)usage.ru_maxrss, use_banner,
-             unicorn_mode ? "unicorn" : "", qemu_mode ? "qemu " : "", dumb_mode ? " dumb " : "",
-             no_forkserver ? "no_forksrv " : "", crash_mode ? "crash " : "",
-             persistent_mode ? "persistent " : "", deferred_mode ? "deferred " : "",
-             (unicorn_mode || qemu_mode || dumb_mode || no_forkserver || crash_mode ||
-              persistent_mode || deferred_mode) ? "" : "default",
-             orig_cmdline);
-             /* ignore errors */
+  fprintf(f,
+          "start_time        : %llu\n"
+          "last_update       : %llu\n"
+          "fuzzer_pid        : %d\n"
+          "cycles_done       : %llu\n"
+          "execs_done        : %llu\n"
+          "execs_per_sec     : %0.02f\n"
+          "paths_total       : %u\n"
+          "paths_favored     : %u\n"
+          "paths_found       : %u\n"
+          "paths_imported    : %u\n"
+          "max_depth         : %u\n"
+          "cur_path          : %u\n" /* Must match find_start_position() */
+          "pending_favs      : %u\n"
+          "pending_total     : %u\n"
+          "variable_paths    : %u\n"
+          "stability         : %0.02f%%\n"
+          "bitmap_cvg        : %0.02f%%\n"
+          "unique_crashes    : %llu\n"
+          "unique_hangs      : %llu\n"
+          "last_path         : %llu\n"
+          "last_crash        : %llu\n"
+          "last_hang         : %llu\n"
+          "execs_since_crash : %llu\n"
+          "exec_timeout      : %u\n"
+          "slowest_exec_ms   : %llu\n"
+          "peak_rss_mb       : %lu\n"
+          "afl_banner        : %s\n"
+          "afl_version       : " VERSION
+          "\n"
+          "target_mode       : %s%s%s%s%s%s%s%s\n"
+          "command_line      : %s\n",
+          start_time / 1000, get_cur_time() / 1000, getpid(),
+          queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps, queued_paths,
+          queued_favored, queued_discovered, queued_imported, max_depth,
+          current_entry, pending_favored, pending_not_fuzzed, queued_variable,
+          stability, bitmap_cvg, unique_crashes, unique_hangs,
+          last_path_time / 1000, last_crash_time / 1000, last_hang_time / 1000,
+          total_execs - last_crash_execs, exec_tmout, slowest_exec_ms,
+          (unsigned long int)usage.ru_maxrss, use_banner,
+          unicorn_mode ? "unicorn" : "", qemu_mode ? "qemu " : "",
+          dumb_mode ? " dumb " : "", no_forkserver ? "no_forksrv " : "",
+          crash_mode ? "crash " : "", persistent_mode ? "persistent " : "",
+          deferred_mode ? "deferred " : "",
+          (unicorn_mode || qemu_mode || dumb_mode || no_forkserver ||
+           crash_mode || persistent_mode || deferred_mode)
+              ? ""
+              : "default",
+          orig_cmdline);
+  /* ignore errors */

  fclose(f);

}
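fuzzer_stats is written as one "key : value" pair per line, which makes it straightforward to consume from monitoring scripts. A hedged sketch of a reader for that layout (the format string above is the authority; this helper is illustrative only):

#include <stdio.h>
#include <string.h>

/* Fetch one value from a fuzzer_stats-style file; returns 0 on a hit. */
static int read_stat(const char* path, const char* key, char* out, size_t n) {

  char  line[512];
  FILE* f = fopen(path, "r");
  if (!f) return -1;

  while (fgets(line, sizeof(line), f)) {

    char* colon = strchr(line, ':');
    if (!colon) continue;

    /* Split "key<padding>: value" and strip the key's right padding. */
    char* end = colon;
    *colon = 0;
    while (end > line && end[-1] == ' ')
      *--end = 0;

    if (strcmp(line, key)) continue;

    snprintf(out, n, "%s", colon + 1 + (colon[1] == ' '));
    out[strcspn(out, "\n")] = 0;
    fclose(f);
    return 0;

  }

  fclose(f);
  return -1;

}

Typical use would be something like read_stat("out/fuzzer_stats", "execs_per_sec", buf, sizeof(buf)).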
-
/* Update the plot file if there is a reason to. */

void maybe_update_plot_file(double bitmap_cvg, double eps) {

  static u32 prev_qp, prev_pf, prev_pnf, prev_ce, prev_md;
  static u64 prev_qc, prev_uc, prev_uh;

-  if (prev_qp == queued_paths && prev_pf == pending_favored &&
+  if (prev_qp == queued_paths && prev_pf == pending_favored &&
      prev_pnf == pending_not_fuzzed && prev_ce == current_entry &&
      prev_qc == queue_cycle && prev_uc == unique_crashes &&
-      prev_uh == unique_hangs && prev_md == max_depth) return;
+      prev_uh == unique_hangs && prev_md == max_depth)
+    return;

-  prev_qp = queued_paths;
-  prev_pf = pending_favored;
+  prev_qp  = queued_paths;
+  prev_pf  = pending_favored;
  prev_pnf = pending_not_fuzzed;
-  prev_ce = current_entry;
-  prev_qc = queue_cycle;
-  prev_uc = unique_crashes;
-  prev_uh = unique_hangs;
-  prev_md = max_depth;
+  prev_ce  = current_entry;
+  prev_qc  = queue_cycle;
+  prev_uc  = unique_crashes;
+  prev_uh  = unique_hangs;
+  prev_md  = max_depth;

  /* Fields in the file:

@@ -134,17 +143,16 @@ void maybe_update_plot_file(double bitmap_cvg, double eps) {
     favored_not_fuzzed, unique_crashes, unique_hangs, max_depth,
     execs_per_sec */

-  fprintf(plot_file,
+  fprintf(plot_file,
          "%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f\n",
          get_cur_time() / 1000, queue_cycle - 1, current_entry, queued_paths,
          pending_not_fuzzed, pending_favored, bitmap_cvg, unique_crashes,
-          unique_hangs, max_depth, eps); /* ignore errors */
+          unique_hangs, max_depth, eps);                  /* ignore errors */

  fflush(plot_file);

}

-
/* Check terminal dimensions after resize. */

static void check_term_size(void) {

@@ -160,15 +168,14 @@ static void check_term_size(void) {

}

-
/* A spiffy retro stats screen! This is called every stats_update_freq
   execve() calls, plus in several other circumstances. */

void show_stats(void) {

-  static u64 last_stats_ms, last_plot_ms, last_ms, last_execs;
+  static u64    last_stats_ms, last_plot_ms, last_ms, last_execs;
  static double avg_exec;
-  double t_byte_ratio, stab_ratio;
+  double        t_byte_ratio, stab_ratio;

  u64 cur_ms;
  u32 t_bytes, t_bits;

@@ -194,14 +201,13 @@ void show_stats(void) {

  } else {

-    double cur_avg = ((double)(total_execs - last_execs)) * 1000 /
-                     (cur_ms - last_ms);
+    double cur_avg =
+        ((double)(total_execs - last_execs)) * 1000 / (cur_ms - last_ms);

    /* If there is a dramatic (5x+) jump in speed, reset the indicator
       more quickly. */

-    if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec)
-      avg_exec = cur_avg;
+    if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec) avg_exec = cur_avg;

    avg_exec = avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) +
               cur_avg * (1.0 / AVG_SMOOTHING);
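The exec-speed indicator above is a classic exponential moving average: each refresh keeps (N-1)/N of the old estimate and blends in 1/N of the newest sample, with a hard reset on a 5x speed jump so the display re-converges quickly. Distilled into one helper (AVG_SMOOTHING is a stand-in for the config.h constant):

/* avg <- avg * (1 - 1/N) + cur * (1/N), with a 5x jump reset. */
#define AVG_SMOOTHING 16.0                    /* stand-in; see config.h */

static double update_avg_exec(double avg, double cur) {

  if (cur * 5 < avg || cur / 5 > avg) avg = cur;
  return avg * (1.0 - 1.0 / AVG_SMOOTHING) + cur * (1.0 / AVG_SMOOTHING);

}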
@@ -249,7 +255,8 @@ void show_stats(void) {
  /* Honor AFL_EXIT_WHEN_DONE and AFL_BENCH_UNTIL_CRASH. */

  if (!dumb_mode && cycles_wo_finds > 100 && !pending_not_fuzzed &&
-      getenv("AFL_EXIT_WHEN_DONE")) stop_soon = 2;
+      getenv("AFL_EXIT_WHEN_DONE"))
+    stop_soon = 2;

  if (total_crashes && getenv("AFL_BENCH_UNTIL_CRASH")) stop_soon = 2;

@@ -276,7 +283,8 @@ void show_stats(void) {
  if (term_too_small) {

-    SAYF(cBRI "Your terminal is too small to display the UI.\n"
+    SAYF(cBRI
+         "Your terminal is too small to display the UI.\n"
         "Please resize terminal window to at least 79x24.\n" cRST);

    return;

@@ -285,38 +293,41 @@ void show_stats(void) {
  /* Let's start by drawing a centered banner. */

-  banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner) + strlen(power_name) + 3 + 5;
+  banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner) +
+               strlen(power_name) + 3 + 5;
  banner_pad = (79 - banner_len) / 2;
  memset(tmp, ' ', banner_pad);

#ifdef HAVE_AFFINITY
-  sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN
-          " (%s) " cPIN "[%s]" cBLU " {%d}", crash_mode ? cPIN "peruvian were-rabbit" :
-          cYEL "american fuzzy lop", use_banner, power_name, cpu_aff);
+  sprintf(tmp + banner_pad,
+          "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]" cBLU " {%d}",
+          crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
+          use_banner, power_name, cpu_aff);
#else
-  sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN
-          " (%s) " cPIN "[%s]", crash_mode ? cPIN "peruvian were-rabbit" :
-          cYEL "american fuzzy lop", use_banner, power_name);
+  sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]",
+          crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
+          use_banner, power_name);
#endif /* HAVE_AFFINITY */

  SAYF("\n%s\n", tmp);

  /* "Handy" shortcuts for drawing boxes... */

-#define bSTG    bSTART cGRA
-#define bH2     bH bH
-#define bH5     bH2 bH2 bH
-#define bH10    bH5 bH5
-#define bH20    bH10 bH10
-#define bH30    bH20 bH10
-#define SP5     "     "
-#define SP10    SP5 SP5
-#define SP20    SP10 SP10
+#define bSTG bSTART cGRA
+#define bH2 bH bH
+#define bH5 bH2 bH2 bH
+#define bH10 bH5 bH5
+#define bH20 bH10 bH10
+#define bH30 bH20 bH10
+#define SP5 "     "
+#define SP10 SP5 SP5
+#define SP20 SP10 SP10

  /* Lord, forgive me this. */

-  SAYF(SET_G1 bSTG bLT bH bSTOP cCYA " process timing " bSTG bH30 bH5 bH bHB
-       bH bSTOP cCYA " overall results " bSTG bH2 bH2 bRT "\n");
+  SAYF(SET_G1 bSTG bLT bH bSTOP cCYA
+       " process timing " bSTG bH30 bH5 bH bHB bH bSTOP cCYA
+       " overall results " bSTG bH2 bH2 bRT "\n");

  if (dumb_mode) {

@@ -327,29 +338,34 @@ void show_stats(void) {
    u64 min_wo_finds = (cur_ms - last_path_time) / 1000 / 60;

    /* First queue cycle: don't stop now! */
-    if (queue_cycle == 1 || min_wo_finds < 15) strcpy(tmp, cMGN); else
+    if (queue_cycle == 1 || min_wo_finds < 15)
+      strcpy(tmp, cMGN);
+    else

-    /* Subsequent cycles, but we're still making finds. */
-    if (cycles_wo_finds < 25 || min_wo_finds < 30) strcpy(tmp, cYEL); else
+      /* Subsequent cycles, but we're still making finds. */
+      if (cycles_wo_finds < 25 || min_wo_finds < 30)
+      strcpy(tmp, cYEL);
+    else

-    /* No finds for a long time and no test cases to try. */
-    if (cycles_wo_finds > 100 && !pending_not_fuzzed && min_wo_finds > 120)
+      /* No finds for a long time and no test cases to try. */
+      if (cycles_wo_finds > 100 && !pending_not_fuzzed && min_wo_finds > 120)
      strcpy(tmp, cLGN);

    /* Default: cautiously OK to stop? */
-    else strcpy(tmp, cLBL);
+    else
+      strcpy(tmp, cLBL);

  }

  SAYF(bV bSTOP "        run time : " cRST "%-33s " bSTG bV bSTOP
-       "  cycles done : %s%-5s  " bSTG bV "\n",
+       "  cycles done : %s%-5s " bSTG bV "\n",
       DTD(cur_ms, start_time), tmp, DI(queue_cycle - 1));

  /* We want to warn people about not seeing new paths after a full cycle,
     except when resuming fuzzing or running in non-instrumented mode. */

  if (!dumb_mode && (last_path_time || resuming_fuzz || queue_cycle == 1 ||
-                     in_bitmap || crash_mode)) {
+                    in_bitmap || crash_mode)) {

    SAYF(bV bSTOP "   last new path : " cRST "%-33s ",
         DTD(cur_ms, last_path_time));

@@ -359,12 +375,12 @@ void show_stats(void) {
    if (dumb_mode)

      SAYF(bV bSTOP "   last new path : " cPIN "n/a" cRST
-           " (non-instrumented mode)        ");
+           " (non-instrumented mode)       ");

-    else
+    else

      SAYF(bV bSTOP "   last new path : " cRST "none yet " cLRD
-           "(odd, check syntax!)      ");
+           "(odd, check syntax!)     ");

  }

@@ -378,18 +394,18 @@ void show_stats(void) {
          (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");

  SAYF(bV bSTOP " last uniq crash : " cRST "%-33s " bSTG bV bSTOP
-       "  uniq crashes : %s%-6s" bSTG bV "\n",
-       DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST,
-       tmp);
+       "  uniq crashes : %s%-6s" bSTG bV "\n",
+       DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST, tmp);

  sprintf(tmp, "%s%s", DI(unique_hangs),
-          (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
+          (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");

  SAYF(bV bSTOP "  last uniq hang : " cRST "%-33s " bSTG bV bSTOP
-       "   uniq hangs : " cRST "%-6s" bSTG bV "\n",
+       "   uniq hangs : " cRST "%-6s" bSTG bV "\n",
       DTD(cur_ms, last_hang_time), tmp);

-  SAYF(bVR bH bSTOP cCYA " cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA
+  SAYF(bVR bH bSTOP cCYA
+       " cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA
       " map coverage " bSTG bH bHT bH20 bH2 bVL "\n");

  /* This gets funny because we want to print several variable-length variables
@@ -402,23 +418,24 @@ void show_stats(void) {

  SAYF(bV bSTOP "  now processing : " cRST "%-16s " bSTG bV bSTOP, tmp);

-  sprintf(tmp, "%0.02f%% / %0.02f%%", ((double)queue_cur->bitmap_size) *
-          100 / MAP_SIZE, t_byte_ratio);
+  sprintf(tmp, "%0.02f%% / %0.02f%%",
+          ((double)queue_cur->bitmap_size) * 100 / MAP_SIZE, t_byte_ratio);

-  SAYF("    map density : %s%-21s" bSTG bV "\n", t_byte_ratio > 70 ? cLRD :
-       ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST), tmp);
+  SAYF("    map density : %s%-21s" bSTG bV "\n",
+       t_byte_ratio > 70 ? cLRD : ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST),
+       tmp);

  sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths),
          ((double)cur_skipped_paths * 100) / queued_paths);

  SAYF(bV bSTOP " paths timed out : " cRST "%-16s " bSTG bV, tmp);

-  sprintf(tmp, "%0.02f bits/tuple",
-          t_bytes ? (((double)t_bits) / t_bytes) : 0);
+  sprintf(tmp, "%0.02f bits/tuple", t_bytes ? (((double)t_bits) / t_bytes) : 0);

  SAYF(bSTOP " count coverage : " cRST "%-21s" bSTG bV "\n", tmp);

-  SAYF(bVR bH bSTOP cCYA " stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA
+  SAYF(bVR bH bSTOP cCYA
+       " stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA
       " findings in depth " bSTG bH10 bH5 bH2 bH2 bVL "\n");

  sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored),
@@ -427,7 +444,8 @@ void show_stats(void) {
  /* Yeah... it's still going on... halp? */

  SAYF(bV bSTOP "  now trying : " cRST "%-20s " bSTG bV bSTOP
-       " favored paths : " cRST "%-22s" bSTG bV "\n", stage_name, tmp);
+       " favored paths : " cRST "%-22s" bSTG bV "\n",
+       stage_name, tmp);

  if (!stage_max) {

@@ -453,14 +471,14 @@ void show_stats(void) {
  if (crash_mode) {

    SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP
-         "   new crashes : %s%-22s" bSTG bV "\n", DI(total_execs),
-         unique_crashes ? cLRD : cRST, tmp);
+         "   new crashes : %s%-22s" bSTG bV "\n",
+         DI(total_execs), unique_crashes ? cLRD : cRST, tmp);

  } else {

    SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP
-         " total crashes : %s%-22s" bSTG bV "\n", DI(total_execs),
-         unique_crashes ? cLRD : cRST, tmp);
+         " total crashes : %s%-22s" bSTG bV "\n",
+         DI(total_execs), unique_crashes ? cLRD : cRST, tmp);

  }

@@ -468,8 +486,8 @@ void show_stats(void) {
  if (avg_exec < 100) {

-    sprintf(tmp, "%s/sec (%s)", DF(avg_exec), avg_exec < 20 ?
-            "zzzz..." : "slow!");
+    sprintf(tmp, "%s/sec (%s)", DF(avg_exec),
+            avg_exec < 20 ? "zzzz..." : "slow!");

    SAYF(bV bSTOP "  exec speed : " cLRD "%-20s ", tmp);

@@ -483,12 +501,13 @@ void show_stats(void) {
  sprintf(tmp, "%s (%s%s unique)", DI(total_tmouts), DI(unique_tmouts),
          (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");

-  SAYF (bSTG bV bSTOP "  total tmouts : " cRST "%-22s" bSTG bV "\n", tmp);
+  SAYF(bSTG bV bSTOP "  total tmouts : " cRST "%-22s" bSTG bV "\n", tmp);

  /* Aaaalmost there... hold on! */

-  SAYF(bVR bH cCYA bSTOP " fuzzing strategy yields " bSTG bH10 bHT bH10
-       bH5 bHB bH bSTOP cCYA " path geometry " bSTG bH5 bH2 bVL "\n");
+  SAYF(bVR bH cCYA bSTOP
+       " fuzzing strategy yields " bSTG bH10 bHT bH10 bH5 bHB bH bSTOP cCYA
+       " path geometry " bSTG bH5 bH2 bVL "\n");

  if (skip_deterministic) {

@@ -496,66 +515,77 @@ void show_stats(void) {
  } else {

-    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-            DI(stage_finds[STAGE_FLIP1]), DI(stage_cycles[STAGE_FLIP1]),
-            DI(stage_finds[STAGE_FLIP2]), DI(stage_cycles[STAGE_FLIP2]),
-            DI(stage_finds[STAGE_FLIP4]), DI(stage_cycles[STAGE_FLIP4]));
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_FLIP1]),
+            DI(stage_cycles[STAGE_FLIP1]), DI(stage_finds[STAGE_FLIP2]),
+            DI(stage_cycles[STAGE_FLIP2]), DI(stage_finds[STAGE_FLIP4]),
+            DI(stage_cycles[STAGE_FLIP4]));

  }

-  SAYF(bV bSTOP "   bit flips : " cRST "%-36s " bSTG bV bSTOP "    levels : "
-       cRST "%-10s" bSTG bV "\n", tmp, DI(max_depth));
+  SAYF(bV bSTOP "   bit flips : " cRST "%-36s " bSTG bV bSTOP
+       "    levels : " cRST "%-10s" bSTG bV "\n",
+       tmp, DI(max_depth));

  if (!skip_deterministic)
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-            DI(stage_finds[STAGE_FLIP8]), DI(stage_cycles[STAGE_FLIP8]),
-            DI(stage_finds[STAGE_FLIP16]), DI(stage_cycles[STAGE_FLIP16]),
-            DI(stage_finds[STAGE_FLIP32]), DI(stage_cycles[STAGE_FLIP32]));
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_FLIP8]),
+            DI(stage_cycles[STAGE_FLIP8]), DI(stage_finds[STAGE_FLIP16]),
+            DI(stage_cycles[STAGE_FLIP16]), DI(stage_finds[STAGE_FLIP32]),
+            DI(stage_cycles[STAGE_FLIP32]));

-  SAYF(bV bSTOP "  byte flips : " cRST "%-36s " bSTG bV bSTOP "   pending : "
-       cRST "%-10s" bSTG bV "\n", tmp, DI(pending_not_fuzzed));
+  SAYF(bV bSTOP "  byte flips : " cRST "%-36s " bSTG bV bSTOP
+       "   pending : " cRST "%-10s" bSTG bV "\n",
+       tmp, DI(pending_not_fuzzed));

  if (!skip_deterministic)
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-            DI(stage_finds[STAGE_ARITH8]), DI(stage_cycles[STAGE_ARITH8]),
-            DI(stage_finds[STAGE_ARITH16]), DI(stage_cycles[STAGE_ARITH16]),
-            DI(stage_finds[STAGE_ARITH32]), DI(stage_cycles[STAGE_ARITH32]));
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_ARITH8]),
+            DI(stage_cycles[STAGE_ARITH8]), DI(stage_finds[STAGE_ARITH16]),
+            DI(stage_cycles[STAGE_ARITH16]), DI(stage_finds[STAGE_ARITH32]),
+            DI(stage_cycles[STAGE_ARITH32]));

-  SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP "  pend fav : "
-       cRST "%-10s" bSTG bV "\n", tmp, DI(pending_favored));
+  SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP
+       "  pend fav : " cRST "%-10s" bSTG bV "\n",
+       tmp, DI(pending_favored));

  if (!skip_deterministic)
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-            DI(stage_finds[STAGE_INTEREST8]), DI(stage_cycles[STAGE_INTEREST8]),
-            DI(stage_finds[STAGE_INTEREST16]), DI(stage_cycles[STAGE_INTEREST16]),
-            DI(stage_finds[STAGE_INTEREST32]), DI(stage_cycles[STAGE_INTEREST32]));
+    sprintf(
+        tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_INTEREST8]),
+        DI(stage_cycles[STAGE_INTEREST8]), DI(stage_finds[STAGE_INTEREST16]),
+        DI(stage_cycles[STAGE_INTEREST16]), DI(stage_finds[STAGE_INTEREST32]),
+        DI(stage_cycles[STAGE_INTEREST32]));

-  SAYF(bV bSTOP "  known ints : " cRST "%-36s " bSTG bV bSTOP " own finds : "
-       cRST "%-10s" bSTG bV "\n", tmp, DI(queued_discovered));
+  SAYF(bV bSTOP "  known ints : " cRST "%-36s " bSTG bV bSTOP
+       " own finds : " cRST "%-10s" bSTG bV "\n",
+       tmp, DI(queued_discovered));

  if (!skip_deterministic)
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-            DI(stage_finds[STAGE_EXTRAS_UO]), DI(stage_cycles[STAGE_EXTRAS_UO]),
-            DI(stage_finds[STAGE_EXTRAS_UI]), DI(stage_cycles[STAGE_EXTRAS_UI]),
-            DI(stage_finds[STAGE_EXTRAS_AO]), DI(stage_cycles[STAGE_EXTRAS_AO]));
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_EXTRAS_UO]),
+            DI(stage_cycles[STAGE_EXTRAS_UO]), DI(stage_finds[STAGE_EXTRAS_UI]),
+            DI(stage_cycles[STAGE_EXTRAS_UI]), DI(stage_finds[STAGE_EXTRAS_AO]),
+            DI(stage_cycles[STAGE_EXTRAS_AO]));

  SAYF(bV bSTOP "  dictionary : " cRST "%-36s " bSTG bV bSTOP
-       "  imported : " cRST "%-10s" bSTG bV "\n", tmp,
-       sync_id ? DI(queued_imported) : (u8*)"n/a");
+       "  imported : " cRST "%-10s" bSTG bV "\n",
+       tmp, sync_id ? DI(queued_imported) : (u8*)"n/a");

-  sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-          DI(stage_finds[STAGE_HAVOC]), DI(stage_cycles[STAGE_HAVOC]),
-          DI(stage_finds[STAGE_SPLICE]), DI(stage_cycles[STAGE_SPLICE]),
-          DI(stage_finds[STAGE_PYTHON]), DI(stage_cycles[STAGE_PYTHON]));
+  sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_HAVOC]),
+          DI(stage_cycles[STAGE_HAVOC]), DI(stage_finds[STAGE_SPLICE]),
+          DI(stage_cycles[STAGE_SPLICE]), DI(stage_finds[STAGE_PYTHON]),
+          DI(stage_cycles[STAGE_PYTHON]));

  SAYF(bV bSTOP "       havoc : " cRST "%-36s " bSTG bV bSTOP, tmp);

-  if (t_bytes) sprintf(tmp, "%0.02f%%", stab_ratio);
-  else strcpy(tmp, "n/a");
-
-  SAYF(" stability : %s%-10s" bSTG bV "\n", (stab_ratio < 85 && var_byte_count > 40)
-       ? cLRD : ((queued_variable && (!persistent_mode || var_byte_count > 20))
-       ? cMGN : cRST), tmp);
+  if (t_bytes)
+    sprintf(tmp, "%0.02f%%", stab_ratio);
+  else
+    strcpy(tmp, "n/a");
+
+  SAYF(" stability : %s%-10s" bSTG bV "\n",
+       (stab_ratio < 85 && var_byte_count > 40)
+           ? cLRD
+           : ((queued_variable && (!persistent_mode || var_byte_count > 20))
+                  ? cMGN
+                  : cRST),
+       tmp);

  if (!bytes_trim_out) {

@@ -582,18 +612,26 @@ void show_stats(void) {
    sprintf(tmp2, "%0.02f%%",
            ((double)(blocks_eff_total - blocks_eff_select)) * 100 /
-            blocks_eff_total);
+                blocks_eff_total);

    strcat(tmp, tmp2);

  }

+
  if (custom_mutator) {
-    sprintf(tmp, "%s/%s", DI(stage_finds[STAGE_CUSTOM_MUTATOR]), DI(stage_cycles[STAGE_CUSTOM_MUTATOR]));
-    SAYF(bV bSTOP " custom mut. : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n"
-         bLB bH30 bH20 bH2 bH bRB bSTOP cRST RESET_G1, tmp);
+
+    sprintf(tmp, "%s/%s", DI(stage_finds[STAGE_CUSTOM_MUTATOR]),
+            DI(stage_cycles[STAGE_CUSTOM_MUTATOR]));
+    SAYF(bV bSTOP " custom mut. : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB
+         "\n" bLB bH30 bH20 bH2 bH bRB bSTOP cRST RESET_G1,
+         tmp);
+
  } else {
-    SAYF(bV bSTOP "        trim : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n"
-         bLB bH30 bH20 bH2 bRB bSTOP cRST RESET_G1, tmp);
+
+    SAYF(bV bSTOP "        trim : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB
+         "\n" bLB bH30 bH20 bH2 bRB bSTOP cRST RESET_G1,
+         tmp);
+
  }

  /* Provide some CPU utilization stats. */

@@ -601,7 +639,7 @@ void show_stats(void) {
  if (cpu_core_count) {

    double cur_runnable = get_runnable_processes();
-    u32 cur_utilization = cur_runnable * 100 / cpu_core_count;
+    u32    cur_utilization = cur_runnable * 100 / cpu_core_count;

    u8* cpu_color = cCYA;

@@ -618,25 +656,26 @@ void show_stats(void) {
    if (cpu_aff >= 0) {

-      SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST,
-           MIN(cpu_aff, 999), cpu_color,
-           MIN(cur_utilization, 999));
+      SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, MIN(cpu_aff, 999),
+           cpu_color, MIN(cur_utilization, 999));

    } else {

-      SAYF(SP10 cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST,
-           cpu_color, MIN(cur_utilization, 999));
-
-    }
+      SAYF(SP10 cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST, cpu_color,
+           MIN(cur_utilization, 999));
+
+    }

#else

-    SAYF(SP10 cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST,
-         cpu_color, MIN(cur_utilization, 999));
+    SAYF(SP10 cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST, cpu_color,
+         MIN(cur_utilization, 999));

#endif /* ^HAVE_AFFINITY */

-  } else SAYF("\r");
+  } else
+
+    SAYF("\r");

  /* Hallelujah! */

@@ -644,7 +683,6 @@ void show_stats(void) {

}

-
/* Display quick statistics at the end of processing the input directory,
   plus a bunch of warnings. Some calibration stuff also ended up here,
   along with several hardcoded constants. Maybe clean up eventually. */

void show_init_stats(void) {

  struct queue_entry* q = queue;
-  u32 min_bits = 0, max_bits = 0;
-  u64 min_us = 0, max_us = 0;
-  u64 avg_us = 0;
-  u32 max_len = 0;
+  u32                 min_bits = 0, max_bits = 0;
+  u64                 min_us = 0, max_us = 0;
+  u64                 avg_us = 0;
+  u32                 max_len = 0;

  if (total_cal_cycles) avg_us = total_cal_us / total_cal_cycles;

@@ -681,9 +719,12 @@ void show_init_stats(void) {
  /* Let's keep things moving with slow binaries. */

-  if (avg_us > 50000) havoc_div = 10;     /* 0-19 execs/sec   */
-  else if (avg_us > 20000) havoc_div = 5; /* 20-49 execs/sec  */
-  else if (avg_us > 10000) havoc_div = 2; /* 50-100 execs/sec */
+  if (avg_us > 50000)
+    havoc_div = 10;                                     /* 0-19 execs/sec   */
+  else if (avg_us > 20000)
+    havoc_div = 5;                                      /* 20-49 execs/sec  */
+  else if (avg_us > 10000)
+    havoc_div = 2;                                      /* 50-100 execs/sec */

  if (!resuming_fuzz) {

@@ -698,7 +739,9 @@ void show_init_stats(void) {
      WARNF(cLRD "Some test cases look useless. Consider using a smaller set.");

    if (queued_paths > 100)
-      WARNF(cLRD "You probably have far too many input files! Consider trimming down.");
+      WARNF(cLRD
+            "You probably have far too many input files! Consider trimming "
+            "down.");
    else if (queued_paths > 20)
      WARNF("You have lots of input files; try starting small.");

@@ -706,11 +749,13 @@ void show_init_stats(void) {

  OKF("Here are some useful stats:\n\n"

-      cGRA "    Test case count : " cRST "%u favored, %u variable, %u total\n"
-      cGRA "       Bitmap range : " cRST "%u to %u bits (average: %0.02f bits)\n"
-      cGRA "        Exec timing : " cRST "%s to %s us (average: %s us)\n",
-      queued_favored, queued_variable, queued_paths, min_bits, max_bits,
-      ((double)total_bitmap_size) / (total_bitmap_entries ? total_bitmap_entries : 1),
+      cGRA "    Test case count : " cRST
+      "%u favored, %u variable, %u total\n" cGRA "       Bitmap range : " cRST
+      "%u to %u bits (average: %0.02f bits)\n" cGRA
+      "        Exec timing : " cRST "%s to %s us (average: %s us)\n",
+      queued_favored, queued_variable, queued_paths, min_bits, max_bits,
+      ((double)total_bitmap_size) /
+          (total_bitmap_entries ? total_bitmap_entries : 1),
      DI(min_us), DI(max_us), DI(avg_us));

  if (!timeout_given) {

@@ -722,16 +767,19 @@ void show_init_stats(void) {
       random scheduler jitter is less likely to have any impact, and because
       our patience is wearing thin =) */

-    if (avg_us > 50000) exec_tmout = avg_us * 2 / 1000;
-    else if (avg_us > 10000) exec_tmout = avg_us * 3 / 1000;
-    else exec_tmout = avg_us * 5 / 1000;
+    if (avg_us > 50000)
+      exec_tmout = avg_us * 2 / 1000;
+    else if (avg_us > 10000)
+      exec_tmout = avg_us * 3 / 1000;
+    else
+      exec_tmout = avg_us * 5 / 1000;

    exec_tmout = MAX(exec_tmout, max_us / 1000);
    exec_tmout = (exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;

    if (exec_tmout > EXEC_TIMEOUT) exec_tmout = EXEC_TIMEOUT;

-    ACTF("No -t option specified, so I'll use exec timeout of %u ms.",
+    ACTF("No -t option specified, so I'll use exec timeout of %u ms.",
         exec_tmout);

    timeout_given = 1;
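The -t autoscaling above picks a multiplier by speed bracket, floors the result at the slowest calibrated run, and rounds up to the next EXEC_TM_ROUND boundary. A worked standalone example; EXEC_TM_ROUND and EXEC_TIMEOUT are stand-ins for the config.h values:

#include <stdio.h>

#define EXEC_TM_ROUND 20                      /* stand-ins; see config.h */
#define EXEC_TIMEOUT  1000
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void) {

  unsigned long long avg_us = 12000, max_us = 30000;  /* from calibration */
  unsigned           exec_tmout;

  if (avg_us > 50000)
    exec_tmout = avg_us * 2 / 1000;
  else if (avg_us > 10000)
    exec_tmout = avg_us * 3 / 1000;                   /* this bracket: 36 ms */
  else
    exec_tmout = avg_us * 5 / 1000;

  exec_tmout = MAX(exec_tmout, max_us / 1000);        /* floor at 30 -> 36 */
  exec_tmout = (exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;
  if (exec_tmout > EXEC_TIMEOUT) exec_tmout = EXEC_TIMEOUT;

  printf("auto-scaled exec timeout: %u ms\n", exec_tmout);    /* 40 */
  return 0;

}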
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index 2242dd6b..685840c6 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -27,53 +27,62 @@
static void usage(u8* argv0) {

#ifdef USE_PYTHON
-#define PHYTON_SUPPORT \
-  "Compiled with Python 2.7 module support, see docs/python_mutators.txt\n"
+# define PHYTON_SUPPORT\
+  "Compiled with Python 2.7 module support, see docs/python_mutators.txt\n"
#else
-#define PHYTON_SUPPORT ""
+# define PHYTON_SUPPORT ""
#endif

-  SAYF("\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n"
-
-       "Required parameters:\n"
-       "  -i dir        - input directory with test cases\n"
-       "  -o dir        - output directory for fuzzer findings\n\n"
-
-       "Execution control settings:\n"
-       "  -p schedule   - power schedules recompute a seed's performance score.\n"
-       "                  <explore (default), fast, coe, lin, quad, or exploit>\n"
-       "                  see docs/power_schedules.txt\n"
-       "  -f file       - location read by the fuzzed program (stdin)\n"
-       "  -t msec       - timeout for each run (auto-scaled, 50-%d ms)\n"
-       "  -m megs       - memory limit for child process (%d MB)\n"
-       "  -Q            - use binary-only instrumentation (QEMU mode)\n"
-       "  -U            - use Unicorn-based instrumentation (Unicorn mode)\n\n"
-       "  -L minutes    - use MOpt(imize) mode and set the limit time for entering the\n"
-       "                  pacemaker mode (minutes of no new paths, 0 = immediately).\n"
-       "                  a recommended value is 10-60. see docs/README.MOpt\n\n"
-
-       "Fuzzing behavior settings:\n"
-       "  -d            - quick & dirty mode (skips deterministic steps)\n"
-       "  -n            - fuzz without instrumentation (dumb mode)\n"
-       "  -x dir        - optional fuzzer dictionary (see README)\n\n"
-
-       "Testing settings:\n"
-       "  -s seed       - use a fixed seed for the RNG\n"
-       "  -V seconds    - fuzz for a maximum total time of seconds then terminate\n"
-       "  -E execs      - fuzz for a maximum number of total executions then terminate\n\n"
-
-       "Other stuff:\n"
-       "  -T text       - text banner to show on the screen\n"
-       "  -M / -S id    - distributed mode (see parallel_fuzzing.txt)\n"
-       "  -B bitmap.txt - mutate a specific test case, use the out/fuzz_bitmap file\n"
-       "  -C            - crash exploration mode (the peruvian rabbit thing)\n"
-       "  -e ext        - File extension for the temporarily generated test case\n\n"
-
-       PHYTON_SUPPORT
-
-       "For additional tips, please consult %s/README\n\n",
-
-       argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);
+  SAYF(
+      "\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n"
+
+      "Required parameters:\n"
+      "  -i dir        - input directory with test cases\n"
+      "  -o dir        - output directory for fuzzer findings\n\n"
+
+      "Execution control settings:\n"
+      "  -p schedule   - power schedules recompute a seed's performance "
+      "score.\n"
+      "                  <explore (default), fast, coe, lin, quad, or "
+      "exploit>\n"
+      "                  see docs/power_schedules.txt\n"
+      "  -f file       - location read by the fuzzed program (stdin)\n"
+      "  -t msec       - timeout for each run (auto-scaled, 50-%d ms)\n"
+      "  -m megs       - memory limit for child process (%d MB)\n"
+      "  -Q            - use binary-only instrumentation (QEMU mode)\n"
+      "  -U            - use Unicorn-based instrumentation (Unicorn mode)\n\n"
+      "  -L minutes    - use MOpt(imize) mode and set the limit time for "
+      "entering the\n"
+      "                  pacemaker mode (minutes of no new paths, 0 = "
+      "immediately).\n"
+      "                  a recommended value is 10-60. see docs/README.MOpt\n\n"
+
+      "Fuzzing behavior settings:\n"
+      "  -d            - quick & dirty mode (skips deterministic steps)\n"
+      "  -n            - fuzz without instrumentation (dumb mode)\n"
+      "  -x dir        - optional fuzzer dictionary (see README)\n\n"
+
+      "Testing settings:\n"
+      "  -s seed       - use a fixed seed for the RNG\n"
+      "  -V seconds    - fuzz for a maximum total time of seconds then "
+      "terminate\n"
+      "  -E execs      - fuzz for a maximum number of total executions then "
+      "terminate\n\n"
+
+      "Other stuff:\n"
+      "  -T text       - text banner to show on the screen\n"
+      "  -M / -S id    - distributed mode (see parallel_fuzzing.txt)\n"
+      "  -B bitmap.txt - mutate a specific test case, use the out/fuzz_bitmap "
+      "file\n"
+      "  -C            - crash exploration mode (the peruvian rabbit thing)\n"
+      "  -e ext        - File extension for the temporarily generated test "
+      "case\n\n"
+
+      PHYTON_SUPPORT
+
+      "For additional tips, please consult %s/README\n\n",
+
+      argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);

  exit(1);

#undef PHYTON_SUPPORT
@@ -82,65 +91,90 @@ static void usage(u8* argv0) {

#ifndef AFL_LIB

-static int stricmp(char const *a, char const *b) {
+static int stricmp(char const* a, char const* b) {
+
  for (;; ++a, ++b) {
+
    int d;
    d = tolower(*a) - tolower(*b);
-    if (d != 0 || !*a)
-      return d;
+    if (d != 0 || !*a) return d;
+
  }
+
}

/* Main entry point */

int main(int argc, char** argv) {

-  s32 opt;
-  u64 prev_queued = 0;
-  u32 sync_interval_cnt = 0, seek_to;
-  u8 *extras_dir = 0;
-  u8 mem_limit_given = 0;
-  u8 exit_1 = !!getenv("AFL_BENCH_JUST_ONE");
+  s32    opt;
+  u64    prev_queued = 0;
+  u32    sync_interval_cnt = 0, seek_to;
+  u8*    extras_dir = 0;
+  u8     mem_limit_given = 0;
+  u8     exit_1 = !!getenv("AFL_BENCH_JUST_ONE");
  char** use_argv;
-  s64 init_seed;
+  s64    init_seed;

-  struct timeval tv;
+  struct timeval  tv;
  struct timezone tz;

-  SAYF(cCYA "afl-fuzz" VERSION cRST " based on afl by <lcamtuf@google.com> and a big online community\n");
+  SAYF(cCYA
+       "afl-fuzz" VERSION cRST
+       " based on afl by <lcamtuf@google.com> and a big online community\n");

  doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;

  gettimeofday(&tv, &tz);
  init_seed = tv.tv_sec ^ tv.tv_usec ^ getpid();

-  while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:QUe:p:s:V:E:L:")) > 0)
+  while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:QUe:p:s:V:E:L:")) >
+         0)

    switch (opt) {

      case 's': {
+
        init_seed = strtoul(optarg, 0L, 10);
        fixed_seed = 1;
        break;
+
      }

-      case 'p': /* Power schedule */
+      case 'p':                                           /* Power schedule */

        if (!stricmp(optarg, "fast")) {
+
          schedule = FAST;
+
        } else if (!stricmp(optarg, "coe")) {
+
          schedule = COE;
+
        } else if (!stricmp(optarg, "exploit")) {
+
          schedule = EXPLOIT;
+
        } else if (!stricmp(optarg, "lin")) {
+
          schedule = LIN;
+
        } else if (!stricmp(optarg, "quad")) {
+
          schedule = QUAD;
-        } else if (!stricmp(optarg, "explore") || !stricmp(optarg, "default") || !stricmp(optarg, "normal") || !stricmp(optarg, "afl")) {
+
+        } else if (!stricmp(optarg, "explore") || !stricmp(optarg, "default") ||
+
+                   !stricmp(optarg, "normal") || !stricmp(optarg, "afl")) {
+
          schedule = EXPLORE;
+
        } else {
+
          FATAL("Unknown -p power schedule");
+
        }
+
        break;

      case 'e':

@@ -151,7 +185,7 @@ int main(int argc, char** argv) {

        break;

-      case 'i': /* input dir */
+      case 'i':                                                /* input dir */

        if (in_dir) FATAL("Multiple -i options not supported");
        in_dir = optarg;

@@ -160,115 +194,121 @@ int main(int argc, char** argv) {

        break;

-      case 'o': /* output dir */
+      case 'o':                                               /* output dir */

        if (out_dir) FATAL("Multiple -o options not supported");
        out_dir = optarg;
        break;

-      case 'M': { /* master sync ID */
+      case 'M': {                                         /* master sync ID */

-          u8* c;
+        u8* c;

-          if (sync_id) FATAL("Multiple -S or -M options not supported");
-          sync_id = ck_strdup(optarg);
+        if (sync_id) FATAL("Multiple -S or -M options not supported");
+        sync_id = ck_strdup(optarg);

-          if ((c = strchr(sync_id, ':'))) {
+        if ((c = strchr(sync_id, ':'))) {

-            *c = 0;
+          *c = 0;

-            if (sscanf(c + 1, "%u/%u", &master_id, &master_max) != 2 ||
-                !master_id || !master_max || master_id > master_max ||
-                master_max > 1000000) FATAL("Bogus master ID passed to -M");
+          if (sscanf(c + 1, "%u/%u", &master_id, &master_max) != 2 ||
+              !master_id || !master_max || master_id > master_max ||
+              master_max > 1000000)
+            FATAL("Bogus master ID passed to -M");

-          }
+        }

-          force_deterministic = 1;
+        force_deterministic = 1;

-        }
+      }

-        break;
+      break;

-      case 'S':
+      case 'S':

        if (sync_id) FATAL("Multiple -S or -M options not supported");
        sync_id = ck_strdup(optarg);
        break;

-      case 'f': /* target file */
+      case 'f':                                              /* target file */

        if (out_file) FATAL("Multiple -f options not supported");
        out_file = optarg;
        break;

-      case 'x': /* dictionary */
+      case 'x':                                               /* dictionary */

        if (extras_dir) FATAL("Multiple -x options not supported");
        extras_dir = optarg;
        break;

-      case 't': { /* timeout */
+      case 't': {                                                /* timeout */

-        u8 suffix = 0;
+        u8 suffix = 0;

-        if (timeout_given) FATAL("Multiple -t options not supported");
+        if (timeout_given) FATAL("Multiple -t options not supported");

-        if (sscanf(optarg, "%u%c", &exec_tmout, &suffix) < 1 ||
-            optarg[0] == '-') FATAL("Bad syntax used for -t");
+        if (sscanf(optarg, "%u%c", &exec_tmout, &suffix) < 1 ||
+            optarg[0] == '-')
+          FATAL("Bad syntax used for -t");

-        if (exec_tmout < 5) FATAL("Dangerously low value of -t");
+        if (exec_tmout < 5) FATAL("Dangerously low value of -t");

-        if (suffix == '+') timeout_given = 2; else timeout_given = 1;
+        if (suffix == '+')
+          timeout_given = 2;
+        else
+          timeout_given = 1;

-        break;
+        break;

      }

-      case 'm': { /* mem limit */
+      case 'm': {                                              /* mem limit */

-        u8 suffix = 'M';
+        u8 suffix = 'M';

-        if (mem_limit_given) FATAL("Multiple -m options not supported");
-        mem_limit_given = 1;
+        if (mem_limit_given) FATAL("Multiple -m options not supported");
+        mem_limit_given = 1;

-        if (!strcmp(optarg, "none")) {
+        if (!strcmp(optarg, "none")) {

-          mem_limit = 0;
-          break;
+          mem_limit = 0;
+          break;

-        }
+        }

-        if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
-            optarg[0] == '-') FATAL("Bad syntax used for -m");
+        if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
+            optarg[0] == '-')
+          FATAL("Bad syntax used for -m");

-        switch (suffix) {
+        switch (suffix) {

-          case 'T': mem_limit *= 1024 * 1024; break;
-          case 'G': mem_limit *= 1024; break;
-          case 'k': mem_limit /= 1024; break;
-          case 'M': break;
+          case 'T': mem_limit *= 1024 * 1024; break;
+          case 'G': mem_limit *= 1024; break;
+          case 'k': mem_limit /= 1024; break;
+          case 'M': break;

-          default: FATAL("Unsupported suffix or bad syntax for -m");
+          default: FATAL("Unsupported suffix or bad syntax for -m");

-        }
+        }

-        if (mem_limit < 5) FATAL("Dangerously low value of -m");
+        if (mem_limit < 5) FATAL("Dangerously low value of -m");

-        if (sizeof(rlim_t) == 4 && mem_limit > 2000)
-          FATAL("Value of -m out of range on 32-bit systems");
+        if (sizeof(rlim_t) == 4 && mem_limit > 2000)
+          FATAL("Value of -m out of range on 32-bit systems");

-      }
+      }

-      break;
+      break;
-      case 'd': /* skip deterministic */
+      case 'd':                                       /* skip deterministic */

        if (skip_deterministic) FATAL("Multiple -d options not supported");
        skip_deterministic = 1;
        use_splicing = 1;
        break;

-      case 'B': /* load bitmap */
+      case 'B':                                              /* load bitmap */

        /* This is a secret undocumented option! It is useful if you find
           an interesting test case during a normal fuzzing process, and want
@@ -287,26 +327,29 @@ int main(int argc, char** argv) {
        read_bitmap(in_bitmap);
        break;

-      case 'C': /* crash mode */
+      case 'C':                                               /* crash mode */

        if (crash_mode) FATAL("Multiple -C options not supported");
        crash_mode = FAULT_CRASH;
        break;

-      case 'n': /* dumb mode */
+      case 'n':                                                /* dumb mode */

        if (dumb_mode) FATAL("Multiple -n options not supported");
-        if (getenv("AFL_DUMB_FORKSRV")) dumb_mode = 2; else dumb_mode = 1;
+        if (getenv("AFL_DUMB_FORKSRV"))
+          dumb_mode = 2;
+        else
+          dumb_mode = 1;

        break;

-      case 'T': /* banner */
+      case 'T':                                                   /* banner */

        if (use_banner) FATAL("Multiple -T options not supported");
        use_banner = optarg;
        break;

-      case 'Q': /* QEMU mode */
+      case 'Q':                                                /* QEMU mode */

        if (qemu_mode) FATAL("Multiple -Q options not supported");
        qemu_mode = 1;

@@ -315,7 +358,7 @@ int main(int argc, char** argv) {

        break;

-      case 'U': /* Unicorn mode */
+      case 'U':                                             /* Unicorn mode */

        if (unicorn_mode) FATAL("Multiple -U options not supported");
        unicorn_mode = 1;

@@ -325,115 +368,132 @@ int main(int argc, char** argv) {
        break;

      case 'V': {
-        most_time_key = 1;
-        if (sscanf(optarg, "%llu", &most_time) < 1 || optarg[0] == '-')
-          FATAL("Bad syntax used for -V");
-      }
-      break;
+
+        most_time_key = 1;
+        if (sscanf(optarg, "%llu", &most_time) < 1 || optarg[0] == '-')
+          FATAL("Bad syntax used for -V");
+
+      } break;

      case 'E': {
-        most_execs_key = 1;
-        if (sscanf(optarg, "%llu", &most_execs) < 1 || optarg[0] == '-')
-          FATAL("Bad syntax used for -E");
-      }
-      break;
+
+        most_execs_key = 1;
+        if (sscanf(optarg, "%llu", &most_execs) < 1 || optarg[0] == '-')
+          FATAL("Bad syntax used for -E");
+
+      } break;

-      case 'L': { /* MOpt mode */
+      case 'L': {                                              /* MOpt mode */

-        if (limit_time_sig) FATAL("Multiple -L options not supported");
-        limit_time_sig = 1;
-        havoc_max_mult = HAVOC_MAX_MULT_MOPT;
+        if (limit_time_sig) FATAL("Multiple -L options not supported");
+        limit_time_sig = 1;
+        havoc_max_mult = HAVOC_MAX_MULT_MOPT;

-        if (sscanf(optarg, "%llu", &limit_time_puppet) < 1 ||
-            optarg[0] == '-') FATAL("Bad syntax used for -L");
+        if (sscanf(optarg, "%llu", &limit_time_puppet) < 1 || optarg[0] == '-')
+          FATAL("Bad syntax used for -L");

-        u64 limit_time_puppet2 = limit_time_puppet * 60 * 1000;
+        u64 limit_time_puppet2 = limit_time_puppet * 60 * 1000;

-        if (limit_time_puppet2 < limit_time_puppet ) FATAL("limit_time overflow");
-        limit_time_puppet = limit_time_puppet2;
+        if (limit_time_puppet2 < limit_time_puppet)
+          FATAL("limit_time overflow");
+        limit_time_puppet = limit_time_puppet2;

-        SAYF("limit_time_puppet %llu\n",limit_time_puppet);
-        swarm_now = 0;
+        SAYF("limit_time_puppet %llu\n", limit_time_puppet);
+        swarm_now = 0;

-        if (limit_time_puppet == 0 )
-          key_puppet = 1;
+        if (limit_time_puppet == 0) key_puppet = 1;

-        int i;
-        int tmp_swarm = 0;
+        int i;
+        int tmp_swarm = 0;

-        if (g_now > g_max) g_now = 0;
-        w_now = (w_init - w_end)*(g_max - g_now) / (g_max)+w_end;
+        if (g_now > g_max) g_now = 0;
+        w_now = (w_init - w_end) * (g_max - g_now) / (g_max) + w_end;

-        for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) {
-          double total_puppet_temp = 0.0;
-          swarm_fitness[tmp_swarm] = 0.0;
+        for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) {

-          for (i = 0; i < operator_num; ++i) {
-            stage_finds_puppet[tmp_swarm][i] = 0;
-            probability_now[tmp_swarm][i] = 0.0;
-            x_now[tmp_swarm][i] = ((double)(random() % 7000)*0.0001 + 0.1);
-            total_puppet_temp += x_now[tmp_swarm][i];
-            v_now[tmp_swarm][i] = 0.1;
-            L_best[tmp_swarm][i] = 0.5;
-            G_best[i] = 0.5;
-            eff_best[tmp_swarm][i] = 0.0;
-          }
+          double total_puppet_temp = 0.0;
+          swarm_fitness[tmp_swarm] = 0.0;

-          for (i = 0; i < operator_num; ++i) {
-            stage_cycles_puppet_v2[tmp_swarm][i] = stage_cycles_puppet[tmp_swarm][i];
-            stage_finds_puppet_v2[tmp_swarm][i] = stage_finds_puppet[tmp_swarm][i];
-            x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / total_puppet_temp;
-          }
+          for (i = 0; i < operator_num; ++i) {
+
+            stage_finds_puppet[tmp_swarm][i] = 0;
+            probability_now[tmp_swarm][i] = 0.0;
+            x_now[tmp_swarm][i] = ((double)(random() % 7000) * 0.0001 + 0.1);
+            total_puppet_temp += x_now[tmp_swarm][i];
+            v_now[tmp_swarm][i] = 0.1;
+            L_best[tmp_swarm][i] = 0.5;
+            G_best[i] = 0.5;
+            eff_best[tmp_swarm][i] = 0.0;
+
+          }
+
+          for (i = 0; i < operator_num; ++i) {
+
+            stage_cycles_puppet_v2[tmp_swarm][i] =
+                stage_cycles_puppet[tmp_swarm][i];
+            stage_finds_puppet_v2[tmp_swarm][i] =
+                stage_finds_puppet[tmp_swarm][i];
+            x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / total_puppet_temp;
+
+          }

-          double x_temp = 0.0;
+          double x_temp = 0.0;

-          for (i = 0; i < operator_num; ++i) {
-            probability_now[tmp_swarm][i] = 0.0;
-            v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
-
-            x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
+          for (i = 0; i < operator_num; ++i) {
+
+            probability_now[tmp_swarm][i] = 0.0;
+            v_now[tmp_swarm][i] =
+                w_now * v_now[tmp_swarm][i] +
+                RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) +
+                RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
+
+            x_now[tmp_swarm][i] += v_now[tmp_swarm][i];

-            if (x_now[tmp_swarm][i] > v_max)
-              x_now[tmp_swarm][i] = v_max;
-            else if (x_now[tmp_swarm][i] < v_min)
-              x_now[tmp_swarm][i] = v_min;
+            if (x_now[tmp_swarm][i] > v_max)
+              x_now[tmp_swarm][i] = v_max;
+            else if (x_now[tmp_swarm][i] < v_min)
+              x_now[tmp_swarm][i] = v_min;

-            x_temp += x_now[tmp_swarm][i];
-          }
+            x_temp += x_now[tmp_swarm][i];
+
+          }

-          for (i = 0; i < operator_num; ++i) {
-            x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
-            if (likely(i != 0))
-              probability_now[tmp_swarm][i] = probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
-            else
-              probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
-          }
-          if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || probability_now[tmp_swarm][operator_num - 1] > 1.01)
-            FATAL("ERROR probability");
-        }
+          for (i = 0; i < operator_num; ++i) {
+
+            x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
+            if (likely(i != 0))
+              probability_now[tmp_swarm][i] =
+                  probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
+            else
+              probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
+
+          }
+
+          if (probability_now[tmp_swarm][operator_num - 1] < 0.99 ||
+              probability_now[tmp_swarm][operator_num - 1] > 1.01)
+            FATAL("ERROR probability");
+
+        }

-        for (i = 0; i < operator_num; ++i) {
-          core_operator_finds_puppet[i] = 0;
-          core_operator_finds_puppet_v2[i] = 0;
-          core_operator_cycles_puppet[i] = 0;
-          core_operator_cycles_puppet_v2[i] = 0;
-          core_operator_cycles_puppet_v3[i] = 0;
-        }
+        for (i = 0; i < operator_num; ++i) {
+
+          core_operator_finds_puppet[i] = 0;
+          core_operator_finds_puppet_v2[i] = 0;
+          core_operator_cycles_puppet[i] = 0;
+          core_operator_cycles_puppet_v2[i] = 0;
+          core_operator_cycles_puppet_v3[i] = 0;

        }

-        break;
+      } break;

-      default:
+      default: usage(argv[0]);

-        usage(argv[0]);
-
    }
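The MOpt setup above seeds a particle swarm and then folds each swarm's operator weights into a cumulative distribution, which is why the last entry is sanity-checked against 1.0 (the 0.99..1.01 window). The normalization step in isolation (array sizes and the operator_num bound are whatever the MOpt build defines; the function name is local to this sketch):

/* Normalize raw weights, then build the CDF used for operator selection. */
static void build_operator_cdf(double* x_now, double* probability_now,
                               int operator_num) {

  double sum = 0.0;
  int    i;

  for (i = 0; i < operator_num; ++i)
    sum += x_now[i];

  for (i = 0; i < operator_num; ++i) {

    x_now[i] /= sum;
    probability_now[i] = (i ? probability_now[i - 1] : 0.0) + x_now[i];

  }

  /* probability_now[operator_num - 1] is now ~1.0, as checked above. */

}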
+ FATAL("TMP_DIR already has an existing temporary input file: %s", + tmpfile); + } else + tmp_dir = out_dir; if (dumb_mode) { if (crash_mode) FATAL("-C and -n are mutually exclusive"); - if (qemu_mode) FATAL("-Q and -n are mutually exclusive"); + if (qemu_mode) FATAL("-Q and -n are mutually exclusive"); if (unicorn_mode) FATAL("-U and -n are mutually exclusive"); } - + if (getenv("AFL_NO_UI") && getenv("AFL_FORCE_UI")) FATAL("AFL_NO_UI and AFL_FORCE_UI are mutually exclusive"); - - if (strchr(argv[optind], '/') == NULL) WARNF(cLRD "Target binary called without a prefixed path, make sure you are fuzzing the right binary: " cRST "%s", argv[optind]); - OKF("afl++ is maintained by Marc \"van Hauser\" Heuse, Heiko \"hexcoder\" Eissfeldt and Andrea Fioraldi"); - OKF("afl++ is open source, get it at https://github.com/vanhauser-thc/AFLplusplus"); + if (strchr(argv[optind], '/') == NULL) + WARNF(cLRD + "Target binary called without a prefixed path, make sure you are " + "fuzzing the right binary: " cRST "%s", + argv[optind]); + + OKF("afl++ is maintained by Marc \"van Hauser\" Heuse, Heiko \"hexcoder\" " + "Eissfeldt and Andrea Fioraldi"); + OKF("afl++ is open source, get it at " + "https://github.com/vanhauser-thc/AFLplusplus"); OKF("Power schedules from github.com/mboehme/aflfast"); OKF("Python Mutator and llvm_mode whitelisting from github.com/choller/afl"); OKF("afl-tmin fork server patch from github.com/nccgroup/TriforceAFL"); @@ -475,32 +546,42 @@ int main(int argc, char** argv) { ACTF("Getting to work..."); switch (schedule) { - case FAST: OKF ("Using exponential power schedule (FAST)"); break; - case COE: OKF ("Using cut-off exponential power schedule (COE)"); break; - case EXPLOIT: OKF ("Using exploitation-based constant power schedule (EXPLOIT)"); break; - case LIN: OKF ("Using linear power schedule (LIN)"); break; - case QUAD: OKF ("Using quadratic power schedule (QUAD)"); break; - case EXPLORE: OKF ("Using exploration-based constant power schedule (EXPLORE)"); break; - default : FATAL ("Unknown power schedule"); break; + + case FAST: OKF("Using exponential power schedule (FAST)"); break; + case COE: OKF("Using cut-off exponential power schedule (COE)"); break; + case EXPLOIT: + OKF("Using exploitation-based constant power schedule (EXPLOIT)"); + break; + case LIN: OKF("Using linear power schedule (LIN)"); break; + case QUAD: OKF("Using quadratic power schedule (QUAD)"); break; + case EXPLORE: + OKF("Using exploration-based constant power schedule (EXPLORE)"); + break; + default: FATAL("Unknown power schedule"); break; + } - if (getenv("AFL_NO_FORKSRV")) no_forkserver = 1; - if (getenv("AFL_NO_CPU_RED")) no_cpu_meter_red = 1; - if (getenv("AFL_NO_ARITH")) no_arith = 1; - if (getenv("AFL_SHUFFLE_QUEUE")) shuffle_queue = 1; - if (getenv("AFL_FAST_CAL")) fast_cal = 1; + if (getenv("AFL_NO_FORKSRV")) no_forkserver = 1; + if (getenv("AFL_NO_CPU_RED")) no_cpu_meter_red = 1; + if (getenv("AFL_NO_ARITH")) no_arith = 1; + if (getenv("AFL_SHUFFLE_QUEUE")) shuffle_queue = 1; + if (getenv("AFL_FAST_CAL")) fast_cal = 1; if (getenv("AFL_HANG_TMOUT")) { + hang_tmout = atoi(getenv("AFL_HANG_TMOUT")); if (!hang_tmout) FATAL("Invalid value of AFL_HANG_TMOUT"); + } if (dumb_mode == 2 && no_forkserver) FATAL("AFL_DUMB_FORKSRV and AFL_NO_FORKSRV are mutually exclusive"); if (getenv("AFL_PRELOAD")) { + setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1); setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1); + } if (getenv("AFL_LD_PRELOAD")) @@ -511,31 +592,33 @@ int main(int argc, char** argv) { 
fix_up_banner(argv[optind]); check_if_tty(); - if (getenv("AFL_FORCE_UI")) - not_on_tty = 0; + if (getenv("AFL_FORCE_UI")) not_on_tty = 0; if (getenv("AFL_CAL_FAST")) { + /* Use less calibration cycles, for slow applications */ cal_cycles = 3; cal_cycles_long = 5; + } - if (getenv("AFL_DEBUG")) - debug = 1; + if (getenv("AFL_DEBUG")) debug = 1; if (getenv("AFL_PYTHON_ONLY")) { + /* This ensures we don't proceed to havoc/splice */ python_only = 1; /* Ensure we also skip all deterministic steps */ skip_deterministic = 1; + } get_core_count(); -#ifdef HAVE_AFFINITY +# ifdef HAVE_AFFINITY bind_to_free_cpu(); -#endif /* HAVE_AFFINITY */ +# endif /* HAVE_AFFINITY */ check_crash_handling(); check_cpu_governor(); @@ -552,13 +635,12 @@ int main(int argc, char** argv) { setup_dirs_fds(); -#ifdef USE_PYTHON - if (init_py()) - FATAL("Failed to initialize Python module"); -#else +# ifdef USE_PYTHON + if (init_py()) FATAL("Failed to initialize Python module"); +# else if (getenv("AFL_PYTHON_MODULE")) - FATAL("Your AFL binary was built without Python support"); -#endif + FATAL("Your AFL binary was built without Python support"); +# endif setup_cmdline_file(argv + optind); @@ -574,24 +656,33 @@ int main(int argc, char** argv) { /* If we don't have a file name chosen yet, use a safe default. */ if (!out_file) { + u32 i = optind + 1; while (argv[i]) { u8* aa_loc = strstr(argv[i], "@@"); if (aa_loc && !out_file) { + if (file_extension) { + out_file = alloc_printf("%s/.cur_input.%s", out_dir, file_extension); + } else { + out_file = alloc_printf("%s/.cur_input", out_dir); + } + detect_file_args(argv + optind + 1, out_file); - break; + break; + } ++i; } + } if (!out_file) setup_stdio_file(); @@ -621,9 +712,11 @@ int main(int argc, char** argv) { /* Woop woop woop */ if (!not_on_tty) { + sleep(4); start_time += 4000; if (stop_soon) goto stop_fuzzing; + } // real start time, we reset, so this works correctly with -V @@ -638,21 +731,25 @@ int main(int argc, char** argv) { if (!queue_cur) { ++queue_cycle; - current_entry = 0; + current_entry = 0; cur_skipped_paths = 0; - queue_cur = queue; + queue_cur = queue; while (seek_to) { + ++current_entry; --seek_to; queue_cur = queue_cur->next; + } show_stats(); if (not_on_tty) { + ACTF("Entering queue cycle %llu.", queue_cycle); fflush(stdout); + } /* If we had a full queue cycle with no new finds, try @@ -660,9 +757,14 @@ int main(int argc, char** argv) { if (queued_paths == prev_queued) { - if (use_splicing) ++cycles_wo_finds; else use_splicing = 1; + if (use_splicing) + ++cycles_wo_finds; + else + use_splicing = 1; - } else cycles_wo_finds = 0; + } else + + cycles_wo_finds = 0; prev_queued = queued_paths; @@ -674,9 +776,8 @@ int main(int argc, char** argv) { skipped_fuzz = fuzz_one(use_argv); if (!stop_soon && sync_id && !skipped_fuzz) { - - if (!(sync_interval_cnt++ % SYNC_INTERVAL)) - sync_fuzzers(use_argv); + + if (!(sync_interval_cnt++ % SYNC_INTERVAL)) sync_fuzzers(use_argv); } @@ -688,18 +789,28 @@ int main(int argc, char** argv) { ++current_entry; if (most_time_key == 1) { + u64 cur_ms_lv = get_cur_time(); - if (most_time * 1000 < cur_ms_lv - start_time) { + if (most_time * 1000 < cur_ms_lv - start_time) { + most_time_key = 2; break; + } + } + if (most_execs_key == 1) { + if (most_execs <= total_execs) { + most_execs_key = 2; break; + } + } + } if (queue_cur) show_stats(); @@ -708,19 +819,20 @@ int main(int argc, char** argv) { * ATTENTION - the following 10 lines were copied from a PR to Google's afl * repository - and slightly fixed. 
* These lines have nothing to do with the purpose of original PR though. - * Looks like when an exit condition was completed (AFL_BENCH_JUST_ONE, + * Looks like when an exit condition was completed (AFL_BENCH_JUST_ONE, * AFL_EXIT_WHEN_DONE or AFL_BENCH_UNTIL_CRASH) the child and forkserver * where not killed? */ - /* if we stopped programmatically, we kill the forkserver and the current runner. - if we stopped manually, this is done by the signal handler */ - if (stop_soon == 2){ + /* if we stopped programmatically, we kill the forkserver and the current + runner. if we stopped manually, this is done by the signal handler */ + if (stop_soon == 2) { + if (child_pid > 0) kill(child_pid, SIGKILL); if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL); - /* Now that we've killed the forkserver, we wait for it to be able to get rusage stats. */ - if (waitpid(forksrv_pid, NULL, 0) <= 0) { - WARNF("error waitpid\n"); - } + /* Now that we've killed the forkserver, we wait for it to be able to get + * rusage stats. */ + if (waitpid(forksrv_pid, NULL, 0) <= 0) { WARNF("error waitpid\n"); } + } write_bitmap(); @@ -732,8 +844,7 @@ stop_fuzzing: SAYF(CURSOR_SHOW cLRD "\n\n+++ Testing aborted %s +++\n" cRST, stop_soon == 2 ? "programmatically" : "by user"); - if (most_time_key == 2) - SAYF(cYEL "[!] " cRST "Time limit was reached\n"); + if (most_time_key == 2) SAYF(cYEL "[!] " cRST "Time limit was reached\n"); if (most_execs_key == 2) SAYF(cYEL "[!] " cRST "Execution limit was reached\n"); @@ -742,8 +853,9 @@ stop_fuzzing: if (queue_cycle == 1 && get_cur_time() - start_time > 30 * 60 * 1000) { SAYF("\n" cYEL "[!] " cRST - "Stopped during the first cycle, results may be incomplete.\n" - " (For info on resuming, see %s/README)\n", doc_path); + "Stopped during the first cycle, results may be incomplete.\n" + " (For info on resuming, see %s/README)\n", + doc_path); } @@ -755,9 +867,9 @@ stop_fuzzing: alloc_report(); -#ifdef USE_PYTHON +# ifdef USE_PYTHON finalize_py(); -#endif +# endif OKF("We're done here. Have a nice day!\n"); @@ -766,3 +878,4 @@ stop_fuzzing: } #endif /* !AFL_LIB */ + diff --git a/src/afl-gcc.c b/src/afl-gcc.c index f6ededeb..750f9b72 100644 --- a/src/afl-gcc.c +++ b/src/afl-gcc.c @@ -43,19 +43,18 @@ #include <stdlib.h> #include <string.h> -static u8* as_path; /* Path to the AFL 'as' wrapper */ -static u8** cc_params; /* Parameters passed to the real CC */ -static u32 cc_par_cnt = 1; /* Param count, including argv0 */ -static u8 be_quiet, /* Quiet mode */ - clang_mode; /* Invoked as afl-clang*? */ - +static u8* as_path; /* Path to the AFL 'as' wrapper */ +static u8** cc_params; /* Parameters passed to the real CC */ +static u32 cc_par_cnt = 1; /* Param count, including argv0 */ +static u8 be_quiet, /* Quiet mode */ + clang_mode; /* Invoked as afl-clang*? */ /* Try to find our "fake" GNU assembler in AFL_PATH or at the location derived from argv[0]. If that fails, abort. 
*/ static void find_as(u8* argv0) { - u8 *afl_path = getenv("AFL_PATH"); + u8* afl_path = getenv("AFL_PATH"); u8 *slash, *tmp; if (afl_path) { @@ -63,9 +62,11 @@ static void find_as(u8* argv0) { tmp = alloc_printf("%s/as", afl_path); if (!access(tmp, X_OK)) { + as_path = afl_path; ck_free(tmp); return; + } ck_free(tmp); @@ -76,7 +77,7 @@ static void find_as(u8* argv0) { if (slash) { - u8 *dir; + u8* dir; *slash = 0; dir = ck_strdup(argv0); @@ -85,9 +86,11 @@ static void find_as(u8* argv0) { tmp = alloc_printf("%s/afl-as", dir); if (!access(tmp, X_OK)) { + as_path = dir; ck_free(tmp); return; + } ck_free(tmp); @@ -96,21 +99,22 @@ static void find_as(u8* argv0) { } if (!access(AFL_PATH "/as", X_OK)) { + as_path = AFL_PATH; return; + } FATAL("Unable to find AFL wrapper binary for 'as'. Please set AFL_PATH"); - -} +} /* Copy argv to cc_params, making the necessary edits. */ static void edit_params(u32 argc, char** argv) { - u8 fortify_set = 0, asan_set = 0; - u8 *name; + u8 fortify_set = 0, asan_set = 0; + u8* name; #if defined(__FreeBSD__) && defined(__x86_64__) u8 m32_set = 0; @@ -119,7 +123,10 @@ static void edit_params(u32 argc, char** argv) { cc_params = ck_alloc((argc + 128) * sizeof(u8*)); name = strrchr(argv[0], '/'); - if (!name) name = argv[0]; else name++; + if (!name) + name = argv[0]; + else + name++; if (!strncmp(name, "afl-clang", 9)) { @@ -128,11 +135,15 @@ static void edit_params(u32 argc, char** argv) { setenv(CLANG_ENV_VAR, "1", 1); if (!strcmp(name, "afl-clang++")) { + u8* alt_cxx = getenv("AFL_CXX"); cc_params[0] = alt_cxx ? alt_cxx : (u8*)"clang++"; + } else { + u8* alt_cc = getenv("AFL_CC"); cc_params[0] = alt_cc ? alt_cc : (u8*)"clang"; + } } else { @@ -145,16 +156,22 @@ static void edit_params(u32 argc, char** argv) { #ifdef __APPLE__ - if (!strcmp(name, "afl-g++")) cc_params[0] = getenv("AFL_CXX"); - else if (!strcmp(name, "afl-gcj")) cc_params[0] = getenv("AFL_GCJ"); - else cc_params[0] = getenv("AFL_CC"); + if (!strcmp(name, "afl-g++")) + cc_params[0] = getenv("AFL_CXX"); + else if (!strcmp(name, "afl-gcj")) + cc_params[0] = getenv("AFL_GCJ"); + else + cc_params[0] = getenv("AFL_CC"); if (!cc_params[0]) { SAYF("\n" cLRD "[-] " cRST - "On Apple systems, 'gcc' is usually just a wrapper for clang. Please use the\n" - " 'afl-clang' utility instead of 'afl-gcc'. If you really have GCC installed,\n" - " set AFL_CC or AFL_CXX to specify the correct path to that compiler.\n"); + "On Apple systems, 'gcc' is usually just a wrapper for clang. " + "Please use the\n" + " 'afl-clang' utility instead of 'afl-gcc'. If you really have " + "GCC installed,\n" + " set AFL_CC or AFL_CXX to specify the correct path to that " + "compiler.\n"); FATAL("AFL_CC or AFL_CXX required on MacOS X"); @@ -163,14 +180,20 @@ static void edit_params(u32 argc, char** argv) { #else if (!strcmp(name, "afl-g++")) { + u8* alt_cxx = getenv("AFL_CXX"); cc_params[0] = alt_cxx ? alt_cxx : (u8*)"g++"; + } else if (!strcmp(name, "afl-gcj")) { + u8* alt_cc = getenv("AFL_GCJ"); cc_params[0] = alt_cc ? alt_cc : (u8*)"gcj"; + } else { + u8* alt_cc = getenv("AFL_CC"); cc_params[0] = alt_cc ? 
alt_cc : (u8*)"gcc"; + } #endif /* __APPLE__ */ @@ -178,13 +201,20 @@ static void edit_params(u32 argc, char** argv) { } while (--argc) { + u8* cur = *(++argv); if (!strncmp(cur, "-B", 2)) { if (!be_quiet) WARNF("-B is already set, overriding"); - if (!cur[2] && argc > 1) { argc--; argv++; } + if (!cur[2] && argc > 1) { + + argc--; + argv++; + + } + continue; } @@ -197,8 +227,8 @@ static void edit_params(u32 argc, char** argv) { if (!strcmp(cur, "-m32")) m32_set = 1; #endif - if (!strcmp(cur, "-fsanitize=address") || - !strcmp(cur, "-fsanitize=memory")) asan_set = 1; + if (!strcmp(cur, "-fsanitize=address") || !strcmp(cur, "-fsanitize=memory")) + asan_set = 1; if (strstr(cur, "FORTIFY_SOURCE")) fortify_set = 1; @@ -209,15 +239,13 @@ static void edit_params(u32 argc, char** argv) { cc_params[cc_par_cnt++] = "-B"; cc_params[cc_par_cnt++] = as_path; - if (clang_mode) - cc_params[cc_par_cnt++] = "-no-integrated-as"; + if (clang_mode) cc_params[cc_par_cnt++] = "-no-integrated-as"; if (getenv("AFL_HARDEN")) { cc_params[cc_par_cnt++] = "-fstack-protector-all"; - if (!fortify_set) - cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2"; + if (!fortify_set) cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2"; } @@ -229,8 +257,7 @@ static void edit_params(u32 argc, char** argv) { } else if (getenv("AFL_USE_ASAN")) { - if (getenv("AFL_USE_MSAN")) - FATAL("ASAN and MSAN are mutually exclusive"); + if (getenv("AFL_USE_MSAN")) FATAL("ASAN and MSAN are mutually exclusive"); if (getenv("AFL_HARDEN")) FATAL("ASAN and AFL_HARDEN are mutually exclusive"); @@ -240,8 +267,7 @@ static void edit_params(u32 argc, char** argv) { } else if (getenv("AFL_USE_MSAN")) { - if (getenv("AFL_USE_ASAN")) - FATAL("ASAN and MSAN are mutually exclusive"); + if (getenv("AFL_USE_ASAN")) FATAL("ASAN and MSAN are mutually exclusive"); if (getenv("AFL_HARDEN")) FATAL("MSAN and AFL_HARDEN are mutually exclusive"); @@ -249,11 +275,10 @@ static void edit_params(u32 argc, char** argv) { cc_params[cc_par_cnt++] = "-U_FORTIFY_SOURCE"; cc_params[cc_par_cnt++] = "-fsanitize=memory"; - } #ifdef USEMMAP - cc_params[cc_par_cnt++] = "-lrt"; + cc_params[cc_par_cnt++] = "-lrt"; #endif if (!getenv("AFL_DONT_OPTIMIZE")) { @@ -264,12 +289,11 @@ static void edit_params(u32 argc, char** argv) { works OK. This has nothing to do with us, but let's avoid triggering that bug. */ - if (!clang_mode || !m32_set) - cc_params[cc_par_cnt++] = "-g"; + if (!clang_mode || !m32_set) cc_params[cc_par_cnt++] = "-g"; #else - cc_params[cc_par_cnt++] = "-g"; + cc_params[cc_par_cnt++] = "-g"; #endif @@ -300,7 +324,6 @@ static void edit_params(u32 argc, char** argv) { } - /* Main entry point */ int main(int argc, char** argv) { @@ -308,23 +331,33 @@ int main(int argc, char** argv) { if (isatty(2) && !getenv("AFL_QUIET")) { SAYF(cCYA "afl-cc" VERSION cRST " by <lcamtuf@google.com>\n"); - SAYF(cYEL "[!] " cBRI "NOTE: " cRST "afl-gcc is deprecated, llvm_mode is much faster and has more options\n"); + SAYF(cYEL "[!] " cBRI "NOTE: " cRST + "afl-gcc is deprecated, llvm_mode is much faster and has more " + "options\n"); + + } else - } else be_quiet = 1; + be_quiet = 1; if (argc < 2) { - SAYF("\n" - "This is a helper application for afl-fuzz. It serves as a drop-in replacement\n" - "for gcc or clang, letting you recompile third-party code with the required\n" - "runtime instrumentation. A common use pattern would be one of the following:\n\n" + SAYF( + "\n" + "This is a helper application for afl-fuzz. 
It serves as a drop-in " + "replacement\n" + "for gcc or clang, letting you recompile third-party code with the " + "required\n" + "runtime instrumentation. A common use pattern would be one of the " + "following:\n\n" - " CC=%s/afl-gcc ./configure\n" - " CXX=%s/afl-g++ ./configure\n\n" + " CC=%s/afl-gcc ./configure\n" + " CXX=%s/afl-g++ ./configure\n\n" - "You can specify custom next-stage toolchain via AFL_CC, AFL_CXX, and AFL_AS.\n" - "Setting AFL_HARDEN enables hardening optimizations in the compiled code.\n\n", - BIN_PATH, BIN_PATH); + "You can specify custom next-stage toolchain via AFL_CC, AFL_CXX, and " + "AFL_AS.\n" + "Setting AFL_HARDEN enables hardening optimizations in the compiled " + "code.\n\n", + BIN_PATH, BIN_PATH); exit(1); @@ -341,3 +374,4 @@ int main(int argc, char** argv) { return 0; } + diff --git a/src/afl-gotcpu.c b/src/afl-gotcpu.c index fa629eb7..5aa9b35c 100644 --- a/src/afl-gotcpu.c +++ b/src/afl-gotcpu.c @@ -31,7 +31,7 @@ #endif #ifdef __ANDROID__ - #include "android-ashmem.h" +# include "android-ashmem.h" #endif #include <stdio.h> #include <stdlib.h> @@ -51,12 +51,11 @@ # define HAVE_AFFINITY 1 #endif /* __linux__ */ - /* Get unix time in microseconds. */ static u64 get_cur_time_us(void) { - struct timeval tv; + struct timeval tv; struct timezone tz; gettimeofday(&tv, &tz); @@ -65,7 +64,6 @@ static u64 get_cur_time_us(void) { } - /* Get CPU usage in microseconds. */ static u64 get_cpu_usage_us(void) { @@ -79,7 +77,6 @@ static u64 get_cpu_usage_us(void) { } - /* Measure preemption rate. */ static u32 measure_preemption(u32 target_ms) { @@ -96,14 +93,17 @@ repeat_loop: v1 = CTEST_BUSY_CYCLES; - while (v1--) v2++; + while (v1--) + v2++; sched_yield(); en_t = get_cur_time_us(); if (en_t - st_t < target_ms * 1000) { + loop_repeats++; goto repeat_loop; + } /* Let's see what percentage of this time we actually had a chance to @@ -111,22 +111,20 @@ repeat_loop: en_c = get_cpu_usage_us(); - real_delta = (en_t - st_t) / 1000; + real_delta = (en_t - st_t) / 1000; slice_delta = (en_c - st_c) / 1000; return real_delta * 100 / slice_delta; } - /* Do the benchmark thing. 
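measure_preemption() above interleaves bursts of busy work with sched_yield() and then compares elapsed wall-clock time against the CPU time the kernel actually granted; values well above 100% mean the core is oversubscribed. A stand-alone sketch of the same wall-vs-CPU measurement (the body of get_cpu_usage_us() is not shown in this hunk, so getrusage() here is an assumption; the duration and loop sizes are arbitrary):

#include <sched.h>
#include <stdio.h>
#include <sys/resource.h>
#include <sys/time.h>

static long long wall_us(void) {
  struct timeval tv;
  gettimeofday(&tv, 0);
  return tv.tv_sec * 1000000LL + tv.tv_usec;
}

static long long cpu_us(void) {
  /* Assumed equivalent of get_cpu_usage_us(): user + system time. */
  struct rusage ru;
  getrusage(RUSAGE_SELF, &ru);
  return (ru.ru_utime.tv_sec + ru.ru_stime.tv_sec) * 1000000LL +
         ru.ru_utime.tv_usec + ru.ru_stime.tv_usec;
}

int main(void) {

  long long st_t = wall_us(), st_c = cpu_us();
  volatile unsigned v = 0;

  while (wall_us() - st_t < 250000) {  /* ~250 ms of wall time */
    unsigned n = 1000000;
    while (n--) v++;
    sched_yield();                     /* let competing tasks run */
  }

  long long real_delta  = (wall_us() - st_t) / 1000;
  long long slice_delta = (cpu_us() - st_c) / 1000;
  if (slice_delta) printf("util = %lld%%\n", real_delta * 100 / slice_delta);
  return 0;

}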
*/ int main(int argc, char** argv) { #ifdef HAVE_AFFINITY - u32 cpu_cnt = sysconf(_SC_NPROCESSORS_ONLN), - idle_cpus = 0, maybe_cpus = 0, i; + u32 cpu_cnt = sysconf(_SC_NPROCESSORS_ONLN), idle_cpus = 0, maybe_cpus = 0, i; SAYF(cCYA "afl-gotcpu" VERSION cRST " by <lcamtuf@google.com>\n"); @@ -142,7 +140,7 @@ int main(int argc, char** argv) { if (!fr) { cpu_set_t c; - u32 util_perc; + u32 util_perc; CPU_ZERO(&c); CPU_SET(i, &c); @@ -159,7 +157,7 @@ int main(int argc, char** argv) { } else if (util_perc < 250) { - SAYF(" Core #%u: " cYEL "CAUTION " cRST "(%u%%)\n", i, util_perc); + SAYF(" Core #%u: " cYEL "CAUTION " cRST "(%u%%)\n", i, util_perc); exit(1); } @@ -255,3 +253,4 @@ int main(int argc, char** argv) { #endif /* ^HAVE_AFFINITY */ } + diff --git a/src/afl-sharedmem.c b/src/afl-sharedmem.c index ce3b76e6..9c7ac7c3 100644 --- a/src/afl-sharedmem.c +++ b/src/afl-sharedmem.c @@ -5,7 +5,7 @@ #define AFL_MAIN #ifdef __ANDROID__ - #include "android-ashmem.h" +# include "android-ashmem.h" #endif #include "config.h" #include "types.h" @@ -32,68 +32,79 @@ #include <sys/mman.h> #ifndef USEMMAP - #include <sys/ipc.h> - #include <sys/shm.h> +# include <sys/ipc.h> +# include <sys/shm.h> #endif -extern unsigned char*trace_bits; +extern unsigned char *trace_bits; #ifdef USEMMAP /* ================ Proteas ================ */ -int g_shm_fd = -1; +int g_shm_fd = -1; unsigned char *g_shm_base = NULL; -char g_shm_file_path[L_tmpnam]; +char g_shm_file_path[L_tmpnam]; /* ========================================= */ #else -static s32 shm_id; /* ID of the SHM region */ +static s32 shm_id; /* ID of the SHM region */ #endif /* Get rid of shared memory (atexit handler). */ void remove_shm(void) { + #ifdef USEMMAP if (g_shm_base != NULL) { + munmap(g_shm_base, MAP_SIZE); g_shm_base = NULL; + } if (g_shm_fd != -1) { + close(g_shm_fd); g_shm_fd = -1; + } + #else shmctl(shm_id, IPC_RMID, NULL); #endif -} +} /* Configure shared memory. 
*/ void setup_shm(unsigned char dumb_mode) { + #ifdef USEMMAP /* generate random file name for multi instance */ - /* thanks to f*cking glibc we can not use tmpnam securely, it generates a security warning that cannot be suppressed */ + /* thanks to f*cking glibc we can not use tmpnam securely, it generates a + * security warning that cannot be suppressed */ /* so we do this worse workaround */ snprintf(g_shm_file_path, L_tmpnam, "/afl_%d_%ld", getpid(), random()); /* create the shared memory segment as if it was a file */ g_shm_fd = shm_open(g_shm_file_path, O_CREAT | O_RDWR | O_EXCL, 0600); - if (g_shm_fd == -1) { - PFATAL("shm_open() failed"); - } + if (g_shm_fd == -1) { PFATAL("shm_open() failed"); } /* configure the size of the shared memory segment */ if (ftruncate(g_shm_fd, MAP_SIZE)) { + PFATAL("setup_shm(): ftruncate() failed"); + } /* map the shared memory segment to the address space of the process */ - g_shm_base = mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, g_shm_fd, 0); + g_shm_base = + mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, g_shm_fd, 0); if (g_shm_base == MAP_FAILED) { + close(g_shm_fd); g_shm_fd = -1; PFATAL("mmap() failed"); + } atexit(remove_shm); @@ -108,7 +119,7 @@ void setup_shm(unsigned char dumb_mode) { trace_bits = g_shm_base; if (!trace_bits) PFATAL("mmap() failed"); - + #else u8* shm_str; @@ -132,9 +143,10 @@ void setup_shm(unsigned char dumb_mode) { ck_free(shm_str); trace_bits = shmat(shm_id, NULL, 0); - + if (!trace_bits) PFATAL("shmat() failed"); #endif + } diff --git a/src/afl-showmap.c b/src/afl-showmap.c index ee00bf22..ac3d687d 100644 --- a/src/afl-showmap.c +++ b/src/afl-showmap.c @@ -24,7 +24,7 @@ #define AFL_MAIN #ifdef __ANDROID__ - #include "android-ashmem.h" +# include "android-ashmem.h" #endif #include "config.h" #include "types.h" @@ -51,61 +51,54 @@ #include <sys/types.h> #include <sys/resource.h> -static s32 child_pid; /* PID of the tested program */ +static s32 child_pid; /* PID of the tested program */ - u8* trace_bits; /* SHM with instrumentation bitmap */ +u8* trace_bits; /* SHM with instrumentation bitmap */ -static u8 *out_file, /* Trace output file */ - *doc_path, /* Path to docs */ - *target_path, /* Path to target binary */ - *at_file; /* Substitution string for @@ */ +static u8 *out_file, /* Trace output file */ + *doc_path, /* Path to docs */ + *target_path, /* Path to target binary */ + *at_file; /* Substitution string for @@ */ -static u32 exec_tmout; /* Exec timeout (ms) */ +static u32 exec_tmout; /* Exec timeout (ms) */ -static u32 total, highest; /* tuple content information */ +static u32 total, highest; /* tuple content information */ -static u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */ +static u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */ -static u8 quiet_mode, /* Hide non-essential messages? */ - edges_only, /* Ignore hit counts? */ - raw_instr_output, /* Do not apply AFL filters */ - cmin_mode, /* Generate output in afl-cmin mode? */ - binary_mode, /* Write output as a binary map */ - keep_cores; /* Allow coredumps? */ +static u8 quiet_mode, /* Hide non-essential messages? */ + edges_only, /* Ignore hit counts? */ + raw_instr_output, /* Do not apply AFL filters */ + cmin_mode, /* Generate output in afl-cmin mode? */ + binary_mode, /* Write output as a binary map */ + keep_cores; /* Allow coredumps? */ -static volatile u8 - stop_soon, /* Ctrl-C pressed? */ - child_timed_out, /* Child timed out? */ - child_crashed; /* Child crashed? */ +static volatile u8 stop_soon, /* Ctrl-C pressed? 
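The USEMMAP branch of setup_shm() above is the canonical POSIX shared-memory sequence: shm_open() an exclusive name, ftruncate() it to the map size, then mmap() it shared. A self-contained sketch (MAP_SIZE_DEMO and the name format are stand-ins; older glibc needs -lrt at link time):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define MAP_SIZE_DEMO 65536            /* stand-in for AFL's MAP_SIZE */

int main(void) {

  char path[64];
  snprintf(path, sizeof(path), "/demo_shm_%d", getpid());

  int fd = shm_open(path, O_CREAT | O_RDWR | O_EXCL, 0600);
  if (fd == -1) { perror("shm_open"); return 1; }

  if (ftruncate(fd, MAP_SIZE_DEMO)) { perror("ftruncate"); return 1; }

  unsigned char* bits =
      mmap(0, MAP_SIZE_DEMO, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (bits == MAP_FAILED) { perror("mmap"); return 1; }

  bits[0] = 1;        /* visible to any child forked from here on */

  munmap(bits, MAP_SIZE_DEMO);
  close(fd);
  shm_unlink(path);   /* release the name; AFL does its own cleanup at exit */
  return 0;

}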
*/ + child_timed_out, /* Child timed out? */ + child_crashed; /* Child crashed? */ /* Classify tuple counts. Instead of mapping to individual bits, as in afl-fuzz.c, we map to more user-friendly numbers between 1 and 8. */ static const u8 count_class_human[256] = { - [0] = 0, - [1] = 1, - [2] = 2, - [3] = 3, - [4 ... 7] = 4, - [8 ... 15] = 5, - [16 ... 31] = 6, - [32 ... 127] = 7, - [128 ... 255] = 8 + [0] = 0, [1] = 1, [2] = 2, [3] = 3, + [4 ... 7] = 4, [8 ... 15] = 5, [16 ... 31] = 6, [32 ... 127] = 7, + [128 ... 255] = 8 }; static const u8 count_class_binary[256] = { - [0] = 0, - [1] = 1, - [2] = 2, - [3] = 4, - [4 ... 7] = 8, - [8 ... 15] = 16, - [16 ... 31] = 32, - [32 ... 127] = 64, - [128 ... 255] = 128 + [0] = 0, + [1] = 1, + [2] = 2, + [3] = 4, + [4 ... 7] = 8, + [8 ... 15] = 16, + [16 ... 31] = 32, + [32 ... 127] = 64, + [128 ... 255] = 128 }; @@ -116,22 +109,25 @@ static void classify_counts(u8* mem, const u8* map) { if (edges_only) { while (i--) { + if (*mem) *mem = 1; mem++; + } } else if (!raw_instr_output) { while (i--) { + *mem = map[*mem]; mem++; + } } } - /* Write results. */ static u32 write_results(void) { @@ -139,8 +135,8 @@ static u32 write_results(void) { s32 fd; u32 i, ret = 0; - u8 cco = !!getenv("AFL_CMIN_CRASHES_ONLY"), - caa = !!getenv("AFL_CMIN_ALLOW_ANY"); + u8 cco = !!getenv("AFL_CMIN_CRASHES_ONLY"), + caa = !!getenv("AFL_CMIN_ALLOW_ANY"); if (!strncmp(out_file, "/dev/", 5)) { @@ -154,7 +150,7 @@ static u32 write_results(void) { } else { - unlink(out_file); /* Ignore errors */ + unlink(out_file); /* Ignore errors */ fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600); if (fd < 0) PFATAL("Unable to create '%s'", out_file); @@ -164,7 +160,7 @@ static u32 write_results(void) { for (i = 0; i < MAP_SIZE; i++) if (trace_bits[i]) ret++; - + ck_write(fd, trace_bits, MAP_SIZE, out_file); close(fd); @@ -178,10 +174,9 @@ static u32 write_results(void) { if (!trace_bits[i]) continue; ret++; - + total += trace_bits[i]; - if (highest < trace_bits[i]) - highest = trace_bits[i]; + if (highest < trace_bits[i]) highest = trace_bits[i]; if (cmin_mode) { @@ -190,10 +185,12 @@ static u32 write_results(void) { fprintf(f, "%u%u\n", trace_bits[i], i); - } else fprintf(f, "%06u:%u\n", i, trace_bits[i]); + } else + + fprintf(f, "%06u:%u\n", i, trace_bits[i]); } - + fclose(f); } @@ -202,7 +199,6 @@ static u32 write_results(void) { } - /* Handle timeout signal. */ static void handle_timeout(int sig) { @@ -212,16 +208,14 @@ static void handle_timeout(int sig) { } - /* Execute target application. 
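classify_counts() above folds raw hit counts through a 256-entry table so that, say, 4 and 7 hits land in the same bucket and small count jitter does not register as new behavior. Note that the [a ... b] range designated initializers are a GCC/Clang extension. A compact runnable version of the same lookup-table pass:

#include <stdio.h>

/* Range initializers as in afl-showmap; a GCC/Clang extension. */
static const unsigned char count_class_human[256] = {
  [0] = 0, [1] = 1, [2] = 2, [3] = 3,
  [4 ... 7] = 4, [8 ... 15] = 5, [16 ... 31] = 6, [32 ... 127] = 7,
  [128 ... 255] = 8
};

static void classify(unsigned char* mem, unsigned len) {
  while (len--) { *mem = count_class_human[*mem]; mem++; }
}

int main(void) {

  unsigned char trace[4] = { 1, 6, 40, 200 };
  classify(trace, 4);
  for (int i = 0; i < 4; i++) printf("%d ", trace[i]);   /* 1 4 7 8 */
  printf("\n");
  return 0;

}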
*/ static void run_target(char** argv) { static struct itimerval it; - int status = 0; + int status = 0; - if (!quiet_mode) - SAYF("-- Program output begins --\n" cRST); + if (!quiet_mode) SAYF("-- Program output begins --\n" cRST); MEM_BARRIER(); @@ -238,8 +232,10 @@ static void run_target(char** argv) { s32 fd = open("/dev/null", O_RDWR); if (fd < 0 || dup2(fd, 1) < 0 || dup2(fd, 2) < 0) { + *(u32*)trace_bits = EXEC_FAIL_SIG; PFATAL("Descriptor initialization failed"); + } close(fd); @@ -252,20 +248,22 @@ static void run_target(char** argv) { #ifdef RLIMIT_AS - setrlimit(RLIMIT_AS, &r); /* Ignore errors */ + setrlimit(RLIMIT_AS, &r); /* Ignore errors */ #else - setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ + setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ #endif /* ^RLIMIT_AS */ } - if (!keep_cores) r.rlim_max = r.rlim_cur = 0; - else r.rlim_max = r.rlim_cur = RLIM_INFINITY; + if (!keep_cores) + r.rlim_max = r.rlim_cur = 0; + else + r.rlim_max = r.rlim_cur = RLIM_INFINITY; - setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ + setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0); @@ -304,14 +302,12 @@ static void run_target(char** argv) { if (*(u32*)trace_bits == EXEC_FAIL_SIG) FATAL("Unable to execute '%s'", argv[0]); - classify_counts(trace_bits, binary_mode ? - count_class_binary : count_class_human); + classify_counts(trace_bits, + binary_mode ? count_class_binary : count_class_human); - if (!quiet_mode) - SAYF(cRST "-- Program output ends --\n"); + if (!quiet_mode) SAYF(cRST "-- Program output ends --\n"); - if (!child_timed_out && !stop_soon && WIFSIGNALED(status)) - child_crashed = 1; + if (!child_timed_out && !stop_soon && WIFSIGNALED(status)) child_crashed = 1; if (!quiet_mode) { @@ -320,14 +316,13 @@ static void run_target(char** argv) { else if (stop_soon) SAYF(cLRD "\n+++ Program aborted by user +++\n" cRST); else if (child_crashed) - SAYF(cLRD "\n+++ Program killed by signal %u +++\n" cRST, WTERMSIG(status)); + SAYF(cLRD "\n+++ Program killed by signal %u +++\n" cRST, + WTERMSIG(status)); } - } - /* Handle Ctrl-C and the like. */ static void handle_stop_sig(int sig) { @@ -338,15 +333,16 @@ static void handle_stop_sig(int sig) { } - /* Do basic preparations - persistent fds, filenames, etc. */ static void set_up_environment(void) { - setenv("ASAN_OPTIONS", "abort_on_error=1:" - "detect_leaks=0:" - "symbolize=0:" - "allocator_may_return_null=1", 0); + setenv("ASAN_OPTIONS", + "abort_on_error=1:" + "detect_leaks=0:" + "symbolize=0:" + "allocator_may_return_null=1", + 0); setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":" "symbolize=0:" @@ -355,21 +351,22 @@ static void set_up_environment(void) { "msan_track_origins=0", 0); if (getenv("AFL_PRELOAD")) { + setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1); setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1); + } } - /* Setup signal handlers, duh. */ static void setup_signal_handlers(void) { struct sigaction sa; - sa.sa_handler = NULL; - sa.sa_flags = SA_RESTART; + sa.sa_handler = NULL; + sa.sa_flags = SA_RESTART; sa.sa_sigaction = NULL; sigemptyset(&sa.sa_mask); @@ -388,7 +385,6 @@ static void setup_signal_handlers(void) { } - /* Show banner. */ static void show_banner(void) { @@ -403,42 +399,43 @@ static void usage(u8* argv0) { show_banner(); - SAYF("\n%s [ options ] -- /path/to/target_app [ ... ]\n\n" + SAYF( + "\n%s [ options ] -- /path/to/target_app [ ... 
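run_target() above is a fork/exec/waitpid harness with an interval timer enforcing the -t timeout and a WIFSIGNALED() check classifying crashes. A much-reduced sketch of that control flow (alarm() stands in for the original's setitimer(), sigaction() would be more robust than signal(), and the sleep target is illustrative):

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static pid_t child_pid;

static void handle_timeout(int sig) {
  (void)sig;
  if (child_pid > 0) kill(child_pid, SIGKILL);   /* async-signal-safe */
}

int main(void) {

  signal(SIGALRM, handle_timeout);

  child_pid = fork();
  if (!child_pid) {
    execlp("sleep", "sleep", "10", (char*)0);    /* illustrative target */
    _exit(127);
  }

  alarm(2);                 /* crude stand-in for setitimer() */
  int status = 0;
  if (waitpid(child_pid, &status, 0) <= 0) perror("waitpid");
  alarm(0);

  if (WIFSIGNALED(status))
    printf("killed by signal %d\n", WTERMSIG(status));
  else
    printf("exited with %d\n", WEXITSTATUS(status));

  return 0;

}

A timed-out child dies by SIGKILL and therefore also reports as signaled; afl-showmap tells the two cases apart with its child_timed_out flag.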
]\n\n" - "Required parameters:\n\n" + "Required parameters:\n\n" - " -o file - file to write the trace data to\n\n" + " -o file - file to write the trace data to\n\n" - "Execution control settings:\n\n" + "Execution control settings:\n\n" - " -t msec - timeout for each run (none)\n" - " -m megs - memory limit for child process (%d MB)\n" - " -Q - use binary-only instrumentation (QEMU mode)\n" - " -U - use Unicorn-based instrumentation (Unicorn mode)\n" - " (Not necessary, here for consistency with other afl-* tools)\n\n" + " -t msec - timeout for each run (none)\n" + " -m megs - memory limit for child process (%d MB)\n" + " -Q - use binary-only instrumentation (QEMU mode)\n" + " -U - use Unicorn-based instrumentation (Unicorn mode)\n" + " (Not necessary, here for consistency with other afl-* " + "tools)\n\n" - "Other settings:\n\n" + "Other settings:\n\n" - " -q - sink program's output and don't show messages\n" - " -e - show edge coverage only, ignore hit counts\n" - " -r - show real tuple values instead of AFL filter values\n" - " -c - allow core dumps\n\n" + " -q - sink program's output and don't show messages\n" + " -e - show edge coverage only, ignore hit counts\n" + " -r - show real tuple values instead of AFL filter values\n" + " -c - allow core dumps\n\n" - "This tool displays raw tuple data captured by AFL instrumentation.\n" - "For additional help, consult %s/README.\n\n" cRST, + "This tool displays raw tuple data captured by AFL instrumentation.\n" + "For additional help, consult %s/README.\n\n" cRST, - argv0, MEM_LIMIT, doc_path); + argv0, MEM_LIMIT, doc_path); exit(1); } - /* Find binary. */ static void find_binary(u8* fname) { - u8* env_path = 0; + u8* env_path = 0; struct stat st; if (strchr(fname, '/') || !(env_path = getenv("PATH"))) { @@ -461,7 +458,9 @@ static void find_binary(u8* fname) { memcpy(cur_elem, env_path, delim - env_path); delim++; - } else cur_elem = ck_strdup(env_path); + } else + + cur_elem = ck_strdup(env_path); env_path = delim; @@ -473,7 +472,8 @@ static void find_binary(u8* fname) { ck_free(cur_elem); if (!stat(target_path, &st) && S_ISREG(st.st_mode) && - (st.st_mode & 0111) && st.st_size >= 4) break; + (st.st_mode & 0111) && st.st_size >= 4) + break; ck_free(target_path); target_path = 0; @@ -486,13 +486,12 @@ static void find_binary(u8* fname) { } - /* Fix up argv for QEMU. */ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { char** new_argv = ck_alloc(sizeof(char*) * (argc + 4)); - u8 *tmp, *cp, *rsl, *own_copy; + u8 * tmp, *cp, *rsl, *own_copy; memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc); @@ -507,8 +506,7 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { cp = alloc_printf("%s/afl-qemu-trace", tmp); - if (access(cp, X_OK)) - FATAL("Unable to find '%s'", tmp); + if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp); target_path = new_argv[0] = cp; return new_argv; @@ -532,7 +530,9 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { } - } else ck_free(own_copy); + } else + + ck_free(own_copy); if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) { @@ -556,7 +556,7 @@ int main(int argc, char** argv) { doc_path = access(DOC_PATH, F_OK) ? 
"docs" : DOC_PATH; - while ((opt = getopt(argc,argv,"+o:m:t:A:eqZQUbcr")) > 0) + while ((opt = getopt(argc, argv, "+o:m:t:A:eqZQUbcr")) > 0) switch (opt) { @@ -568,40 +568,41 @@ int main(int argc, char** argv) { case 'm': { - u8 suffix = 'M'; + u8 suffix = 'M'; - if (mem_limit_given) FATAL("Multiple -m options not supported"); - mem_limit_given = 1; + if (mem_limit_given) FATAL("Multiple -m options not supported"); + mem_limit_given = 1; - if (!strcmp(optarg, "none")) { + if (!strcmp(optarg, "none")) { - mem_limit = 0; - break; + mem_limit = 0; + break; - } + } - if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 || - optarg[0] == '-') FATAL("Bad syntax used for -m"); + if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 || + optarg[0] == '-') + FATAL("Bad syntax used for -m"); - switch (suffix) { + switch (suffix) { - case 'T': mem_limit *= 1024 * 1024; break; - case 'G': mem_limit *= 1024; break; - case 'k': mem_limit /= 1024; break; - case 'M': break; + case 'T': mem_limit *= 1024 * 1024; break; + case 'G': mem_limit *= 1024; break; + case 'k': mem_limit /= 1024; break; + case 'M': break; - default: FATAL("Unsupported suffix or bad syntax for -m"); + default: FATAL("Unsupported suffix or bad syntax for -m"); - } + } - if (mem_limit < 5) FATAL("Dangerously low value of -m"); + if (mem_limit < 5) FATAL("Dangerously low value of -m"); - if (sizeof(rlim_t) == 4 && mem_limit > 2000) - FATAL("Value of -m out of range on 32-bit systems"); + if (sizeof(rlim_t) == 4 && mem_limit > 2000) + FATAL("Value of -m out of range on 32-bit systems"); - } + } - break; + break; case 't': @@ -609,6 +610,7 @@ int main(int argc, char** argv) { timeout_given = 1; if (strcmp(optarg, "none")) { + exec_tmout = atoi(optarg); if (exec_tmout < 20 || optarg[0] == '-') @@ -636,7 +638,7 @@ int main(int argc, char** argv) { /* This is an undocumented option to write data in the syntax expected by afl-cmin. Nobody else should have any use for this. */ - cmin_mode = 1; + cmin_mode = 1; quiet_mode = 1; break; @@ -675,7 +677,7 @@ int main(int argc, char** argv) { if (keep_cores) FATAL("Multiple -c options not supported"); keep_cores = 1; break; - + case 'r': if (raw_instr_output) FATAL("Multiple -r options not supported"); @@ -683,9 +685,7 @@ int main(int argc, char** argv) { raw_instr_output = 1; break; - default: - - usage(argv[0]); + default: usage(argv[0]); } @@ -699,8 +699,10 @@ int main(int argc, char** argv) { find_binary(argv[optind]); if (!quiet_mode) { + show_banner(); ACTF("Executing '%s'...\n", target_path); + } detect_file_args(argv + optind, at_file); @@ -717,7 +719,8 @@ int main(int argc, char** argv) { if (!quiet_mode) { if (!tcnt) FATAL("No instrumentation detected" cRST); - OKF("Captured %u tuples (highest value %u, total values %u) in '%s'." cRST, tcnt, highest, total, out_file); + OKF("Captured %u tuples (highest value %u, total values %u) in '%s'." 
cRST, + tcnt, highest, total, out_file); } diff --git a/src/afl-tmin.c b/src/afl-tmin.c index 529720ca..9decdb4d 100644 --- a/src/afl-tmin.c +++ b/src/afl-tmin.c @@ -22,7 +22,7 @@ #define AFL_MAIN #ifdef __ANDROID__ - #include "android-ashmem.h" +# include "android-ashmem.h" #endif #include "config.h" @@ -51,72 +51,71 @@ #include <sys/types.h> #include <sys/resource.h> -s32 forksrv_pid, /* PID of the fork server */ - child_pid; /* PID of the tested program */ +s32 forksrv_pid, /* PID of the fork server */ + child_pid; /* PID of the tested program */ -s32 fsrv_ctl_fd, /* Fork server control pipe (write) */ - fsrv_st_fd; /* Fork server status pipe (read) */ +s32 fsrv_ctl_fd, /* Fork server control pipe (write) */ + fsrv_st_fd; /* Fork server status pipe (read) */ - u8 *trace_bits; /* SHM with instrumentation bitmap */ -static u8 *mask_bitmap; /* Mask for trace bits (-B) */ +u8* trace_bits; /* SHM with instrumentation bitmap */ +static u8* mask_bitmap; /* Mask for trace bits (-B) */ - u8 *in_file, /* Minimizer input test case */ - *output_file, /* Minimizer output file */ - *out_file, /* Targeted program input file */ - *target_path, /* Path to target binary */ - *doc_path; /* Path to docs */ +u8 *in_file, /* Minimizer input test case */ + *output_file, /* Minimizer output file */ + *out_file, /* Targeted program input file */ + *target_path, /* Path to target binary */ + *doc_path; /* Path to docs */ - s32 out_fd; /* Persistent fd for out_file */ +s32 out_fd; /* Persistent fd for out_file */ -static u8* in_data; /* Input data for trimming */ +static u8* in_data; /* Input data for trimming */ -static u32 in_len, /* Input data length */ - orig_cksum, /* Original checksum */ - total_execs, /* Total number of execs */ - missed_hangs, /* Misses due to hangs */ - missed_crashes, /* Misses due to crashes */ - missed_paths; /* Misses due to exec path diffs */ - u32 exec_tmout = EXEC_TIMEOUT; /* Exec timeout (ms) */ +static u32 in_len, /* Input data length */ + orig_cksum, /* Original checksum */ + total_execs, /* Total number of execs */ + missed_hangs, /* Misses due to hangs */ + missed_crashes, /* Misses due to crashes */ + missed_paths; /* Misses due to exec path diffs */ +u32 exec_tmout = EXEC_TIMEOUT; /* Exec timeout (ms) */ - u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */ +u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */ - s32 dev_null_fd = -1; /* FD to /dev/null */ +s32 dev_null_fd = -1; /* FD to /dev/null */ -static u8 crash_mode, /* Crash-centric mode? */ - exit_crash, /* Treat non-zero exit as crash? */ - edges_only, /* Ignore hit counts? */ - exact_mode, /* Require path match for crashes? */ - use_stdin = 1; /* Use stdin for program input? */ +static u8 crash_mode, /* Crash-centric mode? */ + exit_crash, /* Treat non-zero exit as crash? */ + edges_only, /* Ignore hit counts? */ + exact_mode, /* Require path match for crashes? */ + use_stdin = 1; /* Use stdin for program input? */ -static volatile u8 - stop_soon; /* Ctrl-C pressed? */ +static volatile u8 stop_soon; /* Ctrl-C pressed? */ /* * forkserver section */ /* we only need this to use afl-forkserver */ -FILE *plot_file; -u8 uses_asan; -s32 out_fd = -1, out_dir_fd = -1, dev_urandom_fd = -1; +FILE* plot_file; +u8 uses_asan; +s32 out_fd = -1, out_dir_fd = -1, dev_urandom_fd = -1; /* we import this as we need this information */ extern u8 child_timed_out; - -/* Classify tuple counts. This is a slow & naive version, but good enough here. */ +/* Classify tuple counts. This is a slow & naive version, but good enough here. 
+ */ static const u8 count_class_lookup[256] = { - [0] = 0, - [1] = 1, - [2] = 2, - [3] = 4, - [4 ... 7] = 8, - [8 ... 15] = 16, - [16 ... 31] = 32, - [32 ... 127] = 64, - [128 ... 255] = 128 + [0] = 0, + [1] = 1, + [2] = 2, + [3] = 4, + [4 ... 7] = 8, + [8 ... 15] = 16, + [16 ... 31] = 32, + [32 ... 127] = 64, + [128 ... 255] = 128 }; @@ -127,22 +126,25 @@ static void classify_counts(u8* mem) { if (edges_only) { while (i--) { + if (*mem) *mem = 1; mem++; + } } else { while (i--) { + *mem = count_class_lookup[*mem]; mem++; + } } } - /* Apply mask to classified bitmap (if set). */ static void apply_mask(u32* mem, u32* mask) { @@ -161,25 +163,26 @@ static void apply_mask(u32* mem, u32* mask) { } - /* See if any bytes are set in the bitmap. */ static inline u8 anything_set(void) { u32* ptr = (u32*)trace_bits; - u32 i = (MAP_SIZE >> 2); + u32 i = (MAP_SIZE >> 2); - while (i--) if (*(ptr++)) return 1; + while (i--) + if (*(ptr++)) return 1; return 0; } - /* Get rid of temp files (atexit handler). */ static void at_exit_handler(void) { - if (out_file) unlink(out_file); /* Ignore errors */ + + if (out_file) unlink(out_file); /* Ignore errors */ + } /* Read initial file. */ @@ -187,17 +190,16 @@ static void at_exit_handler(void) { static void read_initial_file(void) { struct stat st; - s32 fd = open(in_file, O_RDONLY); + s32 fd = open(in_file, O_RDONLY); if (fd < 0) PFATAL("Unable to open '%s'", in_file); - if (fstat(fd, &st) || !st.st_size) - FATAL("Zero-sized input file."); + if (fstat(fd, &st) || !st.st_size) FATAL("Zero-sized input file."); if (st.st_size >= TMIN_MAX_FILE) FATAL("Input file is too large (%u MB max)", TMIN_MAX_FILE / 1024 / 1024); - in_len = st.st_size; + in_len = st.st_size; in_data = ck_alloc_nozero(in_len); ck_read(fd, in_data, in_len, in_file); @@ -208,14 +210,13 @@ static void read_initial_file(void) { } - /* Write output file. */ static s32 write_to_file(u8* path, u8* mem, u32 len) { s32 ret; - unlink(path); /* Ignore errors */ + unlink(path); /* Ignore errors */ ret = open(path, O_RDWR | O_CREAT | O_EXCL, 0600); @@ -239,13 +240,15 @@ static void write_to_testcase(void* mem, u32 len) { if (!use_stdin) { - unlink(out_file); /* Ignore errors. */ + unlink(out_file); /* Ignore errors. */ fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600); if (fd < 0) PFATAL("Unable to create '%s'", out_file); - } else lseek(fd, 0, SEEK_SET); + } else + + lseek(fd, 0, SEEK_SET); ck_write(fd, mem, len, out_file); @@ -254,11 +257,11 @@ static void write_to_testcase(void* mem, u32 len) { if (ftruncate(fd, len)) PFATAL("ftruncate() failed"); lseek(fd, 0, SEEK_SET); - } else close(fd); - -} + } else + close(fd); +} /* Handle timeout signal. */ /* @@ -277,11 +280,13 @@ static void handle_timeout(int sig) { } } + */ /* start the app and it's forkserver */ /* static void init_forkserver(char **argv) { + static struct itimerval it; int st_pipe[2], ctl_pipe[2]; int status = 0; @@ -348,7 +353,7 @@ static void init_forkserver(char **argv) { } - // Close the unneeded endpoints. + // Close the unneeded endpoints. close(ctl_pipe[0]); close(st_pipe[1]); @@ -378,8 +383,10 @@ static void init_forkserver(char **argv) { // Otherwise, try to figure out what went wrong. if (rlen == 4) { + ACTF("All right - fork server is up."); return; + } if (waitpid(forksrv_pid, &status, 0) <= 0) @@ -398,6 +405,7 @@ static void init_forkserver(char **argv) { SAYF(cLRD "\n+++ Program killed by signal %u +++\n" cRST, WTERMSIG(status)); } + */ /* Execute target application. 
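anything_set() earlier in this hunk scans the trace map one u32 at a time instead of per byte. The same trick, runnable on its own (MAP_SIZE_DEMO replaces AFL's MAP_SIZE; the u32 view of the u8 buffer relies on the map's alignment, as the original does):

#include <stdio.h>

#define MAP_SIZE_DEMO 65536

static unsigned char trace_bits[MAP_SIZE_DEMO];

/* Word-at-a-time emptiness check, as in afl-tmin's anything_set(). */
static unsigned char anything_set(void) {
  unsigned int* ptr = (unsigned int*)trace_bits;
  unsigned int  i   = (MAP_SIZE_DEMO >> 2);
  while (i--)
    if (*(ptr++)) return 1;
  return 0;
}

int main(void) {
  printf("%d\n", anything_set());   /* 0 */
  trace_bits[1234] = 1;
  printf("%d\n", anything_set());   /* 1 */
  return 0;
}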
Returns 0 if the changes are a dud, or @@ -406,8 +414,8 @@ static void init_forkserver(char **argv) { static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) { static struct itimerval it; - static u32 prev_timed_out = 0; - int status = 0; + static u32 prev_timed_out = 0; + int status = 0; u32 cksum; @@ -440,8 +448,10 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) { /* Configure timeout, wait for child, cancel timeout. */ if (exec_tmout) { + it.it_value.tv_sec = (exec_tmout / 1000); it.it_value.tv_usec = (exec_tmout % 1000) * 1000; + } setitimer(ITIMER_REAL, &it, NULL); @@ -508,9 +518,9 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) { } else - /* Handle non-crashing inputs appropriately. */ + /* Handle non-crashing inputs appropriately. */ - if (crash_mode) { + if (crash_mode) { missed_paths++; return 0; @@ -522,24 +532,23 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) { if (first_run) orig_cksum = cksum; if (orig_cksum == cksum) return 1; - + missed_paths++; return 0; } - /* Find first power of two greater or equal to val. */ static u32 next_p2(u32 val) { u32 ret = 1; - while (val > ret) ret <<= 1; + while (val > ret) + ret <<= 1; return ret; } - /* Actually minimize! */ static void minimize(char** argv) { @@ -557,8 +566,8 @@ static void minimize(char** argv) { * BLOCK NORMALIZATION * ***********************/ - set_len = next_p2(in_len / TMIN_SET_STEPS); - set_pos = 0; + set_len = next_p2(in_len / TMIN_SET_STEPS); + set_pos = 0; if (set_len < TMIN_SET_MIN_SIZE) set_len = TMIN_SET_MIN_SIZE; @@ -575,14 +584,14 @@ static void minimize(char** argv) { memcpy(tmp_buf, in_data, in_len); memset(tmp_buf + set_pos, '0', use_len); - - u8 res; + + u8 res; res = run_target(argv, tmp_buf, in_len, 0); if (res) { memset(in_data + set_pos, '0', use_len); -/* changed_any = 1; value is not used */ + /* changed_any = 1; value is not used */ alpha_del0 += use_len; } @@ -615,11 +624,11 @@ next_pass: next_del_blksize: if (!del_len) del_len = 1; - del_pos = 0; + del_pos = 0; prev_del = 1; - SAYF(cGRA " Block length = %u, remaining size = %u\n" cRST, - del_len, in_len); + SAYF(cGRA " Block length = %u, remaining size = %u\n" cRST, del_len, + in_len); while (del_pos < in_len) { @@ -634,8 +643,8 @@ next_del_blksize: very end of the buffer (tail_len > 0), and the current block is the same as the previous one... skip this step as a no-op. */ - if (!prev_del && tail_len && !memcmp(in_data + del_pos - del_len, - in_data + del_pos, del_len)) { + if (!prev_del && tail_len && + !memcmp(in_data + del_pos - del_len, in_data + del_pos, del_len)) { del_pos += del_len; continue; @@ -656,11 +665,13 @@ next_del_blksize: memcpy(in_data, tmp_buf, del_pos + tail_len); prev_del = 1; - in_len = del_pos + tail_len; + in_len = del_pos + tail_len; changed_any = 1; - } else del_pos += del_len; + } else + + del_pos += del_len; } @@ -674,7 +685,8 @@ next_del_blksize: OKF("Block removal complete, %u bytes deleted.", stage_o_len - in_len); if (!in_len && changed_any) - WARNF(cLRD "Down to zero bytes - check the command line and mem limit!" cRST); + WARNF(cLRD + "Down to zero bytes - check the command line and mem limit!" 
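The block-removal stage above tries to delete power-of-two sized blocks, shrinking the block size between passes until it reaches a single byte; next_p2() picks the starting size. A sketch of just that size ladder (the divisor 16 and the starting point are illustrative, and the run_target() keep/reject check is elided):

#include <stdio.h>

/* First power of two >= val, as in afl-tmin's next_p2(). */
static unsigned next_p2(unsigned val) {
  unsigned ret = 1;
  while (val > ret) ret <<= 1;
  return ret;
}

int main(void) {

  unsigned in_len = 1000;

  /* Each pass would attempt to cut [del_pos, del_pos + del_len) and keep
     the cut when the target still behaves the same. */
  for (unsigned del_len = next_p2(in_len / 16); del_len; del_len /= 2)
    printf("try removing blocks of %u bytes\n", del_len);

  return 0;

}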
cRST); if (cur_pass > 1 && !changed_any) goto finalize_all; @@ -682,15 +694,17 @@ next_del_blksize: * ALPHABET MINIMIZATION * *************************/ - alpha_size = 0; - alpha_del1 = 0; + alpha_size = 0; + alpha_del1 = 0; syms_removed = 0; memset(alpha_map, 0, sizeof(alpha_map)); for (i = 0; i < in_len; i++) { + if (!alpha_map[in_data[i]]) alpha_size++; alpha_map[in_data[i]]++; + } ACTF(cBRI "Stage #2: " cRST "Minimizing symbols (%u code point%s)...", @@ -699,14 +713,14 @@ next_del_blksize: for (i = 0; i < 256; i++) { u32 r; - u8 res; + u8 res; if (i == '0' || !alpha_map[i]) continue; memcpy(tmp_buf, in_data, in_len); for (r = 0; r < in_len; r++) - if (tmp_buf[r] == i) tmp_buf[r] = '0'; + if (tmp_buf[r] == i) tmp_buf[r] = '0'; res = run_target(argv, tmp_buf, in_len, 0); @@ -724,8 +738,8 @@ next_del_blksize: alpha_d_total += alpha_del1; OKF("Symbol minimization finished, %u symbol%s (%u byte%s) replaced.", - syms_removed, syms_removed == 1 ? "" : "s", - alpha_del1, alpha_del1 == 1 ? "" : "s"); + syms_removed, syms_removed == 1 ? "" : "s", alpha_del1, + alpha_del1 == 1 ? "" : "s"); /************************** * CHARACTER MINIMIZATION * @@ -752,36 +766,34 @@ next_del_blksize: alpha_del2++; changed_any = 1; - } else tmp_buf[i] = orig; + } else + + tmp_buf[i] = orig; } alpha_d_total += alpha_del2; - OKF("Character minimization done, %u byte%s replaced.", - alpha_del2, alpha_del2 == 1 ? "" : "s"); + OKF("Character minimization done, %u byte%s replaced.", alpha_del2, + alpha_del2 == 1 ? "" : "s"); if (changed_any) goto next_pass; finalize_all: - SAYF("\n" - cGRA " File size reduced by : " cRST "%0.02f%% (to %u byte%s)\n" - cGRA " Characters simplified : " cRST "%0.02f%%\n" - cGRA " Number of execs done : " cRST "%u\n" - cGRA " Fruitless execs : " cRST "path=%u crash=%u hang=%s%u\n\n", + SAYF("\n" cGRA " File size reduced by : " cRST + "%0.02f%% (to %u byte%s)\n" cGRA " Characters simplified : " cRST + "%0.02f%%\n" cGRA " Number of execs done : " cRST "%u\n" cGRA + " Fruitless execs : " cRST "path=%u crash=%u hang=%s%u\n\n", 100 - ((double)in_len) * 100 / orig_len, in_len, in_len == 1 ? "" : "s", - ((double)(alpha_d_total)) * 100 / (in_len ? in_len : 1), - total_execs, missed_paths, missed_crashes, missed_hangs ? cLRD : "", - missed_hangs); + ((double)(alpha_d_total)) * 100 / (in_len ? in_len : 1), total_execs, + missed_paths, missed_crashes, missed_hangs ? cLRD : "", missed_hangs); if (total_execs > 50 && missed_hangs * 10 > total_execs) WARNF(cLRD "Frequent timeouts - results may be skewed." cRST); } - - /* Handle Ctrl-C and the like. */ static void handle_stop_sig(int sig) { @@ -792,7 +804,6 @@ static void handle_stop_sig(int sig) { } - /* Do basic preparations - persistent fds, filenames, etc. */ static void set_up_environment(void) { @@ -823,7 +834,6 @@ static void set_up_environment(void) { if (out_fd < 0) PFATAL("Unable to create '%s'", out_file); - /* Set sane defaults... 
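Stage #2 above tries, for each byte value present in the input, to replace every occurrence with '0' at once, keeping the substitution whenever the target's behavior is unchanged. A toy but runnable version, with a hypothetical still_interesting() oracle standing in for run_target():

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for run_target(): returns 1 when the mutated
   buffer still reproduces the behavior of interest. */
static int still_interesting(const unsigned char* buf, unsigned len) {
  return memchr(buf, '!', len) != NULL;   /* toy oracle for the demo */
}

int main(void) {

  unsigned char in_data[] = "abc!abc!xyz";
  unsigned      in_len    = sizeof(in_data) - 1;
  unsigned char tmp[sizeof(in_data)];

  /* Alphabet minimization: replace ALL occurrences of one byte value
     with '0' in a single step, keep it if the oracle still fires. */
  for (int c = 1; c < 256; c++) {
    if (c == '0') continue;
    memcpy(tmp, in_data, in_len);
    int seen = 0;
    for (unsigned r = 0; r < in_len; r++)
      if (tmp[r] == c) { tmp[r] = '0'; seen = 1; }
    if (seen && still_interesting(tmp, in_len)) memcpy(in_data, tmp, in_len);
  }

  printf("%.*s\n", (int)in_len, (char*)in_data);   /* -> 000!000!000 */
  return 0;

}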
*/ x = getenv("ASAN_OPTIONS"); @@ -843,18 +853,20 @@ static void set_up_environment(void) { if (x) { if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR))) - FATAL("Custom MSAN_OPTIONS set without exit_code=" - STRINGIFY(MSAN_ERROR) " - please fix!"); + FATAL("Custom MSAN_OPTIONS set without exit_code=" STRINGIFY( + MSAN_ERROR) " - please fix!"); if (!strstr(x, "symbolize=0")) FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!"); } - setenv("ASAN_OPTIONS", "abort_on_error=1:" - "detect_leaks=0:" - "symbolize=0:" - "allocator_may_return_null=1", 0); + setenv("ASAN_OPTIONS", + "abort_on_error=1:" + "detect_leaks=0:" + "symbolize=0:" + "allocator_may_return_null=1", + 0); setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":" "symbolize=0:" @@ -863,21 +875,22 @@ static void set_up_environment(void) { "msan_track_origins=0", 0); if (getenv("AFL_PRELOAD")) { + setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1); setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1); + } } - /* Setup signal handlers, duh. */ static void setup_signal_handlers(void) { struct sigaction sa; - sa.sa_handler = NULL; - sa.sa_flags = SA_RESTART; + sa.sa_handler = NULL; + sa.sa_flags = SA_RESTART; sa.sa_sigaction = NULL; sigemptyset(&sa.sa_mask); @@ -896,46 +909,46 @@ static void setup_signal_handlers(void) { } - /* Display usage hints. */ static void usage(u8* argv0) { - SAYF("\n%s [ options ] -- /path/to/target_app [ ... ]\n\n" + SAYF( + "\n%s [ options ] -- /path/to/target_app [ ... ]\n\n" - "Required parameters:\n\n" + "Required parameters:\n\n" - " -i file - input test case to be shrunk by the tool\n" - " -o file - final output location for the minimized data\n\n" + " -i file - input test case to be shrunk by the tool\n" + " -o file - final output location for the minimized data\n\n" - "Execution control settings:\n\n" + "Execution control settings:\n\n" - " -f file - input file read by the tested program (stdin)\n" - " -t msec - timeout for each run (%d ms)\n" - " -m megs - memory limit for child process (%d MB)\n" - " -Q - use binary-only instrumentation (QEMU mode)\n" - " -U - use Unicorn-based instrumentation (Unicorn mode)\n\n" - " (Not necessary, here for consistency with other afl-* tools)\n\n" + " -f file - input file read by the tested program (stdin)\n" + " -t msec - timeout for each run (%d ms)\n" + " -m megs - memory limit for child process (%d MB)\n" + " -Q - use binary-only instrumentation (QEMU mode)\n" + " -U - use Unicorn-based instrumentation (Unicorn mode)\n\n" + " (Not necessary, here for consistency with other afl-* " + "tools)\n\n" - "Minimization settings:\n\n" + "Minimization settings:\n\n" - " -e - solve for edge coverage only, ignore hit counts\n" - " -x - treat non-zero exit codes as crashes\n\n" + " -e - solve for edge coverage only, ignore hit counts\n" + " -x - treat non-zero exit codes as crashes\n\n" - "For additional tips, please consult %s/README.\n\n", + "For additional tips, please consult %s/README.\n\n", - argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path); + argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path); exit(1); } - /* Find binary. 
*/ static void find_binary(u8* fname) { - u8* env_path = 0; + u8* env_path = 0; struct stat st; if (strchr(fname, '/') || !(env_path = getenv("PATH"))) { @@ -958,7 +971,9 @@ static void find_binary(u8* fname) { memcpy(cur_elem, env_path, delim - env_path); delim++; - } else cur_elem = ck_strdup(env_path); + } else + + cur_elem = ck_strdup(env_path); env_path = delim; @@ -970,7 +985,8 @@ static void find_binary(u8* fname) { ck_free(cur_elem); if (!stat(target_path, &st) && S_ISREG(st.st_mode) && - (st.st_mode & 0111) && st.st_size >= 4) break; + (st.st_mode & 0111) && st.st_size >= 4) + break; ck_free(target_path); target_path = 0; @@ -983,13 +999,12 @@ static void find_binary(u8* fname) { } - /* Fix up argv for QEMU. */ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { char** new_argv = ck_alloc(sizeof(char*) * (argc + 4)); - u8 *tmp, *cp, *rsl, *own_copy; + u8 * tmp, *cp, *rsl, *own_copy; memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc); @@ -1004,8 +1019,7 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { cp = alloc_printf("%s/afl-qemu-trace", tmp); - if (access(cp, X_OK)) - FATAL("Unable to find '%s'", tmp); + if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp); target_path = new_argv[0] = cp; return new_argv; @@ -1029,7 +1043,9 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { } - } else ck_free(own_copy); + } else + + ck_free(own_copy); if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) { @@ -1056,8 +1072,6 @@ static void read_bitmap(u8* fname) { } - - /* Main entry point */ int main(int argc, char** argv) { @@ -1070,7 +1084,7 @@ int main(int argc, char** argv) { SAYF(cCYA "afl-tmin" VERSION cRST " by <lcamtuf@google.com>\n"); - while ((opt = getopt(argc,argv,"+i:o:f:m:t:B:xeQU")) > 0) + while ((opt = getopt(argc, argv, "+i:o:f:m:t:B:xeQU")) > 0) switch (opt) { @@ -1090,7 +1104,7 @@ int main(int argc, char** argv) { if (out_file) FATAL("Multiple -f options not supported"); use_stdin = 0; - out_file = optarg; + out_file = optarg; break; case 'e': @@ -1107,40 +1121,41 @@ int main(int argc, char** argv) { case 'm': { - u8 suffix = 'M'; + u8 suffix = 'M'; - if (mem_limit_given) FATAL("Multiple -m options not supported"); - mem_limit_given = 1; + if (mem_limit_given) FATAL("Multiple -m options not supported"); + mem_limit_given = 1; - if (!strcmp(optarg, "none")) { + if (!strcmp(optarg, "none")) { - mem_limit = 0; - break; + mem_limit = 0; + break; - } + } - if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 || - optarg[0] == '-') FATAL("Bad syntax used for -m"); + if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 || + optarg[0] == '-') + FATAL("Bad syntax used for -m"); - switch (suffix) { + switch (suffix) { - case 'T': mem_limit *= 1024 * 1024; break; - case 'G': mem_limit *= 1024; break; - case 'k': mem_limit /= 1024; break; - case 'M': break; + case 'T': mem_limit *= 1024 * 1024; break; + case 'G': mem_limit *= 1024; break; + case 'k': mem_limit /= 1024; break; + case 'M': break; - default: FATAL("Unsupported suffix or bad syntax for -m"); + default: FATAL("Unsupported suffix or bad syntax for -m"); - } + } - if (mem_limit < 5) FATAL("Dangerously low value of -m"); + if (mem_limit < 5) FATAL("Dangerously low value of -m"); - if (sizeof(rlim_t) == 4 && mem_limit > 2000) - FATAL("Value of -m out of range on 32-bit systems"); + if (sizeof(rlim_t) == 4 && mem_limit > 2000) + FATAL("Value of -m out of range on 32-bit systems"); - } + } - break; + break; case 't': @@ -1170,7 +1185,7 @@ int main(int argc, char** argv) { 
unicorn_mode = 1; break; - case 'B': /* load bitmap */ + case 'B': /* load bitmap */ /* This is a secret undocumented option! It is speculated to be useful if you have a baseline "boring" input file and another "interesting" @@ -1190,9 +1205,7 @@ int main(int argc, char** argv) { read_bitmap(optarg); break; - default: - - usage(argv[0]); + default: usage(argv[0]); } @@ -1230,15 +1243,16 @@ int main(int argc, char** argv) { if (!crash_mode) { - OKF("Program terminates normally, minimizing in " - cCYA "instrumented" cRST " mode."); + OKF("Program terminates normally, minimizing in " cCYA "instrumented" cRST + " mode."); - if (!anything_set()) FATAL("No instrumentation detected."); + if (!anything_set()) FATAL("No instrumentation detected."); } else { - OKF("Program exits with a signal, minimizing in " cMGN "%scrash" cRST - " mode.", exact_mode ? "EXACT " : ""); + OKF("Program exits with a signal, minimizing in " cMGN "%scrash" cRST + " mode.", + exact_mode ? "EXACT " : ""); } diff --git a/test-instr.c b/test-instr.c index 9107f15e..71838462 100644 --- a/test-instr.c +++ b/test-instr.c @@ -20,14 +20,16 @@ int main(int argc, char** argv) { - char buff[8]; - char *buf = buff; + char buff[8]; + char* buf = buff; if (argc > 1) buf = argv[1]; else if (read(0, buf, sizeof(buf)) < 1) { + printf("Hum?\n"); exit(1); + } if (buf[0] == '0') @@ -40,3 +42,4 @@ int main(int argc, char** argv) { exit(0); } + diff --git a/unicorn_mode/patches/afl-unicorn-common.h b/unicorn_mode/patches/afl-unicorn-common.h index 6798832c..d5038d06 100644 --- a/unicorn_mode/patches/afl-unicorn-common.h +++ b/unicorn_mode/patches/afl-unicorn-common.h @@ -32,19 +32,17 @@ #include "../../config.h" -/* NeverZero */ +/* NeverZero */ #if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) -# define INC_AFL_AREA(loc) \ - asm volatile ( \ - "incb (%0, %1, 1)\n" \ - "adcb $0, (%0, %1, 1)\n" \ - : /* no out */ \ - : "r" (afl_area_ptr), "r" (loc) \ - : "memory", "eax" \ - ) +# define INC_AFL_AREA(loc) \ + asm volatile( \ + "incb (%0, %1, 1)\n" \ + "adcb $0, (%0, %1, 1)\n" \ + : /* no out */ \ + : "r"(afl_area_ptr), "r"(loc) \ + : "memory", "eax") #else -# define INC_AFL_AREA(loc) \ - afl_area_ptr[loc]++ +# define INC_AFL_AREA(loc) afl_area_ptr[loc]++ #endif diff --git a/unicorn_mode/patches/afl-unicorn-cpu-inl.h b/unicorn_mode/patches/afl-unicorn-cpu-inl.h index a713e4ca..082d6d68 100644 --- a/unicorn_mode/patches/afl-unicorn-cpu-inl.h +++ b/unicorn_mode/patches/afl-unicorn-cpu-inl.h @@ -44,21 +44,29 @@ it to translate within its own context, too (this avoids translation overhead in the next forked-off copy). */ -#define AFL_UNICORN_CPU_SNIPPET1 do { \ +#define AFL_UNICORN_CPU_SNIPPET1 \ + do { \ + \ afl_request_tsl(pc, cs_base, flags); \ + \ } while (0) /* This snippet kicks in when the instruction pointer is positioned at _start and does the usual forkserver stuff, not very different from regular instrumentation injected via afl-as.h. 
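The INC_AFL_AREA() asm above is the NeverZero trick: incb bumps the counter and adcb $0 adds the carry back in, so a cell that wraps past 255 lands on 1 instead of 0 and the edge never vanishes from the bitmap. The same semantics in plain C:

#include <stdio.h>

static unsigned char afl_area[65536];

/* C equivalent of the "incb + adcb $0" pair: if the increment wraps to
   zero, the carry is added back so the cell never reads 0 again. */
static void inc_afl_area(unsigned loc) {
  unsigned char v = afl_area[loc] + 1;
  afl_area[loc] = v + (v == 0);
}

int main(void) {
  afl_area[7] = 255;
  inc_afl_area(7);
  printf("%d\n", afl_area[7]);   /* 1, not 0 */
  return 0;
}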
*/ -#define AFL_UNICORN_CPU_SNIPPET2 do { \ - if(unlikely(afl_first_instr == 0)) { \ - afl_setup(env->uc); \ - afl_forkserver(env); \ - afl_first_instr = 1; \ - } \ - afl_maybe_log(env->uc, tb->pc); \ +#define AFL_UNICORN_CPU_SNIPPET2 \ + do { \ + \ + if (unlikely(afl_first_instr == 0)) { \ + \ + afl_setup(env->uc); \ + afl_forkserver(env); \ + afl_first_instr = 1; \ + \ + } \ + afl_maybe_log(env->uc, tb->pc); \ + \ } while (0) /* We use one additional file descriptor to relay "needs translation" @@ -69,26 +77,28 @@ /* Set in the child process in forkserver mode: */ static unsigned char afl_fork_child; -static unsigned int afl_forksrv_pid; +static unsigned int afl_forksrv_pid; /* Function declarations. */ -static void afl_setup(struct uc_struct* uc); -static void afl_forkserver(CPUArchState*); +static void afl_setup(struct uc_struct* uc); +static void afl_forkserver(CPUArchState*); static inline void afl_maybe_log(struct uc_struct* uc, unsigned long); static void afl_wait_tsl(CPUArchState*, int); static void afl_request_tsl(target_ulong, target_ulong, uint64_t); -static TranslationBlock *tb_find_slow(CPUArchState*, target_ulong, - target_ulong, uint64_t); +static TranslationBlock* tb_find_slow(CPUArchState*, target_ulong, target_ulong, + uint64_t); /* Data structure passed around by the translate handlers: */ struct afl_tsl { + target_ulong pc; target_ulong cs_base; - uint64_t flags; + uint64_t flags; + }; /************************* @@ -99,8 +109,7 @@ struct afl_tsl { static void afl_setup(struct uc_struct* uc) { - char *id_str = getenv(SHM_ENV_VAR), - *inst_r = getenv("AFL_INST_RATIO"); + char *id_str = getenv(SHM_ENV_VAR), *inst_r = getenv("AFL_INST_RATIO"); int shm_id; @@ -116,9 +125,9 @@ static void afl_setup(struct uc_struct* uc) { uc->afl_inst_rms = MAP_SIZE * r / 100; } else { - + uc->afl_inst_rms = MAP_SIZE; - + } if (id_str) { @@ -132,22 +141,22 @@ static void afl_setup(struct uc_struct* uc) { so that the parent doesn't give up on us. */ if (inst_r) uc->afl_area_ptr[0] = 1; - } - - /* Maintain for compatibility */ - if (getenv("AFL_QEMU_COMPCOV")) { - uc->afl_compcov_level = 1; } + + /* Maintain for compatibility */ + if (getenv("AFL_QEMU_COMPCOV")) { uc->afl_compcov_level = 1; } if (getenv("AFL_COMPCOV_LEVEL")) { uc->afl_compcov_level = atoi(getenv("AFL_COMPCOV_LEVEL")); + } + } /* Fork server logic, invoked once we hit first emulated instruction. */ -static void afl_forkserver(CPUArchState *env) { +static void afl_forkserver(CPUArchState* env) { static unsigned char tmp[4]; @@ -165,13 +174,13 @@ static void afl_forkserver(CPUArchState *env) { while (1) { pid_t child_pid; - int status, t_fd[2]; + int status, t_fd[2]; /* Whoops, parent dead? */ if (read(FORKSRV_FD, tmp, 4) != 4) exit(2); - /* Establish a channel with child to grab translation commands. We'll + /* Establish a channel with child to grab translation commands. We'll read from t_fd[0], child will write to TSL_FD. */ if (pipe(t_fd) || dup2(t_fd[1], TSL_FD) < 0) exit(3); @@ -211,7 +220,6 @@ static void afl_forkserver(CPUArchState *env) { } - /* The equivalent of the tuple logging routine from afl-as.h. */ static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) { @@ -220,14 +228,13 @@ static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) { u8* afl_area_ptr = uc->afl_area_ptr; - if(!afl_area_ptr) - return; + if (!afl_area_ptr) return; /* Looks like QEMU always maps to fixed locations, so ASAN is not a concern. Phew. But instruction addresses may be aligned. 
Let's mangle the value to get something quasi-uniform. */ - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 1; /* Implement probabilistic instrumentation by looking at scrambled block @@ -243,7 +250,6 @@ static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) { } - /* This code is invoked whenever QEMU decides that it doesn't have a translation of a particular block and needs to compute it. When this happens, we tell the parent to mirror the operation, so that the next fork() has a @@ -255,20 +261,19 @@ static void afl_request_tsl(target_ulong pc, target_ulong cb, uint64_t flags) { if (!afl_fork_child) return; - t.pc = pc; + t.pc = pc; t.cs_base = cb; - t.flags = flags; + t.flags = flags; if (write(TSL_FD, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) return; } - /* This is the other side of the same channel. Since timeouts are handled by afl-fuzz simply killing the child, we can just wait until the pipe breaks. */ -static void afl_wait_tsl(CPUArchState *env, int fd) { +static void afl_wait_tsl(CPUArchState* env, int fd) { struct afl_tsl t; @@ -276,12 +281,13 @@ static void afl_wait_tsl(CPUArchState *env, int fd) { /* Broken pipe means it's time to return to the fork server routine. */ - if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) - break; + if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) break; tb_find_slow(env, t.pc, t.cs_base, t.flags); + } close(fd); + } diff --git a/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h b/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h index 69877c6b..7c84058f 100644 --- a/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h +++ b/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h @@ -35,28 +35,23 @@ static void afl_gen_compcov(TCGContext *s, uint64_t cur_loc, TCGv_i64 arg1, TCGv_i64 arg2, TCGMemOp ot, int is_imm) { - if (!s->uc->afl_compcov_level || !s->uc->afl_area_ptr) - return; - - if (!is_imm && s->uc->afl_compcov_level < 2) - return; + if (!s->uc->afl_compcov_level || !s->uc->afl_area_ptr) return; - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + if (!is_imm && s->uc->afl_compcov_level < 2) return; + + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 7; - + if (cur_loc >= s->uc->afl_inst_rms) return; switch (ot) { - case MO_64: - gen_afl_compcov_log_64(s, cur_loc, arg1, arg2); - break; - case MO_32: - gen_afl_compcov_log_32(s, cur_loc, arg1, arg2); - break; - case MO_16: - gen_afl_compcov_log_16(s, cur_loc, arg1, arg2); - break; - default: - return; + + case MO_64: gen_afl_compcov_log_64(s, cur_loc, arg1, arg2); break; + case MO_32: gen_afl_compcov_log_32(s, cur_loc, arg1, arg2); break; + case MO_16: gen_afl_compcov_log_16(s, cur_loc, arg1, arg2); break; + default: return; + } + } + diff --git a/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h b/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h index fa4974d6..d21bbcc7 100644 --- a/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h +++ b/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h @@ -31,26 +31,29 @@ */ static inline void gen_afl_compcov_log_16(TCGContext *tcg_ctx, uint64_t cur_loc, - TCGv_i64 arg1, TCGv_i64 arg2) -{ - TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc); - TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc); - gen_helper_afl_compcov_log_16(tcg_ctx, tuc, tcur_loc, arg1, arg2); + TCGv_i64 arg1, TCGv_i64 arg2) { + + TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc); + TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc); + 
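afl_request_tsl() and afl_wait_tsl() above exchange fixed-size records over a dedicated pipe so the fork server can mirror the child's block translations, and a broken pipe rather than an explicit message marks the end of the stream. A self-contained sketch of that protocol (struct tsl_req simplifies the field types of struct afl_tsl, and both ends run in one process here):

#include <stdio.h>
#include <unistd.h>

/* Shape of the "needs translation" request relayed to the fork server. */
struct tsl_req {
  unsigned long      pc, cs_base;
  unsigned long long flags;
};

int main(void) {

  int fd[2];
  if (pipe(fd)) return 1;

  struct tsl_req t = { 0x401000, 0, 0 };

  /* Child side (afl_request_tsl): fire-and-forget fixed-size write. */
  if (write(fd[1], &t, sizeof(t)) != (ssize_t)sizeof(t)) return 1;
  close(fd[1]);   /* a broken pipe is the "we're done" signal */

  /* Parent side (afl_wait_tsl): read until the pipe breaks. */
  struct tsl_req r;
  while (read(fd[0], &r, sizeof(r)) == (ssize_t)sizeof(r))
    printf("mirror translation of block at 0x%lx\n", r.pc);

  close(fd[0]);
  return 0;

}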
gen_helper_afl_compcov_log_16(tcg_ctx, tuc, tcur_loc, arg1, arg2); + } static inline void gen_afl_compcov_log_32(TCGContext *tcg_ctx, uint64_t cur_loc, - TCGv_i64 arg1, TCGv_i64 arg2) -{ - TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc); - TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc); - gen_helper_afl_compcov_log_32(tcg_ctx, tuc, tcur_loc, arg1, arg2); + TCGv_i64 arg1, TCGv_i64 arg2) { + + TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc); + TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc); + gen_helper_afl_compcov_log_32(tcg_ctx, tuc, tcur_loc, arg1, arg2); + } static inline void gen_afl_compcov_log_64(TCGContext *tcg_ctx, uint64_t cur_loc, - TCGv_i64 arg1, TCGv_i64 arg2) -{ - TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc); - TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc); - gen_helper_afl_compcov_log_64(tcg_ctx, tuc, tcur_loc, arg1, arg2); + TCGv_i64 arg1, TCGv_i64 arg2) { + + TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc); + TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc); + gen_helper_afl_compcov_log_64(tcg_ctx, tuc, tcur_loc, arg1, arg2); + } diff --git a/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h b/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h index 1f0667ce..95e68302 100644 --- a/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h +++ b/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h @@ -38,9 +38,8 @@ void HELPER(afl_compcov_log_16)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, u8* afl_area_ptr = ((struct uc_struct*)uc_ptr)->afl_area_ptr; - if ((arg1 & 0xff) == (arg2 & 0xff)) { - INC_AFL_AREA(cur_loc); - } + if ((arg1 & 0xff) == (arg2 & 0xff)) { INC_AFL_AREA(cur_loc); } + } void HELPER(afl_compcov_log_32)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, @@ -49,14 +48,17 @@ void HELPER(afl_compcov_log_32)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, u8* afl_area_ptr = ((struct uc_struct*)uc_ptr)->afl_area_ptr; if ((arg1 & 0xff) == (arg2 & 0xff)) { + INC_AFL_AREA(cur_loc); if ((arg1 & 0xffff) == (arg2 & 0xffff)) { - INC_AFL_AREA(cur_loc +1); - if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { - INC_AFL_AREA(cur_loc +2); - } + + INC_AFL_AREA(cur_loc + 1); + if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { INC_AFL_AREA(cur_loc + 2); } + } + } + } void HELPER(afl_compcov_log_64)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, @@ -65,25 +67,40 @@ void HELPER(afl_compcov_log_64)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, u8* afl_area_ptr = ((struct uc_struct*)uc_ptr)->afl_area_ptr; if ((arg1 & 0xff) == (arg2 & 0xff)) { + INC_AFL_AREA(cur_loc); if ((arg1 & 0xffff) == (arg2 & 0xffff)) { - INC_AFL_AREA(cur_loc +1); + + INC_AFL_AREA(cur_loc + 1); if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { - INC_AFL_AREA(cur_loc +2); + + INC_AFL_AREA(cur_loc + 2); if ((arg1 & 0xffffffff) == (arg2 & 0xffffffff)) { - INC_AFL_AREA(cur_loc +3); + + INC_AFL_AREA(cur_loc + 3); if ((arg1 & 0xffffffffff) == (arg2 & 0xffffffffff)) { - INC_AFL_AREA(cur_loc +4); + + INC_AFL_AREA(cur_loc + 4); if ((arg1 & 0xffffffffffff) == (arg2 & 0xffffffffffff)) { - INC_AFL_AREA(cur_loc +5); + + INC_AFL_AREA(cur_loc + 5); if ((arg1 & 0xffffffffffffff) == (arg2 & 0xffffffffffffff)) { - INC_AFL_AREA(cur_loc +6); + + INC_AFL_AREA(cur_loc + 6); + } + } + } + } + } + } + } + } |
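afl_compcov_log_64() above turns one 64-bit comparison into per-byte feedback: one bitmap cell is bumped for every matching low-order byte of the operands, up to seven, so a partially solved comparison already registers as progress. The same nested checks flattened into a loop (the demo map and function name are for this sketch only):

#include <stdio.h>

static unsigned char afl_area[65536];

/* Bump one cell per matching low-order byte of the two operands,
   mirroring the nested ifs of HELPER(afl_compcov_log_64). */
static void compcov_log_64(unsigned cur_loc, unsigned long long arg1,
                           unsigned long long arg2) {

  unsigned long long mask = 0xff;
  for (unsigned i = 0; i < 7; i++, mask = (mask << 8) | 0xff) {
    if ((arg1 & mask) != (arg2 & mask)) break;
    afl_area[cur_loc + i]++;
  }

}

int main(void) {
  compcov_log_64(100, 0x11223344AABBCCDDULL, 0x9922334400BBCCDDULL);
  for (int i = 0; i < 7; i++) printf("%d", afl_area[100 + i]);   /* 1110000 */
  printf("\n");
  return 0;
}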