-rw-r--r--  include/afl-fuzz.h        | 392
-rw-r--r--  include/afl-prealloc.h    | 161
-rw-r--r--  include/common.h          |   5
-rw-r--r--  include/forkserver.h      |  36
-rw-r--r--  include/list.h            |  85
-rw-r--r--  include/sharedmem.h       |  26
-rw-r--r--  src/afl-analyze.c         |   8
-rw-r--r--  src/afl-common.c          |  13
-rw-r--r--  src/afl-forkserver.c      |   6
-rw-r--r--  src/afl-fuzz-bitmap.c     |  38
-rw-r--r--  src/afl-fuzz-cmplog.c     |  50
-rw-r--r--  src/afl-fuzz-extras.c     |  42
-rw-r--r--  src/afl-fuzz-globals.c    |  65
-rw-r--r--  src/afl-fuzz-init.c       |  85
-rw-r--r--  src/afl-fuzz-mutators.c   |  42
-rw-r--r--  src/afl-fuzz-one.c        | 241
-rw-r--r--  src/afl-fuzz-python.c     |  37
-rw-r--r--  src/afl-fuzz-queue.c      |  39
-rw-r--r--  src/afl-fuzz-redqueen.c   |  66
-rw-r--r--  src/afl-fuzz-run.c        |  86
-rw-r--r--  src/afl-fuzz-stats.c      | 175
-rw-r--r--  src/afl-fuzz.c            |  74
-rw-r--r--  src/afl-sharedmem.c       |  13
-rw-r--r--  src/afl-showmap.c         |  42
-rw-r--r--  src/afl-tmin.c            |  29
25 files changed, 1034 insertions(+), 822 deletions(-)
diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h
index c773d085..9ef888d9 100644
--- a/include/afl-fuzz.h
+++ b/include/afl-fuzz.h
@@ -109,8 +109,8 @@
extern s8 interesting_8[INTERESTING_8_LEN];
extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN];
-extern s32 interesting_32[INTERESTING_8_LEN + INTERESTING_16_LEN + INTERESTING_32_LEN];
-
+extern s32
+ interesting_32[INTERESTING_8_LEN + INTERESTING_16_LEN + INTERESTING_32_LEN];
struct queue_entry {
@@ -233,7 +233,7 @@ enum {
};
-extern u8 *doc_path; /* gath to documentation dir */
+extern u8* doc_path; /* gath to documentation dir */
/* Python stuff */
#ifdef USE_PYTHON
@@ -263,7 +263,6 @@ extern u8 *doc_path; /* gath to documentation dir */
#define _XOPEN_SOURCE _SAVE_XOPEN_SOURCE
#endif
-
enum {
/* 00 */ PY_FUNC_INIT,
@@ -284,18 +283,18 @@ enum {
typedef struct MOpt_globals {
- u64* finds;
- u64* finds_v2;
- u64* cycles;
- u64* cycles_v2;
- u64* cycles_v3;
- u32 is_pilot_mode;
- u64* pTime;
- u64 period;
- char* havoc_stagename;
- char* splice_stageformat;
- char* havoc_stagenameshort;
- char* splice_stagenameshort;
+ u64* finds;
+ u64* finds_v2;
+ u64* cycles;
+ u64* cycles_v2;
+ u64* cycles_v3;
+ u32 is_pilot_mode;
+ u64* pTime;
+ u64 period;
+ char* havoc_stagename;
+ char* splice_stageformat;
+ char* havoc_stagenameshort;
+ char* splice_stagenameshort;
} MOpt_globals_t;
@@ -307,9 +306,9 @@ typedef struct afl_state {
u32 _id;
afl_forkserver_t fsrv;
- sharedmem_t shm;
+ sharedmem_t shm;
- char** argv; /* argv if needed */
+ char** argv; /* argv if needed */
/* MOpt:
Lots of globals, but mostly for the status UI and other things where it
@@ -346,171 +345,170 @@ typedef struct afl_state {
core_operator_finds_puppet_v2[operator_num],
core_operator_cycles_puppet[operator_num],
core_operator_cycles_puppet_v2[operator_num],
- core_operator_cycles_puppet_v3[operator_num]; /* Execs per fuzz stage */
+ core_operator_cycles_puppet_v3[operator_num]; /* Execs per fuzz stage */
double period_pilot_tmp;
s32 key_lv;
- u8 *in_dir, /* Input directory with test cases */
- *out_dir, /* Working & output directory */
- *tmp_dir, /* Temporary directory for input */
- *sync_dir, /* Synchronization directory */
- *sync_id, /* Fuzzer ID */
- *power_name, /* Power schedule name */
- *use_banner, /* Display banner */
- *in_bitmap, /* Input bitmap */
- *file_extension, /* File extension */
- *orig_cmdline, /* Original command line */
- *infoexec; /* Command to execute on a new crash */
-
- u32 hang_tmout; /* Timeout used for hang det (ms) */
-
- u8 cal_cycles, /* Calibration cycles defaults */
- cal_cycles_long, /* Calibration cycles defaults */
- no_unlink, /* do not unlink cur_input */
- debug, /* Debug mode */
- custom_only, /* Custom mutator only mode */
- python_only; /* Python-only mode */
-
- u32 stats_update_freq; /* Stats update frequency (execs) */
-
- u8 schedule; /* Power schedule (default: EXPLORE)*/
+ u8 *in_dir, /* Input directory with test cases */
+ *out_dir, /* Working & output directory */
+ *tmp_dir, /* Temporary directory for input */
+ *sync_dir, /* Synchronization directory */
+ *sync_id, /* Fuzzer ID */
+ *power_name, /* Power schedule name */
+ *use_banner, /* Display banner */
+ *in_bitmap, /* Input bitmap */
+ *file_extension, /* File extension */
+ *orig_cmdline, /* Original command line */
+ *infoexec; /* Command to execute on a new crash */
+
+ u32 hang_tmout; /* Timeout used for hang det (ms) */
+
+ u8 cal_cycles, /* Calibration cycles defaults */
+ cal_cycles_long, /* Calibration cycles defaults */
+ no_unlink, /* do not unlink cur_input */
+ debug, /* Debug mode */
+ custom_only, /* Custom mutator only mode */
+ python_only; /* Python-only mode */
+
+ u32 stats_update_freq; /* Stats update frequency (execs) */
+
+ u8 schedule; /* Power schedule (default: EXPLORE)*/
u8 havoc_max_mult;
u8 use_radamsa;
size_t (*radamsa_mutate_ptr)(u8*, size_t, u8*, size_t, u32);
- u8 skip_deterministic, /* Skip deterministic stages? */
- force_deterministic, /* Force deterministic stages? */
- use_splicing, /* Recombine input files? */
- dumb_mode, /* Run in non-instrumented mode? */
- score_changed, /* Scoring for favorites changed? */
- kill_signal, /* Signal that killed the child */
- resuming_fuzz, /* Resuming an older fuzzing job? */
- timeout_given, /* Specific timeout given? */
- not_on_tty, /* stdout is not a tty */
- term_too_small, /* terminal dimensions too small */
- no_forkserver, /* Disable forkserver? */
- crash_mode, /* Crash mode! Yeah! */
- in_place_resume, /* Attempt in-place resume? */
- autoresume, /* Resume if afl->out_dir exists? */
- auto_changed, /* Auto-generated tokens changed? */
- no_cpu_meter_red, /* Feng shui on the status screen */
- no_arith, /* Skip most arithmetic ops */
- shuffle_queue, /* Shuffle input queue? */
- bitmap_changed, /* Time to update bitmap? */
- qemu_mode, /* Running in QEMU mode? */
- unicorn_mode, /* Running in Unicorn mode? */
- use_wine, /* Use WINE with QEMU mode */
- skip_requested, /* Skip request, via SIGUSR1 */
- run_over10m, /* Run time over 10 minutes? */
- persistent_mode, /* Running in persistent mode? */
- deferred_mode, /* Deferred forkserver mode? */
- fixed_seed, /* do not reseed */
- fast_cal, /* Try to calibrate faster? */
- disable_trim; /* Never trim in fuzz_one */
-
- u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */
- virgin_tmout[MAP_SIZE], /* Bits we haven't seen in tmouts */
- virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */
-
- u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */
-
- volatile u8 stop_soon, /* Ctrl-C pressed? */
- clear_screen; /* Window resized? */
-
- u32 queued_paths, /* Total number of queued testcases */
- queued_variable, /* Testcases with variable behavior */
- queued_at_start, /* Total number of initial inputs */
- queued_discovered, /* Items discovered during this run */
- queued_imported, /* Items imported via -S */
- queued_favored, /* Paths deemed favorable */
- queued_with_cov, /* Paths with new coverage bytes */
- pending_not_fuzzed, /* Queued but not done yet */
- pending_favored, /* Pending favored paths */
- cur_skipped_paths, /* Abandoned inputs in cur cycle */
- cur_depth, /* Current path depth */
- max_depth, /* Max path depth */
- useless_at_start, /* Number of useless starting paths */
- var_byte_count, /* Bitmap bytes with var behavior */
- current_entry, /* Current queue entry ID */
- havoc_div; /* Cycle count divisor for havoc */
-
- u64 total_crashes, /* Total number of crashes */
- unique_crashes, /* Crashes with unique signatures */
- total_tmouts, /* Total number of timeouts */
- unique_tmouts, /* Timeouts with unique signatures */
- unique_hangs, /* Hangs with unique signatures */
- total_execs, /* Total execve() calls */
- slowest_exec_ms, /* Slowest testcase non hang in ms */
- start_time, /* Unix start time (ms) */
- last_path_time, /* Time for most recent path (ms) */
- last_crash_time, /* Time for most recent crash (ms) */
- last_hang_time, /* Time for most recent hang (ms) */
- last_crash_execs, /* Exec counter at last crash */
- queue_cycle, /* Queue round counter */
- cycles_wo_finds, /* Cycles without any new paths */
- trim_execs, /* Execs done to trim input files */
- bytes_trim_in, /* Bytes coming into the trimmer */
- bytes_trim_out, /* Bytes coming outa the trimmer */
- blocks_eff_total, /* Blocks subject to effector maps */
- blocks_eff_select; /* Blocks selected as fuzzable */
-
- u32 subseq_tmouts; /* Number of timeouts in a row */
-
- u8 *stage_name, /* Name of the current fuzz stage */
- *stage_short, /* Short stage name */
- *syncing_party; /* Currently syncing with... */
-
- u8 stage_name_buf64[64]; /* A name buf with len 64 if needed */
-
- s32 stage_cur, stage_max; /* Stage progression */
- s32 splicing_with; /* Splicing with which test case? */
-
- u32 master_id, master_max; /* Master instance job splitting */
-
- u32 syncing_case; /* Syncing with case #... */
-
- s32 stage_cur_byte, /* Byte offset of current stage op */
- stage_cur_val; /* Value used for stage op */
-
- u8 stage_val_type; /* Value type (STAGE_VAL_*) */
-
- u64 stage_finds[32], /* Patterns found per fuzz stage */
- stage_cycles[32]; /* Execs per fuzz stage */
-
- #ifndef HAVE_ARC4RANDOM
- u32 rand_cnt; /* Random number counter */
- #endif
+ u8 skip_deterministic, /* Skip deterministic stages? */
+ force_deterministic, /* Force deterministic stages? */
+ use_splicing, /* Recombine input files? */
+ dumb_mode, /* Run in non-instrumented mode? */
+ score_changed, /* Scoring for favorites changed? */
+ kill_signal, /* Signal that killed the child */
+ resuming_fuzz, /* Resuming an older fuzzing job? */
+ timeout_given, /* Specific timeout given? */
+ not_on_tty, /* stdout is not a tty */
+ term_too_small, /* terminal dimensions too small */
+ no_forkserver, /* Disable forkserver? */
+ crash_mode, /* Crash mode! Yeah! */
+ in_place_resume, /* Attempt in-place resume? */
+ autoresume, /* Resume if afl->out_dir exists? */
+ auto_changed, /* Auto-generated tokens changed? */
+ no_cpu_meter_red, /* Feng shui on the status screen */
+ no_arith, /* Skip most arithmetic ops */
+ shuffle_queue, /* Shuffle input queue? */
+ bitmap_changed, /* Time to update bitmap? */
+ qemu_mode, /* Running in QEMU mode? */
+ unicorn_mode, /* Running in Unicorn mode? */
+ use_wine, /* Use WINE with QEMU mode */
+ skip_requested, /* Skip request, via SIGUSR1 */
+ run_over10m, /* Run time over 10 minutes? */
+ persistent_mode, /* Running in persistent mode? */
+ deferred_mode, /* Deferred forkserver mode? */
+ fixed_seed, /* do not reseed */
+ fast_cal, /* Try to calibrate faster? */
+ disable_trim; /* Never trim in fuzz_one */
+
+ u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */
+ virgin_tmout[MAP_SIZE], /* Bits we haven't seen in tmouts */
+ virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */
+
+ u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */
+
+ volatile u8 stop_soon, /* Ctrl-C pressed? */
+ clear_screen; /* Window resized? */
+
+ u32 queued_paths, /* Total number of queued testcases */
+ queued_variable, /* Testcases with variable behavior */
+ queued_at_start, /* Total number of initial inputs */
+ queued_discovered, /* Items discovered during this run */
+ queued_imported, /* Items imported via -S */
+ queued_favored, /* Paths deemed favorable */
+ queued_with_cov, /* Paths with new coverage bytes */
+ pending_not_fuzzed, /* Queued but not done yet */
+ pending_favored, /* Pending favored paths */
+ cur_skipped_paths, /* Abandoned inputs in cur cycle */
+ cur_depth, /* Current path depth */
+ max_depth, /* Max path depth */
+ useless_at_start, /* Number of useless starting paths */
+ var_byte_count, /* Bitmap bytes with var behavior */
+ current_entry, /* Current queue entry ID */
+ havoc_div; /* Cycle count divisor for havoc */
+
+ u64 total_crashes, /* Total number of crashes */
+ unique_crashes, /* Crashes with unique signatures */
+ total_tmouts, /* Total number of timeouts */
+ unique_tmouts, /* Timeouts with unique signatures */
+ unique_hangs, /* Hangs with unique signatures */
+ total_execs, /* Total execve() calls */
+ slowest_exec_ms, /* Slowest testcase non hang in ms */
+ start_time, /* Unix start time (ms) */
+ last_path_time, /* Time for most recent path (ms) */
+ last_crash_time, /* Time for most recent crash (ms) */
+ last_hang_time, /* Time for most recent hang (ms) */
+ last_crash_execs, /* Exec counter at last crash */
+ queue_cycle, /* Queue round counter */
+ cycles_wo_finds, /* Cycles without any new paths */
+ trim_execs, /* Execs done to trim input files */
+ bytes_trim_in, /* Bytes coming into the trimmer */
+ bytes_trim_out, /* Bytes coming outa the trimmer */
+ blocks_eff_total, /* Blocks subject to effector maps */
+ blocks_eff_select; /* Blocks selected as fuzzable */
+
+ u32 subseq_tmouts; /* Number of timeouts in a row */
+
+ u8 *stage_name, /* Name of the current fuzz stage */
+ *stage_short, /* Short stage name */
+ *syncing_party; /* Currently syncing with... */
+
+ u8 stage_name_buf64[64]; /* A name buf with len 64 if needed */
+
+ s32 stage_cur, stage_max; /* Stage progression */
+ s32 splicing_with; /* Splicing with which test case? */
+
+ u32 master_id, master_max; /* Master instance job splitting */
+
+ u32 syncing_case; /* Syncing with case #... */
+
+ s32 stage_cur_byte, /* Byte offset of current stage op */
+ stage_cur_val; /* Value used for stage op */
+
+ u8 stage_val_type; /* Value type (STAGE_VAL_*) */
+
+ u64 stage_finds[32], /* Patterns found per fuzz stage */
+ stage_cycles[32]; /* Execs per fuzz stage */
+
+#ifndef HAVE_ARC4RANDOM
+ u32 rand_cnt; /* Random number counter */
+#endif
u32 rand_seed[2];
s64 init_seed;
- u64 total_cal_us, /* Total calibration time (us) */
- total_cal_cycles; /* Total calibration cycles */
+ u64 total_cal_us, /* Total calibration time (us) */
+ total_cal_cycles; /* Total calibration cycles */
- u64 total_bitmap_size, /* Total bit count for all bitmaps */
- total_bitmap_entries; /* Number of bitmaps counted */
+ u64 total_bitmap_size, /* Total bit count for all bitmaps */
+ total_bitmap_entries; /* Number of bitmaps counted */
- s32 cpu_core_count; /* CPU core count */
+ s32 cpu_core_count; /* CPU core count */
#ifdef HAVE_AFFINITY
- s32 cpu_aff; /* Selected CPU core */
-#endif /* HAVE_AFFINITY */
+ s32 cpu_aff; /* Selected CPU core */
+#endif /* HAVE_AFFINITY */
- struct queue_entry *queue, /* Fuzzing queue (linked list) */
- *queue_cur, /* Current offset within the queue */
- *queue_top, /* Top of the list */
- *q_prev100; /* Previous 100 marker */
+ struct queue_entry *queue, /* Fuzzing queue (linked list) */
+ *queue_cur, /* Current offset within the queue */
+ *queue_top, /* Top of the list */
+ *q_prev100; /* Previous 100 marker */
- struct queue_entry*
- top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */
+ struct queue_entry* top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */
- struct extra_data* extras; /* Extra tokens to fuzz with */
- u32 extras_cnt; /* Total number of tokens read */
+ struct extra_data* extras; /* Extra tokens to fuzz with */
+ u32 extras_cnt; /* Total number of tokens read */
- struct extra_data* a_extras; /* Automatically selected extras */
- u32 a_extras_cnt; /* Total number of tokens available */
+ struct extra_data* a_extras; /* Automatically selected extras */
+ u32 a_extras_cnt; /* Total number of tokens available */
u8* (*post_handler)(u8* buf, u32* len);
@@ -525,12 +523,13 @@ typedef struct afl_state {
/* cmplog forkserver ids */
s32 cmplog_fsrv_ctl_fd, cmplog_fsrv_st_fd;
- u8 describe_op_buf_256[256]; /* describe_op will use this to return a string up to 256 */
+ u8 describe_op_buf_256[256]; /* describe_op will use this to return a string
+ up to 256 */
#ifdef USE_PYTHON
/* Python Mutators */
- PyObject *py_module;
- PyObject *py_functions[PY_FUNC_COUNT];
+ PyObject* py_module;
+ PyObject* py_functions[PY_FUNC_COUNT];
#endif
#ifdef _AFL_DOCUMENT_MUTATIONS
@@ -540,7 +539,8 @@ typedef struct afl_state {
} afl_state_t;
-/* A global pointer to all instances is needed (for now) for signals to arrive */
+/* A global pointer to all instances is needed (for now) for signals to arrive
+ */
extern list_t afl_states;
@@ -558,7 +558,7 @@ struct custom_mutator {
*
* @param seed Seed used for the mutation.
*/
- void (*afl_custom_init)(afl_state_t *afl, unsigned int seed);
+ void (*afl_custom_init)(afl_state_t* afl, unsigned int seed);
/**
* Perform custom mutations on a given input
@@ -574,8 +574,8 @@ struct custom_mutator {
* not produce data larger than max_size.
* @return Size of the mutated output.
*/
- size_t (*afl_custom_fuzz)(afl_state_t *afl, u8** buf, size_t buf_size, u8* add_buf,
- size_t add_buf_size, size_t max_size);
+ size_t (*afl_custom_fuzz)(afl_state_t* afl, u8** buf, size_t buf_size,
+ u8* add_buf, size_t add_buf_size, size_t max_size);
/**
* A post-processing function to use right before AFL writes the test case to
@@ -591,7 +591,8 @@ struct custom_mutator {
* will release the memory after saving the test case.
* @return Size of the output buffer after processing
*/
- size_t (*afl_custom_pre_save)(afl_state_t *afl, u8* buf, size_t buf_size, u8** out_buf);
+ size_t (*afl_custom_pre_save)(afl_state_t* afl, u8* buf, size_t buf_size,
+ u8** out_buf);
/**
* This method is called at the start of each trimming operation and receives
@@ -613,7 +614,7 @@ struct custom_mutator {
* @param buf_size Size of the test case
* @return The amount of possible iteration steps to trim the input
*/
- u32 (*afl_custom_init_trim)(afl_state_t *afl, u8* buf, size_t buf_size);
+ u32 (*afl_custom_init_trim)(afl_state_t* afl, u8* buf, size_t buf_size);
/**
* This method is called for each trimming operation. It doesn't have any
@@ -631,7 +632,7 @@ struct custom_mutator {
* the memory after saving the test case.
* @param[out] out_buf_size Pointer to the size of the trimmed test case
*/
- void (*afl_custom_trim)(afl_state_t *afl, u8** out_buf, size_t* out_buf_size);
+ void (*afl_custom_trim)(afl_state_t* afl, u8** out_buf, size_t* out_buf_size);
/**
* This method is called after each trim operation to inform you if your
@@ -644,8 +645,8 @@ struct custom_mutator {
* @return The next trim iteration index (from 0 to the maximum amount of
* steps returned in init_trim)
*/
- u32 (*afl_custom_post_trim)(afl_state_t *afl, u8 success);
-
+ u32 (*afl_custom_post_trim)(afl_state_t* afl, u8 success);
+
/**
* Perform a single custom mutation on a given input.
* This mutation is stacked with the other muatations in havoc.
@@ -659,8 +660,9 @@ struct custom_mutator {
* not produce data larger than max_size.
* @return Size of the mutated output.
*/
- size_t (*afl_custom_havoc_mutation)(afl_state_t *afl, u8** buf, size_t buf_size, size_t max_size);
-
+ size_t (*afl_custom_havoc_mutation)(afl_state_t* afl, u8** buf,
+ size_t buf_size, size_t max_size);
+
/**
* Return the probability (in percentage) that afl_custom_havoc_mutation
* is called in havoc. By default it is 6 %.
@@ -669,7 +671,7 @@ struct custom_mutator {
*
* @return The probability (0-100).
*/
- u8 (*afl_custom_havoc_mutation_probability)(afl_state_t *afl);
+ u8 (*afl_custom_havoc_mutation_probability)(afl_state_t* afl);
/**
* Determine whether the fuzzer should fuzz the current queue entry or not.
@@ -680,7 +682,7 @@ struct custom_mutator {
* @return Return True(1) if the fuzzer will fuzz the queue entry, and
* False(0) otherwise.
*/
- u8 (*afl_custom_queue_get)(afl_state_t *afl, const u8* filename);
+ u8 (*afl_custom_queue_get)(afl_state_t* afl, const u8* filename);
/**
* Allow for additional analysis (e.g. calling a different tool that does a
@@ -692,14 +694,13 @@ struct custom_mutator {
* @param filename_orig_queue File name of the original queue entry. This
* argument can be NULL while initializing the fuzzer
*/
- void (*afl_custom_queue_new_entry)(afl_state_t *afl, const u8* filename_new_queue,
- const u8* filename_orig_queue);
+ void (*afl_custom_queue_new_entry)(afl_state_t* afl,
+ const u8* filename_new_queue,
+ const u8* filename_orig_queue);
};
-
-
-void afl_state_init(afl_state_t *);
+void afl_state_init(afl_state_t*);
void afl_state_deinit(afl_state_t*);
/**** Prototypes ****/
@@ -707,13 +708,13 @@ void afl_state_deinit(afl_state_t*);
/* Custom mutators */
void setup_custom_mutator(afl_state_t*);
void destroy_custom_mutator(afl_state_t*);
-u8 trim_case_custom(afl_state_t *, struct queue_entry* q, u8* in_buf);
+u8 trim_case_custom(afl_state_t*, struct queue_entry* q, u8* in_buf);
/* Python */
#ifdef USE_PYTHON
-int init_py_module(afl_state_t*, u8*);
-void finalize_py_module(afl_state_t*);
+int init_py_module(afl_state_t*, u8*);
+void finalize_py_module(afl_state_t*);
void init_py(afl_state_t*, unsigned int);
size_t fuzz_py(afl_state_t*, u8**, size_t, u8*, size_t, size_t);
@@ -721,7 +722,7 @@ size_t pre_save_py(afl_state_t*, u8*, size_t, u8**);
u32 init_trim_py(afl_state_t*, u8*, size_t);
u32 post_trim_py(afl_state_t*, u8);
void trim_py(afl_state_t*, u8**, size_t*);
-size_t havoc_mutation_py(afl_state_t *, u8**, size_t, size_t);
+size_t havoc_mutation_py(afl_state_t*, u8**, size_t, size_t);
u8 havoc_mutation_probability_py(afl_state_t*);
u8 queue_get_py(afl_state_t*, const u8*);
void queue_new_entry_py(afl_state_t*, const u8*, const u8*);
@@ -759,7 +760,7 @@ void minimize_bits(u8*, u8*);
u8* describe_op(afl_state_t*, u8);
#endif
u8 save_if_interesting(afl_state_t*, void*, u32, u8);
-u8 has_new_bits(afl_state_t *, u8*);
+u8 has_new_bits(afl_state_t*, u8*);
/* Misc */
@@ -830,11 +831,11 @@ void save_cmdline(afl_state_t*, u32, char**);
/* CmpLog */
-void init_cmplog_forkserver(afl_state_t *afl);
-u8 common_fuzz_cmplog_stuff(afl_state_t *afl, u8 *out_buf, u32 len);
+void init_cmplog_forkserver(afl_state_t* afl);
+u8 common_fuzz_cmplog_stuff(afl_state_t* afl, u8* out_buf, u32 len);
/* RedQueen */
-u8 input_to_state_stage(afl_state_t *afl, u8* orig_buf, u8* buf, u32 len,
+u8 input_to_state_stage(afl_state_t* afl, u8* orig_buf, u8* buf, u32 len,
u32 exec_cksum);
/**** Inline routines ****/
@@ -842,7 +843,7 @@ u8 input_to_state_stage(afl_state_t *afl, u8* orig_buf, u8* buf, u32 len,
/* Generate a random number (from 0 to limit - 1). This may
have slight bias. */
-static inline u32 UR(afl_state_t *afl, u32 limit) {
+static inline u32 UR(afl_state_t* afl, u32 limit) {
#ifdef HAVE_ARC4RANDOM
if (afl->fixed_seed) { return random() % limit; }
@@ -853,7 +854,8 @@ static inline u32 UR(afl_state_t *afl, u32 limit) {
#else
if (!afl->fixed_seed && unlikely(!afl->rand_cnt--)) {
- ck_read(afl->fsrv.dev_urandom_fd, &afl->rand_seed, sizeof(afl->rand_seed), "/dev/urandom");
+ ck_read(afl->fsrv.dev_urandom_fd, &afl->rand_seed, sizeof(afl->rand_seed),
+ "/dev/urandom");
srandom(afl->rand_seed[0]);
afl->rand_cnt = (RESEED_RNG / 2) + (afl->rand_seed[1] % RESEED_RNG);
@@ -864,7 +866,7 @@ static inline u32 UR(afl_state_t *afl, u32 limit) {
}
-static inline u32 get_rand_seed(afl_state_t *afl) {
+static inline u32 get_rand_seed(afl_state_t* afl) {
if (afl->fixed_seed) return (u32)afl->init_seed;
return afl->rand_seed[0];
@@ -883,4 +885,4 @@ static u64 next_p2(u64 val) {
}
-#endif
\ No newline at end of file
+#endif
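
For orientation, a minimal custom mutator matching the afl_custom_* signatures documented in the header above might look like the following sketch (illustrative only; it assumes the mutator is built as a shared object whose exported symbols carry these names, e.g. for loading via AFL_CUSTOM_MUTATOR_LIBRARY):

#include <stdlib.h>
#include "afl-fuzz.h"

void afl_custom_init(afl_state_t *afl, unsigned int seed) {

  (void)afl;
  srandom(seed);                                   /* seed local RNG state */

}

size_t afl_custom_fuzz(afl_state_t *afl, u8 **buf, size_t buf_size,
                       u8 *add_buf, size_t add_buf_size, size_t max_size) {

  (void)afl; (void)add_buf; (void)add_buf_size;
  size_t len = buf_size > max_size ? max_size : buf_size;
  if (len) (*buf)[random() % len] ^= 1u << (random() % 8); /* flip one bit */
  return len;                                      /* never exceeds max_size */

}
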
diff --git a/include/afl-prealloc.h b/include/afl-prealloc.h
index 712cdec6..fb307eb3 100644
--- a/include/afl-prealloc.h
+++ b/include/afl-prealloc.h
@@ -1,4 +1,5 @@
-/* If we know we'll reuse small elements often, we'll just preallocate a buffer, then fall back to malloc */
+/* If we know we'll reuse small elements often, we'll just preallocate a buffer,
+ * then fall back to malloc */
// TODO: Replace free status check with bitmask+CLZ
#ifndef AFL_PREALLOC_H
@@ -11,91 +12,109 @@
#include "debug.h"
typedef enum prealloc_status {
- PRE_STATUS_UNUSED = 0,/* free in buf */
- PRE_STATUS_USED, /* used in buf */
- PRE_STATUS_MALLOC /* system malloc */
-} pre_status_t;
+ PRE_STATUS_UNUSED = 0, /* free in buf */
+ PRE_STATUS_USED, /* used in buf */
+ PRE_STATUS_MALLOC /* system malloc */
-/* Adds the entry used for prealloc bookkeeping to this struct */
+} pre_status_t;
-#define PREALLOCABLE ;pre_status_t pre_status; /* prealloc status of this instance */
+/* Adds the entry used for prealloc bookkeeping to this struct */
+#define PREALLOCABLE \
+ ; \
+ pre_status_t pre_status; /* prealloc status of this instance */
/* allocate an element of type *el_ptr, to this variable.
Uses (and reuses) the given prealloc_buf before hitting libc's malloc.
prealloc_buf must be the pointer to an array with type `type`.
- `type` must be a struct with uses PREALLOCABLE (a pre_status_t pre_status member).
- prealloc_size must be the array size.
- prealloc_counter must be a variable initialized with 0 (of any name).
+ `type` must be a struct with uses PREALLOCABLE (a pre_status_t pre_status
+ member). prealloc_size must be the array size. prealloc_counter must be a
+ variable initialized with 0 (of any name).
*/
-#define PRE_ALLOC(el_ptr, prealloc_buf, prealloc_size, prealloc_counter) do { \
- \
- if ((prealloc_counter) >= (prealloc_size)) { \
- \
- el_ptr = malloc(sizeof(*el_ptr)); \
- el_ptr->pre_status = PRE_STATUS_MALLOC; \
- \
- } else { \
- \
- /* Find one of our preallocated elements */ \
- u32 i; \
- for (i = 0; i < (prealloc_size); i++) { \
- \
- el_ptr = &((prealloc_buf)[i]); \
- if (el_ptr->pre_status == PRE_STATUS_UNUSED) { \
- \
- (prealloc_counter)++; \
- el_ptr->pre_status = PRE_STATUS_USED; \
- break; \
- \
- } \
- } \
- } \
- \
- if(!el_ptr) { \
- FATAL("BUG in list.h -> no element found or allocated!"); \
- } \
-} while(0);
-
+#define PRE_ALLOC(el_ptr, prealloc_buf, prealloc_size, prealloc_counter) \
+ do { \
+ \
+ if ((prealloc_counter) >= (prealloc_size)) { \
+ \
+ el_ptr = malloc(sizeof(*el_ptr)); \
+ el_ptr->pre_status = PRE_STATUS_MALLOC; \
+ \
+ } else { \
+ \
+ /* Find one of our preallocated elements */ \
+ u32 i; \
+ for (i = 0; i < (prealloc_size); i++) { \
+ \
+ el_ptr = &((prealloc_buf)[i]); \
+ if (el_ptr->pre_status == PRE_STATUS_UNUSED) { \
+ \
+ (prealloc_counter)++; \
+ el_ptr->pre_status = PRE_STATUS_USED; \
+ break; \
+ \
+ } \
+ \
+ } \
+ \
+ } \
+ \
+ if (!el_ptr) { FATAL("BUG in list.h -> no element found or allocated!"); } \
+ \
+ } while (0);
/* Take a chosen (free) element from the prealloc_buf directly */
-#define PRE_ALLOC_FORCE(el_ptr, prealloc_counter) do { \
- if ((el_ptr)->pre_status != PRE_STATUS_UNUSED) { \
- FATAL("PRE_ALLOC_FORCE element already allocated"); \
- } \
- (el_ptr)->pre_status = PRE_STATUS_USED; \
- (prealloc_counter)++; \
-} while(0);
-
+#define PRE_ALLOC_FORCE(el_ptr, prealloc_counter) \
+ do { \
+ \
+ if ((el_ptr)->pre_status != PRE_STATUS_UNUSED) { \
+ \
+ FATAL("PRE_ALLOC_FORCE element already allocated"); \
+ \
+ } \
+ (el_ptr)->pre_status = PRE_STATUS_USED; \
+ (prealloc_counter)++; \
+ \
+ } while (0);
/* free an preallocated element */
-#define PRE_FREE(el_ptr, prealloc_counter) do { \
- \
- switch ((el_ptr)->pre_status) { \
- \
- case PRE_STATUS_USED: { \
- (el_ptr)->pre_status = PRE_STATUS_UNUSED; \
- (prealloc_counter)--; \
- if ((prealloc_counter) < 0) { \
- FATAL("Inconsistent data in PRE_FREE"); \
- } \
- break; \
- } \
- case PRE_STATUS_MALLOC: { \
- (el_ptr)->pre_status = PRE_STATUS_UNUSED; \
- free((el_ptr)); \
- break; \
- } \
- default: { \
- FATAL("Double Free Detected"); \
- break; \
- } \
- \
- } \
-} while(0);
+#define PRE_FREE(el_ptr, prealloc_counter) \
+ do { \
+ \
+ switch ((el_ptr)->pre_status) { \
+ \
+ case PRE_STATUS_USED: { \
+ \
+ (el_ptr)->pre_status = PRE_STATUS_UNUSED; \
+ (prealloc_counter)--; \
+ if ((prealloc_counter) < 0) { \
+ \
+ FATAL("Inconsistent data in PRE_FREE"); \
+ \
+ } \
+ break; \
+ \
+ } \
+ case PRE_STATUS_MALLOC: { \
+ \
+ (el_ptr)->pre_status = PRE_STATUS_UNUSED; \
+ free((el_ptr)); \
+ break; \
+ \
+ } \
+ default: { \
+ \
+ FATAL("Double Free Detected"); \
+ break; \
+ \
+ } \
+ \
+ } \
+ \
+ } while (0);
#endif
+
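
The PRE_ALLOC()/PRE_FREE() macros reformatted above are used roughly as in this sketch (illustrative; the node type, backing buffer, and counter are made-up names for the example):

#include <stdlib.h>
#include "afl-prealloc.h"

typedef struct node {

  PREALLOCABLE;                  /* adds the pre_status bookkeeping member */
  int value;

} node_t;

static node_t node_buf[16];      /* preallocated backing storage */
static s32    node_count;        /* element counter, starts at 0 */

static void node_example(void) {

  node_t *n = NULL;
  PRE_ALLOC(n, node_buf, 16, node_count);  /* reuse a free slot, else malloc */
  n->value = 42;
  PRE_FREE(n, node_count);                 /* return the slot (or free()) */

}
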
diff --git a/include/common.h b/include/common.h
index 780e083b..d794d0ac 100644
--- a/include/common.h
+++ b/include/common.h
@@ -33,8 +33,8 @@
void detect_file_args(char** argv, u8* prog_in, u8 use_stdin);
void check_environment_vars(char** env);
-char** get_qemu_argv(u8* own_loc, u8 **target_path_p, int argc, char **argv);
-char** get_wine_argv(u8* own_loc, u8 **target_path_p, int argc, char **argv);
+char** get_qemu_argv(u8* own_loc, u8** target_path_p, int argc, char** argv);
+char** get_wine_argv(u8* own_loc, u8** target_path_p, int argc, char** argv);
char* get_afl_env(char* env);
/* Get unix time in milliseconds */
@@ -64,3 +64,4 @@ static u64 get_cur_time_us(void) {
}
#endif
+
diff --git a/include/forkserver.h b/include/forkserver.h
index 3587427b..ac027576 100644
--- a/include/forkserver.h
+++ b/include/forkserver.h
@@ -33,36 +33,34 @@ typedef struct afl_forkserver {
/* a program that includes afl-forkserver needs to define these */
- u8 uses_asan; /* Target uses ASAN? */
- u8* trace_bits; /* SHM with instrumentation bitmap */
- u8 use_stdin; /* use stdin for sending data */
+ u8 uses_asan; /* Target uses ASAN? */
+ u8 *trace_bits; /* SHM with instrumentation bitmap */
+ u8 use_stdin; /* use stdin for sending data */
-s32 fsrv_pid, /* PID of the fork server */
- child_pid, /* PID of the fuzzed program */
- out_dir_fd; /* FD of the lock file */
+ s32 fsrv_pid, /* PID of the fork server */
+ child_pid, /* PID of the fuzzed program */
+ out_dir_fd; /* FD of the lock file */
-s32 out_fd, /* Persistent fd for afl->fsrv.out_file */
+ s32 out_fd, /* Persistent fd for afl->fsrv.out_file */
#ifndef HAVE_ARC4RANDOM
- dev_urandom_fd, /* Persistent fd for /dev/urandom */
+ dev_urandom_fd, /* Persistent fd for /dev/urandom */
#endif
- dev_null_fd, /* Persistent fd for /dev/null */
- fsrv_ctl_fd, /* Fork server control pipe (write) */
- fsrv_st_fd; /* Fork server status pipe (read) */
+ dev_null_fd, /* Persistent fd for /dev/null */
+ fsrv_ctl_fd, /* Fork server control pipe (write) */
+ fsrv_st_fd; /* Fork server status pipe (read) */
- u32 exec_tmout; /* Configurable exec timeout (ms) */
- u64 mem_limit; /* Memory cap for child (MB) */
+ u32 exec_tmout; /* Configurable exec timeout (ms) */
+ u64 mem_limit; /* Memory cap for child (MB) */
- u8 *out_file, /* File to fuzz, if any */
- *target_path; /* Path of the target */
+ u8 *out_file, /* File to fuzz, if any */
+ *target_path; /* Path of the target */
- FILE* plot_file; /* Gnuplot output file */
+ FILE *plot_file; /* Gnuplot output file */
- u8 child_timed_out; /* Traced process timed out? */
+ u8 child_timed_out; /* Traced process timed out? */
} afl_forkserver_t;
-
-
void handle_timeout(int sig);
void afl_fsrv_init(afl_forkserver_t *fsrv);
void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv);
diff --git a/include/list.h b/include/list.h
index 7184850f..6f84b12d 100644
--- a/include/list.h
+++ b/include/list.h
@@ -8,21 +8,23 @@
#include "debug.h"
#include "afl-prealloc.h"
-#define LIST_PREALLOC_SIZE (64) /* How many elements to allocate before malloc is needed */
+#define LIST_PREALLOC_SIZE \
+ (64) /* How many elements to allocate before malloc is needed */
typedef struct list_element {
+
PREALLOCABLE;
struct list_element *prev;
struct list_element *next;
- void *data;
+ void * data;
} element_t;
typedef struct list {
element_t element_prealloc_buf[LIST_PREALLOC_SIZE];
- u32 element_prealloc_count;
+ u32 element_prealloc_count;
} list_t;
@@ -52,7 +54,8 @@ static void list_append(list_t *list, void *el) {
}
element_t *el_box = NULL;
- PRE_ALLOC(el_box, list->element_prealloc_buf, LIST_PREALLOC_SIZE, list->element_prealloc_count);
+ PRE_ALLOC(el_box, list->element_prealloc_buf, LIST_PREALLOC_SIZE,
+ list->element_prealloc_count);
if (!el_box) FATAL("failed to allocate list element");
el_box->data = el;
el_box->next = head;
@@ -62,59 +65,75 @@ static void list_append(list_t *list, void *el) {
}
-/* Simple foreach.
+/* Simple foreach.
Pointer to the current element is in `el`,
casted to (a pointer) of the given `type`.
A return from this block will return from calling func.
*/
-#define LIST_FOREACH(list, type, block) do { \
- list_t *li = (list); \
- element_t *head = get_head((li)); \
- element_t *el_box = (head)->next; \
- if (!el_box) \
- FATAL("foreach over uninitialized list");\
- while(el_box != head) { \
- type *el = (type *)((el_box)->data); \
- /* get next so el_box can be unlinked */ \
- element_t *next = el_box->next; \
- {block}; \
- el_box = next; \
- } \
-} while(0);
+#define LIST_FOREACH(list, type, block) \
+ do { \
+ \
+ list_t * li = (list); \
+ element_t *head = get_head((li)); \
+ element_t *el_box = (head)->next; \
+ if (!el_box) FATAL("foreach over uninitialized list"); \
+ while (el_box != head) { \
+ \
+ type *el = (type *)((el_box)->data); \
+ /* get next so el_box can be unlinked */ \
+ element_t *next = el_box->next; \
+ {block}; \
+ el_box = next; \
+ \
+ } \
+ \
+ } while (0);
/* In foreach: remove the current el from the list */
-#define LIST_REMOVE_CURRENT_EL_IN_FOREACH() do { \
- el_box->prev->next = next; \
- el_box->next->prev = el_box->prev; \
- list_free_el(li, el_box); \
-} while(0);
+#define LIST_REMOVE_CURRENT_EL_IN_FOREACH() \
+ do { \
+ \
+ el_box->prev->next = next; \
+ el_box->next->prev = el_box->prev; \
+ list_free_el(li, el_box); \
+ \
+ } while (0);
/* Same as foreach, but will clear list in the process */
-#define LIST_FOREACH_CLEAR(list, type, block) do { \
- LIST_FOREACH((list), type, { \
- {block}; \
- LIST_REMOVE_CURRENT_EL_IN_FOREACH(); \
- }); \
-} while(0);
+#define LIST_FOREACH_CLEAR(list, type, block) \
+ do { \
+ \
+ LIST_FOREACH((list), type, { \
+ \
+ {block}; \
+ LIST_REMOVE_CURRENT_EL_IN_FOREACH(); \
+ \
+ }); \
+ \
+ } while (0);
/* remove an item from the list */
static void list_remove(list_t *list, void *remove_me) {
LIST_FOREACH(list, void, {
+
if (el == remove_me) {
+
el_box->prev->next = el_box->next;
el_box->next->prev = el_box->prev;
el_box->data = NULL;
list_free_el(list, el_box);
return;
+
}
+
});
- FATAL ("List item to be removed not in list");
+ FATAL("List item to be removed not in list");
}
@@ -123,11 +142,13 @@ static void list_remove(list_t *list, void *remove_me) {
static bool list_contains(list_t *list, void *contains_me) {
LIST_FOREACH(list, void, {
+
if (el == contains_me) return true;
+
});
return false;
}
-#endif
\ No newline at end of file
+#endif
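
Roughly, the list API above is used as in this sketch (illustrative; the list and element names are made up for the example):

#include <stdio.h>
#include "list.h"

static list_t my_list = {0};     /* zero-initialized, like afl_states */

static void list_example(void) {

  static int a = 1, b = 2;
  list_append(&my_list, &a);
  list_append(&my_list, &b);

  LIST_FOREACH(&my_list, int, {

    printf("element: %d\n", *el);          /* `el` is typed by the macro */

  });

  if (list_contains(&my_list, &a)) list_remove(&my_list, &a);

}
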
diff --git a/include/sharedmem.h b/include/sharedmem.h
index b90f7d87..9a7d9082 100644
--- a/include/sharedmem.h
+++ b/include/sharedmem.h
@@ -29,30 +29,30 @@
typedef struct sharedmem {
- //extern unsigned char *trace_bits;
+ // extern unsigned char *trace_bits;
- #ifdef USEMMAP
+#ifdef USEMMAP
/* ================ Proteas ================ */
- int g_shm_fd;
- char g_shm_file_path[L_tmpnam];
- /* ========================================= */
- #else
- s32 shm_id; /* ID of the SHM region */
+ int g_shm_fd;
+ char g_shm_file_path[L_tmpnam];
+/* ========================================= */
+#else
+ s32 shm_id; /* ID of the SHM region */
s32 cmplog_shm_id;
- #endif
+#endif
- u8 *map; /* shared memory region */
+ u8 *map; /* shared memory region */
- size_t size_alloc; /* actual allocated size */
- size_t size_used; /* in use by shmem app */
+ size_t size_alloc; /* actual allocated size */
+ size_t size_used; /* in use by shmem app */
int cmplog_mode;
struct cmp_map *cmp_map;
} sharedmem_t;
-u8 *afl_shm_init(sharedmem_t*, size_t, unsigned char dumb_mode);
-void afl_shm_deinit(sharedmem_t*);
+u8 * afl_shm_init(sharedmem_t *, size_t, unsigned char dumb_mode);
+void afl_shm_deinit(sharedmem_t *);
#endif
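
A rough usage sketch for the afl_shm_init()/afl_shm_deinit() pair declared above (illustrative; MAP_SIZE comes from config.h, and the cmplog path is left disabled):

#include "config.h"                          /* MAP_SIZE */
#include "sharedmem.h"

static void shm_example(void) {

  sharedmem_t shm = {0};
  shm.cmplog_mode = 0;                       /* plain coverage map only */
  u8 *map = afl_shm_init(&shm, MAP_SIZE, 0); /* 0: not dumb_mode */
  /* ... run the target and inspect coverage via map[] ... */
  (void)map;
  afl_shm_deinit(&shm);

}
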
diff --git a/src/afl-analyze.c b/src/afl-analyze.c
index 9e64a7a5..30d71298 100644
--- a/src/afl-analyze.c
+++ b/src/afl-analyze.c
@@ -84,7 +84,7 @@ static volatile u8 stop_soon, /* Ctrl-C pressed? */
static u8 qemu_mode;
-static u8 *target_path;
+static u8* target_path;
/* Constants used for describing byte behavior. */
@@ -1014,9 +1014,11 @@ int main(int argc, char** argv, char** envp) {
if (qemu_mode) {
if (use_wine)
- use_argv = get_wine_argv(argv[0], &target_path, argc - optind, argv + optind);
+ use_argv =
+ get_wine_argv(argv[0], &target_path, argc - optind, argv + optind);
else
- use_argv = get_qemu_argv(argv[0], &target_path, argc - optind, argv + optind);
+ use_argv =
+ get_qemu_argv(argv[0], &target_path, argc - optind, argv + optind);
} else
diff --git a/src/afl-common.c b/src/afl-common.c
index fc495b60..06c691b3 100644
--- a/src/afl-common.c
+++ b/src/afl-common.c
@@ -38,7 +38,7 @@
extern u8 be_quiet;
-void detect_file_args(char **argv, u8 *prog_in, u8 use_stdin) {
+void detect_file_args(char** argv, u8* prog_in, u8 use_stdin) {
u32 i = 0;
#ifdef __GLIBC__
@@ -110,7 +110,7 @@ void detect_file_args(char **argv, u8 *prog_in, u8 use_stdin) {
/* Rewrite argv for QEMU. */
-char** get_qemu_argv(u8* own_loc, u8 **target_path_p, int argc, char **argv) {
+char** get_qemu_argv(u8* own_loc, u8** target_path_p, int argc, char** argv) {
char** new_argv = ck_alloc(sizeof(char*) * (argc + 4));
u8 * tmp, *cp = NULL, *rsl, *own_copy;
@@ -168,7 +168,8 @@ char** get_qemu_argv(u8* own_loc, u8 **target_path_p, int argc, char **argv) {
SAYF("\n" cLRD "[-] " cRST
"Oops, unable to find the 'afl-qemu-trace' binary. The binary must be "
"built\n"
- " separately by following the instructions in afl->qemu_mode/README.md. "
+ " separately by following the instructions in "
+ "afl->qemu_mode/README.md. "
"If you\n"
" already have the binary installed, you may need to specify "
"AFL_PATH in the\n"
@@ -187,7 +188,7 @@ char** get_qemu_argv(u8* own_loc, u8 **target_path_p, int argc, char **argv) {
/* Rewrite argv for Wine+QEMU. */
-char** get_wine_argv(u8* own_loc, u8 **target_path_p, int argc, char **argv) {
+char** get_wine_argv(u8* own_loc, u8** target_path_p, int argc, char** argv) {
char** new_argv = ck_alloc(sizeof(char*) * (argc + 3));
u8 * tmp, *cp = NULL, *rsl, *own_copy;
@@ -264,7 +265,8 @@ char** get_wine_argv(u8* own_loc, u8 **target_path_p, int argc, char **argv) {
SAYF("\n" cLRD "[-] " cRST
"Oops, unable to find the '%s' binary. The binary must be "
"built\n"
- " separately by following the instructions in afl->qemu_mode/README.md. "
+ " separately by following the instructions in "
+ "afl->qemu_mode/README.md. "
"If you\n"
" already have the binary installed, you may need to specify "
"AFL_PATH in the\n"
@@ -329,3 +331,4 @@ char* get_afl_env(char* env) {
return val;
}
+
diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c
index 7edcde5e..f7b84248 100644
--- a/src/afl-forkserver.c
+++ b/src/afl-forkserver.c
@@ -114,7 +114,7 @@ void handle_timeout(int sig) {
LIST_FOREACH(&fsrv_list, afl_forkserver_t, {
- //TODO: We need a proper timer to handle multiple timeouts
+ // TODO: We need a proper timer to handle multiple timeouts
if (el->child_pid > 0) {
el->child_timed_out = 1;
@@ -465,8 +465,11 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv) {
void afl_fsrv_killall() {
LIST_FOREACH(&fsrv_list, afl_forkserver_t, {
+
if (el->child_pid > 0) kill(el->child_pid, SIGKILL);
+
});
+
}
void afl_fsrv_deinit(afl_forkserver_t *fsrv) {
@@ -474,3 +477,4 @@ void afl_fsrv_deinit(afl_forkserver_t *fsrv) {
list_remove(&fsrv_list, fsrv);
}
+
diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c
index 4fba7810..47040fb8 100644
--- a/src/afl-fuzz-bitmap.c
+++ b/src/afl-fuzz-bitmap.c
@@ -29,7 +29,7 @@
-B option, to focus a separate fuzzing session on a particular
interesting input without rediscovering all the others. */
-void write_bitmap(afl_state_t *afl) {
+void write_bitmap(afl_state_t* afl) {
u8* fname;
s32 fd;
@@ -51,7 +51,7 @@ void write_bitmap(afl_state_t *afl) {
/* Read bitmap from file. This is for the -B option again. */
-void read_bitmap(afl_state_t *afl, u8* fname) {
+void read_bitmap(afl_state_t* afl, u8* fname) {
s32 fd = open(fname, O_RDONLY);
@@ -71,7 +71,7 @@ void read_bitmap(afl_state_t *afl, u8* fname) {
This function is called after every exec() on a fairly large buffer, so
it needs to be fast. We do this in 32-bit and 64-bit flavors. */
-u8 has_new_bits(afl_state_t *afl, u8* virgin_map) {
+u8 has_new_bits(afl_state_t* afl, u8* virgin_map) {
#ifdef WORD_SIZE_64
@@ -415,9 +415,9 @@ void minimize_bits(u8* dst, u8* src) {
/* Construct a file name for a new test case, capturing the operation
that led to its discovery. Uses a static buffer. */
-u8* describe_op(afl_state_t *afl, u8 hnb) {
+u8* describe_op(afl_state_t* afl, u8 hnb) {
- u8 *ret = afl->describe_op_buf_256;
+ u8* ret = afl->describe_op_buf_256;
if (afl->syncing_party) {
@@ -429,7 +429,8 @@ u8* describe_op(afl_state_t *afl, u8 hnb) {
sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - afl->start_time);
- if (afl->splicing_with >= 0) sprintf(ret + strlen(ret), "+%06d", afl->splicing_with);
+ if (afl->splicing_with >= 0)
+ sprintf(ret + strlen(ret), "+%06d", afl->splicing_with);
sprintf(ret + strlen(ret), ",op:%s", afl->stage_short);
@@ -439,7 +440,8 @@ u8* describe_op(afl_state_t *afl, u8 hnb) {
if (afl->stage_val_type != STAGE_VAL_NONE)
sprintf(ret + strlen(ret), ",val:%s%+d",
- (afl->stage_val_type == STAGE_VAL_BE) ? "be:" : "", afl->stage_cur_val);
+ (afl->stage_val_type == STAGE_VAL_BE) ? "be:" : "",
+ afl->stage_cur_val);
} else
@@ -457,7 +459,7 @@ u8* describe_op(afl_state_t *afl, u8 hnb) {
/* Write a message accompanying the crash directory :-) */
-static void write_crash_readme(afl_state_t *afl) {
+static void write_crash_readme(afl_state_t* afl) {
u8* fn = alloc_printf("%s/crashes/README.txt", afl->out_dir);
s32 fd;
@@ -499,7 +501,7 @@ static void write_crash_readme(afl_state_t *afl) {
" https://github.com/vanhauser-thc/AFLplusplus\n\n",
- afl->orig_cmdline, DMS(afl->fsrv.mem_limit << 20)); /* ignore errors */
+ afl->orig_cmdline, DMS(afl->fsrv.mem_limit << 20)); /* ignore errors */
fclose(f);
@@ -509,7 +511,7 @@ static void write_crash_readme(afl_state_t *afl) {
save or queue the input test case for further analysis if so. Returns 1 if
entry is saved, 0 otherwise. */
-u8 save_if_interesting(afl_state_t *afl, void* mem, u32 len, u8 fault) {
+u8 save_if_interesting(afl_state_t* afl, void* mem, u32 len, u8 fault) {
if (len == 0) return 0;
@@ -634,8 +636,8 @@ u8 save_if_interesting(afl_state_t *afl, void* mem, u32 len, u8 fault) {
#ifndef SIMPLE_FILES
- fn = alloc_printf("%s/hangs/id:%06llu,%s", afl->out_dir, afl->unique_hangs,
- describe_op(afl, 0));
+ fn = alloc_printf("%s/hangs/id:%06llu,%s", afl->out_dir,
+ afl->unique_hangs, describe_op(afl, 0));
#else
@@ -678,19 +680,21 @@ u8 save_if_interesting(afl_state_t *afl, void* mem, u32 len, u8 fault) {
#ifndef SIMPLE_FILES
fn = alloc_printf("%s/crashes/id:%06llu,sig:%02u,%s", afl->out_dir,
- afl->unique_crashes, afl->kill_signal, describe_op(afl, 0));
+ afl->unique_crashes, afl->kill_signal,
+ describe_op(afl, 0));
#else
- fn = alloc_printf("%s/crashes/id_%06llu_%02u", afl->out_dir, afl->unique_crashes,
- afl->kill_signal);
+ fn = alloc_printf("%s/crashes/id_%06llu_%02u", afl->out_dir,
+ afl->unique_crashes, afl->kill_signal);
#endif /* ^!SIMPLE_FILES */
++afl->unique_crashes;
- if (afl->infoexec) { // if the user wants to be informed on new crashes - do
+ if (afl->infoexec) { // if the user wants to be informed on new crashes -
+ // do
#if !TARGET_OS_IPHONE
- // that
+ // that
if (system(afl->infoexec) == -1)
hnb += 0; // we dont care if system errors, but we dont want a
// compiler warning either
diff --git a/src/afl-fuzz-cmplog.c b/src/afl-fuzz-cmplog.c
index 08c48fc4..3749330b 100644
--- a/src/afl-fuzz-cmplog.c
+++ b/src/afl-fuzz-cmplog.c
@@ -81,8 +81,9 @@ void init_cmplog_forkserver(afl_state_t *afl) {
// r.rlim_max = r.rlim_cur = 0;
// setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
- /* Isolate the process and configure standard descriptors. If afl->fsrv.out_file is
- specified, stdin is /dev/null; otherwise, afl->fsrv.out_fd is cloned instead. */
+ /* Isolate the process and configure standard descriptors. If
+ afl->fsrv.out_file is specified, stdin is /dev/null; otherwise,
+ afl->fsrv.out_fd is cloned instead. */
setsid();
@@ -156,7 +157,7 @@ void init_cmplog_forkserver(afl_state_t *afl) {
/* Use a distinctive bitmap signature to tell the parent about execv()
falling through. */
- *(u32*)afl->fsrv.trace_bits = EXEC_FAIL_SIG;
+ *(u32 *)afl->fsrv.trace_bits = EXEC_FAIL_SIG;
exit(0);
}
@@ -176,7 +177,8 @@ void init_cmplog_forkserver(afl_state_t *afl) {
if (afl->fsrv.exec_tmout) {
it.it_value.tv_sec = ((afl->fsrv.exec_tmout * FORK_WAIT_MULT) / 1000);
- it.it_value.tv_usec = ((afl->fsrv.exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;
+ it.it_value.tv_usec =
+ ((afl->fsrv.exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;
}
@@ -204,11 +206,13 @@ void init_cmplog_forkserver(afl_state_t *afl) {
"Timeout while initializing cmplog fork server (adjusting -t may "
"help)");
- if (waitpid(afl->cmplog_fsrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
+ if (waitpid(afl->cmplog_fsrv_pid, &status, 0) <= 0)
+ PFATAL("waitpid() failed");
if (WIFSIGNALED(status)) {
- if (afl->fsrv.mem_limit && afl->fsrv.mem_limit < 500 && afl->fsrv.uses_asan) {
+ if (afl->fsrv.mem_limit && afl->fsrv.mem_limit < 500 &&
+ afl->fsrv.uses_asan) {
SAYF("\n" cLRD "[-] " cRST
"Whoops, the target binary crashed suddenly, "
@@ -281,7 +285,7 @@ void init_cmplog_forkserver(afl_state_t *afl) {
}
- if (*(u32*)afl->fsrv.trace_bits == EXEC_FAIL_SIG)
+ if (*(u32 *)afl->fsrv.trace_bits == EXEC_FAIL_SIG)
FATAL("Unable to execute target application ('%s')", afl->argv[0]);
if (afl->fsrv.mem_limit && afl->fsrv.mem_limit < 500 && afl->fsrv.uses_asan) {
@@ -400,8 +404,9 @@ u8 run_cmplog_target(afl_state_t *afl, u32 timeout) {
setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
- /* Isolate the process and configure standard descriptors. If afl->fsrv.out_file is
- specified, stdin is /dev/null; otherwise, afl->fsrv.out_fd is cloned instead. */
+ /* Isolate the process and configure standard descriptors. If
+ afl->fsrv.out_file is specified, stdin is /dev/null; otherwise,
+ afl->fsrv.out_fd is cloned instead. */
setsid();
@@ -449,7 +454,7 @@ u8 run_cmplog_target(afl_state_t *afl, u32 timeout) {
/* Use a distinctive bitmap value to tell the parent about execv()
falling through. */
- *(u32*)afl->fsrv.trace_bits = EXEC_FAIL_SIG;
+ *(u32 *)afl->fsrv.trace_bits = EXEC_FAIL_SIG;
exit(0);
}
@@ -495,7 +500,8 @@ u8 run_cmplog_target(afl_state_t *afl, u32 timeout) {
if (afl->dumb_mode == 1 || afl->no_forkserver) {
- if (waitpid(afl->cmplog_child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
+ if (waitpid(afl->cmplog_child_pid, &status, 0) <= 0)
+ PFATAL("waitpid() failed");
} else {
@@ -540,17 +546,17 @@ u8 run_cmplog_target(afl_state_t *afl, u32 timeout) {
++afl->total_execs;
/* Any subsequent operations on afl->fsrv.trace_bits must not be moved by the
- compiler below this point. Past this location, afl->fsrv.trace_bits[] behave
- very normally and do not have to be treated as volatile. */
+ compiler below this point. Past this location, afl->fsrv.trace_bits[]
+ behave very normally and do not have to be treated as volatile. */
MEM_BARRIER();
- tb4 = *(u32*)afl->fsrv.trace_bits;
+ tb4 = *(u32 *)afl->fsrv.trace_bits;
#ifdef WORD_SIZE_64
- classify_counts((u64*)afl->fsrv.trace_bits);
+ classify_counts((u64 *)afl->fsrv.trace_bits);
#else
- classify_counts((u32*)afl->fsrv.trace_bits);
+ classify_counts((u32 *)afl->fsrv.trace_bits);
#endif /* ^WORD_SIZE_64 */
prev_timed_out = afl->fsrv.child_timed_out;
@@ -561,7 +567,8 @@ u8 run_cmplog_target(afl_state_t *afl, u32 timeout) {
afl->kill_signal = WTERMSIG(status);
- if (afl->fsrv.child_timed_out && afl->kill_signal == SIGKILL) return FAULT_TMOUT;
+ if (afl->fsrv.child_timed_out && afl->kill_signal == SIGKILL)
+ return FAULT_TMOUT;
return FAULT_CRASH;
@@ -584,7 +591,7 @@ u8 run_cmplog_target(afl_state_t *afl, u32 timeout) {
}
-u8 common_fuzz_cmplog_stuff(afl_state_t *afl, u8* out_buf, u32 len) {
+u8 common_fuzz_cmplog_stuff(afl_state_t *afl, u8 *out_buf, u32 len) {
u8 fault;
@@ -627,10 +634,11 @@ u8 common_fuzz_cmplog_stuff(afl_state_t *afl, u8* out_buf, u32 len) {
/* This handles FAULT_ERROR for us: */
- /* afl->queued_discovered += save_if_interesting(afl, argv, out_buf, len, fault);
+ /* afl->queued_discovered += save_if_interesting(afl, argv, out_buf, len,
+ fault);
- if (!(afl->stage_cur % afl->stats_update_freq) || afl->stage_cur + 1 == afl->stage_max)
- show_stats(afl); */
+ if (!(afl->stage_cur % afl->stats_update_freq) || afl->stage_cur + 1 ==
+ afl->stage_max) show_stats(afl); */
return 0;
diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c
index 8c8e085e..ce7e5780 100644
--- a/src/afl-fuzz-extras.c
+++ b/src/afl-fuzz-extras.c
@@ -45,7 +45,8 @@ static int compare_extras_use_d(const void* p1, const void* p2) {
/* Read extras from a file, sort by size. */
-void load_extras_file(afl_state_t *afl, u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
+void load_extras_file(afl_state_t* afl, u8* fname, u32* min_len, u32* max_len,
+ u32 dict_level) {
FILE* f;
u8 buf[MAX_LINE];
@@ -120,8 +121,8 @@ void load_extras_file(afl_state_t *afl, u8* fname, u32* min_len, u32* max_len, u
/* Okay, let's allocate memory and copy data between "...", handling
\xNN escaping, \\, and \". */
- afl->extras =
- ck_realloc_block(afl->extras, (afl->extras_cnt + 1) * sizeof(struct extra_data));
+ afl->extras = ck_realloc_block(
+ afl->extras, (afl->extras_cnt + 1) * sizeof(struct extra_data));
wptr = afl->extras[afl->extras_cnt].data = ck_alloc(rptr - lptr);
@@ -183,7 +184,7 @@ void load_extras_file(afl_state_t *afl, u8* fname, u32* min_len, u32* max_len, u
/* Read extras from the extras directory and sort them by size. */
-void load_extras(afl_state_t *afl, u8* dir) {
+void load_extras(afl_state_t* afl, u8* dir) {
DIR* d;
struct dirent* de;
@@ -241,8 +242,8 @@ void load_extras(afl_state_t *afl, u8* dir) {
if (min_len > st.st_size) min_len = st.st_size;
if (max_len < st.st_size) max_len = st.st_size;
- afl->extras =
- ck_realloc_block(afl->extras, (afl->extras_cnt + 1) * sizeof(struct extra_data));
+ afl->extras = ck_realloc_block(
+ afl->extras, (afl->extras_cnt + 1) * sizeof(struct extra_data));
afl->extras[afl->extras_cnt].data = ck_alloc(st.st_size);
afl->extras[afl->extras_cnt].len = st.st_size;
@@ -266,10 +267,11 @@ check_and_sort:
if (!afl->extras_cnt) FATAL("No usable files in '%s'", dir);
- qsort(afl->extras, afl->extras_cnt, sizeof(struct extra_data), compare_extras_len);
+ qsort(afl->extras, afl->extras_cnt, sizeof(struct extra_data),
+ compare_extras_len);
- OKF("Loaded %u extra tokens, size range %s to %s.", afl->extras_cnt, DMS(min_len),
- DMS(max_len));
+ OKF("Loaded %u extra tokens, size range %s to %s.", afl->extras_cnt,
+ DMS(min_len), DMS(max_len));
if (max_len > 32)
WARNF("Some tokens are relatively large (%s) - consider trimming.",
@@ -293,7 +295,7 @@ static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) {
/* Maybe add automatic extra. */
-void maybe_add_auto(afl_state_t *afl, u8* mem, u32 len) {
+void maybe_add_auto(afl_state_t* afl, u8* mem, u32 len) {
u32 i;
@@ -349,7 +351,8 @@ void maybe_add_auto(afl_state_t *afl, u8* mem, u32 len) {
for (i = 0; i < afl->a_extras_cnt; ++i) {
- if (afl->a_extras[i].len == len && !memcmp_nocase(afl->a_extras[i].data, mem, len)) {
+ if (afl->a_extras[i].len == len &&
+ !memcmp_nocase(afl->a_extras[i].data, mem, len)) {
afl->a_extras[i].hit_cnt++;
goto sort_a_extras;
@@ -364,8 +367,8 @@ void maybe_add_auto(afl_state_t *afl, u8* mem, u32 len) {
if (afl->a_extras_cnt < MAX_AUTO_EXTRAS) {
- afl->a_extras = ck_realloc_block(afl->a_extras,
- (afl->a_extras_cnt + 1) * sizeof(struct extra_data));
+ afl->a_extras = ck_realloc_block(
+ afl->a_extras, (afl->a_extras_cnt + 1) * sizeof(struct extra_data));
afl->a_extras[afl->a_extras_cnt].data = ck_memdup(mem, len);
afl->a_extras[afl->a_extras_cnt].len = len;
@@ -392,14 +395,14 @@ sort_a_extras:
/* Then, sort the top USE_AUTO_EXTRAS entries by size. */
- qsort(afl->a_extras, MIN(USE_AUTO_EXTRAS, afl->a_extras_cnt), sizeof(struct extra_data),
- compare_extras_len);
+ qsort(afl->a_extras, MIN(USE_AUTO_EXTRAS, afl->a_extras_cnt),
+ sizeof(struct extra_data), compare_extras_len);
}
/* Save automatically generated extras. */
-void save_auto(afl_state_t *afl) {
+void save_auto(afl_state_t* afl) {
u32 i;
@@ -408,7 +411,8 @@ void save_auto(afl_state_t *afl) {
for (i = 0; i < MIN(USE_AUTO_EXTRAS, afl->a_extras_cnt); ++i) {
- u8* fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", afl->out_dir, i);
+ u8* fn =
+ alloc_printf("%s/queue/.state/auto_extras/auto_%06u", afl->out_dir, i);
s32 fd;
fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);
@@ -426,7 +430,7 @@ void save_auto(afl_state_t *afl) {
/* Load automatically generated extras. */
-void load_auto(afl_state_t *afl) {
+void load_auto(afl_state_t* afl) {
u32 i;
@@ -470,7 +474,7 @@ void load_auto(afl_state_t *afl) {
/* Destroy extras. */
-void destroy_extras(afl_state_t *afl) {
+void destroy_extras(afl_state_t* afl) {
u32 i;
diff --git a/src/afl-fuzz-globals.c b/src/afl-fuzz-globals.c
index 004af2b4..83f9d912 100644
--- a/src/afl-fuzz-globals.c
+++ b/src/afl-fuzz-globals.c
@@ -32,11 +32,11 @@ s32 interesting_32[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32};
char *power_names[POWER_SCHEDULES_NUM] = {"explore", "fast", "coe",
"lin", "quad", "exploit"};
-u8 *doc_path = NULL; /* gath to documentation dir */
+u8 *doc_path = NULL; /* gath to documentation dir */
/* Initialize MOpt "globals" for this afl state */
-static void init_mopt_globals(afl_state_t *afl){
+static void init_mopt_globals(afl_state_t *afl) {
MOpt_globals_t *core = &afl->mopt_globals_pilot;
core->finds = afl->core_operator_finds_puppet;
@@ -68,53 +68,54 @@ static void init_mopt_globals(afl_state_t *afl){
}
-/* A global pointer to all instances is needed (for now) for signals to arrive */
+/* A global pointer to all instances is needed (for now) for signals to arrive
+ */
list_t afl_states = {0};
/* Initializes an afl_state_t. */
void afl_state_init(afl_state_t *afl) {
-
- afl->w_init = 0.9;
- afl->w_end = 0.3;
- afl->g_max = 5000;
- afl->period_pilot_tmp = 5000.0;
- afl->schedule = EXPLORE; /* Power schedule (default: EXPLORE)*/
- afl->havoc_max_mult = HAVOC_MAX_MULT;
-
- afl->clear_screen = 1; /* Window resized? */
- afl->havoc_div = 1; /* Cycle count divisor for havoc */
- afl->stage_name = "init"; /* Name of the current fuzz stage */
- afl->splicing_with = -1; /* Splicing with which test case? */
+
+ afl->w_init = 0.9;
+ afl->w_end = 0.3;
+ afl->g_max = 5000;
+ afl->period_pilot_tmp = 5000.0;
+ afl->schedule = EXPLORE; /* Power schedule (default: EXPLORE)*/
+ afl->havoc_max_mult = HAVOC_MAX_MULT;
+
+ afl->clear_screen = 1; /* Window resized? */
+ afl->havoc_div = 1; /* Cycle count divisor for havoc */
+ afl->stage_name = "init"; /* Name of the current fuzz stage */
+ afl->splicing_with = -1; /* Splicing with which test case? */
#ifdef HAVE_AFFINITY
- afl->cpu_aff = -1; /* Selected CPU core */
-#endif /* HAVE_AFFINITY */
+ afl->cpu_aff = -1; /* Selected CPU core */
+#endif /* HAVE_AFFINITY */
- afl->fsrv.use_stdin = 1;
+ afl->fsrv.use_stdin = 1;
- afl->cal_cycles = CAL_CYCLES;
- afl->cal_cycles_long = CAL_CYCLES_LONG;
+ afl->cal_cycles = CAL_CYCLES;
+ afl->cal_cycles_long = CAL_CYCLES_LONG;
- afl->fsrv.exec_tmout = EXEC_TIMEOUT;
- afl->hang_tmout = EXEC_TIMEOUT;
+ afl->fsrv.exec_tmout = EXEC_TIMEOUT;
+ afl->hang_tmout = EXEC_TIMEOUT;
- afl->fsrv.mem_limit = MEM_LIMIT;
+ afl->fsrv.mem_limit = MEM_LIMIT;
- afl->stats_update_freq = 1;
+ afl->stats_update_freq = 1;
#ifndef HAVE_ARC4RANDOM
- afl->fsrv.dev_urandom_fd = -1;
+ afl->fsrv.dev_urandom_fd = -1;
#endif
- afl->fsrv.dev_null_fd = -1;
+ afl->fsrv.dev_null_fd = -1;
- afl->fsrv.child_pid = -1;
- afl->fsrv.out_dir_fd = -1;
+ afl->fsrv.child_pid = -1;
+ afl->fsrv.out_dir_fd = -1;
- init_mopt_globals(afl);
+ init_mopt_globals(afl);
- list_append(&afl_states, afl);
+ list_append(&afl_states, afl);
}
@@ -122,6 +123,6 @@ void afl_state_init(afl_state_t *afl) {
void afl_state_deinit(afl_state_t *afl) {
- list_remove(&afl_states, afl);
+ list_remove(&afl_states, afl);
-}
\ No newline at end of file
+}
diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c
index 6cd0cefa..427cea04 100644
--- a/src/afl-fuzz-init.c
+++ b/src/afl-fuzz-init.c
@@ -30,7 +30,7 @@
/* Build a list of processes bound to specific cores. Returns -1 if nothing
can be found. Assumes an upper bound of 4k CPUs. */
-void bind_to_free_cpu(afl_state_t *afl) {
+void bind_to_free_cpu(afl_state_t* afl) {
#if defined(__linux__) || defined(__FreeBSD__) || defined(__DragonFly__)
cpu_set_t c;
@@ -272,7 +272,7 @@ cpuset_destroy(c);
/* Load postprocessor, if available. */
-void setup_post(afl_state_t *afl) {
+void setup_post(afl_state_t* afl) {
void* dh;
u8* fn = get_afl_env("AFL_POST_LIBRARY");
@@ -298,7 +298,7 @@ void setup_post(afl_state_t *afl) {
/* Shuffle an array of pointers. Might be slightly biased. */
-static void shuffle_ptrs(afl_state_t *afl, void** ptrs, u32 cnt) {
+static void shuffle_ptrs(afl_state_t* afl, void** ptrs, u32 cnt) {
u32 i;
@@ -316,7 +316,7 @@ static void shuffle_ptrs(afl_state_t *afl, void** ptrs, u32 cnt) {
/* Read all testcases from the input directory, then queue them for testing.
Called at startup. */
-void read_testcases(afl_state_t *afl) {
+void read_testcases(afl_state_t* afl) {
struct dirent** nl;
s32 nl_cnt;
@@ -368,8 +368,8 @@ void read_testcases(afl_state_t *afl) {
struct stat st;
u8* fn2 = alloc_printf("%s/%s", afl->in_dir, nl[i]->d_name);
- u8* dfn =
- alloc_printf("%s/.state/deterministic_done/%s", afl->in_dir, nl[i]->d_name);
+ u8* dfn = alloc_printf("%s/.state/deterministic_done/%s", afl->in_dir,
+ nl[i]->d_name);
u8 passed_det = 0;
@@ -428,7 +428,7 @@ void read_testcases(afl_state_t *afl) {
/* Examine map coverage. Called once, for first test case. */
-static void check_map_coverage(afl_state_t *afl) {
+static void check_map_coverage(afl_state_t* afl) {
u32 i;
@@ -444,7 +444,7 @@ static void check_map_coverage(afl_state_t *afl) {
/* Perform dry run of all test cases to confirm that the app is working as
expected. This is done only for the initial inputs, and only once. */
-void perform_dry_run(afl_state_t *afl) {
+void perform_dry_run(afl_state_t* afl) {
struct queue_entry* q = afl->queue;
u32 cal_failures = 0;
@@ -493,9 +493,9 @@ void perform_dry_run(afl_state_t *afl) {
if (afl->timeout_given) {
- /* The -t nn+ syntax in the command line sets afl->timeout_given to '2' and
- instructs afl-fuzz to tolerate but skip queue entries that time
- out. */
+ /* The -t nn+ syntax in the command line sets afl->timeout_given to
+ '2' and instructs afl-fuzz to tolerate but skip queue entries that
+ time out. */
if (afl->timeout_given > 1) {
@@ -593,7 +593,8 @@ void perform_dry_run(afl_state_t *afl) {
"other options\n"
" fail, poke <afl-users@googlegroups.com> for "
"troubleshooting tips.\n",
- DMS(afl->fsrv.mem_limit << 20), afl->fsrv.mem_limit - 1, doc_path);
+ DMS(afl->fsrv.mem_limit << 20), afl->fsrv.mem_limit - 1,
+ doc_path);
} else {
@@ -702,7 +703,7 @@ static void link_or_copy(u8* old_path, u8* new_path) {
/* Create hard links for input test cases in the output directory, choosing
good names and pivoting accordingly. */
-void pivot_inputs(afl_state_t *afl) {
+void pivot_inputs(afl_state_t* afl) {
struct queue_entry* q = afl->queue;
u32 id = 0;
@@ -794,7 +795,7 @@ void pivot_inputs(afl_state_t *afl) {
/* When resuming, try to find the queue position to start from. This makes sense
only when resuming, and when we can find the original fuzzer_stats. */
-u32 find_start_position(afl_state_t *afl) {
+u32 find_start_position(afl_state_t* afl) {
static u8 tmp[4096]; /* Ought to be enough for anybody. */
@@ -831,7 +832,7 @@ u32 find_start_position(afl_state_t *afl) {
-t given, we don't want to keep auto-scaling the timeout over and over
again to prevent it from growing due to random flukes. */
-void find_timeout(afl_state_t *afl) {
+void find_timeout(afl_state_t* afl) {
static u8 tmp[4096]; /* Ought to be enough for anybody. */
@@ -953,7 +954,7 @@ double get_runnable_processes(void) {
/* Delete the temporary directory used for in-place session resume. */
-void nuke_resume_dir(afl_state_t *afl) {
+void nuke_resume_dir(afl_state_t* afl) {
u8* fn;
@@ -993,7 +994,7 @@ dir_cleanup_failed:
is not currently running, and if the last run time isn't too great.
Resume fuzzing if `-` is set as in_dir or if AFL_AUTORESUME is set */
-static void handle_existing_out_dir(afl_state_t *afl) {
+static void handle_existing_out_dir(afl_state_t* afl) {
FILE* f;
u8* fn = alloc_printf("%s/fuzzer_stats", afl->out_dir);
@@ -1048,7 +1049,8 @@ static void handle_existing_out_dir(afl_state_t *afl) {
/* Let's see how much work is at stake. */
- if (!afl->in_place_resume && last_update - start_time2 > OUTPUT_GRACE * 60) {
+ if (!afl->in_place_resume &&
+ last_update - start_time2 > OUTPUT_GRACE * 60) {
SAYF("\n" cLRD "[-] " cRST
"The job output directory already exists and contains the results "
@@ -1086,7 +1088,7 @@ static void handle_existing_out_dir(afl_state_t *afl) {
afl->in_dir = alloc_printf("%s/_resume", afl->out_dir);
- rename(orig_q, afl->in_dir); /* Ignore errors */
+ rename(orig_q, afl->in_dir); /* Ignore errors */
OKF("Output directory exists, will attempt session resume.");
@@ -1140,7 +1142,8 @@ static void handle_existing_out_dir(afl_state_t *afl) {
if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
ck_free(fn);
- /* All right, let's do <afl->out_dir>/crashes/id:* and <afl->out_dir>/hangs/id:*. */
+ /* All right, let's do <afl->out_dir>/crashes/id:* and
+ * <afl->out_dir>/hangs/id:*. */
if (!afl->in_place_resume) {
@@ -1275,7 +1278,7 @@ dir_cleanup_failed:
/* Prepare output directories and fds. */
-void setup_dirs_fds(afl_state_t *afl) {
+void setup_dirs_fds(afl_state_t* afl) {
u8* tmp;
s32 fd;
@@ -1300,7 +1303,8 @@ void setup_dirs_fds(afl_state_t *afl) {
#ifndef __sun
- if (afl->fsrv.out_dir_fd < 0 || flock(afl->fsrv.out_dir_fd, LOCK_EX | LOCK_NB))
+ if (afl->fsrv.out_dir_fd < 0 ||
+ flock(afl->fsrv.out_dir_fd, LOCK_EX | LOCK_NB))
PFATAL("Unable to flock() output directory.");
#endif /* !__sun */
@@ -1398,7 +1402,7 @@ void setup_dirs_fds(afl_state_t *afl) {
}
-void setup_cmdline_file(afl_state_t *afl, char **argv) {
+void setup_cmdline_file(afl_state_t* afl, char** argv) {
u8* tmp;
s32 fd;
@@ -1428,7 +1432,7 @@ void setup_cmdline_file(afl_state_t *afl, char **argv) {
/* Setup the output file for fuzzed data, if not using -f. */
-void setup_stdio_file(afl_state_t *afl) {
+void setup_stdio_file(afl_state_t* afl) {
u8* fn;
if (afl->file_extension) {
@@ -1527,7 +1531,7 @@ void check_crash_handling(void) {
/* Check CPU governor. */
-void check_cpu_governor(afl_state_t *afl) {
+void check_cpu_governor(afl_state_t* afl) {
#ifdef __linux__
FILE* f;
@@ -1537,8 +1541,8 @@ void check_cpu_governor(afl_state_t *afl) {
if (get_afl_env("AFL_SKIP_CPUFREQ")) return;
if (afl->cpu_aff > 0)
- snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpu", afl->cpu_aff,
- "/cpufreq/scaling_governor");
+ snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpu",
+ afl->cpu_aff, "/cpufreq/scaling_governor");
else
snprintf(tmp, sizeof(tmp), "%s",
"/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor");
@@ -1650,7 +1654,7 @@ void check_cpu_governor(afl_state_t *afl) {
/* Count the number of logical CPU cores. */
-void get_core_count(afl_state_t *afl) {
+void get_core_count(afl_state_t* afl) {
#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) || \
defined(__DragonFly__)
@@ -1661,7 +1665,8 @@ void get_core_count(afl_state_t *afl) {
#ifdef __APPLE__
- if (sysctlbyname("hw.logicalcpu", &afl->cpu_core_count, &s, NULL, 0) < 0) return;
+ if (sysctlbyname("hw.logicalcpu", &afl->cpu_core_count, &s, NULL, 0) < 0)
+ return;
#else
@@ -1737,7 +1742,7 @@ void get_core_count(afl_state_t *afl) {
/* Validate and fix up afl->out_dir and sync_dir when using -S. */
-void fix_up_sync(afl_state_t *afl) {
+void fix_up_sync(afl_state_t* afl) {
u8* x = afl->sync_id;
@@ -1844,7 +1849,7 @@ static void handle_skipreq(int sig) {
isn't a shell script - a common and painful mistake. We also check for
a valid ELF header and for evidence of AFL instrumentation. */
-void check_binary(afl_state_t *afl, u8* fname) {
+void check_binary(afl_state_t* afl, u8* fname) {
u8* env_path = 0;
struct stat st;
@@ -1896,7 +1901,8 @@ void check_binary(afl_state_t *afl, u8* fname) {
}
- if (!afl->fsrv.target_path) FATAL("Program '%s' not found or not executable", fname);
+ if (!afl->fsrv.target_path)
+ FATAL("Program '%s' not found or not executable", fname);
}
@@ -1904,8 +1910,10 @@ void check_binary(afl_state_t *afl, u8* fname) {
/* Check for blatant user errors. */
- if ((!strncmp(afl->fsrv.target_path, "/tmp/", 5) && !strchr(afl->fsrv.target_path + 5, '/')) ||
- (!strncmp(afl->fsrv.target_path, "/var/tmp/", 9) && !strchr(afl->fsrv.target_path + 9, '/')))
+ if ((!strncmp(afl->fsrv.target_path, "/tmp/", 5) &&
+ !strchr(afl->fsrv.target_path + 5, '/')) ||
+ (!strncmp(afl->fsrv.target_path, "/var/tmp/", 9) &&
+ !strchr(afl->fsrv.target_path + 9, '/')))
FATAL("Please don't keep binaries in /tmp or /var/tmp");
fd = open(afl->fsrv.target_path, O_RDONLY);
@@ -1914,7 +1922,8 @@ void check_binary(afl_state_t *afl, u8* fname) {
f_data = mmap(0, f_len, PROT_READ, MAP_PRIVATE, fd, 0);
- if (f_data == MAP_FAILED) PFATAL("Unable to mmap file '%s'", afl->fsrv.target_path);
+ if (f_data == MAP_FAILED)
+ PFATAL("Unable to mmap file '%s'", afl->fsrv.target_path);
close(fd);
@@ -2033,7 +2042,7 @@ void check_binary(afl_state_t *afl, u8* fname) {
/* Trim and possibly create a banner for the run. */
-void fix_up_banner(afl_state_t *afl, u8* name) {
+void fix_up_banner(afl_state_t* afl, u8* name) {
if (!afl->use_banner) {
@@ -2065,7 +2074,7 @@ void fix_up_banner(afl_state_t *afl, u8* name) {
/* Check if we're on TTY. */
-void check_if_tty(afl_state_t *afl) {
+void check_if_tty(afl_state_t* afl) {
struct winsize ws;
@@ -2139,7 +2148,7 @@ void setup_signal_handlers(void) {
/* Make a copy of the current command line. */
-void save_cmdline(afl_state_t *afl, u32 argc, char **argv) {
+void save_cmdline(afl_state_t* afl, u32 argc, char** argv) {
u32 len = 1, i;
u8* buf;
diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c
index b41d4d2b..0edd93a0 100644
--- a/src/afl-fuzz-mutators.c
+++ b/src/afl-fuzz-mutators.c
@@ -30,12 +30,13 @@ void load_custom_mutator(afl_state_t*, const char*);
void load_custom_mutator_py(afl_state_t*, const char*);
#endif
-void setup_custom_mutator(afl_state_t *afl) {
+void setup_custom_mutator(afl_state_t* afl) {
/* Try mutator library first */
u8* fn = getenv("AFL_CUSTOM_MUTATOR_LIBRARY");
if (fn) {
+
if (afl->limit_time_sig)
FATAL(
"MOpt and custom mutator are mutually exclusive. We accept pull "
@@ -74,7 +75,7 @@ void setup_custom_mutator(afl_state_t *afl) {
}
-void destroy_custom_mutator(afl_state_t *afl) {
+void destroy_custom_mutator(afl_state_t* afl) {
if (afl->mutator) {
@@ -90,11 +91,12 @@ void destroy_custom_mutator(afl_state_t *afl) {
}
ck_free(afl->mutator);
+
}
}
-void load_custom_mutator(afl_state_t *afl, const char *fn) {
+void load_custom_mutator(afl_state_t* afl, const char* fn) {
void* dh;
afl->mutator = ck_alloc(sizeof(struct custom_mutator));
@@ -109,7 +111,8 @@ void load_custom_mutator(afl_state_t *afl, const char *fn) {
/* Mutator */
/* "afl_custom_init", optional for backward compatibility */
afl->mutator->afl_custom_init = dlsym(dh, "afl_custom_init");
- if (!afl->mutator->afl_custom_init) WARNF("Symbol 'afl_custom_init' not found.");
+ if (!afl->mutator->afl_custom_init)
+ WARNF("Symbol 'afl_custom_init' not found.");
/* "afl_custom_fuzz" or "afl_custom_mutator", required */
afl->mutator->afl_custom_fuzz = dlsym(dh, "afl_custom_fuzz");
@@ -137,7 +140,8 @@ void load_custom_mutator(afl_state_t *afl, const char *fn) {
/* "afl_custom_trim", optional */
afl->mutator->afl_custom_trim = dlsym(dh, "afl_custom_trim");
- if (!afl->mutator->afl_custom_trim) WARNF("Symbol 'afl_custom_trim' not found.");
+ if (!afl->mutator->afl_custom_trim)
+ WARNF("Symbol 'afl_custom_trim' not found.");
/* "afl_custom_post_trim", optional */
afl->mutator->afl_custom_post_trim = dlsym(dh, "afl_custom_post_trim");
@@ -156,12 +160,13 @@ void load_custom_mutator(afl_state_t *afl, const char *fn) {
}
/* "afl_custom_havoc_mutation", optional */
- afl->mutator->afl_custom_havoc_mutation = dlsym(dh, "afl_custom_havoc_mutation");
+ afl->mutator->afl_custom_havoc_mutation =
+ dlsym(dh, "afl_custom_havoc_mutation");
if (!afl->mutator->afl_custom_havoc_mutation)
WARNF("Symbol 'afl_custom_havoc_mutation' not found.");
/* "afl_custom_havoc_mutation", optional */
- afl->mutator->afl_custom_havoc_mutation_probability =
+ afl->mutator->afl_custom_havoc_mutation_probability =
dlsym(dh, "afl_custom_havoc_mutation_probability");
if (!afl->mutator->afl_custom_havoc_mutation_probability)
WARNF("Symbol 'afl_custom_havoc_mutation_probability' not found.");
@@ -172,7 +177,8 @@ void load_custom_mutator(afl_state_t *afl, const char *fn) {
WARNF("Symbol 'afl_custom_queue_get' not found.");
/* "afl_custom_queue_new_entry", optional */
- afl->mutator->afl_custom_queue_new_entry = dlsym(dh, "afl_custom_queue_new_entry");
+ afl->mutator->afl_custom_queue_new_entry =
+ dlsym(dh, "afl_custom_queue_new_entry");
if (!afl->mutator->afl_custom_queue_new_entry)
WARNF("Symbol 'afl_custom_queue_new_entry' not found");
@@ -184,7 +190,7 @@ void load_custom_mutator(afl_state_t *afl, const char *fn) {
}
-u8 trim_case_custom(afl_state_t *afl, struct queue_entry* q, u8* in_buf) {
+u8 trim_case_custom(afl_state_t* afl, struct queue_entry* q, u8* in_buf) {
static u8 tmp[64];
static u8 clean_trace[MAP_SIZE];
@@ -306,17 +312,16 @@ abort_trimming:
}
#ifdef USE_PYTHON
-void load_custom_mutator_py(afl_state_t *afl, const char* module_name) {
+void load_custom_mutator_py(afl_state_t* afl, const char* module_name) {
- PyObject **py_functions = afl->py_functions;
+ PyObject** py_functions = afl->py_functions;
afl->mutator = ck_alloc(sizeof(struct custom_mutator));
afl->mutator->name = module_name;
ACTF("Loading Python mutator library from '%s'...", module_name);
- if (py_functions[PY_FUNC_INIT])
- afl->mutator->afl_custom_init = init_py;
+ if (py_functions[PY_FUNC_INIT]) afl->mutator->afl_custom_init = init_py;
/* "afl_custom_fuzz" should not be NULL, but the interface of Python mutator
is quite different from the custom mutator. */
@@ -331,15 +336,14 @@ void load_custom_mutator_py(afl_state_t *afl, const char* module_name) {
if (py_functions[PY_FUNC_POST_TRIM])
afl->mutator->afl_custom_post_trim = post_trim_py;
- if (py_functions[PY_FUNC_TRIM])
- afl->mutator->afl_custom_trim = trim_py;
-
+ if (py_functions[PY_FUNC_TRIM]) afl->mutator->afl_custom_trim = trim_py;
+
if (py_functions[PY_FUNC_HAVOC_MUTATION])
afl->mutator->afl_custom_havoc_mutation = havoc_mutation_py;
-
+
if (py_functions[PY_FUNC_HAVOC_MUTATION_PROBABILITY])
- afl->mutator->afl_custom_havoc_mutation_probability =
- havoc_mutation_probability_py;
+ afl->mutator->afl_custom_havoc_mutation_probability =
+ havoc_mutation_probability_py;
if (py_functions[PY_FUNC_QUEUE_GET])
afl->mutator->afl_custom_queue_get = queue_get_py;
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index 19cdddb9..aefcb231 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -27,7 +27,7 @@
/* MOpt */
-int select_algorithm(afl_state_t *afl) {
+int select_algorithm(afl_state_t* afl) {
int i_puppet, j_puppet;
@@ -52,7 +52,8 @@ int select_algorithm(afl_state_t *afl) {
}
- if (j_puppet == 1 && sele < afl->probability_now[afl->swarm_now][i_puppet - 1])
+ if (j_puppet == 1 &&
+ sele < afl->probability_now[afl->swarm_now][i_puppet - 1])
FATAL("error select_algorithm");
return i_puppet;
@@ -61,7 +62,7 @@ int select_algorithm(afl_state_t *afl) {
/* Helper to choose random block len for block operations in fuzz_one().
Doesn't return zero, provided that max_len is > 0. */
-static u32 choose_block_len(afl_state_t *afl, u32 limit) {
+static u32 choose_block_len(afl_state_t* afl, u32 limit) {
u32 min_value, max_value;
u32 rlim = MIN(afl->queue_cycle, 3);
@@ -334,7 +335,7 @@ static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {
function is a tad too long... returns 0 if fuzzed successfully, 1 if
skipped or bailed out. */
-u8 fuzz_one_original(afl_state_t *afl) {
+u8 fuzz_one_original(afl_state_t* afl) {
s32 len, fd, temp_len, i, j;
u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
@@ -375,7 +376,9 @@ u8 fuzz_one_original(afl_state_t *afl) {
UR(afl, 100) < SKIP_TO_NEW_PROB)
return 1;
- } else if (!afl->dumb_mode && !afl->queue_cur->favored && afl->queued_paths > 10) {
+ } else if (!afl->dumb_mode && !afl->queue_cur->favored &&
+
+ afl->queued_paths > 10) {
/* Otherwise, still possibly skip non-favored cases, albeit less often.
The odds of skipping stuff are higher for already-fuzzed inputs and
@@ -439,7 +442,8 @@ u8 fuzz_one_original(afl_state_t *afl) {
if (afl->queue_cur->cal_failed < CAL_CHANCES) {
- res = calibrate_case(afl, afl->queue_cur, in_buf, afl->queue_cycle - 1, 0);
+ res =
+ calibrate_case(afl, afl->queue_cur, in_buf, afl->queue_cycle - 1, 0);
if (res == FAULT_ERROR) FATAL("Unable to execute target application");
@@ -493,7 +497,8 @@ u8 fuzz_one_original(afl_state_t *afl) {
if (afl->shm.cmplog_mode) {
- if (input_to_state_stage(afl, in_buf, out_buf, len, afl->queue_cur->exec_cksum))
+ if (input_to_state_stage(afl, in_buf, out_buf, len,
+ afl->queue_cur->exec_cksum))
goto abandon_entry;
}
@@ -517,7 +522,8 @@ u8 fuzz_one_original(afl_state_t *afl) {
/* Skip deterministic fuzzing if exec path checksum puts this out of scope
for this master instance. */
- if (afl->master_max && (afl->queue_cur->exec_cksum % afl->master_max) != afl->master_id - 1) {
+ if (afl->master_max &&
+ (afl->queue_cur->exec_cksum % afl->master_max) != afl->master_id - 1) {
goto custom_mutator_stage;
@@ -596,7 +602,8 @@ u8 fuzz_one_original(afl_state_t *afl) {
/* If at end of file and we are still collecting a string, grab the
final character and force output. */
- if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[afl->stage_cur >> 3];
+ if (a_len < MAX_AUTO_EXTRA)
+ a_collect[a_len] = out_buf[afl->stage_cur >> 3];
++a_len;
if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
@@ -620,7 +627,8 @@ u8 fuzz_one_original(afl_state_t *afl) {
if (cksum != afl->queue_cur->exec_cksum) {
- if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[afl->stage_cur >> 3];
+ if (a_len < MAX_AUTO_EXTRA)
+ a_collect[a_len] = out_buf[afl->stage_cur >> 3];
++a_len;
}
@@ -1392,10 +1400,12 @@ skip_interest:
is redundant, or if its entire span has no bytes set in the effector
map. */
- if ((afl->extras_cnt > MAX_DET_EXTRAS && UR(afl, afl->extras_cnt) >= MAX_DET_EXTRAS) ||
+ if ((afl->extras_cnt > MAX_DET_EXTRAS &&
+ UR(afl, afl->extras_cnt) >= MAX_DET_EXTRAS) ||
afl->extras[j].len > len - i ||
!memcmp(afl->extras[j].data, out_buf + i, afl->extras[j].len) ||
- !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, afl->extras[j].len))) {
+ !memchr(eff_map + EFF_APOS(i), 1,
+ EFF_SPAN_ALEN(i, afl->extras[j].len))) {
--afl->stage_max;
continue;
@@ -1552,7 +1562,7 @@ custom_mutator_stage:
const u32 max_seed_size = MAX_FILE;
orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
-
+
for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
struct queue_entry* target;
@@ -1597,10 +1607,9 @@ custom_mutator_stage:
new_buf = ck_alloc_nozero(target->len);
ck_read(fd, new_buf, target->len, target->fname);
close(fd);
-
- size_t mutated_size = afl->mutator->afl_custom_fuzz(afl, &out_buf, len,
- new_buf, target->len,
- max_seed_size);
+
+ size_t mutated_size = afl->mutator->afl_custom_fuzz(
+ afl, &out_buf, len, new_buf, target->len, max_seed_size);
ck_free(new_buf);
@@ -1663,8 +1672,8 @@ havoc_stage:
afl->stage_name = "havoc";
afl->stage_short = "havoc";
- afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
- afl->havoc_div / 100;
+ afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+ perf_score / afl->havoc_div / 100;
} else {
@@ -1686,11 +1695,12 @@ havoc_stage:
havoc_queued = afl->queued_paths;
u8 stacked_custom = (afl->mutator && afl->mutator->afl_custom_havoc_mutation);
- u8 stacked_custom_prob = 6; // like one of the default mutations in havoc
+ u8 stacked_custom_prob = 6; // like one of the default mutations in havoc
if (stacked_custom && afl->mutator->afl_custom_havoc_mutation_probability) {
- stacked_custom_prob = afl->mutator->afl_custom_havoc_mutation_probability(afl);
+ stacked_custom_prob =
+ afl->mutator->afl_custom_havoc_mutation_probability(afl);
if (stacked_custom_prob > 100)
FATAL(
"The probability returned by afl_custom_havoc_mutation_propability "
@@ -1708,12 +1718,12 @@ havoc_stage:
afl->stage_cur_val = use_stacking;
for (i = 0; i < use_stacking; ++i) {
-
+
if (stacked_custom && UR(afl, 100) < stacked_custom_prob) {
-
- temp_len = afl->mutator->afl_custom_havoc_mutation(afl, &out_buf, temp_len,
- MAX_FILE);
-
+
+ temp_len = afl->mutator->afl_custom_havoc_mutation(afl, &out_buf,
+ temp_len, MAX_FILE);
+
}
switch (UR(afl, 15 + ((afl->extras_cnt + afl->a_extras_cnt) ? 2 : 0))) {
@@ -1729,7 +1739,8 @@ havoc_stage:
/* Set byte to interesting value. */
- out_buf[UR(afl, temp_len)] = interesting_8[UR(afl, sizeof(interesting_8))];
+ out_buf[UR(afl, temp_len)] =
+ interesting_8[UR(afl, sizeof(interesting_8))];
break;
case 2:
@@ -1952,7 +1963,8 @@ havoc_stage:
memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
else
memset(new_buf + clone_to,
- UR(afl, 2) ? UR(afl, 256) : out_buf[UR(afl, temp_len)], clone_len);
+ UR(afl, 2) ? UR(afl, 256) : out_buf[UR(afl, temp_len)],
+ clone_len);
/* Tail */
memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
@@ -1987,7 +1999,8 @@ havoc_stage:
} else
- memset(out_buf + copy_to, UR(afl, 2) ? UR(afl, 256) : out_buf[UR(afl, temp_len)],
+ memset(out_buf + copy_to,
+ UR(afl, 2) ? UR(afl, 256) : out_buf[UR(afl, temp_len)],
copy_len);
break;
@@ -2013,20 +2026,21 @@ havoc_stage:
if (extra_len > temp_len) break;
insert_at = UR(afl, temp_len - extra_len + 1);
- memcpy(out_buf + insert_at, afl->a_extras[use_extra].data, extra_len);
+ memcpy(out_buf + insert_at, afl->a_extras[use_extra].data,
+ extra_len);
} else {
/* No auto extras or odds in our favor. Use the dictionary. */
u32 use_extra = UR(afl, afl->extras_cnt);
- u32 extra_len =afl->extras[use_extra].len;
+ u32 extra_len = afl->extras[use_extra].len;
u32 insert_at;
if (extra_len > temp_len) break;
insert_at = UR(afl, temp_len - extra_len + 1);
- memcpy(out_buf + insert_at,afl->extras[use_extra].data, extra_len);
+ memcpy(out_buf + insert_at, afl->extras[use_extra].data, extra_len);
}
@@ -2055,12 +2069,13 @@ havoc_stage:
memcpy(new_buf, out_buf, insert_at);
/* Inserted part */
- memcpy(new_buf + insert_at, afl->a_extras[use_extra].data, extra_len);
+ memcpy(new_buf + insert_at, afl->a_extras[use_extra].data,
+ extra_len);
} else {
use_extra = UR(afl, afl->extras_cnt);
- extra_len =afl->extras[use_extra].len;
+ extra_len = afl->extras[use_extra].len;
if (temp_len + extra_len >= MAX_FILE) break;
@@ -2070,7 +2085,7 @@ havoc_stage:
memcpy(new_buf, out_buf, insert_at);
/* Inserted part */
- memcpy(new_buf + insert_at,afl->extras[use_extra].data, extra_len);
+ memcpy(new_buf + insert_at, afl->extras[use_extra].data, extra_len);
}
@@ -2144,8 +2159,8 @@ havoc_stage:
retry_splicing:
- if (afl->use_splicing && splice_cycle++ < SPLICE_CYCLES && afl->queued_paths > 1 &&
- afl->queue_cur->len > 1) {
+ if (afl->use_splicing && splice_cycle++ < SPLICE_CYCLES &&
+ afl->queued_paths > 1 && afl->queue_cur->len > 1) {
struct queue_entry* target;
u32 tid, split_at;
@@ -2252,7 +2267,8 @@ radamsa_stage:
afl->stage_name = "radamsa";
afl->stage_short = "radamsa";
- afl->stage_max = (HAVOC_CYCLES * perf_score / afl->havoc_div / 100) << afl->use_radamsa;
+ afl->stage_max = (HAVOC_CYCLES * perf_score / afl->havoc_div / 100)
+ << afl->use_radamsa;
if (afl->stage_max < HAVOC_MIN) afl->stage_max = HAVOC_MIN;
@@ -2268,8 +2284,8 @@ radamsa_stage:
for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
- u32 new_len =
- afl->radamsa_mutate_ptr(save_buf, len, new_buf, max_len, get_rand_seed(afl));
+ u32 new_len = afl->radamsa_mutate_ptr(save_buf, len, new_buf, max_len,
+ get_rand_seed(afl));
if (new_len) {
@@ -2336,7 +2352,7 @@ abandon_entry:
}
/* MOpt mode */
-u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
+u8 mopt_common_fuzzing(afl_state_t* afl, MOpt_globals_t MOpt_globals) {
if (!MOpt_globals.is_pilot_mode) {
@@ -2378,7 +2394,9 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
UR(afl, 100) < SKIP_TO_NEW_PROB)
return 1;
- } else if (!afl->dumb_mode && !afl->queue_cur->favored && afl->queued_paths > 10) {
+ } else if (!afl->dumb_mode && !afl->queue_cur->favored &&
+
+ afl->queued_paths > 10) {
/* Otherwise, still possibly skip non-favored cases, albeit less often.
The odds of skipping stuff are higher for already-fuzzed inputs and
@@ -2416,7 +2434,8 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
- if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", afl->queue_cur->fname);
+ if (orig_in == MAP_FAILED)
+ PFATAL("Unable to mmap '%s'", afl->queue_cur->fname);
close(fd);
@@ -2440,7 +2459,8 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
if (afl->queue_cur->cal_failed < CAL_CHANCES) {
- res = calibrate_case(afl, afl->queue_cur, in_buf, afl->queue_cycle - 1, 0);
+ res =
+ calibrate_case(afl, afl->queue_cur, in_buf, afl->queue_cycle - 1, 0);
if (res == FAULT_ERROR) FATAL("Unable to execute target application");
@@ -2492,20 +2512,23 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
this entry ourselves (was_fuzzed), or if it has gone through deterministic
testing in earlier, resumed runs (passed_det). */
- if (afl->skip_deterministic || afl->queue_cur->was_fuzzed || afl->queue_cur->passed_det)
+ if (afl->skip_deterministic || afl->queue_cur->was_fuzzed ||
+ afl->queue_cur->passed_det)
goto havoc_stage;
/* Skip deterministic fuzzing if exec path checksum puts this out of scope
for this master instance. */
- if (afl->master_max && (afl->queue_cur->exec_cksum % afl->master_max) != afl->master_id - 1)
+ if (afl->master_max &&
+ (afl->queue_cur->exec_cksum % afl->master_max) != afl->master_id - 1)
goto havoc_stage;
cur_ms_lv = get_cur_time();
- if (!(afl->key_puppet == 0 && ((cur_ms_lv - afl->last_path_time < afl->limit_time_puppet) ||
- (afl->last_crash_time != 0 &&
- cur_ms_lv - afl->last_crash_time < afl->limit_time_puppet) ||
- afl->last_path_time == 0))) {
+ if (!(afl->key_puppet == 0 &&
+ ((cur_ms_lv - afl->last_path_time < afl->limit_time_puppet) ||
+ (afl->last_crash_time != 0 &&
+ cur_ms_lv - afl->last_crash_time < afl->limit_time_puppet) ||
+ afl->last_path_time == 0))) {
afl->key_puppet = 1;
goto pacemaker_fuzzing;
@@ -2585,7 +2608,8 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
/* If at end of file and we are still collecting a string, grab the
final character and force output. */
- if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[afl->stage_cur >> 3];
+ if (a_len < MAX_AUTO_EXTRA)
+ a_collect[a_len] = out_buf[afl->stage_cur >> 3];
++a_len;
if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
@@ -2609,14 +2633,15 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
if (cksum != afl->queue_cur->exec_cksum) {
- if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[afl->stage_cur >> 3];
+ if (a_len < MAX_AUTO_EXTRA)
+ a_collect[a_len] = out_buf[afl->stage_cur >> 3];
++a_len;
}
- } /* if (afl->stage_cur & 7) == 7 */
+ } /* if (afl->stage_cur & 7) == 7 */
- } /* for afl->stage_cur */
+ } /* for afl->stage_cur */
new_hit_cnt = afl->queued_paths + afl->unique_crashes;
@@ -2643,7 +2668,7 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
FLIP_BIT(out_buf, afl->stage_cur);
FLIP_BIT(out_buf, afl->stage_cur + 1);
- } /* for afl->stage_cur */
+ } /* for afl->stage_cur */
new_hit_cnt = afl->queued_paths + afl->unique_crashes;
@@ -2674,7 +2699,7 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
FLIP_BIT(out_buf, afl->stage_cur + 2);
FLIP_BIT(out_buf, afl->stage_cur + 3);
- } /* for afl->stage_cur */
+ } /* for afl->stage_cur */
new_hit_cnt = afl->queued_paths + afl->unique_crashes;
@@ -2751,7 +2776,7 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
out_buf[afl->stage_cur] ^= 0xFF;
- } /* for afl->stage_cur */
+ } /* for afl->stage_cur */
/* If the effector map is more than EFF_MAX_PERC dense, just flag the
whole thing as worth fuzzing, since we wouldn't be saving much time
@@ -3381,10 +3406,12 @@ skip_interest:
is redundant, or if its entire span has no bytes set in the effector
map. */
- if ((afl->extras_cnt > MAX_DET_EXTRAS && UR(afl, afl->extras_cnt) >= MAX_DET_EXTRAS) ||
+ if ((afl->extras_cnt > MAX_DET_EXTRAS &&
+ UR(afl, afl->extras_cnt) >= MAX_DET_EXTRAS) ||
afl->extras[j].len > len - i ||
!memcmp(afl->extras[j].data, out_buf + i, afl->extras[j].len) ||
- !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, afl->extras[j].len))) {
+ !memchr(eff_map + EFF_APOS(i), 1,
+ EFF_SPAN_ALEN(i, afl->extras[j].len))) {
--afl->stage_max;
continue;
@@ -3440,7 +3467,7 @@ skip_interest:
/* Copy tail */
memcpy(ex_tmp + i + afl->extras[j].len, out_buf + i, len - i);
- if (common_fuzz_stuff(afl, ex_tmp, len +afl->extras[j].len)) {
+ if (common_fuzz_stuff(afl, ex_tmp, len + afl->extras[j].len)) {
ck_free(ex_tmp);
goto abandon_entry;
@@ -3539,14 +3566,15 @@ pacemaker_fuzzing:
afl->stage_name = MOpt_globals.havoc_stagename;
afl->stage_short = MOpt_globals.havoc_stagenameshort;
- afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
- afl->havoc_div / 100;
+ afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+ perf_score / afl->havoc_div / 100;
} else {
perf_score = orig_perf;
- snprintf(afl->stage_name_buf64, 64, MOpt_globals.splice_stageformat, splice_cycle);
+ snprintf(afl->stage_name_buf64, 64, MOpt_globals.splice_stageformat,
+ splice_cycle);
afl->stage_name = afl->stage_name_buf64;
afl->stage_short = MOpt_globals.splice_stagenameshort;
afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100;
@@ -3571,7 +3599,7 @@ pacemaker_fuzzing:
}
- } /* if afl->key_puppet == 1 */
+ } /* if afl->key_puppet == 1 */
{
@@ -3590,12 +3618,13 @@ pacemaker_fuzzing:
afl->stage_name = MOpt_globals.havoc_stagename;
afl->stage_short = MOpt_globals.havoc_stagenameshort;
afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
- perf_score / afl->havoc_div / 100;
+ perf_score / afl->havoc_div / 100;
} else {
perf_score = orig_perf;
- snprintf(afl->stage_name_buf64, 64, MOpt_globals.splice_stageformat, splice_cycle);
+ snprintf(afl->stage_name_buf64, 64, MOpt_globals.splice_stageformat,
+ splice_cycle);
afl->stage_name = afl->stage_name_buf64;
afl->stage_short = MOpt_globals.splice_stagenameshort;
afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100;
@@ -3610,7 +3639,8 @@ pacemaker_fuzzing:
havoc_queued = afl->queued_paths;
- for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
+ for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max;
+ ++afl->stage_cur) {
u32 use_stacking = 1 << (1 + UR(afl, HAVOC_STACK_POW2));
@@ -3748,7 +3778,8 @@ pacemaker_fuzzing:
case 9:
/* Set byte to interesting value. */
if (temp_len < 4) break;
- out_buf[UR(afl, temp_len)] = interesting_8[UR(afl, sizeof(interesting_8))];
+ out_buf[UR(afl, temp_len)] =
+ interesting_8[UR(afl, sizeof(interesting_8))];
MOpt_globals.cycles_v2[STAGE_INTEREST8] += 1;
break;
@@ -3762,8 +3793,8 @@ pacemaker_fuzzing:
} else {
- *(u16*)(out_buf + UR(afl, temp_len - 1)) =
- SWAP16(interesting_16[UR(afl, sizeof(interesting_16) >> 1)]);
+ *(u16*)(out_buf + UR(afl, temp_len - 1)) = SWAP16(
+ interesting_16[UR(afl, sizeof(interesting_16) >> 1)]);
}
@@ -3782,8 +3813,8 @@ pacemaker_fuzzing:
} else {
- *(u32*)(out_buf + UR(afl, temp_len - 3)) =
- SWAP32(interesting_32[UR(afl, sizeof(interesting_32) >> 2)]);
+ *(u32*)(out_buf + UR(afl, temp_len - 3)) = SWAP32(
+ interesting_32[UR(afl, sizeof(interesting_32) >> 2)]);
}
@@ -3862,7 +3893,8 @@ pacemaker_fuzzing:
memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
else
memset(new_buf + clone_to,
- UR(afl, 2) ? UR(afl, 256) : out_buf[UR(afl, temp_len)], clone_len);
+ UR(afl, 2) ? UR(afl, 256) : out_buf[UR(afl, temp_len)],
+ clone_len);
/* Tail */
memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
@@ -3899,7 +3931,8 @@ pacemaker_fuzzing:
} else
memset(out_buf + copy_to,
- UR(afl, 2) ? UR(afl, 256) : out_buf[UR(afl, temp_len)], copy_len);
+ UR(afl, 2) ? UR(afl, 256) : out_buf[UR(afl, temp_len)],
+ copy_len);
MOpt_globals.cycles_v2[STAGE_OverWrite75] += 1;
break;
@@ -3939,7 +3972,8 @@ pacemaker_fuzzing:
}
- if (unlikely(afl->queued_paths + afl->unique_crashes > temp_total_found)) {
+ if (unlikely(afl->queued_paths + afl->unique_crashes >
+ temp_total_found)) {
u64 temp_temp_puppet =
afl->queued_paths + afl->unique_crashes - temp_total_found;
@@ -3953,7 +3987,9 @@ pacemaker_fuzzing:
} /* if */
- } /* for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) { */
+ } /* for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max;
+
+ ++afl->stage_cur) { */
new_hit_cnt = afl->queued_paths + afl->unique_crashes;
@@ -4089,10 +4125,11 @@ pacemaker_fuzzing:
afl->splicing_with = -1;
- /* Update afl->pending_not_fuzzed count if we made it through the calibration
- cycle and have not seen this entry before. */
+ /* Update afl->pending_not_fuzzed count if we made it through the
+ calibration cycle and have not seen this entry before. */
- // if (!afl->stop_soon && !afl->queue_cur->cal_failed && !afl->queue_cur->was_fuzzed) {
+ // if (!afl->stop_soon && !afl->queue_cur->cal_failed &&
+ // !afl->queue_cur->was_fuzzed) {
// afl->queue_cur->was_fuzzed = 1;
// --afl->pending_not_fuzzed;
@@ -4107,9 +4144,10 @@ pacemaker_fuzzing:
if (afl->key_puppet == 1) {
- if (unlikely(afl->queued_paths + afl->unique_crashes >
- ((afl->queued_paths + afl->unique_crashes) * limit_time_bound +
- afl->orig_hit_cnt_puppet))) {
+ if (unlikely(
+ afl->queued_paths + afl->unique_crashes >
+ ((afl->queued_paths + afl->unique_crashes) * limit_time_bound +
+ afl->orig_hit_cnt_puppet))) {
afl->key_puppet = 0;
cur_ms_lv = get_cur_time();
@@ -4175,7 +4213,8 @@ pacemaker_fuzzing:
afl->core_operator_cycles_puppet[i];
afl->core_operator_cycles_puppet_v3[i] =
afl->core_operator_cycles_puppet[i];
- afl->core_operator_finds_puppet_v2[i] = afl->core_operator_finds_puppet[i];
+ afl->core_operator_finds_puppet_v2[i] =
+ afl->core_operator_finds_puppet[i];
}
@@ -4195,14 +4234,19 @@ pacemaker_fuzzing:
if (afl->swarm_now < 0 || afl->swarm_now > swarm_num - 1)
PFATAL("swarm_now error number %d", afl->swarm_now);
- } /* if afl->swarm_now == swarm_num */
+ } /* if afl->swarm_now == swarm_num */
/* adjust pointers dependent on 'afl->swarm_now' */
- afl->mopt_globals_pilot.finds = afl->stage_finds_puppet[afl->swarm_now];
- afl->mopt_globals_pilot.finds_v2 = afl->stage_finds_puppet_v2[afl->swarm_now];
- afl->mopt_globals_pilot.cycles = afl->stage_cycles_puppet[afl->swarm_now];
- afl->mopt_globals_pilot.cycles_v2 = afl->stage_cycles_puppet_v2[afl->swarm_now];
- afl->mopt_globals_pilot.cycles_v3 = afl->stage_cycles_puppet_v3[afl->swarm_now];
+ afl->mopt_globals_pilot.finds =
+ afl->stage_finds_puppet[afl->swarm_now];
+ afl->mopt_globals_pilot.finds_v2 =
+ afl->stage_finds_puppet_v2[afl->swarm_now];
+ afl->mopt_globals_pilot.cycles =
+ afl->stage_cycles_puppet[afl->swarm_now];
+ afl->mopt_globals_pilot.cycles_v2 =
+ afl->stage_cycles_puppet_v2[afl->swarm_now];
+ afl->mopt_globals_pilot.cycles_v3 =
+ afl->stage_cycles_puppet_v3[afl->swarm_now];
} else {
@@ -4224,19 +4268,25 @@ pacemaker_fuzzing:
#undef FLIP_BIT
-u8 core_fuzzing(afl_state_t *afl) {
+u8 core_fuzzing(afl_state_t* afl) {
+
return mopt_common_fuzzing(afl, afl->mopt_globals_core);
+
}
-u8 pilot_fuzzing(afl_state_t *afl) {
+u8 pilot_fuzzing(afl_state_t* afl) {
+
return mopt_common_fuzzing(afl, afl->mopt_globals_pilot);
+
}
-void pso_updating(afl_state_t *afl) {
+void pso_updating(afl_state_t* afl) {
afl->g_now += 1;
if (afl->g_now > afl->g_max) afl->g_now = 0;
- afl->w_now = (afl->w_init - afl->w_end) * (afl->g_max - afl->g_now) / (afl->g_max) + afl->w_end;
+ afl->w_now =
+ (afl->w_init - afl->w_end) * (afl->g_max - afl->g_now) / (afl->g_max) +
+ afl->w_end;
int tmp_swarm, i, j;
u64 temp_operator_finds_puppet = 0;
for (i = 0; i < operator_num; ++i) {
@@ -4259,7 +4309,7 @@ void pso_updating(afl_state_t *afl) {
if (afl->operator_finds_puppet[i])
afl->G_best[i] = (double)((double)(afl->operator_finds_puppet[i]) /
- (double)(temp_operator_finds_puppet));
+ (double)(temp_operator_finds_puppet));
}
@@ -4308,7 +4358,7 @@ void pso_updating(afl_state_t *afl) {
to fuzz_one_original. All documentation references to fuzz_one therefore
mean fuzz_one_original */
-u8 fuzz_one(afl_state_t *afl) {
+u8 fuzz_one(afl_state_t* afl) {
int key_val_lv = 0;
@@ -4353,3 +4403,4 @@ u8 fuzz_one(afl_state_t *afl) {
return key_val_lv;
}
+
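The pso_updating() hunk above only rewraps MOpt's inertia-weight update; the formula itself is unchanged and decays the weight linearly from w_init to w_end over g_max generations. A small sketch of that arithmetic, using the defaults set in afl_state_init() (w_init = 0.9, w_end = 0.3, g_max = 5000):

#include <stdio.h>

int main(void) {
  double w_init = 0.9, w_end = 0.3;          /* defaults from afl_state_init() */
  double g_max  = 5000.0;

  for (double g_now = 0; g_now <= g_max; g_now += 2500) {
    double w_now = (w_init - w_end) * (g_max - g_now) / g_max + w_end;
    printf("g_now=%5.0f  w_now=%.2f\n", g_now, w_now);  /* 0.90, 0.60, 0.30 */
  }
  return 0;
}

At g_now = 0 the weight starts at w_init, and it reaches w_end exactly when g_now hits g_max, after which g_now wraps back to 0.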
diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c
index 195fc6f3..595c1ed0 100644
--- a/src/afl-fuzz-python.c
+++ b/src/afl-fuzz-python.c
@@ -28,22 +28,22 @@
/* Python stuff */
#ifdef USE_PYTHON
-int init_py_module(afl_state_t *afl, u8* module_name) {
+int init_py_module(afl_state_t *afl, u8 *module_name) {
if (!module_name) return 1;
Py_Initialize();
#if PY_MAJOR_VERSION >= 3
- PyObject* py_name = PyUnicode_FromString(module_name);
+ PyObject *py_name = PyUnicode_FromString(module_name);
#else
- PyObject* py_name = PyString_FromString(module_name);
+ PyObject *py_name = PyString_FromString(module_name);
#endif
afl->py_module = PyImport_Import(py_name);
Py_DECREF(py_name);
- PyObject *py_module = afl->py_module;
+ PyObject * py_module = afl->py_module;
PyObject **py_functions = afl->py_functions;
if (afl->py_module != NULL) {
@@ -144,6 +144,7 @@ void finalize_py_module(afl_state_t *afl) {
}
void init_py(afl_state_t *afl, unsigned int seed) {
+
PyObject *py_args, *py_value;
/* Provide the init function a seed for the Python RNG */
@@ -178,7 +179,7 @@ void init_py(afl_state_t *afl, unsigned int seed) {
}
-size_t fuzz_py(afl_state_t *afl, u8** buf, size_t buf_size, u8* add_buf,
+size_t fuzz_py(afl_state_t *afl, u8 **buf, size_t buf_size, u8 *add_buf,
size_t add_buf_size, size_t max_size) {
size_t mutated_size;
@@ -244,7 +245,7 @@ size_t fuzz_py(afl_state_t *afl, u8** buf, size_t buf_size, u8* add_buf,
}
-size_t pre_save_py(afl_state_t *afl, u8* buf, size_t buf_size, u8** out_buf) {
+size_t pre_save_py(afl_state_t *afl, u8 *buf, size_t buf_size, u8 **out_buf) {
size_t out_buf_size;
PyObject *py_args, *py_value;
@@ -280,7 +281,7 @@ size_t pre_save_py(afl_state_t *afl, u8* buf, size_t buf_size, u8** out_buf) {
}
-u32 init_trim_py(afl_state_t *afl, u8* buf, size_t buf_size) {
+u32 init_trim_py(afl_state_t *afl, u8 *buf, size_t buf_size) {
PyObject *py_args, *py_value;
@@ -355,7 +356,7 @@ u32 post_trim_py(afl_state_t *afl, u8 success) {
}
-void trim_py(afl_state_t *afl, u8** out_buf, size_t* out_buf_size) {
+void trim_py(afl_state_t *afl, u8 **out_buf, size_t *out_buf_size) {
PyObject *py_args, *py_value;
@@ -379,7 +380,8 @@ void trim_py(afl_state_t *afl, u8** out_buf, size_t* out_buf_size) {
}
-size_t havoc_mutation_py(afl_state_t *afl, u8** buf, size_t buf_size, size_t max_size) {
+size_t havoc_mutation_py(afl_state_t *afl, u8 **buf, size_t buf_size,
+ size_t max_size) {
size_t mutated_size;
PyObject *py_args, *py_value;
@@ -411,7 +413,8 @@ size_t havoc_mutation_py(afl_state_t *afl, u8** buf, size_t buf_size, size_t max
PyTuple_SetItem(py_args, 1, py_value);
- py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_HAVOC_MUTATION], py_args);
+ py_value =
+ PyObject_CallObject(afl->py_functions[PY_FUNC_HAVOC_MUTATION], py_args);
Py_DECREF(py_args);
@@ -439,8 +442,8 @@ u8 havoc_mutation_probability_py(afl_state_t *afl) {
PyObject *py_args, *py_value;
py_args = PyTuple_New(0);
- py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_HAVOC_MUTATION_PROBABILITY],
- py_args);
+ py_value = PyObject_CallObject(
+ afl->py_functions[PY_FUNC_HAVOC_MUTATION_PROBABILITY], py_args);
Py_DECREF(py_args);
if (py_value != NULL) {
@@ -458,7 +461,7 @@ u8 havoc_mutation_probability_py(afl_state_t *afl) {
}
-u8 queue_get_py(afl_state_t *afl, const u8* filename) {
+u8 queue_get_py(afl_state_t *afl, const u8 *filename) {
PyObject *py_args, *py_value;
@@ -506,8 +509,8 @@ u8 queue_get_py(afl_state_t *afl, const u8* filename) {
}
-void queue_new_entry_py(afl_state_t *afl, const u8* filename_new_queue,
- const u8* filename_orig_queue) {
+void queue_new_entry_py(afl_state_t *afl, const u8 *filename_new_queue,
+ const u8 *filename_orig_queue) {
PyObject *py_args, *py_value;
@@ -549,8 +552,8 @@ void queue_new_entry_py(afl_state_t *afl, const u8* filename_new_queue,
PyTuple_SetItem(py_args, 1, py_value);
// Call
- py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_QUEUE_NEW_ENTRY],
- py_args);
+ py_value =
+ PyObject_CallObject(afl->py_functions[PY_FUNC_QUEUE_NEW_ENTRY], py_args);
Py_DECREF(py_args);
if (py_value == NULL) {
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index c95889f5..988f1ace 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -28,12 +28,13 @@
.state file to avoid repeating deterministic fuzzing when resuming aborted
scans. */
-void mark_as_det_done(afl_state_t *afl, struct queue_entry* q) {
+void mark_as_det_done(afl_state_t* afl, struct queue_entry* q) {
u8* fn = strrchr(q->fname, '/');
s32 fd;
- fn = alloc_printf("%s/queue/.state/deterministic_done/%s", afl->out_dir, fn + 1);
+ fn = alloc_printf("%s/queue/.state/deterministic_done/%s", afl->out_dir,
+ fn + 1);
fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
@@ -48,7 +49,7 @@ void mark_as_det_done(afl_state_t *afl, struct queue_entry* q) {
/* Mark as variable. Create symlinks if possible to make it easier to examine
the files. */
-void mark_as_variable(afl_state_t *afl, struct queue_entry* q) {
+void mark_as_variable(afl_state_t* afl, struct queue_entry* q) {
u8 *fn = strrchr(q->fname, '/') + 1, *ldest;
@@ -73,7 +74,7 @@ void mark_as_variable(afl_state_t *afl, struct queue_entry* q) {
/* Mark / unmark as redundant (edge-only). This is not used for restoring state,
but may be useful for post-processing datasets. */
-void mark_as_redundant(afl_state_t *afl, struct queue_entry* q, u8 state) {
+void mark_as_redundant(afl_state_t* afl, struct queue_entry* q, u8 state) {
u8* fn;
@@ -104,7 +105,7 @@ void mark_as_redundant(afl_state_t *afl, struct queue_entry* q, u8 state) {
/* Append new test case to the queue. */
-void add_to_queue(afl_state_t *afl, u8* fname, u32 len, u8 passed_det) {
+void add_to_queue(afl_state_t* afl, u8* fname, u32 len, u8 passed_det) {
struct queue_entry* q = ck_alloc(sizeof(struct queue_entry));
@@ -154,7 +155,7 @@ void add_to_queue(afl_state_t *afl, u8* fname, u32 len, u8 passed_det) {
/* Destroy the entire queue. */
-void destroy_queue(afl_state_t *afl) {
+void destroy_queue(afl_state_t* afl) {
struct queue_entry *q = afl->queue, *n;
@@ -176,18 +177,19 @@ void destroy_queue(afl_state_t *afl) {
seen in the bitmap so far, and focus on fuzzing them at the expense of
the rest.
- The first step of the process is to maintain a list of afl->top_rated[] entries
- for every byte in the bitmap. We win that slot if there is no previous
- contender, or if the contender has a more favorable speed x size factor. */
+ The first step of the process is to maintain a list of afl->top_rated[]
+ entries for every byte in the bitmap. We win that slot if there is no
+ previous contender, or if the contender has a more favorable speed x size
+ factor. */
-void update_bitmap_score(afl_state_t *afl, struct queue_entry* q) {
+void update_bitmap_score(afl_state_t* afl, struct queue_entry* q) {
u32 i;
u64 fav_factor = q->exec_us * q->len;
u64 fuzz_p2 = next_p2(q->n_fuzz);
- /* For every byte set in afl->fsrv.trace_bits[], see if there is a previous winner,
- and how it compares to us. */
+ /* For every byte set in afl->fsrv.trace_bits[], see if there is a previous
+ winner, and how it compares to us. */
for (i = 0; i < MAP_SIZE; ++i)
@@ -197,7 +199,8 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry* q) {
/* Faster-executing or smaller test cases are favored. */
u64 top_rated_fuzz_p2 = next_p2(afl->top_rated[i]->n_fuzz);
- u64 top_rated_fav_factor = afl->top_rated[i]->exec_us * afl->top_rated[i]->len;
+ u64 top_rated_fav_factor =
+ afl->top_rated[i]->exec_us * afl->top_rated[i]->len;
if (fuzz_p2 > top_rated_fuzz_p2) {
@@ -209,7 +212,8 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry* q) {
}
- if (fav_factor > afl->top_rated[i]->exec_us * afl->top_rated[i]->len) continue;
+ if (fav_factor > afl->top_rated[i]->exec_us * afl->top_rated[i]->len)
+ continue;
/* Looks like we're going to win. Decrease ref count for the
previous winner, discard its afl->fsrv.trace_bits[] if necessary. */
@@ -247,7 +251,7 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry* q) {
until the next run. The favored entries are given more air time during
all fuzzing steps. */
-void cull_queue(afl_state_t *afl) {
+void cull_queue(afl_state_t* afl) {
struct queue_entry* q;
static u8 temp_v[MAP_SIZE >> 3];
@@ -308,7 +312,7 @@ void cull_queue(afl_state_t *afl) {
A helper function for fuzz_one(). Maybe some of these constants should
go into config.h. */
-u32 calculate_score(afl_state_t *afl, struct queue_entry* q) {
+u32 calculate_score(afl_state_t* afl, struct queue_entry* q) {
u32 avg_exec_us = afl->total_cal_us / afl->total_cal_cycles;
u32 avg_bitmap_size = afl->total_bitmap_size / afl->total_bitmap_entries;
@@ -459,7 +463,8 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry* q) {
/* Make sure that we don't go over limit. */
- if (perf_score > afl->havoc_max_mult * 100) perf_score = afl->havoc_max_mult * 100;
+ if (perf_score > afl->havoc_max_mult * 100)
+ perf_score = afl->havoc_max_mult * 100;
return perf_score;
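The favorability test rewrapped in the update_bitmap_score() hunks above boils down to a speed-times-size product: an entry can only displace the current top_rated[] winner for a bitmap byte if its exec_us * len product is no larger. A hedged, self-contained sketch of just that comparison (the n_fuzz/next_p2 tie-break visible in the diff is omitted, and the struct is trimmed to the fields the check uses):

#include <stdint.h>

struct entry { uint64_t exec_us, len; };

/* Returns nonzero if cand is at least as favorable as cur (smaller or equal
   speed x size factor), mirroring the fav_factor check in
   update_bitmap_score(). */
static int more_favorable(const struct entry *cand, const struct entry *cur) {
  uint64_t fav_factor       = cand->exec_us * cand->len;
  uint64_t top_rated_factor = cur->exec_us * cur->len;
  return fav_factor <= top_rated_factor;
}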
diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c
index 560ec419..e012c4c3 100644
--- a/src/afl-fuzz-redqueen.c
+++ b/src/afl-fuzz-redqueen.c
@@ -84,7 +84,7 @@ struct range* pop_biggest_range(struct range** ranges) {
}
-static u8 get_exec_checksum(afl_state_t *afl, u8* buf, u32 len, u32* cksum) {
+static u8 get_exec_checksum(afl_state_t* afl, u8* buf, u32 len, u32* cksum) {
if (unlikely(common_fuzz_stuff(afl, buf, len))) return 1;
@@ -93,7 +93,7 @@ static u8 get_exec_checksum(afl_state_t *afl, u8* buf, u32 len, u32* cksum) {
}
-static void rand_replace(afl_state_t *afl, u8* buf, u32 len) {
+static void rand_replace(afl_state_t* afl, u8* buf, u32 len) {
u32 i;
for (i = 0; i < len; ++i)
@@ -101,7 +101,7 @@ static void rand_replace(afl_state_t *afl, u8* buf, u32 len) {
}
-static u8 colorization(afl_state_t *afl, u8* buf, u32 len, u32 exec_cksum) {
+static u8 colorization(afl_state_t* afl, u8* buf, u32 len, u32 exec_cksum) {
struct range* ranges = add_range(NULL, 0, len);
u8* backup = ck_alloc_nozero(len);
@@ -117,7 +117,8 @@ static u8 colorization(afl_state_t *afl, u8* buf, u32 len, u32 exec_cksum) {
struct range* rng;
afl->stage_cur = 0;
- while ((rng = pop_biggest_range(&ranges)) != NULL && afl->stage_cur < afl->stage_max) {
+ while ((rng = pop_biggest_range(&ranges)) != NULL &&
+ afl->stage_cur < afl->stage_max) {
u32 s = rng->end - rng->start;
if (s == 0) goto empty_range;
@@ -171,7 +172,7 @@ static u8 colorization(afl_state_t *afl, u8* buf, u32 len, u32 exec_cksum) {
} else {
- unlink(afl->queue_cur->fname); /* ignore errors */
+ unlink(afl->queue_cur->fname); /* ignore errors */
fd = open(afl->queue_cur->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);
}
@@ -204,7 +205,7 @@ checksum_fail:
///// Input to State replacement
-static u8 its_fuzz(afl_state_t *afl, u8* buf, u32 len, u8* status) {
+static u8 its_fuzz(afl_state_t* afl, u8* buf, u32 len, u8* status) {
u64 orig_hit_cnt, new_hit_cnt;
@@ -223,9 +224,9 @@ static u8 its_fuzz(afl_state_t *afl, u8* buf, u32 len, u8* status) {
}
-static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
- u8* orig_buf, u8* buf, u32 len, u8 do_reverse,
- u8* status) {
+static u8 cmp_extend_encoding(afl_state_t* afl, struct cmp_header* h,
+ u64 pattern, u64 repl, u32 idx, u8* orig_buf,
+ u8* buf, u32 len, u8 do_reverse, u8* status) {
u64* buf_64 = (u64*)&buf[idx];
u32* buf_32 = (u32*)&buf[idx];
@@ -251,8 +252,8 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header* h, u64 patter
// reverse encoding
if (do_reverse)
- if (unlikely(cmp_extend_encoding(afl, h, SWAP64(pattern), SWAP64(repl), idx,
- orig_buf, buf, len, 0, status)))
+ if (unlikely(cmp_extend_encoding(afl, h, SWAP64(pattern), SWAP64(repl),
+ idx, orig_buf, buf, len, 0, status)))
return 1;
}
@@ -270,8 +271,8 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header* h, u64 patter
// reverse encoding
if (do_reverse)
- if (unlikely(cmp_extend_encoding(afl, h, SWAP32(pattern), SWAP32(repl), idx,
- orig_buf, buf, len, 0, status)))
+ if (unlikely(cmp_extend_encoding(afl, h, SWAP32(pattern), SWAP32(repl),
+ idx, orig_buf, buf, len, 0, status)))
return 1;
}
@@ -289,8 +290,8 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header* h, u64 patter
// reverse encoding
if (do_reverse)
- if (unlikely(cmp_extend_encoding(afl, h, SWAP16(pattern), SWAP16(repl), idx,
- orig_buf, buf, len, 0, status)))
+ if (unlikely(cmp_extend_encoding(afl, h, SWAP16(pattern), SWAP16(repl),
+ idx, orig_buf, buf, len, 0, status)))
return 1;
}
@@ -312,7 +313,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header* h, u64 patter
}
-static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) {
+static void try_to_add_to_dict(afl_state_t* afl, u64 v, u8 shape) {
u8* b = (u8*)&v;
@@ -354,7 +355,7 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) {
}
-static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8* orig_buf, u8* buf, u32 len) {
+static u8 cmp_fuzz(afl_state_t* afl, u32 key, u8* orig_buf, u8* buf, u32 len) {
struct cmp_header* h = &afl->shm.cmp_map->headers[key];
u32 i, j, idx;
@@ -372,21 +373,22 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8* orig_buf, u8* buf, u32 len) {
// opt not in the paper
for (j = 0; j < i; ++j)
- if (afl->shm.cmp_map->log[key][j].v0 == o->v0 && afl->shm.cmp_map->log[key][i].v1 == o->v1)
+ if (afl->shm.cmp_map->log[key][j].v0 == o->v0 &&
+ afl->shm.cmp_map->log[key][i].v1 == o->v1)
goto cmp_fuzz_next_iter;
for (idx = 0; idx < len && fails < 8; ++idx) {
- if (unlikely(cmp_extend_encoding(afl, h, o->v0, o->v1, idx, orig_buf, buf, len,
- 1, &status)))
+ if (unlikely(cmp_extend_encoding(afl, h, o->v0, o->v1, idx, orig_buf, buf,
+ len, 1, &status)))
return 1;
if (status == 2)
++fails;
else if (status == 1)
break;
- if (unlikely(cmp_extend_encoding(afl, h, o->v1, o->v0, idx, orig_buf, buf, len,
- 1, &status)))
+ if (unlikely(cmp_extend_encoding(afl, h, o->v1, o->v0, idx, orig_buf, buf,
+ len, 1, &status)))
return 1;
if (status == 2)
++fails;
@@ -412,8 +414,9 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8* orig_buf, u8* buf, u32 len) {
}
-static u8 rtn_extend_encoding(afl_state_t *afl, struct cmp_header* h, u8* pattern, u8* repl, u32 idx,
- u8* orig_buf, u8* buf, u32 len, u8* status) {
+static u8 rtn_extend_encoding(afl_state_t* afl, struct cmp_header* h,
+ u8* pattern, u8* repl, u32 idx, u8* orig_buf,
+ u8* buf, u32 len, u8* status) {
u32 i;
u32 its_len = MIN(32, len - idx);
@@ -437,7 +440,7 @@ static u8 rtn_extend_encoding(afl_state_t *afl, struct cmp_header* h, u8* patter
}
-static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8* orig_buf, u8* buf, u32 len) {
+static u8 rtn_fuzz(afl_state_t* afl, u32 key, u8* orig_buf, u8* buf, u32 len) {
struct cmp_header* h = &afl->shm.cmp_map->headers[key];
u32 i, j, idx;
@@ -451,7 +454,8 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8* orig_buf, u8* buf, u32 len) {
for (i = 0; i < loggeds; ++i) {
- struct cmpfn_operands* o = &((struct cmpfn_operands*)afl->shm.cmp_map->log[key])[i];
+ struct cmpfn_operands* o =
+ &((struct cmpfn_operands*)afl->shm.cmp_map->log[key])[i];
// opt not in the paper
for (j = 0; j < i; ++j)
@@ -461,16 +465,16 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8* orig_buf, u8* buf, u32 len) {
for (idx = 0; idx < len && fails < 8; ++idx) {
- if (unlikely(rtn_extend_encoding(afl, h, o->v0, o->v1, idx, orig_buf, buf, len,
- &status)))
+ if (unlikely(rtn_extend_encoding(afl, h, o->v0, o->v1, idx, orig_buf, buf,
+ len, &status)))
return 1;
if (status == 2)
++fails;
else if (status == 1)
break;
- if (unlikely(rtn_extend_encoding(afl, h, o->v1, o->v0, idx, orig_buf, buf, len,
- &status)))
+ if (unlikely(rtn_extend_encoding(afl, h, o->v1, o->v0, idx, orig_buf, buf,
+ len, &status)))
return 1;
if (status == 2)
++fails;
@@ -499,7 +503,7 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8* orig_buf, u8* buf, u32 len) {
///// Input to State stage
// afl->queue_cur->exec_cksum
-u8 input_to_state_stage(afl_state_t *afl, u8* orig_buf, u8* buf, u32 len,
+u8 input_to_state_stage(afl_state_t* afl, u8* orig_buf, u8* buf, u32 len,
u32 exec_cksum) {
u8 r = 1;
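Several of the rewrapped calls above are cmp_extend_encoding()'s "reverse encoding" retries: when a logged comparison operand does not match the input at the current offset in host byte order, the same pattern/replacement pair is tried again byte-swapped (SWAP16/SWAP32/SWAP64). A hedged sketch of that idea for the 32-bit case; try_replace32 and bswap32 are hypothetical helpers standing in for the real code paths and the SWAP32 macro.

#include <stdint.h>
#include <string.h>

static uint32_t bswap32(uint32_t v) {        /* stand-in for SWAP32 */
  return (v >> 24) | ((v >> 8) & 0xff00) | ((v << 8) & 0xff0000) | (v << 24);
}

/* If buf[idx..idx+3] holds pattern in either byte order, patch in repl
   (in the matching order) and report success. */
static int try_replace32(uint8_t *buf, uint32_t idx, uint32_t pattern,
                         uint32_t repl) {
  uint32_t cur;
  memcpy(&cur, buf + idx, 4);
  if (cur == pattern)          { memcpy(buf + idx, &repl, 4); return 1; }
  if (cur == bswap32(pattern)) { uint32_t r = bswap32(repl);
                                 memcpy(buf + idx, &r, 4);    return 1; }
  return 0;
}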
diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c
index 4ff59f99..fd8b1e46 100644
--- a/src/afl-fuzz-run.c
+++ b/src/afl-fuzz-run.c
@@ -28,7 +28,7 @@
/* Execute target application, monitoring for timeouts. Return status
information. The called program will update afl->fsrv.trace_bits[]. */
-u8 run_target(afl_state_t *afl, u32 timeout) {
+u8 run_target(afl_state_t* afl, u32 timeout) {
static struct itimerval it;
static u32 prev_timed_out = 0;
@@ -81,8 +81,9 @@ u8 run_target(afl_state_t *afl, u32 timeout) {
setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
- /* Isolate the process and configure standard descriptors. If afl->fsrv.out_file is
- specified, stdin is /dev/null; otherwise, afl->fsrv.out_fd is cloned instead. */
+ /* Isolate the process and configure standard descriptors. If
+ afl->fsrv.out_file is specified, stdin is /dev/null; otherwise,
+ afl->fsrv.out_fd is cloned instead. */
setsid();
@@ -165,11 +166,13 @@ u8 run_target(afl_state_t *afl, u32 timeout) {
setitimer(ITIMER_REAL, &it, NULL);
- /* The SIGALRM handler simply kills the afl->fsrv.child_pid and sets afl->fsrv.child_timed_out. */
+ /* The SIGALRM handler simply kills the afl->fsrv.child_pid and sets
+ * afl->fsrv.child_timed_out. */
if (afl->dumb_mode == 1 || afl->no_forkserver) {
- if (waitpid(afl->fsrv.child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
+ if (waitpid(afl->fsrv.child_pid, &status, 0) <= 0)
+ PFATAL("waitpid() failed");
} else {
@@ -218,8 +221,8 @@ u8 run_target(afl_state_t *afl, u32 timeout) {
++afl->total_execs;
/* Any subsequent operations on afl->fsrv.trace_bits must not be moved by the
- compiler below this point. Past this location, afl->fsrv.trace_bits[] behave
- very normally and do not have to be treated as volatile. */
+ compiler below this point. Past this location, afl->fsrv.trace_bits[]
+ behave very normally and do not have to be treated as volatile. */
MEM_BARRIER();
@@ -239,7 +242,8 @@ u8 run_target(afl_state_t *afl, u32 timeout) {
afl->kill_signal = WTERMSIG(status);
- if (afl->fsrv.child_timed_out && afl->kill_signal == SIGKILL) return FAULT_TMOUT;
+ if (afl->fsrv.child_timed_out && afl->kill_signal == SIGKILL)
+ return FAULT_TMOUT;
return FAULT_CRASH;
@@ -262,18 +266,18 @@ u8 run_target(afl_state_t *afl, u32 timeout) {
}
-/* Write modified data to file for testing. If afl->fsrv.out_file is set, the old file
- is unlinked and a new one is created. Otherwise, afl->fsrv.out_fd is rewound and
- truncated. */
+/* Write modified data to file for testing. If afl->fsrv.out_file is set, the
+ old file is unlinked and a new one is created. Otherwise, afl->fsrv.out_fd is
+ rewound and truncated. */
-void write_to_testcase(afl_state_t *afl, void* mem, u32 len) {
+void write_to_testcase(afl_state_t* afl, void* mem, u32 len) {
s32 fd = afl->fsrv.out_fd;
#ifdef _AFL_DOCUMENT_MUTATIONS
s32 doc_fd;
- char* fn = alloc_printf("%s/mutations/%09u:%s", afl->out_dir, afl->document_counter++,
- describe_op(0));
+ char* fn = alloc_printf("%s/mutations/%09u:%s", afl->out_dir,
+ afl->document_counter++, describe_op(0));
if (fn != NULL) {
if ((doc_fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600)) >= 0) {
@@ -298,7 +302,7 @@ void write_to_testcase(afl_state_t *afl, void* mem, u32 len) {
} else {
- unlink(afl->fsrv.out_file); /* Ignore errors. */
+ unlink(afl->fsrv.out_file); /* Ignore errors. */
fd = open(afl->fsrv.out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
}
@@ -312,7 +316,8 @@ void write_to_testcase(afl_state_t *afl, void* mem, u32 len) {
if (afl->mutator && afl->mutator->afl_custom_pre_save) {
u8* new_data;
- size_t new_size = afl->mutator->afl_custom_pre_save(afl, mem, len, &new_data);
+ size_t new_size =
+ afl->mutator->afl_custom_pre_save(afl, mem, len, &new_data);
ck_write(fd, new_data, new_size, afl->fsrv.out_file);
ck_free(new_data);
@@ -335,7 +340,8 @@ void write_to_testcase(afl_state_t *afl, void* mem, u32 len) {
/* The same, but with an adjustable gap. Used for trimming. */
-static void write_with_gap(afl_state_t *afl, void* mem, u32 len, u32 skip_at, u32 skip_len) {
+static void write_with_gap(afl_state_t* afl, void* mem, u32 len, u32 skip_at,
+ u32 skip_len) {
s32 fd = afl->fsrv.out_fd;
u32 tail_len = len - skip_at - skip_len;
@@ -348,7 +354,7 @@ static void write_with_gap(afl_state_t *afl, void* mem, u32 len, u32 skip_at, u3
} else {
- unlink(afl->fsrv.out_file); /* Ignore errors. */
+ unlink(afl->fsrv.out_file); /* Ignore errors. */
fd = open(afl->fsrv.out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
}
@@ -362,7 +368,8 @@ static void write_with_gap(afl_state_t *afl, void* mem, u32 len, u32 skip_at, u3
if (skip_at) ck_write(fd, mem, skip_at, afl->fsrv.out_file);
u8* memu8 = mem;
- if (tail_len) ck_write(fd, memu8 + skip_at + skip_len, tail_len, afl->fsrv.out_file);
+ if (tail_len)
+ ck_write(fd, memu8 + skip_at + skip_len, tail_len, afl->fsrv.out_file);
if (!afl->fsrv.out_file) {
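
A small sketch of the gap write used for trimming, with illustrative names: copy the bytes before skip_at and the bytes after skip_at + skip_len, so the target sees the candidate input with the middle chunk removed.

#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>

static void write_with_gap_sketch(int fd, const unsigned char *mem, size_t len,
                                  size_t skip_at, size_t skip_len) {
  size_t tail_len = len - skip_at - skip_len;       /* bytes after the gap */

  if (skip_at && write(fd, mem, skip_at) != (ssize_t)skip_at) return;
  if (tail_len) (void)write(fd, mem + skip_at + skip_len, tail_len);
}
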
@@ -379,8 +386,8 @@ static void write_with_gap(afl_state_t *afl, void* mem, u32 len, u32 skip_at, u3
to warn about flaky or otherwise problematic test cases early on; and when
new paths are discovered to detect variable behavior and so on. */
-u8 calibrate_case(afl_state_t *afl, struct queue_entry* q, u8* use_mem, u32 handicap,
- u8 from_queue) {
+u8 calibrate_case(afl_state_t* afl, struct queue_entry* q, u8* use_mem,
+ u32 handicap, u8 from_queue) {
static u8 first_trace[MAP_SIZE];
@@ -398,8 +405,8 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry* q, u8* use_mem, u32 hand
to intermittent latency. */
if (!from_queue || afl->resuming_fuzz)
- use_tmout =
- MAX(afl->fsrv.exec_tmout + CAL_TMOUT_ADD, afl->fsrv.exec_tmout * CAL_TMOUT_PERC / 100);
+ use_tmout = MAX(afl->fsrv.exec_tmout + CAL_TMOUT_ADD,
+ afl->fsrv.exec_tmout * CAL_TMOUT_PERC / 100);
++q->cal_failed;
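
Worked example of the calibration timeout bump above. The real CAL_TMOUT_ADD and CAL_TMOUT_PERC values live in config.h; 50 and 125 below are only illustrative.

#define MAX(a, b) (((a) > (b)) ? (a) : (b))

static unsigned cal_tmout(unsigned exec_tmout, unsigned add, unsigned perc) {
  /* e.g. exec_tmout = 1000, add = 50, perc = 125
     -> MAX(1050, 1250) = 1250 ms for calibration runs */
  return MAX(exec_tmout + add, exec_tmout * perc / 100);
}
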
@@ -409,8 +416,10 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry* q, u8* use_mem, u32 hand
/* Make sure the forkserver is up before we do anything, and let's not
count its spin-up time toward binary calibration. */
- if (afl->dumb_mode != 1 && !afl->no_forkserver && !afl->fsrv.fsrv_pid) afl_fsrv_start(&afl->fsrv, afl->argv);
- if (afl->dumb_mode != 1 && !afl->no_forkserver && !afl->cmplog_fsrv_pid && afl->shm.cmplog_mode)
+ if (afl->dumb_mode != 1 && !afl->no_forkserver && !afl->fsrv.fsrv_pid)
+ afl_fsrv_start(&afl->fsrv, afl->argv);
+ if (afl->dumb_mode != 1 && !afl->no_forkserver && !afl->cmplog_fsrv_pid &&
+ afl->shm.cmplog_mode)
init_cmplog_forkserver(afl);
if (q->exec_cksum) memcpy(first_trace, afl->fsrv.trace_bits, MAP_SIZE);
@@ -421,7 +430,8 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry* q, u8* use_mem, u32 hand
u32 cksum;
- if (!first_run && !(afl->stage_cur % afl->stats_update_freq)) show_stats(afl);
+ if (!first_run && !(afl->stage_cur % afl->stats_update_freq))
+ show_stats(afl);
write_to_testcase(afl, use_mem, q->len);
@@ -432,7 +442,8 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry* q, u8* use_mem, u32 hand
if (afl->stop_soon || fault != afl->crash_mode) goto abort_calibration;
- if (!afl->dumb_mode && !afl->stage_cur && !count_bytes(afl->fsrv.trace_bits)) {
+ if (!afl->dumb_mode && !afl->stage_cur &&
+ !count_bytes(afl->fsrv.trace_bits)) {
fault = FAULT_NOINST;
goto abort_calibration;
@@ -534,7 +545,7 @@ abort_calibration:
/* Grab interesting test cases from other fuzzers. */
-void sync_fuzzers(afl_state_t *afl) {
+void sync_fuzzers(afl_state_t* afl) {
DIR* sd;
struct dirent* sd_ent;
@@ -562,7 +573,8 @@ void sync_fuzzers(afl_state_t *afl) {
/* Skip dot files and our own output directory. */
- if (sd_ent->d_name[0] == '.' || !strcmp(afl->sync_id, sd_ent->d_name)) continue;
+ if (sd_ent->d_name[0] == '.' || !strcmp(afl->sync_id, sd_ent->d_name))
+ continue;
/* Skip anything that doesn't have a queue/ subdirectory. */
@@ -577,7 +589,8 @@ void sync_fuzzers(afl_state_t *afl) {
/* Retrieve the ID of the last seen test case. */
- qd_synced_path = alloc_printf("%s/.synced/%s", afl->out_dir, sd_ent->d_name);
+ qd_synced_path =
+ alloc_printf("%s/.synced/%s", afl->out_dir, sd_ent->d_name);
id_fd = open(qd_synced_path, O_RDWR | O_CREAT, 0600);
@@ -610,7 +623,8 @@ void sync_fuzzers(afl_state_t *afl) {
/* OK, sounds like a new one. Let's give it a try. */
- if (afl->syncing_case >= next_min_accept) next_min_accept = afl->syncing_case + 1;
+ if (afl->syncing_case >= next_min_accept)
+ next_min_accept = afl->syncing_case + 1;
path = alloc_printf("%s/%s", qd_path, qd_ent->d_name);
@@ -646,7 +660,8 @@ void sync_fuzzers(afl_state_t *afl) {
if (afl->stop_soon) goto close_sync;
afl->syncing_party = sd_ent->d_name;
- afl->queued_imported += save_if_interesting(afl, mem, st.st_size, fault);
+ afl->queued_imported +=
+ save_if_interesting(afl, mem, st.st_size, fault);
afl->syncing_party = 0;
munmap(mem, st.st_size);
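
A minimal sketch of the sync scan described above, with illustrative paths and names: walk sync_dir, skip dot entries and our own sync_id, and only consider peers that actually have a queue/ subdirectory.

#include <dirent.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

static void scan_peers(const char *sync_dir, const char *sync_id) {
  DIR *sd = opendir(sync_dir);
  struct dirent *ent;

  if (!sd) return;

  while ((ent = readdir(sd))) {
    char qd_path[PATH_MAX];
    struct stat st;

    /* Skip dot files and our own output directory. */
    if (ent->d_name[0] == '.' || !strcmp(sync_id, ent->d_name)) continue;

    snprintf(qd_path, sizeof(qd_path), "%s/%s/queue", sync_dir, ent->d_name);
    if (stat(qd_path, &st) || !S_ISDIR(st.st_mode)) continue;

    printf("peer with a queue: %s\n", ent->d_name);
  }

  closedir(sd);
}
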
@@ -678,7 +693,7 @@ void sync_fuzzers(afl_state_t *afl) {
trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of
file size, to keep the stage short and sweet. */
-u8 trim_case(afl_state_t *afl, struct queue_entry* q, u8* in_buf) {
+u8 trim_case(afl_state_t* afl, struct queue_entry* q, u8* in_buf) {
/* Custom mutator trimmer */
if (afl->mutator && afl->mutator->afl_custom_trim)
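
A sketch of the trimming schedule mentioned in the comment above, under assumed step bounds (the real TRIM_* constants are in config.h): round the length up to a power of two, start with chunks of 1/16 of that size, and halve the chunk size down to 1/1024.

static unsigned next_pow2_sketch(unsigned v) {     /* assumes v > 0 */
  v--;
  v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
  return v + 1;
}

static void trim_schedule_sketch(unsigned len) {
  unsigned len_p2 = next_pow2_sketch(len);
  unsigned remove_len = len_p2 / 16;                /* first pass: 1/16  */
  unsigned min_step = len_p2 / 1024;                /* last pass: 1/1024 */

  if (!min_step) min_step = 1;

  while (remove_len >= min_step) {
    /* ...try dropping consecutive remove_len-byte blocks and keep the ones
       whose removal leaves the observed execution path unchanged... */
    remove_len >>= 1;
  }
}
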
@@ -814,7 +829,7 @@ abort_trimming:
error conditions, returning 1 if it's time to bail out. This is
a helper function for fuzz_one(). */
-u8 common_fuzz_stuff(afl_state_t *afl, u8* out_buf, u32 len) {
+u8 common_fuzz_stuff(afl_state_t* afl, u8* out_buf, u32 len) {
u8 fault;
@@ -859,7 +874,8 @@ u8 common_fuzz_stuff(afl_state_t *afl, u8* out_buf, u32 len) {
afl->queued_discovered += save_if_interesting(afl, out_buf, len, fault);
- if (!(afl->stage_cur % afl->stats_update_freq) || afl->stage_cur + 1 == afl->stage_max)
+ if (!(afl->stage_cur % afl->stats_update_freq) ||
+ afl->stage_cur + 1 == afl->stage_max)
show_stats(afl);
return 0;
diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
index c9a1dc86..f2f6efb9 100644
--- a/src/afl-fuzz-stats.c
+++ b/src/afl-fuzz-stats.c
@@ -27,14 +27,15 @@
/* Update stats file for unattended monitoring. */
-void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability, double eps) {
+void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,
+ double eps) {
static double last_bcvg, last_stab, last_eps;
static struct rusage rus;
- u8* fn = alloc_printf("%s/fuzzer_stats", afl->out_dir);
+ u8 * fn = alloc_printf("%s/fuzzer_stats", afl->out_dir);
s32 fd;
- FILE* f;
+ FILE *f;
fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);
@@ -101,23 +102,28 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability, dou
"command_line : %s\n",
afl->start_time / 1000, get_cur_time() / 1000, getpid(),
afl->queue_cycle ? (afl->queue_cycle - 1) : 0, afl->total_execs,
- /*eps,*/ afl->total_execs / ((double)(get_cur_time() - afl->start_time) / 1000),
- afl->queued_paths, afl->queued_favored, afl->queued_discovered, afl->queued_imported,
- afl->max_depth, afl->current_entry, afl->pending_favored, afl->pending_not_fuzzed,
- afl->queued_variable, stability, bitmap_cvg, afl->unique_crashes, afl->unique_hangs,
- afl->last_path_time / 1000, afl->last_crash_time / 1000, afl->last_hang_time / 1000,
- afl->total_execs - afl->last_crash_execs, afl->fsrv.exec_tmout, afl->slowest_exec_ms,
+ /*eps,*/ afl->total_execs /
+ ((double)(get_cur_time() - afl->start_time) / 1000),
+ afl->queued_paths, afl->queued_favored, afl->queued_discovered,
+ afl->queued_imported, afl->max_depth, afl->current_entry,
+ afl->pending_favored, afl->pending_not_fuzzed, afl->queued_variable,
+ stability, bitmap_cvg, afl->unique_crashes, afl->unique_hangs,
+ afl->last_path_time / 1000, afl->last_crash_time / 1000,
+ afl->last_hang_time / 1000, afl->total_execs - afl->last_crash_execs,
+ afl->fsrv.exec_tmout, afl->slowest_exec_ms,
#ifdef __APPLE__
(unsigned long int)(rus.ru_maxrss >> 20),
#else
(unsigned long int)(rus.ru_maxrss >> 10),
#endif
- afl->use_banner, afl->unicorn_mode ? "unicorn" : "", afl->qemu_mode ? "qemu " : "",
- afl->dumb_mode ? " dumb " : "", afl->no_forkserver ? "no_fsrv " : "",
- afl->crash_mode ? "crash " : "", afl->persistent_mode ? "persistent " : "",
+ afl->use_banner, afl->unicorn_mode ? "unicorn" : "",
+ afl->qemu_mode ? "qemu " : "", afl->dumb_mode ? " dumb " : "",
+ afl->no_forkserver ? "no_fsrv " : "", afl->crash_mode ? "crash " : "",
+ afl->persistent_mode ? "persistent " : "",
afl->deferred_mode ? "deferred " : "",
- (afl->unicorn_mode || afl->qemu_mode || afl->dumb_mode || afl->no_forkserver || afl->crash_mode ||
- afl->persistent_mode || afl->deferred_mode)
+ (afl->unicorn_mode || afl->qemu_mode || afl->dumb_mode ||
+ afl->no_forkserver || afl->crash_mode || afl->persistent_mode ||
+ afl->deferred_mode)
? ""
: "default",
afl->orig_cmdline);
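
A hedged sketch of the fuzzer_stats layout written above: a plain "key : value" text file meant for unattended monitoring. Only a few representative keys are shown, and the helper name and parameters are illustrative.

#include <stdio.h>

static void write_stats_sketch(const char *path, unsigned long long start_ms,
                               unsigned long long now_ms,
                               unsigned long long execs) {
  FILE *f = fopen(path, "w");
  double secs = (now_ms > start_ms) ? (now_ms - start_ms) / 1000.0 : 1.0;

  if (!f) return;

  fprintf(f,
          "start_time        : %llu\n"
          "last_update       : %llu\n"
          "execs_done        : %llu\n"
          "execs_per_sec     : %0.02f\n",
          start_ms / 1000, now_ms / 1000, execs, execs / secs);

  fclose(f);
}
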
@@ -157,9 +163,10 @@ void maybe_update_plot_file(afl_state_t *afl, double bitmap_cvg, double eps) {
fprintf(afl->fsrv.plot_file,
"%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f\n",
- get_cur_time() / 1000, afl->queue_cycle - 1, afl->current_entry, afl->queued_paths,
- afl->pending_not_fuzzed, afl->pending_favored, bitmap_cvg, afl->unique_crashes,
- afl->unique_hangs, afl->max_depth, eps); /* ignore errors */
+ get_cur_time() / 1000, afl->queue_cycle - 1, afl->current_entry,
+ afl->queued_paths, afl->pending_not_fuzzed, afl->pending_favored,
+ bitmap_cvg, afl->unique_crashes, afl->unique_hangs, afl->max_depth,
+ eps); /* ignore errors */
fflush(afl->fsrv.plot_file);
@@ -266,11 +273,12 @@ void show_stats(afl_state_t *afl) {
/* Honor AFL_EXIT_WHEN_DONE and AFL_BENCH_UNTIL_CRASH. */
- if (!afl->dumb_mode && afl->cycles_wo_finds > 100 && !afl->pending_not_fuzzed &&
- get_afl_env("AFL_EXIT_WHEN_DONE"))
+ if (!afl->dumb_mode && afl->cycles_wo_finds > 100 &&
+ !afl->pending_not_fuzzed && get_afl_env("AFL_EXIT_WHEN_DONE"))
afl->stop_soon = 2;
- if (afl->total_crashes && get_afl_env("AFL_BENCH_UNTIL_CRASH")) afl->stop_soon = 2;
+ if (afl->total_crashes && get_afl_env("AFL_BENCH_UNTIL_CRASH"))
+ afl->stop_soon = 2;
/* If we're not on TTY, bail out. */
@@ -305,20 +313,22 @@ void show_stats(afl_state_t *afl) {
/* Let's start by drawing a centered banner. */
- banner_len = (afl->crash_mode ? 24 : 22) + strlen(VERSION) + strlen(afl->use_banner) +
- strlen(afl->power_name) + 3 + 5;
+ banner_len = (afl->crash_mode ? 24 : 22) + strlen(VERSION) +
+ strlen(afl->use_banner) + strlen(afl->power_name) + 3 + 5;
banner_pad = (79 - banner_len) / 2;
memset(tmp, ' ', banner_pad);
#ifdef HAVE_AFFINITY
- sprintf(tmp + banner_pad,
- "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]" cBLU " {%d}",
- afl->crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
- afl->use_banner, afl->power_name, afl->cpu_aff);
+ sprintf(
+ tmp + banner_pad,
+ "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]" cBLU " {%d}",
+ afl->crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
+ afl->use_banner, afl->power_name, afl->cpu_aff);
#else
- sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]",
- afl->crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
- afl->use_banner, afl->power_name);
+ sprintf(
+ tmp + banner_pad, "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]",
+ afl->crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
+ afl->use_banner, afl->power_name);
#endif /* HAVE_AFFINITY */
SAYF("\n%s\n", tmp);
@@ -360,7 +370,8 @@ void show_stats(afl_state_t *afl) {
else
/* No finds for a long time and no test cases to try. */
- if (afl->cycles_wo_finds > 100 && !afl->pending_not_fuzzed && min_wo_finds > 120)
+ if (afl->cycles_wo_finds > 100 && !afl->pending_not_fuzzed &&
+ min_wo_finds > 120)
strcpy(tmp, cLGN);
/* Default: cautiously OK to stop? */
@@ -376,8 +387,9 @@ void show_stats(afl_state_t *afl) {
/* We want to warn people about not seeing new paths after a full cycle,
except when resuming fuzzing or running in non-instrumented mode. */
- if (!afl->dumb_mode && (afl->last_path_time || afl->resuming_fuzz || afl->queue_cycle == 1 ||
- afl->in_bitmap || afl->crash_mode)) {
+ if (!afl->dumb_mode &&
+ (afl->last_path_time || afl->resuming_fuzz || afl->queue_cycle == 1 ||
+ afl->in_bitmap || afl->crash_mode)) {
SAYF(bV bSTOP " last new path : " cRST "%-33s ",
DTD(cur_ms, afl->last_path_time));
@@ -407,7 +419,8 @@ void show_stats(afl_state_t *afl) {
SAYF(bV bSTOP " last uniq crash : " cRST "%-33s " bSTG bV bSTOP
" uniq crashes : %s%-6s" bSTG bV "\n",
- DTD(cur_ms, afl->last_crash_time), afl->unique_crashes ? cLRD : cRST, tmp);
+ DTD(cur_ms, afl->last_crash_time), afl->unique_crashes ? cLRD : cRST,
+ tmp);
sprintf(tmp, "%s%s", DI(afl->unique_hangs),
(afl->unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
@@ -434,7 +447,8 @@ void show_stats(afl_state_t *afl) {
((double)afl->queue_cur->bitmap_size) * 100 / MAP_SIZE, t_byte_ratio);
SAYF(" map density : %s%-21s" bSTG bV "\n",
- t_byte_ratio > 70 ? cLRD : ((t_bytes < 200 && !afl->dumb_mode) ? cPIN : cRST),
+ t_byte_ratio > 70 ? cLRD
+ : ((t_bytes < 200 && !afl->dumb_mode) ? cPIN : cRST),
tmp);
sprintf(tmp, "%s (%0.02f%%)", DI(afl->cur_skipped_paths),
@@ -477,7 +491,8 @@ void show_stats(afl_state_t *afl) {
SAYF(" new edges on : " cRST "%-22s" bSTG bV "\n", tmp);
- sprintf(tmp, "%s (%s%s unique)", DI(afl->total_crashes), DI(afl->unique_crashes),
+ sprintf(tmp, "%s (%s%s unique)", DI(afl->total_crashes),
+ DI(afl->unique_crashes),
(afl->unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
if (afl->crash_mode) {
@@ -510,7 +525,8 @@ void show_stats(afl_state_t *afl) {
}
- sprintf(tmp, "%s (%s%s unique)", DI(afl->total_tmouts), DI(afl->unique_tmouts),
+ sprintf(tmp, "%s (%s%s unique)", DI(afl->total_tmouts),
+ DI(afl->unique_tmouts),
(afl->unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
SAYF(bSTG bV bSTOP " total tmouts : " cRST "%-22s" bSTG bV "\n", tmp);
@@ -527,10 +543,11 @@ void show_stats(afl_state_t *afl) {
} else {
- sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_FLIP1]),
- DI(afl->stage_cycles[STAGE_FLIP1]), DI(afl->stage_finds[STAGE_FLIP2]),
- DI(afl->stage_cycles[STAGE_FLIP2]), DI(afl->stage_finds[STAGE_FLIP4]),
- DI(afl->stage_cycles[STAGE_FLIP4]));
+ sprintf(
+ tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_FLIP1]),
+ DI(afl->stage_cycles[STAGE_FLIP1]), DI(afl->stage_finds[STAGE_FLIP2]),
+ DI(afl->stage_cycles[STAGE_FLIP2]), DI(afl->stage_finds[STAGE_FLIP4]),
+ DI(afl->stage_cycles[STAGE_FLIP4]));
}
@@ -539,10 +556,11 @@ void show_stats(afl_state_t *afl) {
tmp, DI(afl->max_depth));
if (!afl->skip_deterministic)
- sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_FLIP8]),
- DI(afl->stage_cycles[STAGE_FLIP8]), DI(afl->stage_finds[STAGE_FLIP16]),
- DI(afl->stage_cycles[STAGE_FLIP16]), DI(afl->stage_finds[STAGE_FLIP32]),
- DI(afl->stage_cycles[STAGE_FLIP32]));
+ sprintf(
+ tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_FLIP8]),
+ DI(afl->stage_cycles[STAGE_FLIP8]), DI(afl->stage_finds[STAGE_FLIP16]),
+ DI(afl->stage_cycles[STAGE_FLIP16]), DI(afl->stage_finds[STAGE_FLIP32]),
+ DI(afl->stage_cycles[STAGE_FLIP32]));
SAYF(bV bSTOP " byte flips : " cRST "%-36s " bSTG bV bSTOP
" pending : " cRST "%-10s" bSTG bV "\n",
@@ -550,8 +568,10 @@ void show_stats(afl_state_t *afl) {
if (!afl->skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_ARITH8]),
- DI(afl->stage_cycles[STAGE_ARITH8]), DI(afl->stage_finds[STAGE_ARITH16]),
- DI(afl->stage_cycles[STAGE_ARITH16]), DI(afl->stage_finds[STAGE_ARITH32]),
+ DI(afl->stage_cycles[STAGE_ARITH8]),
+ DI(afl->stage_finds[STAGE_ARITH16]),
+ DI(afl->stage_cycles[STAGE_ARITH16]),
+ DI(afl->stage_finds[STAGE_ARITH32]),
DI(afl->stage_cycles[STAGE_ARITH32]));
SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP
@@ -559,11 +579,12 @@ void show_stats(afl_state_t *afl) {
tmp, DI(afl->pending_favored));
if (!afl->skip_deterministic)
- sprintf(
- tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_INTEREST8]),
- DI(afl->stage_cycles[STAGE_INTEREST8]), DI(afl->stage_finds[STAGE_INTEREST16]),
- DI(afl->stage_cycles[STAGE_INTEREST16]), DI(afl->stage_finds[STAGE_INTEREST32]),
- DI(afl->stage_cycles[STAGE_INTEREST32]));
+ sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_INTEREST8]),
+ DI(afl->stage_cycles[STAGE_INTEREST8]),
+ DI(afl->stage_finds[STAGE_INTEREST16]),
+ DI(afl->stage_cycles[STAGE_INTEREST16]),
+ DI(afl->stage_finds[STAGE_INTEREST32]),
+ DI(afl->stage_cycles[STAGE_INTEREST32]));
SAYF(bV bSTOP " known ints : " cRST "%-36s " bSTG bV bSTOP
" own finds : " cRST "%-10s" bSTG bV "\n",
@@ -571,18 +592,21 @@ void show_stats(afl_state_t *afl) {
if (!afl->skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_EXTRAS_UO]),
- DI(afl->stage_cycles[STAGE_EXTRAS_UO]), DI(afl->stage_finds[STAGE_EXTRAS_UI]),
- DI(afl->stage_cycles[STAGE_EXTRAS_UI]), DI(afl->stage_finds[STAGE_EXTRAS_AO]),
+ DI(afl->stage_cycles[STAGE_EXTRAS_UO]),
+ DI(afl->stage_finds[STAGE_EXTRAS_UI]),
+ DI(afl->stage_cycles[STAGE_EXTRAS_UI]),
+ DI(afl->stage_finds[STAGE_EXTRAS_AO]),
DI(afl->stage_cycles[STAGE_EXTRAS_AO]));
SAYF(bV bSTOP " dictionary : " cRST "%-36s " bSTG bV bSTOP
" imported : " cRST "%-10s" bSTG bV "\n",
- tmp, afl->sync_id ? DI(afl->queued_imported) : (u8*)"n/a");
+ tmp, afl->sync_id ? DI(afl->queued_imported) : (u8 *)"n/a");
- sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_HAVOC]),
- DI(afl->stage_cycles[STAGE_HAVOC]), DI(afl->stage_finds[STAGE_SPLICE]),
- DI(afl->stage_cycles[STAGE_SPLICE]), DI(afl->stage_finds[STAGE_RADAMSA]),
- DI(afl->stage_cycles[STAGE_RADAMSA]));
+ sprintf(
+ tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_HAVOC]),
+ DI(afl->stage_cycles[STAGE_HAVOC]), DI(afl->stage_finds[STAGE_SPLICE]),
+ DI(afl->stage_cycles[STAGE_SPLICE]), DI(afl->stage_finds[STAGE_RADAMSA]),
+ DI(afl->stage_cycles[STAGE_RADAMSA]));
SAYF(bV bSTOP " havoc/rad : " cRST "%-36s " bSTG bV bSTOP, tmp);
@@ -594,20 +618,22 @@ void show_stats(afl_state_t *afl) {
SAYF(" stability : %s%-10s" bSTG bV "\n",
(stab_ratio < 85 && afl->var_byte_count > 40)
? cLRD
- : ((afl->queued_variable && (!afl->persistent_mode || afl->var_byte_count > 20))
+ : ((afl->queued_variable &&
+ (!afl->persistent_mode || afl->var_byte_count > 20))
? cMGN
: cRST),
tmp);
if (afl->shm.cmplog_mode) {
- sprintf(tmp, "%s/%s, %s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_PYTHON]),
+ sprintf(tmp, "%s/%s, %s/%s, %s/%s, %s/%s",
+ DI(afl->stage_finds[STAGE_PYTHON]),
DI(afl->stage_cycles[STAGE_PYTHON]),
DI(afl->stage_finds[STAGE_CUSTOM_MUTATOR]),
DI(afl->stage_cycles[STAGE_CUSTOM_MUTATOR]),
DI(afl->stage_finds[STAGE_COLORIZATION]),
- DI(afl->stage_cycles[STAGE_COLORIZATION]), DI(afl->stage_finds[STAGE_ITS]),
- DI(afl->stage_cycles[STAGE_ITS]));
+ DI(afl->stage_cycles[STAGE_COLORIZATION]),
+ DI(afl->stage_finds[STAGE_ITS]), DI(afl->stage_cycles[STAGE_ITS]));
SAYF(bV bSTOP " custom/rq : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n",
tmp);
@@ -631,7 +657,8 @@ void show_stats(afl_state_t *afl) {
} else {
sprintf(tmp, "%0.02f%%/%s, ",
- ((double)(afl->bytes_trim_in - afl->bytes_trim_out)) * 100 / afl->bytes_trim_in,
+ ((double)(afl->bytes_trim_in - afl->bytes_trim_out)) * 100 /
+ afl->bytes_trim_in,
DI(afl->trim_execs));
}
@@ -674,7 +701,7 @@ void show_stats(afl_state_t *afl) {
double cur_runnable = get_runnable_processes();
u32 cur_utilization = cur_runnable * 100 / afl->cpu_core_count;
- u8* cpu_color = cCYA;
+ u8 *cpu_color = cCYA;
/* If we could still run one or more processes, use green. */
@@ -725,7 +752,7 @@ void show_stats(afl_state_t *afl) {
void show_init_stats(afl_state_t *afl) {
- struct queue_entry* q = afl->queue;
+ struct queue_entry *q = afl->queue;
u32 min_bits = 0, max_bits = 0;
u64 min_us = 0, max_us = 0;
u64 avg_us = 0;
@@ -756,11 +783,11 @@ void show_init_stats(afl_state_t *afl) {
/* Let's keep things moving with slow binaries. */
if (avg_us > 50000)
- afl->havoc_div = 10; /* 0-19 execs/sec */
+ afl->havoc_div = 10; /* 0-19 execs/sec */
else if (avg_us > 20000)
- afl->havoc_div = 5; /* 20-49 execs/sec */
+ afl->havoc_div = 5; /* 20-49 execs/sec */
else if (avg_us > 10000)
- afl->havoc_div = 2; /* 50-100 execs/sec */
+ afl->havoc_div = 2; /* 50-100 execs/sec */
if (!afl->resuming_fuzz) {
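
A sketch of the havoc_div scaling in the hunk above: slower targets (higher average exec time in microseconds) get a larger divisor so havoc stages stay short. The thresholds mirror the comments in the hunk; the function name is illustrative.

static unsigned havoc_divisor(unsigned long long avg_us) {
  if (avg_us > 50000) return 10;                /* roughly 0-19 execs/sec   */
  if (avg_us > 20000) return 5;                 /* roughly 20-49 execs/sec  */
  if (avg_us > 10000) return 2;                 /* roughly 50-100 execs/sec */
  return 1;
}
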
@@ -789,7 +816,8 @@ void show_init_stats(afl_state_t *afl) {
"%u favored, %u variable, %u total\n" cGRA " Bitmap range : " cRST
"%u to %u bits (average: %0.02f bits)\n" cGRA
" Exec timing : " cRST "%s to %s us (average: %s us)\n",
- afl->queued_favored, afl->queued_variable, afl->queued_paths, min_bits, max_bits,
+ afl->queued_favored, afl->queued_variable, afl->queued_paths, min_bits,
+ max_bits,
((double)afl->total_bitmap_size) /
(afl->total_bitmap_entries ? afl->total_bitmap_entries : 1),
DI(min_us), DI(max_us), DI(avg_us));
@@ -811,9 +839,11 @@ void show_init_stats(afl_state_t *afl) {
afl->fsrv.exec_tmout = avg_us * 5 / 1000;
afl->fsrv.exec_tmout = MAX(afl->fsrv.exec_tmout, max_us / 1000);
- afl->fsrv.exec_tmout = (afl->fsrv.exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;
+ afl->fsrv.exec_tmout =
+ (afl->fsrv.exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;
- if (afl->fsrv.exec_tmout > EXEC_TIMEOUT) afl->fsrv.exec_tmout = EXEC_TIMEOUT;
+ if (afl->fsrv.exec_tmout > EXEC_TIMEOUT)
+ afl->fsrv.exec_tmout = EXEC_TIMEOUT;
ACTF("No -t option specified, so I'll use exec timeout of %u ms.",
afl->fsrv.exec_tmout);
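
A sketch of the auto-timeout rule in the hunk above, assuming round_ms > 0 (EXEC_TM_ROUND and EXEC_TIMEOUT are config.h constants and are passed in here as parameters): take 5x the average exec time, never less than the slowest seed, round up to a multiple of round_ms, and cap the result.

#define MAX(a, b) (((a) > (b)) ? (a) : (b))

static unsigned auto_exec_tmout(unsigned long long avg_us,
                                unsigned long long max_us,
                                unsigned round_ms, unsigned cap_ms) {
  unsigned tmout = (unsigned)(avg_us * 5 / 1000);

  tmout = MAX(tmout, (unsigned)(max_us / 1000));
  tmout = (tmout + round_ms) / round_ms * round_ms;   /* round up */
  if (tmout > cap_ms) tmout = cap_ms;                 /* hard cap */

  return tmout;
}
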
@@ -822,7 +852,8 @@ void show_init_stats(afl_state_t *afl) {
} else if (afl->timeout_given == 3) {
- ACTF("Applying timeout settings from resumed session (%u ms).", afl->fsrv.exec_tmout);
+ ACTF("Applying timeout settings from resumed session (%u ms).",
+ afl->fsrv.exec_tmout);
}
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index 63035e36..35cf582c 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -84,7 +84,7 @@ static u8* get_libradamsa_path(u8* own_loc) {
/* Display usage hints. */
-static void usage(afl_state_t *afl, u8* argv0, int more_help) {
+static void usage(afl_state_t* afl, u8* argv0, int more_help) {
SAYF(
"\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n"
@@ -222,7 +222,6 @@ static int stricmp(char const* a, char const* b) {
}
-
/* Main entry point */
int main(int argc, char** argv, char** envp) {
@@ -238,18 +237,16 @@ int main(int argc, char** argv, char** envp) {
struct timeval tv;
struct timezone tz;
- afl_state_t *afl = calloc(1, sizeof(afl_state_t));
- if (!afl) {
- FATAL("Could not create afl state");
- }
+ afl_state_t* afl = calloc(1, sizeof(afl_state_t));
+ if (!afl) { FATAL("Could not create afl state"); }
afl_state_init(afl);
afl_fsrv_init(&afl->fsrv);
SAYF(cCYA "afl-fuzz" VERSION cRST
" based on afl by Michal Zalewski and a big online community\n");
-
- doc_path = access(DOC_PATH, F_OK) ? (u8 *)"docs" : doc_path;
+
+ doc_path = access(DOC_PATH, F_OK) ? (u8*)"docs" : doc_path;
gettimeofday(&tv, &tz);
afl->init_seed = tv.tv_sec ^ tv.tv_usec ^ getpid();
@@ -348,8 +345,8 @@ int main(int argc, char** argv, char** envp) {
*c = 0;
if (sscanf(c + 1, "%u/%u", &afl->master_id, &afl->master_max) != 2 ||
- !afl->master_id || !afl->master_max || afl->master_id > afl->master_max ||
- afl->master_max > 1000000)
+ !afl->master_id || !afl->master_max ||
+ afl->master_id > afl->master_max || afl->master_max > 1000000)
FATAL("Bogus master ID passed to -M");
}
@@ -543,7 +540,8 @@ int main(int argc, char** argv, char** envp) {
afl->limit_time_sig = 1;
afl->havoc_max_mult = HAVOC_MAX_MULT_MOPT;
- if (sscanf(optarg, "%llu", &afl->limit_time_puppet) < 1 || optarg[0] == '-')
+ if (sscanf(optarg, "%llu", &afl->limit_time_puppet) < 1 ||
+ optarg[0] == '-')
FATAL("Bad syntax used for -L");
u64 limit_time_puppet2 = afl->limit_time_puppet * 60 * 1000;
@@ -561,7 +559,9 @@ int main(int argc, char** argv, char** envp) {
int tmp_swarm = 0;
if (afl->g_now > afl->g_max) afl->g_now = 0;
- afl->w_now = (afl->w_init - afl->w_end) * (afl->g_max - afl->g_now) / (afl->g_max) + afl->w_end;
+ afl->w_now = (afl->w_init - afl->w_end) * (afl->g_max - afl->g_now) /
+ (afl->g_max) +
+ afl->w_end;
for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) {
@@ -572,7 +572,8 @@ int main(int argc, char** argv, char** envp) {
afl->stage_finds_puppet[tmp_swarm][i] = 0;
afl->probability_now[tmp_swarm][i] = 0.0;
- afl->x_now[tmp_swarm][i] = ((double)(random() % 7000) * 0.0001 + 0.1);
+ afl->x_now[tmp_swarm][i] =
+ ((double)(random() % 7000) * 0.0001 + 0.1);
total_puppet_temp += afl->x_now[tmp_swarm][i];
afl->v_now[tmp_swarm][i] = 0.1;
afl->L_best[tmp_swarm][i] = 0.5;
@@ -587,7 +588,8 @@ int main(int argc, char** argv, char** envp) {
afl->stage_cycles_puppet[tmp_swarm][i];
afl->stage_finds_puppet_v2[tmp_swarm][i] =
afl->stage_finds_puppet[tmp_swarm][i];
- afl->x_now[tmp_swarm][i] = afl->x_now[tmp_swarm][i] / total_puppet_temp;
+ afl->x_now[tmp_swarm][i] =
+ afl->x_now[tmp_swarm][i] / total_puppet_temp;
}
@@ -598,7 +600,8 @@ int main(int argc, char** argv, char** envp) {
afl->probability_now[tmp_swarm][i] = 0.0;
afl->v_now[tmp_swarm][i] =
afl->w_now * afl->v_now[tmp_swarm][i] +
- RAND_C * (afl->L_best[tmp_swarm][i] - afl->x_now[tmp_swarm][i]) +
+ RAND_C *
+ (afl->L_best[tmp_swarm][i] - afl->x_now[tmp_swarm][i]) +
RAND_C * (afl->G_best[i] - afl->x_now[tmp_swarm][i]);
afl->x_now[tmp_swarm][i] += afl->v_now[tmp_swarm][i];
@@ -617,7 +620,8 @@ int main(int argc, char** argv, char** envp) {
afl->x_now[tmp_swarm][i] = afl->x_now[tmp_swarm][i] / x_temp;
if (likely(i != 0))
afl->probability_now[tmp_swarm][i] =
- afl->probability_now[tmp_swarm][i - 1] + afl->x_now[tmp_swarm][i];
+ afl->probability_now[tmp_swarm][i - 1] +
+ afl->x_now[tmp_swarm][i];
else
afl->probability_now[tmp_swarm][i] = afl->x_now[tmp_swarm][i];
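
A hedged sketch of the particle-swarm style update visible in the MOpt hunks above: each operator's selection weight x is nudged by a velocity term pulled toward its local best L and the global best G, then the weights are renormalized into a cumulative probability vector. OPS, RAND01() and the clamping bounds are illustrative stand-ins, not the MOpt constants.

#include <stdlib.h>

#define OPS      16                              /* illustrative operator count */
#define RAND01() (random() / 2147483647.0)

static void pso_step_sketch(double w_now, double x[OPS], double v[OPS],
                            const double L[OPS], const double G[OPS],
                            double prob[OPS]) {
  double sum = 0;
  int    i;

  for (i = 0; i < OPS; i++) {
    v[i] = w_now * v[i] + RAND01() * (L[i] - x[i]) + RAND01() * (G[i] - x[i]);
    x[i] += v[i];
    if (x[i] < 0.05) x[i] = 0.05;                /* illustrative clamp */
    if (x[i] > 1.0) x[i] = 1.0;
    sum += x[i];
  }

  for (i = 0; i < OPS; i++) {
    x[i] /= sum;                                 /* normalize weights       */
    prob[i] = i ? prob[i - 1] + x[i] : x[i];     /* cumulative probability  */
  }
}
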
@@ -669,7 +673,8 @@ int main(int argc, char** argv, char** envp) {
OKF("afl-tmin fork server patch from github.com/nccgroup/TriforceAFL");
OKF("MOpt Mutator from github.com/puppet-meteor/MOpt-AFL");
- if (afl->sync_id && afl->force_deterministic && getenv("AFL_CUSTOM_MUTATOR_ONLY"))
+ if (afl->sync_id && afl->force_deterministic &&
+ getenv("AFL_CUSTOM_MUTATOR_ONLY"))
WARNF(
"Using -M master with the AFL_CUSTOM_MUTATOR_ONLY mutator options will "
"result in no deterministic mutations being done!");
@@ -764,8 +769,7 @@ int main(int argc, char** argv, char** envp) {
if (get_afl_env("AFL_AUTORESUME")) {
afl->autoresume = 1;
- if (afl->in_place_resume)
- SAYF("AFL_AUTORESUME has no effect for '-i -'");
+ if (afl->in_place_resume) SAYF("AFL_AUTORESUME has no effect for '-i -'");
}
@@ -886,11 +890,12 @@ int main(int argc, char** argv, char** envp) {
if (!afl->timeout_given) find_timeout(afl);
- if ((afl->tmp_dir = get_afl_env("AFL_TMPDIR")) != NULL && !afl->in_place_resume) {
+ if ((afl->tmp_dir = get_afl_env("AFL_TMPDIR")) != NULL &&
+ !afl->in_place_resume) {
- char tmpfile[afl->file_extension
- ? strlen(afl->tmp_dir) + 1 + 10 + 1 + strlen(afl->file_extension) + 1
- : strlen(afl->tmp_dir) + 1 + 10 + 1];
+ char tmpfile[afl->file_extension ? strlen(afl->tmp_dir) + 1 + 10 + 1 +
+ strlen(afl->file_extension) + 1
+ : strlen(afl->tmp_dir) + 1 + 10 + 1];
if (afl->file_extension) {
sprintf(tmpfile, "%s/.cur_input.%s", afl->tmp_dir, afl->file_extension);
@@ -927,7 +932,8 @@ int main(int argc, char** argv, char** envp) {
if (afl->file_extension) {
- afl->fsrv.out_file = alloc_printf("%s/.cur_input.%s", afl->tmp_dir, afl->file_extension);
+ afl->fsrv.out_file = alloc_printf("%s/.cur_input.%s", afl->tmp_dir,
+ afl->file_extension);
} else {
@@ -935,7 +941,8 @@ int main(int argc, char** argv, char** envp) {
}
- detect_file_args(argv + optind + 1, afl->fsrv.out_file, afl->fsrv.use_stdin);
+ detect_file_args(argv + optind + 1, afl->fsrv.out_file,
+ afl->fsrv.use_stdin);
break;
}
@@ -969,9 +976,11 @@ int main(int argc, char** argv, char** envp) {
if (afl->qemu_mode) {
if (afl->use_wine)
- use_argv = get_wine_argv(argv[0], &afl->fsrv.target_path, argc - optind, argv + optind);
+ use_argv = get_wine_argv(argv[0], &afl->fsrv.target_path, argc - optind,
+ argv + optind);
else
- use_argv = get_qemu_argv(argv[0], &afl->fsrv.target_path, argc - optind, argv + optind);
+ use_argv = get_qemu_argv(argv[0], &afl->fsrv.target_path, argc - optind,
+ argv + optind);
} else {
@@ -979,7 +988,7 @@ int main(int argc, char** argv, char** envp) {
}
- afl->argv = use_argv;
+ afl->argv = use_argv;
perform_dry_run(afl);
cull_queue(afl);
@@ -1053,7 +1062,8 @@ int main(int argc, char** argv, char** envp) {
prev_queued = afl->queued_paths;
- if (afl->sync_id && afl->queue_cycle == 1 && get_afl_env("AFL_IMPORT_FIRST"))
+ if (afl->sync_id && afl->queue_cycle == 1 &&
+ get_afl_env("AFL_IMPORT_FIRST"))
sync_fuzzers(afl);
}
@@ -1134,13 +1144,15 @@ stop_fuzzing:
SAYF(CURSOR_SHOW cLRD "\n\n+++ Testing aborted %s +++\n" cRST,
afl->stop_soon == 2 ? "programmatically" : "by user");
- if (afl->most_time_key == 2) SAYF(cYEL "[!] " cRST "Time limit was reached\n");
+ if (afl->most_time_key == 2)
+ SAYF(cYEL "[!] " cRST "Time limit was reached\n");
if (afl->most_execs_key == 2)
SAYF(cYEL "[!] " cRST "Execution limit was reached\n");
/* Running for more than 30 minutes but still doing first cycle? */
- if (afl->queue_cycle == 1 && get_cur_time() - afl->start_time > 30 * 60 * 1000) {
+ if (afl->queue_cycle == 1 &&
+ get_cur_time() - afl->start_time > 30 * 60 * 1000) {
SAYF("\n" cYEL "[!] " cRST
"Stopped during the first cycle, results may be incomplete.\n"
diff --git a/src/afl-sharedmem.c b/src/afl-sharedmem.c
index bb49b6bd..004f6773 100644
--- a/src/afl-sharedmem.c
+++ b/src/afl-sharedmem.c
@@ -101,7 +101,7 @@ void afl_shm_atexit() {
}
-/* Configure shared memory.
+/* Configure shared memory.
Returns a pointer to shm->map for ease of use.
*/
@@ -124,7 +124,8 @@ u8 *afl_shm_init(sharedmem_t *shm, size_t map_size, unsigned char dumb_mode) {
snprintf(shm->g_shm_file_path, L_tmpnam, "/afl_%d_%ld", getpid(), random());
/* create the shared memory segment as if it was a file */
- shm->g_shm_fd = shm_open(shm->g_shm_file_path, O_CREAT | O_RDWR | O_EXCL, 0600);
+ shm->g_shm_fd =
+ shm_open(shm->g_shm_file_path, O_CREAT | O_RDWR | O_EXCL, 0600);
if (shm->g_shm_fd == -1) { PFATAL("shm_open() failed"); }
/* configure the size of the shared memory segment */
@@ -135,8 +136,8 @@ u8 *afl_shm_init(sharedmem_t *shm, size_t map_size, unsigned char dumb_mode) {
}
/* map the shared memory segment to the address space of the process */
- shm->map =
- mmap(0, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_size->g_shm_fd, 0);
+  shm->map = mmap(0, map_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+                  shm->g_shm_fd, 0);
   if (shm->map == MAP_FAILED) {
     close(shm->g_shm_fd);
@@ -164,7 +165,7 @@ u8 *afl_shm_init(sharedmem_t *shm, size_t map_size, unsigned char dumb_mode) {
if (shm->cmplog_mode) {
shm->cmplog_shm_id = shmget(IPC_PRIVATE, sizeof(struct cmp_map),
- IPC_CREAT | IPC_EXCL | 0600);
+ IPC_CREAT | IPC_EXCL | 0600);
if (shm->cmplog_shm_id < 0) PFATAL("shmget() failed");
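
A self-contained sketch of the POSIX shared-memory path shown above: create a named shared-memory object, size it, and map it (link with -lrt on some systems). The helper name, object name and error handling are illustrative.

#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static unsigned char *shm_map_sketch(const char *name, size_t map_size) {
  int            fd = shm_open(name, O_CREAT | O_RDWR | O_EXCL, 0600);
  unsigned char *map;

  if (fd == -1) { perror("shm_open"); return NULL; }

  /* configure the size of the shared memory segment */
  if (ftruncate(fd, map_size)) { perror("ftruncate"); close(fd); return NULL; }

  /* map the segment into this process's address space */
  map = mmap(0, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (map == MAP_FAILED) { perror("mmap"); close(fd); return NULL; }

  return map;                   /* caller munmap()s and shm_unlink()s later */
}
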
@@ -203,7 +204,6 @@ u8 *afl_shm_init(sharedmem_t *shm, size_t map_size, unsigned char dumb_mode) {
}
-
#endif
list_append(&shm_list, shm);
@@ -212,3 +212,4 @@ u8 *afl_shm_init(sharedmem_t *shm, size_t map_size, unsigned char dumb_mode) {
return shm->map;
}
+
diff --git a/src/afl-showmap.c b/src/afl-showmap.c
index 0f0d19c7..fe520af4 100644
--- a/src/afl-showmap.c
+++ b/src/afl-showmap.c
@@ -59,12 +59,12 @@
#include <sys/types.h>
#include <sys/resource.h>
-u8 be_quiet;
+u8 be_quiet;
-u8 *stdin_file, /* stdin file */
+u8 *stdin_file, /* stdin file */
*in_dir, /* input folder */
*doc_path, /* Path to docs */
- *at_file = NULL; /* Substitution string for @@ */
+ *at_file = NULL; /* Substitution string for @@ */
static u8* in_data; /* Input data */
@@ -146,7 +146,7 @@ static void at_exit_handler(void) {
/* Write results. */
-static u32 write_results_to_file(afl_forkserver_t *fsrv) {
+static u32 write_results_to_file(afl_forkserver_t* fsrv) {
s32 fd;
u32 i, ret = 0;
@@ -166,7 +166,7 @@ static u32 write_results_to_file(afl_forkserver_t *fsrv) {
} else {
- unlink(fsrv->out_file); /* Ignore errors */
+ unlink(fsrv->out_file); /* Ignore errors */
fd = open(fsrv->out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fsrv->out_file);
@@ -217,7 +217,7 @@ static u32 write_results_to_file(afl_forkserver_t *fsrv) {
/* Write results. */
-static u32 write_results(afl_forkserver_t *fsrv) {
+static u32 write_results(afl_forkserver_t* fsrv) {
return write_results_to_file(fsrv);
@@ -247,7 +247,7 @@ static s32 write_to_file(u8* path, u8* mem, u32 len) {
is unlinked and a new one is created. Otherwise, out_fd is rewound and
truncated. */
-static void write_to_testcase(afl_forkserver_t *fsrv, void* mem, u32 len) {
+static void write_to_testcase(afl_forkserver_t* fsrv, void* mem, u32 len) {
lseek(fsrv->out_fd, 0, SEEK_SET);
ck_write(fsrv->out_fd, mem, len, fsrv->out_file);
@@ -259,7 +259,8 @@ static void write_to_testcase(afl_forkserver_t *fsrv, void* mem, u32 len) {
/* Execute target application. Returns 0 if the changes are a dud, or
1 if they should be kept. */
-static u8 run_target_forkserver(afl_forkserver_t *fsrv, char** argv, u8* mem, u32 len) {
+static u8 run_target_forkserver(afl_forkserver_t* fsrv, char** argv, u8* mem,
+ u32 len) {
static struct itimerval it;
static u32 prev_timed_out = 0;
@@ -379,7 +380,7 @@ u32 read_file(u8* in_file) {
/* Execute target application. */
-static void run_target(afl_forkserver_t *fsrv, char** argv) {
+static void run_target(afl_forkserver_t* fsrv, char** argv) {
static struct itimerval it;
int status = 0;
@@ -476,7 +477,8 @@ static void run_target(afl_forkserver_t *fsrv, char** argv) {
if (!quiet_mode) SAYF(cRST "-- Program output ends --\n");
- if (!fsrv->child_timed_out && !stop_soon && WIFSIGNALED(status)) child_crashed = 1;
+ if (!fsrv->child_timed_out && !stop_soon && WIFSIGNALED(status))
+ child_crashed = 1;
if (!quiet_mode) {
@@ -492,7 +494,7 @@ static void run_target(afl_forkserver_t *fsrv, char** argv) {
}
-extern afl_forkserver_t *fsrv_glob;
+extern afl_forkserver_t* fsrv_glob;
/* Handle Ctrl-C and the like. */
@@ -646,7 +648,7 @@ static void usage(u8* argv0) {
/* Find binary. */
-static void find_binary(afl_forkserver_t *fsrv, u8* fname) {
+static void find_binary(afl_forkserver_t* fsrv, u8* fname) {
u8* env_path = 0;
struct stat st;
@@ -693,7 +695,8 @@ static void find_binary(afl_forkserver_t *fsrv, u8* fname) {
}
- if (!fsrv->target_path) FATAL("Program '%s' not found or not executable", fname);
+ if (!fsrv->target_path)
+ FATAL("Program '%s' not found or not executable", fname);
}
@@ -703,14 +706,14 @@ static void find_binary(afl_forkserver_t *fsrv, u8* fname) {
int main(int argc, char** argv, char** envp) {
- //TODO: u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */
+ // TODO: u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */
s32 opt, i;
u8 mem_limit_given = 0, timeout_given = 0, unicorn_mode = 0, use_wine = 0;
u32 tcnt = 0;
char** use_argv;
- afl_forkserver_t *fsrv = calloc(1, sizeof(afl_forkserver_t));
+ afl_forkserver_t* fsrv = calloc(1, sizeof(afl_forkserver_t));
afl_fsrv_init(fsrv);
doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;
@@ -912,9 +915,11 @@ int main(int argc, char** argv, char** envp) {
if (qemu_mode) {
if (use_wine)
- use_argv = get_wine_argv(argv[0], &fsrv->target_path, argc - optind, argv + optind);
+ use_argv = get_wine_argv(argv[0], &fsrv->target_path, argc - optind,
+ argv + optind);
else
- use_argv = get_qemu_argv(argv[0], &fsrv->target_path, argc - optind, argv + optind);
+ use_argv = get_qemu_argv(argv[0], &fsrv->target_path, argc - optind,
+ argv + optind);
} else
@@ -985,7 +990,8 @@ int main(int argc, char** argv, char** envp) {
if (-1 == stat(infile, &statbuf) || !S_ISREG(statbuf.st_mode)) continue;
#endif
- snprintf(outfile, sizeof(outfile), "%s/%s", fsrv->out_file, dir_ent->d_name);
+ snprintf(outfile, sizeof(outfile), "%s/%s", fsrv->out_file,
+ dir_ent->d_name);
if (read_file(infile)) {
diff --git a/src/afl-tmin.c b/src/afl-tmin.c
index 6ff77cfd..d07bdd6c 100644
--- a/src/afl-tmin.c
+++ b/src/afl-tmin.c
@@ -78,7 +78,7 @@ u8 crash_mode, /* Crash-centric mode? */
exit_crash, /* Treat non-zero exit as crash? */
edges_only, /* Ignore hit counts? */
exact_mode, /* Require path match for crashes? */
- be_quiet;
+ be_quiet;
static volatile u8 stop_soon; /* Ctrl-C pressed? */
@@ -159,7 +159,7 @@ static void apply_mask(u32* mem, u32* mask) {
/* See if any bytes are set in the bitmap. */
-static inline u8 anything_set(afl_forkserver_t *fsrv) {
+static inline u8 anything_set(afl_forkserver_t* fsrv) {
u32* ptr = (u32*)fsrv->trace_bits;
u32 i = (MAP_SIZE >> 2);
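
A sketch of the word-wise bitmap scan above: checking 32 bits at a time is enough, since any set byte makes its containing word non-zero. The map size is assumed to be a multiple of 4, as with MAP_SIZE in config.h; names are illustrative.

#include <stddef.h>
#include <stdint.h>

static int anything_set_sketch(const uint8_t *trace_bits, size_t map_size) {
  const uint32_t *ptr = (const uint32_t *)trace_bits;
  size_t          i = map_size >> 2;              /* words, not bytes */

  while (i--)
    if (*(ptr++)) return 1;

  return 0;
}
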
@@ -226,13 +226,13 @@ static s32 write_to_file(u8* path, u8* mem, u32 len) {
is unlinked and a new one is created. Otherwise, out_fd is rewound and
truncated. */
-static void write_to_testcase(afl_forkserver_t *fsrv, void* mem, u32 len) {
+static void write_to_testcase(afl_forkserver_t* fsrv, void* mem, u32 len) {
s32 fd = fsrv->out_fd;
if (!fsrv->use_stdin) {
- unlink(fsrv->out_file); /* Ignore errors. */
+ unlink(fsrv->out_file); /* Ignore errors. */
fd = open(fsrv->out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
@@ -403,7 +403,8 @@ static void init_forkserver(char **argv) {
/* Execute target application. Returns 0 if the changes are a dud, or
1 if they should be kept. */
-static u8 run_target(afl_forkserver_t *fsrv, char** argv, u8* mem, u32 len, u8 first_run) {
+static u8 run_target(afl_forkserver_t* fsrv, char** argv, u8* mem, u32 len,
+ u8 first_run) {
static struct itimerval it;
static u32 prev_timed_out = 0;
@@ -572,7 +573,7 @@ static u32 next_p2(u32 val) {
/* Actually minimize! */
-static void minimize(afl_forkserver_t *fsrv, char** argv) {
+static void minimize(afl_forkserver_t* fsrv, char** argv) {
static u32 alpha_map[256];
@@ -840,7 +841,7 @@ static void handle_stop_sig(int sig) {
/* Do basic preparations - persistent fds, filenames, etc. */
-static void set_up_environment(afl_forkserver_t *fsrv) {
+static void set_up_environment(afl_forkserver_t* fsrv) {
u8* x;
@@ -1023,7 +1024,7 @@ static void usage(u8* argv0) {
/* Find binary. */
-static void find_binary(afl_forkserver_t *fsrv, u8* fname) {
+static void find_binary(afl_forkserver_t* fsrv, u8* fname) {
u8* env_path = 0;
struct stat st;
@@ -1070,7 +1071,8 @@ static void find_binary(afl_forkserver_t *fsrv, u8* fname) {
}
- if (!fsrv->target_path) FATAL("Program '%s' not found or not executable", fname);
+ if (!fsrv->target_path)
+ FATAL("Program '%s' not found or not executable", fname);
}
@@ -1098,7 +1100,7 @@ int main(int argc, char** argv, char** envp) {
u8 mem_limit_given = 0, timeout_given = 0, unicorn_mode = 0, use_wine = 0;
char** use_argv;
- afl_forkserver_t *fsrv = calloc(1, sizeof(afl_forkserver_t));
+ afl_forkserver_t* fsrv = calloc(1, sizeof(afl_forkserver_t));
afl_fsrv_init(fsrv);
doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;
@@ -1275,9 +1277,11 @@ int main(int argc, char** argv, char** envp) {
if (qemu_mode) {
if (use_wine)
- use_argv = get_wine_argv(argv[0], &fsrv->target_path, argc - optind, argv + optind);
+ use_argv = get_wine_argv(argv[0], &fsrv->target_path, argc - optind,
+ argv + optind);
else
- use_argv = get_qemu_argv(argv[0], &fsrv->target_path, argc - optind, argv + optind);
+ use_argv = get_qemu_argv(argv[0], &fsrv->target_path, argc - optind,
+ argv + optind);
} else
@@ -1344,7 +1348,6 @@ int main(int argc, char** argv, char** envp) {
OKF("We're done here. Have a nice day!\n");
-
afl_shm_deinit(&shm);
afl_fsrv_deinit(fsrv);
free(fsrv);