Diffstat (limited to 'src')
-rw-r--r--  src/afl-common.c          |  1
-rw-r--r--  src/afl-forkserver.c      | 96
-rw-r--r--  src/afl-fuzz-bitmap.c     | 61
-rw-r--r--  src/afl-fuzz-cmplog.c     |  6
-rw-r--r--  src/afl-fuzz-extras.c     | 10
-rw-r--r--  src/afl-fuzz-init.c       |  4
-rw-r--r--  src/afl-fuzz-mutators.c   |  7
-rw-r--r--  src/afl-fuzz-one.c        | 16
-rw-r--r--  src/afl-fuzz-queue.c      | 19
-rw-r--r--  src/afl-fuzz-redqueen.c   | 14
-rw-r--r--  src/afl-fuzz-run.c        | 27
-rw-r--r--  src/afl-fuzz-state.c      |  4
-rw-r--r--  src/afl-fuzz-stats.c      | 11
-rw-r--r--  src/afl-tmin.c            |  4
14 files changed, 200 insertions, 80 deletions
diff --git a/src/afl-common.c b/src/afl-common.c
index 825cd827..5216c7e0 100644
--- a/src/afl-common.c
+++ b/src/afl-common.c
@@ -59,6 +59,7 @@ char *afl_environment_variables[] = {
"AFL_LD_HARD_FAIL", "AFL_LD_LIMIT_MB", "AFL_LD_NO_CALLOC_OVER",
"AFL_LD_PRELOAD", "AFL_LD_VERBOSE", "AFL_LLVM_CMPLOG", "AFL_LLVM_INSTRIM",
"AFL_LLVM_CTX", "AFL_LLVM_INSTRUMENT", "AFL_LLVM_INSTRIM_LOOPHEAD",
+ "AFL_LLVM_LTO_AUTODICTIONARY", "AFL_LLVM_AUTODICTIONARY",
"AFL_LLVM_INSTRIM_SKIPSINGLEBLOCK", "AFL_LLVM_LAF_SPLIT_COMPARES",
"AFL_LLVM_LAF_SPLIT_COMPARES_BITW", "AFL_LLVM_LAF_SPLIT_FLOATS",
"AFL_LLVM_LAF_SPLIT_SWITCHES", "AFL_LLVM_LAF_TRANSFORM_COMPARES",
diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c
index 56c3c9d5..d1037194 100644
--- a/src/afl-forkserver.c
+++ b/src/afl-forkserver.c
@@ -69,7 +69,7 @@ void afl_fsrv_init(afl_forkserver_t *fsrv) {
fsrv->mem_limit = MEM_LIMIT;
fsrv->child_pid = -1;
fsrv->out_dir_fd = -1;
-
+ fsrv->map_size = MAP_SIZE;
fsrv->use_fauxsrv = 0;
fsrv->prev_timed_out = 0;
@@ -82,7 +82,7 @@ void afl_fsrv_init(afl_forkserver_t *fsrv) {
static void afl_fauxsrv_execv(afl_forkserver_t *fsrv, char **argv) {
- unsigned char tmp[4] = {0};
+ unsigned char tmp[4] = {0, 0, 0, 0};
pid_t child_pid = -1;
/* Phone home and tell the parent that we're OK. If parent isn't there,
@@ -167,9 +167,9 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
int status;
s32 rlen;
- if (fsrv->use_fauxsrv) ACTF("Using Fauxserver:");
+ if (!be_quiet) ACTF("Using Fauxserver:");
- if (!getenv("AFL_QUIET")) ACTF("Spinning up the fork server...");
+ if (!be_quiet) ACTF("Spinning up the fork server...");
if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed");
@@ -340,7 +340,93 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
if (rlen == 4) {
- if (!getenv("AFL_QUIET")) OKF("All right - fork server is up.");
+ if (!be_quiet) OKF("All right - fork server is up.");
+
+ if ((status & FS_OPT_ENABLED) == FS_OPT_ENABLED) {
+
+ if (!be_quiet)
+ ACTF("Extended forkserver functions received (%08x).", status);
+
+ if ((status & FS_OPT_SNAPSHOT) == FS_OPT_SNAPSHOT) {
+
+ fsrv->snapshot = 1;
+ if (!be_quiet) ACTF("Using SNAPSHOT feature.");
+
+ }
+
+ if ((status & FS_OPT_MAPSIZE) == FS_OPT_MAPSIZE) {
+
+ fsrv->map_size = FS_OPT_GET_MAPSIZE(status);
+ if (fsrv->map_size % 8)
+ fsrv->map_size = (((fsrv->map_size + 8) >> 3) << 3);
+ if (!be_quiet) ACTF("Target map size: %u", fsrv->map_size);
+
+ }
+
+ if (fsrv->function_ptr == NULL || fsrv->function_opt == NULL) {
+
+ // this is not afl-fuzz - we deny and return
+ status = (0xffffffff ^ (FS_OPT_ENABLED | FS_OPT_AUTODICT));
+ if (write(fsrv->fsrv_ctl_fd, &status, 4) != 4)
+ FATAL("Writing to forkserver failed.");
+ return;
+
+ }
+
+ if ((status & FS_OPT_AUTODICT) == FS_OPT_AUTODICT) {
+
+ if (!be_quiet) ACTF("Using AUTODICT feature.");
+ status = (FS_OPT_ENABLED | FS_OPT_AUTODICT);
+ if (write(fsrv->fsrv_ctl_fd, &status, 4) != 4)
+ FATAL("Writing to forkserver failed.");
+ if (read(fsrv->fsrv_st_fd, &status, 4) != 4)
+ FATAL("Reading from forkserver failed.");
+
+ if (status < 2 || (u32)status > 0xffffff)
+ FATAL("Dictionary has an illegal size: %d", status);
+
+ u32 len = status, offset = 0, count = 0;
+ u8 *dict = ck_alloc(len);
+ if (dict == NULL)
+ FATAL("Could not allocate %u bytes of autodictionary memmory", len);
+
+ while (len != 0) {
+
+ rlen = read(fsrv->fsrv_st_fd, dict + offset, len);
+ if (rlen > 0) {
+
+ len -= rlen;
+ offset += rlen;
+
+ } else {
+
+ FATAL(
+ "Reading autodictionary fail at position %u with %u bytes "
+ "left.",
+ offset, len);
+
+ }
+
+ }
+
+ len = status;
+ offset = 0;
+ while (offset < status && (u8)dict[offset] + offset < status) {
+
+ fsrv->function_ptr(fsrv->function_opt, dict + offset + 1,
+ (u8)dict[offset]);
+ offset += (1 + dict[offset]);
+ count++;
+
+ }
+
+ if (!be_quiet) ACTF("Loaded %u autodictionary entries", count);
+ ck_free(dict);
+
+ }
+
+ }
+
return;
}
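
The hunk above extends afl_fsrv_start(): after the usual four-byte hello, the status word may carry FS_OPT_ENABLED plus per-feature bits (SNAPSHOT, MAPSIZE, AUTODICT), and when afl-fuzz has a dictionary callback wired up it acknowledges AUTODICT and then reads a length-prefixed entry stream. A minimal sketch of the target-side counterpart follows, assuming the usual FORKSRV_FD convention and illustrative FS_OPT_* bit values - the authoritative definitions live in AFL++'s config.h, and a real runtime would also guard against a fuzzer that never answers.

#include <stdint.h>
#include <unistd.h>

#define FORKSRV_FD       198                 /* assumed ctl/st pipe fd base */
#define FS_OPT_ENABLED   0x80000001U         /* illustrative bit layout     */
#define FS_OPT_MAPSIZE   0x40000000U
#define FS_OPT_AUTODICT  0x10000000U
#define FS_OPT_SET_MAPSIZE(x) (((x) - 1) << 1)

/* Announce a runtime map size plus an autodictionary. `dict` is a stream of
   size-prefixed entries (one length byte, then that many bytes, repeated),
   which is exactly the layout the while-loop above walks. */
static void fs_announce(uint32_t map_size, const uint8_t *dict,
                        uint32_t dict_len) {

  uint32_t status = FS_OPT_ENABLED | FS_OPT_MAPSIZE | FS_OPT_AUTODICT |
                    FS_OPT_SET_MAPSIZE(map_size);
  uint32_t answer = 0;

  /* hello: advertise the extended features we support */
  if (write(FORKSRV_FD + 1, &status, 4) != 4) return;

  /* afl-fuzz acks the features it accepts; other tools send a deny word */
  if (read(FORKSRV_FD, &answer, 4) != 4) return;

  if ((answer & FS_OPT_AUTODICT) == FS_OPT_AUTODICT) {

    /* total dictionary length first, then the raw entry stream */
    if (write(FORKSRV_FD + 1, &dict_len, 4) != 4) return;
    if (write(FORKSRV_FD + 1, dict, dict_len) != (ssize_t)dict_len) return;

  }

}
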
diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c
index c5347dcb..1c965532 100644
--- a/src/afl-fuzz-bitmap.c
+++ b/src/afl-fuzz-bitmap.c
@@ -78,16 +78,17 @@ u8 has_new_bits(afl_state_t *afl, u8 *virgin_map) {
u64 *current = (u64 *)afl->fsrv.trace_bits;
u64 *virgin = (u64 *)virgin_map;
- u32 i = (MAP_SIZE >> 3);
+ u32 i = (afl->fsrv.map_size >> 3);
#else
u32 *current = (u32 *)afl->fsrv.trace_bits;
u32 *virgin = (u32 *)virgin_map;
- u32 i = (MAP_SIZE >> 2);
+ u32 i = (afl->fsrv.map_size >> 2);
#endif /* ^WORD_SIZE_64 */
+ if (i == 0) i = 1;
u8 ret = 0;
@@ -148,12 +149,14 @@ u8 has_new_bits(afl_state_t *afl, u8 *virgin_map) {
/* Count the number of bits set in the provided bitmap. Used for the status
screen several times every second, does not have to be fast. */
-u32 count_bits(u8 *mem) {
+u32 count_bits(afl_state_t *afl, u8 *mem) {
u32 *ptr = (u32 *)mem;
- u32 i = (MAP_SIZE >> 2);
+ u32 i = (afl->fsrv.map_size >> 2);
u32 ret = 0;
+ if (i == 0) i = 1;
+
while (i--) {
u32 v = *(ptr++);
@@ -182,12 +185,14 @@ u32 count_bits(u8 *mem) {
mostly to update the status screen or calibrate and examine confirmed
new paths. */
-u32 count_bytes(u8 *mem) {
+u32 count_bytes(afl_state_t *afl, u8 *mem) {
u32 *ptr = (u32 *)mem;
- u32 i = (MAP_SIZE >> 2);
+ u32 i = (afl->fsrv.map_size >> 2);
u32 ret = 0;
+ if (i == 0) i = 1;
+
while (i--) {
u32 v = *(ptr++);
@@ -207,12 +212,14 @@ u32 count_bytes(u8 *mem) {
/* Count the number of non-255 bytes set in the bitmap. Used strictly for the
status screen, several calls per second or so. */
-u32 count_non_255_bytes(u8 *mem) {
+u32 count_non_255_bytes(afl_state_t *afl, u8 *mem) {
u32 *ptr = (u32 *)mem;
- u32 i = (MAP_SIZE >> 2);
+ u32 i = (afl->fsrv.map_size >> 2);
u32 ret = 0;
+ if (i == 0) i = 1;
+
while (i--) {
u32 v = *(ptr++);
@@ -245,9 +252,11 @@ const u8 simplify_lookup[256] = {
#ifdef WORD_SIZE_64
-void simplify_trace(u64 *mem) {
+void simplify_trace(afl_state_t *afl, u64 *mem) {
+
+ u32 i = (afl->fsrv.map_size >> 3);
- u32 i = MAP_SIZE >> 3;
+ if (i == 0) i = 1;
while (i--) {
@@ -278,9 +287,11 @@ void simplify_trace(u64 *mem) {
#else
-void simplify_trace(u32 *mem) {
+void simplify_trace(afl_state_t *afl, u32 *mem) {
- u32 i = MAP_SIZE >> 2;
+ u32 i = (afl->fsrv.map_size >> 2);
+
+ if (i == 0) i = 1;
while (i--) {
@@ -340,9 +351,11 @@ void init_count_class16(void) {
#ifdef WORD_SIZE_64
-void classify_counts(u64 *mem) {
+void classify_counts(afl_state_t *afl, u64 *mem) {
+
+ u32 i = (afl->fsrv.map_size >> 3);
- u32 i = MAP_SIZE >> 3;
+ if (i == 0) i = 1;
while (i--) {
@@ -367,9 +380,11 @@ void classify_counts(u64 *mem) {
#else
-void classify_counts(u32 *mem) {
+void classify_counts(afl_state_t *afl, u32 *mem) {
+
+ u32 i = (afl->fsrv.map_size >> 2);
- u32 i = MAP_SIZE >> 2;
+ if (i == 0) i = 1;
while (i--) {
@@ -396,11 +411,11 @@ void classify_counts(u32 *mem) {
count information here. This is called only sporadically, for some
new paths. */
-void minimize_bits(u8 *dst, u8 *src) {
+void minimize_bits(afl_state_t *afl, u8 *dst, u8 *src) {
u32 i = 0;
- while (i < MAP_SIZE) {
+ while (i < afl->fsrv.map_size) {
if (*(src++)) dst[i >> 3] |= 1 << (i & 7);
++i;
@@ -527,7 +542,7 @@ u8 save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
u8 fn[PATH_MAX];
/* Update path frequency. */
- u32 cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
+ u32 cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
struct queue_entry *q = afl->queue;
while (q) {
@@ -611,9 +626,9 @@ u8 save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
if (likely(!afl->dumb_mode)) {
#ifdef WORD_SIZE_64
- simplify_trace((u64 *)afl->fsrv.trace_bits);
+ simplify_trace(afl, (u64 *)afl->fsrv.trace_bits);
#else
- simplify_trace((u32 *)afl->fsrv.trace_bits);
+ simplify_trace(afl, (u32 *)afl->fsrv.trace_bits);
#endif /* ^WORD_SIZE_64 */
if (!has_new_bits(afl, afl->virgin_tmout)) return keeping;
@@ -675,9 +690,9 @@ u8 save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
if (likely(!afl->dumb_mode)) {
#ifdef WORD_SIZE_64
- simplify_trace((u64 *)afl->fsrv.trace_bits);
+ simplify_trace(afl, (u64 *)afl->fsrv.trace_bits);
#else
- simplify_trace((u32 *)afl->fsrv.trace_bits);
+ simplify_trace(afl, (u32 *)afl->fsrv.trace_bits);
#endif /* ^WORD_SIZE_64 */
if (!has_new_bits(afl, afl->virgin_crash)) return keeping;
diff --git a/src/afl-fuzz-cmplog.c b/src/afl-fuzz-cmplog.c
index 5ad73539..ed4be6e4 100644
--- a/src/afl-fuzz-cmplog.c
+++ b/src/afl-fuzz-cmplog.c
@@ -389,7 +389,7 @@ u8 run_cmplog_target(afl_state_t *afl, u32 timeout) {
must prevent any earlier operations from venturing into that
territory. */
- memset(afl->fsrv.trace_bits, 0, MAP_SIZE);
+ memset(afl->fsrv.trace_bits, 0, afl->fsrv.map_size);
MEM_BARRIER();
/* Since we always have a forkserver (or a fauxserver) running, we can simply
@@ -469,9 +469,9 @@ u8 run_cmplog_target(afl_state_t *afl, u32 timeout) {
tb4 = *(u32 *)afl->fsrv.trace_bits;
#ifdef WORD_SIZE_64
- classify_counts((u64 *)afl->fsrv.trace_bits);
+ classify_counts(afl, (u64 *)afl->fsrv.trace_bits);
#else
- classify_counts((u32 *)afl->fsrv.trace_bits);
+ classify_counts(afl, (u32 *)afl->fsrv.trace_bits);
#endif /* ^WORD_SIZE_64 */
afl->cmplog_prev_timed_out = afl->fsrv.child_timed_out;
diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c
index 16806934..55146dd9 100644
--- a/src/afl-fuzz-extras.c
+++ b/src/afl-fuzz-extras.c
@@ -305,10 +305,14 @@ static inline u8 memcmp_nocase(u8 *m1, u8 *m2, u32 len) {
}
/* Maybe add automatic extra. */
+/* Ugly hack: afl state is transferred as u8* because we import data via
+ afl-forkserver.c - which is shared with other afl tools that do not
+ have the afl state struct */
-void maybe_add_auto(afl_state_t *afl, u8 *mem, u32 len) {
+void maybe_add_auto(void *afl_tmp, u8 *mem, u32 len) {
- u32 i;
+ afl_state_t *afl = (afl_state_t *)afl_tmp;
+ u32 i;
/* Allow users to specify that they don't want auto dictionaries. */
@@ -469,7 +473,7 @@ void load_auto(afl_state_t *afl) {
if (len < 0) PFATAL("Unable to read from '%s'", fn);
if (len >= MIN_AUTO_EXTRA && len <= MAX_AUTO_EXTRA)
- maybe_add_auto(afl, tmp, len);
+ maybe_add_auto((u8 *)afl, tmp, len);
close(fd);
ck_free(fn);
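
The comment above explains the void * signature: afl-forkserver.c never sees afl_state_t, so autodictionary entries have to come back through a typeless callback plus an opaque argument, which the afl-fuzz-state.c hunk further below wires up. A sketch of that pattern follows, with illustrative type and field names rather than the exact declarations from afl-forkserver.h.

#include <stdint.h>

typedef void (*add_extra_cb)(void *opaque, uint8_t *mem, uint32_t len);

/* What afl-forkserver.c sees: a callback slot and an untyped argument,
   no afl_state_t anywhere. */
struct fsrv_like {

  add_extra_cb function_ptr;     /* NULL outside afl-fuzz                  */
  void        *function_opt;     /* opaque state handed back to the callee */

};

static void deliver_entry(struct fsrv_like *fsrv, uint8_t *word, uint32_t len) {

  if (!fsrv->function_ptr) return;          /* afl-tmin & co.: no autodict */
  fsrv->function_ptr(fsrv->function_opt, word, len);

}

/* What afl-fuzz sees: the callback casts the opaque pointer back to its
   real type, mirroring maybe_add_auto() above. */
struct afl_like { uint32_t a_extras_cnt; };

static void maybe_add_auto_sketch(void *afl_tmp, uint8_t *mem, uint32_t len) {

  struct afl_like *afl = (struct afl_like *)afl_tmp;

  (void)mem; (void)len;
  afl->a_extras_cnt++;                       /* stand-in for the real logic */

}

/* Wiring, mirroring the afl-fuzz-state.c hunk below:
     afl->fsrv.function_opt = (u8 *)afl;
     afl->fsrv.function_ptr = &maybe_add_auto;                              */
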
diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c
index 6e0485e5..94ce9604 100644
--- a/src/afl-fuzz-init.c
+++ b/src/afl-fuzz-init.c
@@ -448,11 +448,13 @@ static void check_map_coverage(afl_state_t *afl) {
u32 i;
- if (count_bytes(afl->fsrv.trace_bits) < 100) return;
+ if (count_bytes(afl, afl->fsrv.trace_bits) < 100) return;
for (i = (1 << (MAP_SIZE_POW2 - 1)); i < MAP_SIZE; ++i)
if (afl->fsrv.trace_bits[i]) return;
+ if (afl->fsrv.map_size != MAP_SIZE) return;
+
WARNF("Recompile binary with newer version of afl to improve coverage!");
}
diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c
index 754b2190..81504e29 100644
--- a/src/afl-fuzz-mutators.c
+++ b/src/afl-fuzz-mutators.c
@@ -244,7 +244,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
if (afl->stop_soon || fault == FAULT_ERROR) { goto abort_trimming; }
- cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
+ cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
if (cksum == q->exec_cksum) {
@@ -257,7 +257,8 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
if (!needs_write) {
needs_write = 1;
- memcpy(afl->clean_trace_custom, afl->fsrv.trace_bits, MAP_SIZE);
+ memcpy(afl->clean_trace_custom, afl->fsrv.trace_bits,
+ afl->fsrv.map_size);
}
@@ -307,7 +308,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
ck_write(fd, in_buf, q->len, q->fname);
close(fd);
- memcpy(afl->fsrv.trace_bits, afl->clean_trace_custom, MAP_SIZE);
+ memcpy(afl->fsrv.trace_bits, afl->clean_trace_custom, afl->fsrv.map_size);
update_bitmap_score(afl, q);
}
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index b20bde90..80567160 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -601,7 +601,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
if (!afl->dumb_mode && (afl->stage_cur & 7) == 7) {
- u32 cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
+ u32 cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
if (afl->stage_cur == afl->stage_max - 1 && cksum == prev_cksum) {
@@ -613,7 +613,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
++a_len;
if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
- maybe_add_auto(afl, a_collect, a_len);
+ maybe_add_auto((u8 *)afl, a_collect, a_len);
} else if (cksum != prev_cksum) {
@@ -621,7 +621,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
worthwhile queued up, and collect that if the answer is yes. */
if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
- maybe_add_auto(afl, a_collect, a_len);
+ maybe_add_auto((u8 *)afl, a_collect, a_len);
a_len = 0;
prev_cksum = cksum;
@@ -761,7 +761,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
without wasting time on checksums. */
if (!afl->dumb_mode && len >= EFF_MIN_LEN)
- cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
+ cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
else
cksum = ~afl->queue_cur->exec_cksum;
@@ -2615,7 +2615,7 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
if (!afl->dumb_mode && (afl->stage_cur & 7) == 7) {
- u32 cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
+ u32 cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
if (afl->stage_cur == afl->stage_max - 1 && cksum == prev_cksum) {
@@ -2627,7 +2627,7 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
++a_len;
if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
- maybe_add_auto(afl, a_collect, a_len);
+ maybe_add_auto((u8 *)afl, a_collect, a_len);
} else if (cksum != prev_cksum) {
@@ -2635,7 +2635,7 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
worthwhile queued up, and collect that if the answer is yes. */
if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
- maybe_add_auto(afl, a_collect, a_len);
+ maybe_add_auto((u8 *)afl, a_collect, a_len);
a_len = 0;
prev_cksum = cksum;
@@ -2775,7 +2775,7 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
without wasting time on checksums. */
if (!afl->dumb_mode && len >= EFF_MIN_LEN)
- cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
+ cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
else
cksum = ~afl->queue_cur->exec_cksum;
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index 174d7d92..346c2639 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -195,7 +195,7 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
/* For every byte set in afl->fsrv.trace_bits[], see if there is a previous
winner, and how it compares to us. */
- for (i = 0; i < MAP_SIZE; ++i)
+ for (i = 0; i < afl->fsrv.map_size; ++i)
if (afl->fsrv.trace_bits[i]) {
@@ -248,8 +248,10 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
if (!q->trace_mini) {
- q->trace_mini = ck_alloc(MAP_SIZE >> 3);
- minimize_bits(q->trace_mini, afl->fsrv.trace_bits);
+ u32 len = (afl->fsrv.map_size >> 3);
+ if (len == 0) len = 1;
+ q->trace_mini = ck_alloc(len);
+ minimize_bits(afl, q->trace_mini, afl->fsrv.trace_bits);
}
@@ -268,14 +270,17 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
void cull_queue(afl_state_t *afl) {
struct queue_entry *q;
- u8 temp_v[MAP_SIZE >> 3];
+ u32 len = (afl->fsrv.map_size >> 3);
u32 i;
+ u8 temp_v[MAP_SIZE >> 3];
+
+ if (len == 0) len = 1;
if (afl->dumb_mode || !afl->score_changed) return;
afl->score_changed = 0;
- memset(temp_v, 255, MAP_SIZE >> 3);
+ memset(temp_v, 255, len);
afl->queued_favored = 0;
afl->pending_favored = 0;
@@ -292,10 +297,10 @@ void cull_queue(afl_state_t *afl) {
/* Let's see if anything in the bitmap isn't captured in temp_v.
If yes, and if it has a afl->top_rated[] contender, let's use it. */
- for (i = 0; i < MAP_SIZE; ++i)
+ for (i = 0; i < afl->fsrv.map_size; ++i)
if (afl->top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {
- u32 j = MAP_SIZE >> 3;
+ u32 j = len;
/* Remove all bits belonging to the current entry from temp_v. */
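
update_bitmap_score() now sizes each queue entry's trace_mini from the runtime map: one bit per trace byte, i.e. map_size / 8 bytes, the same length cull_queue() uses for its temp_v coverage mask. A simplified sketch of that packing, assuming map_size is a multiple of 8 as enforced by the forkserver handshake (the real routine is minimize_bits() in afl-fuzz-bitmap.c):

#include <stdint.h>
#include <stdlib.h>

static uint8_t *trace_mini_sketch(const uint8_t *trace, uint32_t map_size) {

  uint32_t len = map_size >> 3;          /* one bit per trace byte          */
  if (len == 0) len = 1;                 /* same floor as cull_queue()      */

  uint8_t *mini = calloc(len, 1);
  if (!mini) return NULL;

  for (uint32_t i = 0; i < map_size; ++i)
    if (trace[i]) mini[i >> 3] |= 1u << (i & 7);   /* hit -> set bit i      */

  return mini;

}
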
diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c
index 4acc204b..517f8d7c 100644
--- a/src/afl-fuzz-redqueen.c
+++ b/src/afl-fuzz-redqueen.c
@@ -88,7 +88,7 @@ static u8 get_exec_checksum(afl_state_t *afl, u8 *buf, u32 len, u32 *cksum) {
if (unlikely(common_fuzz_stuff(afl, buf, len))) return 1;
- *cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
+ *cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
return 0;
}
@@ -332,7 +332,7 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) {
}
- maybe_add_auto(afl, (u8 *)&v, shape);
+ maybe_add_auto((u8 *)afl, (u8 *)&v, shape);
u64 rev;
switch (shape) {
@@ -340,15 +340,15 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) {
case 1: break;
case 2:
rev = SWAP16((u16)v);
- maybe_add_auto(afl, (u8 *)&rev, shape);
+ maybe_add_auto((u8 *)afl, (u8 *)&rev, shape);
break;
case 4:
rev = SWAP32((u32)v);
- maybe_add_auto(afl, (u8 *)&rev, shape);
+ maybe_add_auto((u8 *)afl, (u8 *)&rev, shape);
break;
case 8:
rev = SWAP64(v);
- maybe_add_auto(afl, (u8 *)&rev, shape);
+ maybe_add_auto((u8 *)afl, (u8 *)&rev, shape);
break;
}
@@ -486,8 +486,8 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) {
// If failed, add to dictionary
if (fails == 8) {
- maybe_add_auto(afl, o->v0, SHAPE_BYTES(h->shape));
- maybe_add_auto(afl, o->v1, SHAPE_BYTES(h->shape));
+ maybe_add_auto((u8 *)afl, o->v0, SHAPE_BYTES(h->shape));
+ maybe_add_auto((u8 *)afl, o->v1, SHAPE_BYTES(h->shape));
}
diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c
index 9bbdd23a..850a18bc 100644
--- a/src/afl-fuzz-run.c
+++ b/src/afl-fuzz-run.c
@@ -44,7 +44,7 @@ u8 run_target(afl_state_t *afl, u32 timeout) {
must prevent any earlier operations from venturing into that
territory. */
- memset(afl->fsrv.trace_bits, 0, MAP_SIZE);
+ memset(afl->fsrv.trace_bits, 0, afl->fsrv.map_size);
MEM_BARRIER();
@@ -122,9 +122,9 @@ u8 run_target(afl_state_t *afl, u32 timeout) {
tb4 = *(u32 *)afl->fsrv.trace_bits;
#ifdef WORD_SIZE_64
- classify_counts((u64 *)afl->fsrv.trace_bits);
+ classify_counts(afl, (u64 *)afl->fsrv.trace_bits);
#else
- classify_counts((u32 *)afl->fsrv.trace_bits);
+ classify_counts(afl, (u32 *)afl->fsrv.trace_bits);
#endif /* ^WORD_SIZE_64 */
afl->fsrv.prev_timed_out = afl->fsrv.child_timed_out;
@@ -315,7 +315,8 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
afl->shm.cmplog_mode)
init_cmplog_forkserver(afl);
- if (q->exec_cksum) memcpy(afl->first_trace, afl->fsrv.trace_bits, MAP_SIZE);
+ if (q->exec_cksum)
+ memcpy(afl->first_trace, afl->fsrv.trace_bits, afl->fsrv.map_size);
start_us = get_cur_time_us();
@@ -336,14 +337,14 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
if (afl->stop_soon || fault != afl->crash_mode) goto abort_calibration;
if (!afl->dumb_mode && !afl->stage_cur &&
- !count_bytes(afl->fsrv.trace_bits)) {
+ !count_bytes(afl, afl->fsrv.trace_bits)) {
fault = FAULT_NOINST;
goto abort_calibration;
}
- cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
+ cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
if (q->exec_cksum != cksum) {
@@ -354,7 +355,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
u32 i;
- for (i = 0; i < MAP_SIZE; ++i) {
+ for (i = 0; i < afl->fsrv.map_size; ++i) {
if (unlikely(!afl->var_bytes[i]) &&
unlikely(afl->first_trace[i] != afl->fsrv.trace_bits[i]))
@@ -368,7 +369,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
} else {
q->exec_cksum = cksum;
- memcpy(afl->first_trace, afl->fsrv.trace_bits, MAP_SIZE);
+ memcpy(afl->first_trace, afl->fsrv.trace_bits, afl->fsrv.map_size);
}
@@ -385,7 +386,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
This is used for fuzzing air time calculations in calculate_score(). */
q->exec_us = (stop_us - start_us) / afl->stage_max;
- q->bitmap_size = count_bytes(afl->fsrv.trace_bits);
+ q->bitmap_size = count_bytes(afl, afl->fsrv.trace_bits);
q->handicap = handicap;
q->cal_failed = 0;
@@ -413,7 +414,7 @@ abort_calibration:
if (var_detected) {
- afl->var_byte_count = count_bytes(afl->var_bytes);
+ afl->var_byte_count = count_bytes(afl, afl->var_bytes);
if (!q->var_behavior) {
@@ -640,7 +641,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
/* Note that we don't keep track of crashes or hangs here; maybe TODO?
*/
- cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
+ cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
/* If the deletion had no impact on the trace, make it permanent. This
isn't perfect for variable-path inputs, but we're just making a
@@ -663,7 +664,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
if (!needs_write) {
needs_write = 1;
- memcpy(afl->clean_trace, afl->fsrv.trace_bits, MAP_SIZE);
+ memcpy(afl->clean_trace, afl->fsrv.trace_bits, afl->fsrv.map_size);
}
@@ -705,7 +706,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
ck_write(fd, in_buf, q->len, q->fname);
close(fd);
- memcpy(afl->fsrv.trace_bits, afl->clean_trace, MAP_SIZE);
+ memcpy(afl->fsrv.trace_bits, afl->clean_trace, afl->fsrv.map_size);
update_bitmap_score(afl, q);
}
diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c
index a8c14c31..80039d6f 100644
--- a/src/afl-fuzz-state.c
+++ b/src/afl-fuzz-state.c
@@ -99,6 +99,10 @@ void afl_state_init(afl_state_t *afl) {
afl->fsrv.use_stdin = 1;
+ afl->fsrv.map_size = MAP_SIZE;
+ afl->fsrv.function_opt = (u8 *)afl;
+ afl->fsrv.function_ptr = &maybe_add_auto;
+
afl->cal_cycles = CAL_CYCLES;
afl->cal_cycles_long = CAL_CYCLES_LONG;
diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
index ab84bf3f..58a37298 100644
--- a/src/afl-fuzz-stats.c
+++ b/src/afl-fuzz-stats.c
@@ -37,7 +37,7 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,
u8 fn[PATH_MAX];
s32 fd;
FILE * f;
- uint32_t t_bytes = count_non_255_bytes(afl->virgin_bits);
+ uint32_t t_bytes = count_non_255_bytes(afl, afl->virgin_bits);
snprintf(fn, PATH_MAX, "%s/fuzzer_stats", afl->out_dir);
@@ -258,8 +258,8 @@ void show_stats(afl_state_t *afl) {
/* Do some bitmap stats. */
- t_bytes = count_non_255_bytes(afl->virgin_bits);
- t_byte_ratio = ((double)t_bytes * 100) / MAP_SIZE;
+ t_bytes = count_non_255_bytes(afl, afl->virgin_bits);
+ t_byte_ratio = ((double)t_bytes * 100) / afl->fsrv.map_size;
if (likely(t_bytes) && unlikely(afl->var_byte_count))
stab_ratio = 100 - (((double)afl->var_byte_count * 100) / t_bytes);
@@ -305,7 +305,7 @@ void show_stats(afl_state_t *afl) {
/* Compute some mildly useful bitmap stats. */
- t_bits = (MAP_SIZE << 3) - count_bits(afl->virgin_bits);
+ t_bits = (afl->fsrv.map_size << 3) - count_bits(afl, afl->virgin_bits);
/* Now, for the visuals... */
@@ -465,7 +465,8 @@ void show_stats(afl_state_t *afl) {
SAYF(bV bSTOP " now processing : " cRST "%-16s " bSTG bV bSTOP, tmp);
sprintf(tmp, "%0.02f%% / %0.02f%%",
- ((double)afl->queue_cur->bitmap_size) * 100 / MAP_SIZE, t_byte_ratio);
+ ((double)afl->queue_cur->bitmap_size) * 100 / afl->fsrv.map_size,
+ t_byte_ratio);
SAYF(" map density : %s%-21s" bSTG bV "\n",
t_byte_ratio > 70 ? cLRD
diff --git a/src/afl-tmin.c b/src/afl-tmin.c
index f899a6b5..53e8705d 100644
--- a/src/afl-tmin.c
+++ b/src/afl-tmin.c
@@ -258,7 +258,7 @@ static u8 run_target(afl_forkserver_t *fsrv, char **argv, u8 *mem, u32 len,
fsrv->child_timed_out = 0;
- memset(fsrv->trace_bits, 0, MAP_SIZE);
+ memset(fsrv->trace_bits, 0, fsrv->map_size);
MEM_BARRIER();
write_to_testcase(fsrv, mem, len);
@@ -393,7 +393,7 @@ static u8 run_target(afl_forkserver_t *fsrv, char **argv, u8 *mem, u32 len,
}
- cksum = hash32(fsrv->trace_bits, MAP_SIZE, HASH_CONST);
+ cksum = hash32(fsrv->trace_bits, fsrv->map_size, HASH_CONST);
if (first_run) orig_cksum = cksum;