Diffstat (limited to 'src')
 -rw-r--r--  src/afl-fuzz-init.c    |   4
 -rw-r--r--  src/afl-fuzz-one.c     | 169
 -rw-r--r--  src/afl-fuzz-queue.c   | 167
 -rw-r--r--  src/afl-fuzz-run.c     |   6
 -rw-r--r--  src/afl-fuzz-state.c   |   8
 -rw-r--r--  src/afl-fuzz-stats.c   |   7
 -rw-r--r--  src/afl-fuzz.c         |  29
 -rw-r--r--  src/afl-performance.c  |   2
 8 files changed, 234 insertions, 158 deletions
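
In short, this change replaces the per-iteration open()/mmap() of queue entries with an in-memory testcase cache: fuzz_one_original() and mopt_common_fuzzing() now obtain input buffers from queue_testcase_get(), and trim_case() keeps the cache in sync via queue_testcase_retake(). The cache size is configured with the new AFL_TESTCACHE_SIZE environment variable (a value in MB), e.g. AFL_TESTCACHE_SIZE=50 afl-fuzz -i in -o out -- ./target @@ (the invocation is only illustrative; the diff itself adds just the variable and its handling).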
diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c
index 881bf10f..607b652f 100644
--- a/src/afl-fuzz-init.c
+++ b/src/afl-fuzz-init.c
@@ -1045,7 +1045,7 @@ restart_outer_cull_loop:
while (q) {
- if (q->cal_failed || !q->exec_cksum) continue;
+ if (q->cal_failed || !q->exec_cksum) { goto next_entry; }
restart_inner_cull_loop:
@@ -1090,6 +1090,8 @@ restart_outer_cull_loop:
}
+ next_entry:
+
prev = q;
q = q->next;
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index fc092f8d..154e4b45 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -370,7 +370,7 @@ static void locate_diffs(u8 *ptr1, u8 *ptr2, u32 len, s32 *first, s32 *last) {
u8 fuzz_one_original(afl_state_t *afl) {
- s32 len, fd, temp_len;
+ s32 len, temp_len;
u32 j;
u32 i;
u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
@@ -453,32 +453,9 @@ u8 fuzz_one_original(afl_state_t *afl) {
}
- /* Map the test case into memory. */
-
- fd = open(afl->queue_cur->fname, O_RDONLY);
-
- if (unlikely(fd < 0)) {
-
- PFATAL("Unable to open '%s'", afl->queue_cur->fname);
-
- }
-
+ orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur);
len = afl->queue_cur->len;
- orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
-
- if (unlikely(orig_in == MAP_FAILED)) {
-
- PFATAL("Unable to mmap '%s' with len %d", afl->queue_cur->fname, len);
-
- }
-
- close(fd);
-
- /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
- single byte anyway, so it wouldn't give us any performance or memory usage
- benefits. */
-
out_buf = afl_realloc(AFL_BUF_PARAM(out), len);
if (unlikely(!out_buf)) { PFATAL("alloc"); }
@@ -526,6 +503,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
!afl->disable_trim)) {
u8 res = trim_case(afl, afl->queue_cur, in_buf);
+ orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur);
if (unlikely(res == FSRV_RUN_ERROR)) {
@@ -1720,17 +1698,7 @@ custom_mutator_stage:
afl->splicing_with = tid;
/* Read the additional testcase into a new buffer. */
- fd = open(target->fname, O_RDONLY);
- if (unlikely(fd < 0)) {
-
- PFATAL("Unable to open '%s'", target->fname);
-
- }
-
- new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), target->len);
- if (unlikely(!new_buf)) { PFATAL("alloc"); }
- ck_read(fd, new_buf, target->len, target->fname);
- close(fd);
+ new_buf = queue_testcase_get(afl, target);
target_len = target->len;
}
@@ -2182,7 +2150,6 @@ havoc_stage:
afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch));
out_buf = new_buf;
- new_buf = NULL;
temp_len += clone_len;
}
@@ -2326,43 +2293,21 @@ havoc_stage:
/* Pick a random queue entry and seek to it. */
u32 tid;
- do
- tid = rand_below(afl, afl->queued_paths);
- while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4);
-
- struct queue_entry *target = afl->queue_buf[tid];
-
- /* Read the testcase into a new buffer. */
-
- fd = open(target->fname, O_RDONLY);
-
- if (unlikely(fd < 0)) {
-
- PFATAL("Unable to open '%s'", target->fname);
-
- }
-
- u32 new_len = target->len;
- u8 *new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), new_len);
- if (unlikely(!new_buf)) { PFATAL("alloc"); }
-
- ck_read(fd, new_buf, new_len, target->fname);
+ do {
- close(fd);
+ tid = rand_below(afl, afl->queued_paths);
- u8 overwrite = 0;
- if (temp_len >= 2 && rand_below(afl, 2))
- overwrite = 1;
- else if (temp_len + HAVOC_BLK_XL >= MAX_FILE) {
+ } while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4);
- if (temp_len >= 2)
- overwrite = 1;
- else
- break;
+ /* Get the testcase for splicing. */
+ struct queue_entry *target = afl->queue_buf[tid];
+ u32 new_len = target->len;
+ u8 * new_buf = queue_testcase_get(afl, target);
- }
+ if ((temp_len >= 2 && rand_below(afl, 2)) ||
+ temp_len + HAVOC_BLK_XL >= MAX_FILE) {
- if (overwrite) {
+ /* overwrite mode */
u32 copy_from, copy_to, copy_len;
@@ -2376,15 +2321,16 @@ havoc_stage:
} else {
+ /* insert mode */
+
u32 clone_from, clone_to, clone_len;
clone_len = choose_block_len(afl, new_len);
clone_from = rand_below(afl, new_len - clone_len + 1);
+ clone_to = rand_below(afl, temp_len + 1);
- clone_to = rand_below(afl, temp_len);
-
- u8 *temp_buf =
- afl_realloc(AFL_BUF_PARAM(out_scratch), temp_len + clone_len);
+ u8 *temp_buf = afl_realloc(AFL_BUF_PARAM(out_scratch),
+ temp_len + clone_len + 1);
if (unlikely(!temp_buf)) { PFATAL("alloc"); }
/* Head */
@@ -2496,21 +2442,10 @@ retry_splicing:
} while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4);
+ /* Get the testcase */
afl->splicing_with = tid;
target = afl->queue_buf[tid];
-
- /* Read the testcase into a new buffer. */
-
- fd = open(target->fname, O_RDONLY);
-
- if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); }
-
- new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len);
- if (unlikely(!new_buf)) { PFATAL("alloc"); }
-
- ck_read(fd, new_buf, target->len, target->fname);
-
- close(fd);
+ new_buf = queue_testcase_get(afl, target);
/* Find a suitable splicing location, somewhere between the first and
the last differing byte. Bail out if the difference is just a single
@@ -2527,18 +2462,16 @@ retry_splicing:
/* Do the thing. */
len = target->len;
- memcpy(new_buf, in_buf, split_at);
- afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch));
- in_buf = new_buf;
+ afl->in_scratch_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), len);
+ memcpy(afl->in_scratch_buf, in_buf, split_at);
+ memcpy(afl->in_scratch_buf + split_at, new_buf, len - split_at);
+ in_buf = afl->in_scratch_buf;
out_buf = afl_realloc(AFL_BUF_PARAM(out), len);
if (unlikely(!out_buf)) { PFATAL("alloc"); }
memcpy(out_buf, in_buf, len);
goto custom_mutator_stage;
- /* ???: While integrating Python module, the author decided to jump to
- python stage, but the reason behind this is not clear.*/
- // goto havoc_stage;
}
@@ -2564,9 +2497,7 @@ abandon_entry:
}
++afl->queue_cur->fuzz_level;
-
- munmap(orig_in, afl->queue_cur->len);
-
+ orig_in = NULL;
return ret_val;
#undef FLIP_BIT
@@ -2587,7 +2518,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
}
- s32 len, fd, temp_len;
+ s32 len, temp_len;
u32 i;
u32 j;
u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
@@ -2652,32 +2583,11 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
}
/* Map the test case into memory. */
-
- fd = open(afl->queue_cur->fname, O_RDONLY);
-
- if (fd < 0) { PFATAL("Unable to open '%s'", afl->queue_cur->fname); }
-
+ orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur);
len = afl->queue_cur->len;
-
- orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
-
- if (orig_in == MAP_FAILED) {
-
- PFATAL("Unable to mmap '%s'", afl->queue_cur->fname);
-
- }
-
- close(fd);
-
- /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
- single byte anyway, so it wouldn't give us any performance or memory usage
- benefits. */
-
out_buf = afl_realloc(AFL_BUF_PARAM(out), len);
if (unlikely(!out_buf)) { PFATAL("alloc"); }
-
afl->subseq_tmouts = 0;
-
afl->cur_depth = afl->queue_cur->depth;
/*******************************************
@@ -2721,6 +2631,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
u32 old_len = afl->queue_cur->len;
u8 res = trim_case(afl, afl->queue_cur, in_buf);
+ orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur);
if (res == FSRV_RUN_ERROR) {
@@ -4497,17 +4408,7 @@ pacemaker_fuzzing:
target = afl->queue_buf[tid];
/* Read the testcase into a new buffer. */
-
- fd = open(target->fname, O_RDONLY);
-
- if (fd < 0) { PFATAL("Unable to open '%s'", target->fname); }
-
- new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len);
- if (unlikely(!new_buf)) { PFATAL("alloc"); }
-
- ck_read(fd, new_buf, target->len, target->fname);
-
- close(fd);
+ new_buf = queue_testcase_get(afl, target);
/* Find a suitable splicing location, somewhere between the first and
the last differing byte. Bail out if the difference is just a single
@@ -4529,9 +4430,11 @@ pacemaker_fuzzing:
/* Do the thing. */
len = target->len;
- memcpy(new_buf, in_buf, split_at);
- afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch));
- in_buf = new_buf;
+ afl->in_scratch_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), len);
+ memcpy(afl->in_scratch_buf, in_buf, split_at);
+ memcpy(afl->in_scratch_buf + split_at, new_buf, len - split_at);
+ in_buf = afl->in_scratch_buf;
+
out_buf = afl_realloc(AFL_BUF_PARAM(out), len);
if (unlikely(!out_buf)) { PFATAL("alloc"); }
memcpy(out_buf, in_buf, len);
@@ -4569,7 +4472,7 @@ pacemaker_fuzzing:
// if (afl->queue_cur->favored) --afl->pending_favored;
// }
- munmap(orig_in, afl->queue_cur->len);
+ orig_in = NULL;
if (afl->key_puppet == 1) {
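
Taken together, the buffer-ownership contract in fuzz_one_original() and mopt_common_fuzzing() now looks roughly like this (condensed from the hunks above, error handling omitted; the cache, not the caller, owns the memory):

    orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur);
    len = afl->queue_cur->len;
    ...
    res = trim_case(afl, afl->queue_cur, in_buf);
    /* trimming may free/replace the cached buffer, so fetch it again */
    orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur);
    ...
    /* on exit: no munmap(), just drop the reference */
    orig_in = NULL;

This is also why splicing no longer swaps new_buf into in_buf directly: new_buf may point into the cache, so the spliced input is assembled in afl->in_scratch_buf instead.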
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index f224d851..c634328f 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -31,11 +31,12 @@
inline u32 select_next_queue_entry(afl_state_t *afl) {
- u32 s = rand_below(afl, afl->queued_paths);
+ u32 s = rand_below(afl, afl->queued_paths);
double p = rand_next_percent(afl);
/*
fprintf(stderr, "select: p=%f s=%u ... p < prob[s]=%f ? s=%u : alias[%u]=%u"
- " ==> %u\n", p, s, afl->alias_probability[s], s, s, afl->alias_table[s], p < afl->alias_probability[s] ? s : afl->alias_table[s]);
+ " ==> %u\n", p, s, afl->alias_probability[s], s, s, afl->alias_table[s], p <
+ afl->alias_probability[s] ? s : afl->alias_table[s]);
*/
return (p < afl->alias_probability[s] ? s : afl->alias_table[s]);
@@ -55,7 +56,7 @@ void create_alias_table(afl_state_t *afl) {
int * S = (u32 *)afl_realloc(AFL_BUF_PARAM(out_scratch), n * sizeof(u32));
int * L = (u32 *)afl_realloc(AFL_BUF_PARAM(in_scratch), n * sizeof(u32));
-  if (!P || !S || !L) FATAL("could not acquire memory for alias table");
+  if (!P || !S || !L) { FATAL("could not acquire memory for alias table"); }
memset((void *)afl->alias_table, 0, n * sizeof(u32));
memset((void *)afl->alias_probability, 0, n * sizeof(double));
@@ -65,7 +66,7 @@ void create_alias_table(afl_state_t *afl) {
struct queue_entry *q = afl->queue_buf[i];
- if (!q->disabled) q->perf_score = calculate_score(afl, q);
+ if (!q->disabled) { q->perf_score = calculate_score(afl, q); }
sum += q->perf_score;
@@ -74,19 +75,23 @@ void create_alias_table(afl_state_t *afl) {
for (i = 0; i < n; i++) {
struct queue_entry *q = afl->queue_buf[i];
-
- P[i] = q->perf_score * n / sum;
+ P[i] = (q->perf_score * n) / sum;
}
int nS = 0, nL = 0, s;
for (s = (s32)n - 1; s >= 0; --s) {
- if (P[s] < 1)
+ if (P[s] < 1) {
+
S[nS++] = s;
- else
+
+ } else {
+
L[nL++] = s;
+ }
+
}
while (nS && nL) {
@@ -96,11 +101,16 @@ void create_alias_table(afl_state_t *afl) {
afl->alias_probability[a] = P[a];
afl->alias_table[a] = g;
P[g] = P[g] + P[a] - 1;
- if (P[g] < 1)
+ if (P[g] < 1) {
+
S[nS++] = g;
- else
+
+ } else {
+
L[nL++] = g;
+ }
+
}
while (nL)
@@ -110,11 +120,10 @@ void create_alias_table(afl_state_t *afl) {
afl->alias_probability[S[--nS]] = 1;
/*
- fprintf(stderr, " %-3s %-3s %-9s %-9s\n", "entry", "alias", "prob", "perf");
- for (u32 i = 0; i < n; ++i)
- fprintf(stderr, " %3i %3i %9.7f %9.7f\n", i, afl->alias_table[i],
- afl->alias_probability[i], afl->queue_buf[i]->perf_score);
-
+ fprintf(stderr, " entry alias probability perf_score\n");
+ for (u32 i = 0; i < n; ++i)
+ fprintf(stderr, " %5u %5u %11u %0.9f\n", i, afl->alias_table[i],
+ afl->alias_probability[i], afl->queue_buf[i]->perf_score);
*/
}
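
For readers unfamiliar with it, create_alias_table()/select_next_queue_entry() implement the classic Walker/Vose alias method: each entry's perf_score is scaled so the weights average to 1, under-weight entries are paired with over-weight ones, and a weighted draw then costs only one random slot plus one random percentage. A self-contained sketch of the same construction (illustrative weights and variable names, not AFL++ state):

    #include <stdio.h>
    #include <stdlib.h>

    #define N 4

    int main(void) {

      /* stand-ins for the queue entries' perf_score values */
      double w[N] = {1.0, 3.0, 0.5, 1.5};
      double P[N], prob[N], sum = 0;
      int    S[N], L[N], alias[N], nS = 0, nL = 0;

      for (int i = 0; i < N; i++) { alias[i] = i; sum += w[i]; }

      /* scale the weights so they average to 1 */
      for (int i = 0; i < N; i++) P[i] = (w[i] * N) / sum;

      /* split into "small" (< 1) and "large" (>= 1) work lists */
      for (int i = N - 1; i >= 0; i--) {
        if (P[i] < 1) { S[nS++] = i; } else { L[nL++] = i; }
      }

      /* pair each small entry with a large one; the large entry keeps its
         leftover probability mass and goes back on a list */
      while (nS && nL) {
        int a = S[--nS], g = L[--nL];
        prob[a] = P[a];
        alias[a] = g;
        P[g] = P[g] + P[a] - 1;
        if (P[g] < 1) { S[nS++] = g; } else { L[nL++] = g; }
      }

      while (nL) prob[L[--nL]] = 1;
      while (nS) prob[S[--nS]] = 1;

      /* O(1) weighted draws: pick a slot, keep it or jump to its alias */
      for (int k = 0; k < 10; k++) {
        int    s = rand() % N;                    /* cf. rand_below()        */
        double p = rand() / (double)RAND_MAX;     /* cf. rand_next_percent() */
        printf("%d ", p < prob[s] ? s : alias[s]);
      }
      printf("\n");
      return 0;

    }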
@@ -860,3 +869,131 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
}
+void queue_testcase_retake(afl_state_t *afl, struct queue_entry *q,
+ u32 old_len) {
+
+ if (likely(q->testcase_buf)) {
+
+ free(q->testcase_buf);
+ int fd = open(q->fname, O_RDONLY);
+
+ if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", q->fname); }
+
+ u32 len = q->len;
+ q->testcase_buf = malloc(len);
+
+ if (unlikely(!q->testcase_buf)) {
+
+    PFATAL("Unable to malloc '%s' with len %u", q->fname, len);
+
+  }
+
+  ck_read(fd, q->testcase_buf, len, q->fname);
+  close(fd);
+ afl->q_testcase_cache_size = afl->q_testcase_cache_size + q->len - old_len;
+
+ }
+
+}
+
+/* Returns the testcase buf from the file behind this queue entry,
+   loading it into the testcase cache if necessary. */
+inline u8 *queue_testcase_get(afl_state_t *afl, struct queue_entry *q) {
+
+ u32 len = q->len;
+
+ /* first handle if no testcase cache is configured */
+
+ if (unlikely(!afl->q_testcase_max_cache_size)) {
+
+ u8 *buf;
+
+ if (q == afl->queue_cur) {
+
+ buf = afl_realloc((void **)&afl->testcase_buf, len);
+
+ } else {
+
+ buf = afl_realloc((void **)&afl->splicecase_buf, len);
+
+ }
+
+ if (unlikely(!buf)) {
+
+ PFATAL("Unable to malloc '%s' with len %u", q->fname, len);
+
+ }
+
+ int fd = open(q->fname, O_RDONLY);
+
+ if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", q->fname); }
+
+ ck_read(fd, buf, len, q->fname);
+ close(fd);
+ return buf;
+
+ }
+
+ /* now handle the testcase cache */
+
+ if (unlikely(!q->testcase_buf)) {
+
+ /* Buf not cached, let's load it */
+ u32 tid = 0;
+
+ while (unlikely(afl->q_testcase_cache_size + len >=
+ afl->q_testcase_max_cache_size ||
+ afl->q_testcase_cache_count >= TESTCASE_ENTRIES - 1)) {
+
+      /* Cache full. We need to evict one entry to load another.
+         Pick a random one which is not in use */
+
+ do {
+
+ tid = rand_below(afl, afl->q_testcase_max_cache_count);
+
+ } while (afl->q_testcase_cache[tid] == NULL ||
+
+ afl->q_testcase_cache[tid] == afl->queue_cur);
+
+ struct queue_entry *old_cached = afl->q_testcase_cache[tid];
+ free(old_cached->testcase_buf);
+ old_cached->testcase_buf = NULL;
+ afl->q_testcase_cache_size -= old_cached->len;
+ afl->q_testcase_cache[tid] = NULL;
+ --afl->q_testcase_cache_count;
+
+ }
+
+ while (likely(afl->q_testcase_cache[tid] != NULL))
+ ++tid;
+
+    /* Read the test case into memory. */
+
+ int fd = open(q->fname, O_RDONLY);
+
+ if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", q->fname); }
+
+ q->testcase_buf = malloc(len);
+
+ if (unlikely(!q->testcase_buf)) {
+
+ PFATAL("Unable to malloc '%s' with len %u", q->fname, len);
+
+ }
+
+ ck_read(fd, q->testcase_buf, len, q->fname);
+ close(fd);
+
+ /* Register testcase as cached */
+ afl->q_testcase_cache[tid] = q;
+ afl->q_testcase_cache_size += q->len;
+ ++afl->q_testcase_cache_count;
+ if (tid >= afl->q_testcase_max_cache_count)
+ afl->q_testcase_max_cache_count = tid + 1;
+
+ }
+
+ return q->testcase_buf;
+
+}
+
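Note the two paths in queue_testcase_get(): with no cache configured (AFL_TESTCACHE_SIZE unset or 0) the entry is re-read from disk on every call, into afl->testcase_buf for the current entry or afl->splicecase_buf for splice targets; with a cache, entries are loaded lazily, and once the byte budget (q_testcase_max_cache_size) or the slot limit (TESTCASE_ENTRIES - 1) is reached, a random cached entry other than afl->queue_cur is evicted to make room. q_testcase_max_cache_count records the highest slot ever used, so the eviction draw only covers slots that can actually be occupied.
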
diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c
index ee22b0f6..ab870319 100644
--- a/src/afl-fuzz-run.c
+++ b/src/afl-fuzz-run.c
@@ -692,6 +692,8 @@ void sync_fuzzers(afl_state_t *afl) {
u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
+ u32 orig_len = q->len;
+
/* Custom mutator trimmer */
if (afl->custom_mutators_count) {
@@ -709,6 +711,8 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
});
+ if (orig_len != q->len) { queue_testcase_retake(afl, q, orig_len); }
+
if (custom_trimmed) return trimmed_case;
}
@@ -842,6 +846,8 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
close(fd);
+ if (orig_len != q->len) queue_testcase_retake(afl, q, orig_len);
+
memcpy(afl->fsrv.trace_bits, afl->clean_trace, afl->fsrv.map_size);
update_bitmap_score(afl, q);
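
Both trim paths (custom mutator and built-in) now capture q->len on entry and, if trimming changed it, call queue_testcase_retake() so the cached buffer and the q_testcase_cache_size accounting match the trimmed file on disk.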
diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c
index a0a2795e..0824b77f 100644
--- a/src/afl-fuzz-state.c
+++ b/src/afl-fuzz-state.c
@@ -103,6 +103,7 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) {
afl->stats_avg_exec = -1;
afl->skip_deterministic = 1;
afl->use_splicing = 1;
+ afl->q_testcase_max_cache_size = TESTCASE_CACHE * 1024000;
#ifdef HAVE_AFFINITY
afl->cpu_aff = -1; /* Selected CPU core */
@@ -353,6 +354,13 @@ void read_afl_environment(afl_state_t *afl, char **envp) {
afl->afl_env.afl_forksrv_init_tmout =
(u8 *)get_afl_env(afl_environment_variables[i]);
+ } else if (!strncmp(env, "AFL_TESTCACHE_SIZE",
+
+ afl_environment_variable_len)) {
+
+ afl->afl_env.afl_testcache_size =
+ (u8 *)get_afl_env(afl_environment_variables[i]);
+
} else if (!strncmp(env, "AFL_STATSD_HOST",
afl_environment_variable_len)) {
diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
index 76f24977..4f0cab4c 100644
--- a/src/afl-fuzz-stats.c
+++ b/src/afl-fuzz-stats.c
@@ -165,6 +165,8 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,
"edges_found : %u\n"
"var_byte_count : %u\n"
"havoc_expansion : %u\n"
+ "testcache_size : %llu\n"
+ "testcache_count : %u\n"
"afl_banner : %s\n"
"afl_version : " VERSION
"\n"
@@ -198,8 +200,9 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,
#else
-1,
#endif
- t_bytes, afl->var_byte_count, afl->expand_havoc, afl->use_banner,
- afl->unicorn_mode ? "unicorn" : "",
+ t_bytes, afl->var_byte_count, afl->expand_havoc,
+ afl->q_testcase_cache_size, afl->q_testcase_cache_count,
+ afl->use_banner, afl->unicorn_mode ? "unicorn" : "",
afl->fsrv.qemu_mode ? "qemu " : "",
afl->non_instrumented_mode ? " non_instrumented " : "",
afl->no_forkserver ? "no_fsrv " : "", afl->crash_mode ? "crash " : "",
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index 6498eb30..a59abb7d 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -196,11 +196,13 @@ static void usage(u8 *argv0, int more_help) {
"AFL_SKIP_BIN_CHECK: skip the check, if the target is an executable\n"
"AFL_SKIP_CPUFREQ: do not warn about variable cpu clocking\n"
"AFL_SKIP_CRASHES: during initial dry run do not terminate for crashing inputs\n"
- "AFL_STATSD: enables StatsD metrics collection"
- "AFL_STATSD_HOST: change default statsd host (default 127.0.0.1)"
- "AFL_STATSD_PORT: change default statsd port (default: 8125)"
- "AFL_STATSD_TAGS_FLAVOR: change default statsd tags format (default will disable tags)."
- " Supported formats are: 'dogstatsd', 'librato', 'signalfx' and 'influxdb'"
+ "AFL_STATSD: enables StatsD metrics collection\n"
+ "AFL_STATSD_HOST: change default statsd host (default 127.0.0.1)\n"
+ "AFL_STATSD_PORT: change default statsd port (default: 8125)\n"
+ "AFL_STATSD_TAGS_FLAVOR: set statsd tags format (default: disable tags)\n"
+ " Supported formats are: 'dogstatsd', 'librato', 'signalfx'\n"
+ " and 'influxdb'\n"
+ "AFL_TESTCACHE_SIZE: use a cache for testcases, improves performance (in MB)\n"
"AFL_TMPDIR: directory to use for input file generation (ramdisk recommended)\n"
//"AFL_PERSISTENT: not supported anymore -> no effect, just a warning\n"
//"AFL_DEFER_FORKSRV: not supported anymore -> no effect, just a warning\n"
@@ -885,7 +887,7 @@ int main(int argc, char **argv_orig, char **envp) {
auto_sync = 1;
afl->sync_id = ck_strdup("default");
afl->is_secondary_node = 1;
- OKF("no -M/-S set, autoconfiguring for \"-S %s\"", afl->sync_id);
+ OKF("No -M/-S set, autoconfiguring for \"-S %s\"", afl->sync_id);
}
@@ -1006,6 +1008,21 @@ int main(int argc, char **argv_orig, char **envp) {
}
+ if (afl->afl_env.afl_testcache_size) {
+
+ afl->q_testcase_max_cache_size =
+ (u64)atoi(afl->afl_env.afl_testcache_size) * 1024000;
+ OKF("Enabled testcache with %llu MB",
+ afl->q_testcase_max_cache_size / 1024000);
+
+ } else {
+
+ ACTF(
+        "No testcache was configured. It is recommended to use a testcache; it "
+        "improves performance: set AFL_TESTCACHE_SIZE=(value in MB)");
+
+ }
+
if (afl->afl_env.afl_forksrv_init_tmout) {
afl->fsrv.init_tmout = atoi(afl->afl_env.afl_forksrv_init_tmout);
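
One detail worth noting: the "MB" conversion here and for the default in afl-fuzz-state.c is atoi(AFL_TESTCACHE_SIZE) * 1024000, so AFL_TESTCACHE_SIZE=50 yields 51,200,000 bytes (and is reported back as 50 MB by the matching division), not 50 * 1,048,576 = 52,428,800.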
diff --git a/src/afl-performance.c b/src/afl-performance.c
index 6fa95dea..e070a05e 100644
--- a/src/afl-performance.c
+++ b/src/afl-performance.c
@@ -71,7 +71,7 @@ inline uint64_t rand_next(afl_state_t *afl) {
inline double rand_next_percent(afl_state_t *afl) {
- return (double)(((double)rand_next(afl)) / (double) 0xffffffffffffffff);
+ return (double)(((double)rand_next(afl)) / (double)0xffffffffffffffff);
}