Diffstat (limited to 'src')
-rw-r--r--  src/afl-fuzz-queue.c   11
-rw-r--r--  src/afl-fuzz-state.c   10
-rw-r--r--  src/afl-fuzz-stats.c    2
-rw-r--r--  src/afl-fuzz.c         68
4 files changed, 81 insertions, 10 deletions
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index db387c33..baa80e61 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -978,9 +978,9 @@ inline u8 *queue_testcase_get(afl_state_t *afl, struct queue_entry *q) {
/* Buf not cached, let's load it */
u32 tid = afl->q_testcase_max_cache_count;
- while (unlikely(afl->q_testcase_cache_size + len >=
- afl->q_testcase_max_cache_size ||
- afl->q_testcase_cache_count >= TESTCASE_ENTRIES - 1)) {
+ while (unlikely(
+ afl->q_testcase_cache_size + len >= afl->q_testcase_max_cache_size ||
+ afl->q_testcase_cache_count >= afl->q_testcase_max_cache_entries - 1)) {
/* Cache full. We need to evict one or more to map one.
Get a random one which is not in use */
@@ -1009,7 +1009,7 @@ inline u8 *queue_testcase_get(afl_state_t *afl, struct queue_entry *q) {
}
- if (unlikely(tid >= TESTCASE_ENTRIES)) {
+ if (unlikely(tid >= afl->q_testcase_max_cache_entries)) {
// uh we were full, so now we have to search from start
tid = afl->q_testcase_smallest_free;
@@ -1062,7 +1062,8 @@ inline void queue_testcase_store_mem(afl_state_t *afl, struct queue_entry *q,
if (unlikely(afl->q_testcase_cache_size + len >=
afl->q_testcase_max_cache_size ||
- afl->q_testcase_cache_count >= TESTCASE_ENTRIES - 1)) {
+ afl->q_testcase_cache_count >=
+ afl->q_testcase_max_cache_entries - 1)) {
// no space? will be loaded regularly later.
return;
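
For context, a minimal self-contained sketch of the pattern these hunks adopt (hypothetical names, not the real AFL++ structures): the cap on cached entries becomes a runtime field, checked alongside the byte budget, instead of the old compile-time TESTCASE_ENTRIES constant.

#include <stdint.h>
#include <stdlib.h>

typedef struct cache {

  uint8_t **bufs;        /* cached testcase buffers                 */
  uint32_t *lens;        /* size of each cached buffer              */
  uint32_t  count;       /* slots currently occupied                */
  uint32_t  max_entries; /* runtime cap (formerly TESTCASE_ENTRIES) */
  uint64_t  bytes;       /* total bytes currently cached            */
  uint64_t  max_bytes;   /* byte budget, cf. AFL_TESTCACHE_SIZE     */

} cache_t;

/* Evict random occupied slots until both the byte budget and the entry
   cap have room for one more testcase of `len` bytes, mirroring the
   loop condition above. Caller ensures len < max_bytes so the loop
   terminates. */
static void cache_make_room(cache_t *c, uint32_t len) {

  while (c->bytes + len >= c->max_bytes || c->count >= c->max_entries - 1) {

    uint32_t victim = (uint32_t)rand() % c->max_entries;

    if (!c->bufs[victim]) { continue; } /* empty slot, pick another */

    c->bytes -= c->lens[victim];
    free(c->bufs[victim]);
    c->bufs[victim] = NULL;
    --c->count;

  }

}
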
diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c
index 0824b77f..ae7d410b 100644
--- a/src/afl-fuzz-state.c
+++ b/src/afl-fuzz-state.c
@@ -103,7 +103,8 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) {
afl->stats_avg_exec = -1;
afl->skip_deterministic = 1;
afl->use_splicing = 1;
- afl->q_testcase_max_cache_size = TESTCASE_CACHE * 1024000;
+ afl->q_testcase_max_cache_size = TESTCASE_CACHE_SIZE * 1048576UL;
+ afl->q_testcase_max_cache_entries = 4096;
#ifdef HAVE_AFFINITY
afl->cpu_aff = -1; /* Selected CPU core */
@@ -361,6 +362,13 @@ void read_afl_environment(afl_state_t *afl, char **envp) {
afl->afl_env.afl_testcache_size =
(u8 *)get_afl_env(afl_environment_variables[i]);
+ } else if (!strncmp(env, "AFL_TESTCACHE_ENTRIES",
+
+ afl_environment_variable_len)) {
+
+ afl->afl_env.afl_testcache_entries =
+ (u8 *)get_afl_env(afl_environment_variables[i]);
+
} else if (!strncmp(env, "AFL_STATSD_HOST",
afl_environment_variable_len)) {
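
Both cache knobs arrive as plain decimal strings from the environment; get_afl_env() essentially wraps getenv() with logging. A rough standalone equivalent of how the two values end up being consumed (strtoul stands in for the atoi call in afl-fuzz.c below):

#include <stdio.h>
#include <stdlib.h>

int main(void) {

  const char *sz = getenv("AFL_TESTCACHE_SIZE");    /* in megabytes   */
  const char *en = getenv("AFL_TESTCACHE_ENTRIES"); /* max slot count */

  unsigned long max_bytes = sz ? strtoul(sz, NULL, 10) * 1048576UL : 0;
  unsigned long max_entries = en ? strtoul(en, NULL, 10) : 0;

  printf("cache budget: %lu bytes, %lu entries\n", max_bytes, max_entries);
  return 0;

}

Running it as AFL_TESTCACHE_SIZE=50 AFL_TESTCACHE_ENTRIES=4096 ./a.out would report a 52428800-byte budget with 4096 entries.
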
diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
index d213d054..bec90519 100644
--- a/src/afl-fuzz-stats.c
+++ b/src/afl-fuzz-stats.c
@@ -1028,7 +1028,7 @@ void show_init_stats(afl_state_t *afl) {
}
- SAYF("\n");
+ // SAYF("\n");
if (avg_us > ((afl->fsrv.qemu_mode || afl->unicorn_mode) ? 50000 : 10000)) {
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index 7215ecec..637e1985 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -251,7 +251,7 @@ static int stricmp(char const *a, char const *b) {
int main(int argc, char **argv_orig, char **envp) {
- s32 opt, i, auto_sync = 0;
+ s32 opt, i, auto_sync = 0, user_set_cache = 0;
u64 prev_queued = 0;
u32 sync_interval_cnt = 0, seek_to = 0, show_help = 0, map_size = MAP_SIZE;
u8 *extras_dir[4];
@@ -1015,6 +1015,22 @@ int main(int argc, char **argv_orig, char **envp) {
}
+ if (afl->afl_env.afl_testcache_entries) {
+
+ afl->q_testcase_max_cache_entries =
+ (u32)atoi(afl->afl_env.afl_testcache_entries);
+
+ user_set_cache = 1;
+
+ }
+
+ if (!afl->afl_env.afl_testcache_size || !afl->afl_env.afl_testcache_entries) {
+
+ afl->afl_env.afl_testcache_entries = 0;
+ afl->afl_env.afl_testcache_size = 0;
+
+ }
+
if (!afl->q_testcase_max_cache_size) {
ACTF(
@@ -1347,6 +1363,52 @@ int main(int argc, char **argv_orig, char **envp) {
perform_dry_run(afl);
+ if (!user_set_cache && afl->q_testcase_max_cache_size) {
+
+ /* The user did not define a fixed number of entries for the cache,
+    so we autodetect a good value. After the dry run the inputs are
+    trimmed and we know the average and maximum size of the input
+    seeds. We use this information to derive a fitting number of
+    maximum entries from the cache size. */
+
+ struct queue_entry *q = afl->queue;
+ u64 size = 0, count = 0, avg = 0, max = 0;
+
+ while (q) {
+
+ ++count;
+ size += q->len;
+ if (max < q->len) { max = q->len; }
+ q = q->next;
+
+ }
+
+ if (count) {
+
+ avg = size / count;
+ avg = ((avg + max) / 2) + 1;
+
+ }
+
+ if (avg < 10240) { avg = 10240; }
+
+ afl->q_testcase_max_cache_entries = afl->q_testcase_max_cache_size / avg;
+
+ if (afl->q_testcase_max_cache_entries > 32768) {
+   afl->q_testcase_max_cache_entries = 32768;
+ }
+
+ }
+
+ if (afl->q_testcase_max_cache_entries) {
+
+ OKF("Setting %u maximum entries for the testcase cache",
+ afl->q_testcase_max_cache_entries);
+ afl->q_testcase_cache =
+ ck_alloc(afl->q_testcase_max_cache_entries * sizeof(size_t));
+ if (!afl->q_testcase_cache) { PFATAL("malloc failed for cache entries"); }
+
+ }
+
cull_queue(afl);
if (!afl->pending_not_fuzzed)
@@ -1366,8 +1428,7 @@ int main(int argc, char **argv_orig, char **envp) {
if (!afl->not_on_tty) {
- sleep(4);
- afl->start_time += 4000;
+ sleep(1);
if (afl->stop_soon) { goto stop_fuzzing; }
}
@@ -1654,6 +1715,7 @@ stop_fuzzing:
ck_free(afl->fsrv.target_path);
ck_free(afl->fsrv.out_file);
ck_free(afl->sync_id);
+ if (afl->q_testcase_cache) { ck_free(afl->q_testcase_cache); }
afl_state_deinit(afl);
free(afl); /* not tracked */
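
To make the auto-sizing in afl-fuzz.c above concrete, a worked example with hypothetical corpus numbers (a 50 MB cache and seeds averaging 4 KB with a 96 KB outlier), following the same arithmetic as the patch:

#include <stdio.h>

int main(void) {

  unsigned long long max_cache = 50ULL * 1048576; /* AFL_TESTCACHE_SIZE=50 */
  unsigned long long avg = 4096, max = 98304;     /* hypothetical corpus   */

  avg = ((avg + max) / 2) + 1;          /* bias toward the largest seed */
  if (avg < 10240) { avg = 10240; }     /* floor: >= 10 KB per slot     */

  unsigned long long entries = max_cache / avg;
  if (entries > 32768) { entries = 32768; } /* hard cap from the patch  */

  printf("%llu entries\n", entries);    /* prints 1023 */
  return 0;

}

With a tiny corpus the 10240-byte floor dominates instead, capping a 50 MB cache at 5120 entries.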