about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/afl-fuzz-queue.c18
-rw-r--r--src/afl-fuzz-state.c2
-rw-r--r--src/afl-fuzz.c55
3 files changed, 47 insertions, 28 deletions
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index baa80e61..4989a0ba 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -985,6 +985,24 @@ inline u8 *queue_testcase_get(afl_state_t *afl, struct queue_entry *q) {
       /* Cache full. We need to evict one or more to map one.
          Get a random one which is not in use */
 
+      if (unlikely(afl->q_testcase_cache_size + len >= afl->q_testcase_max_cache_size &&
+          (afl->q_testcase_cache_count < afl->q_testcase_max_cache_entries &&
+           afl->q_testcase_max_cache_count <
+               afl->q_testcase_max_cache_entries))) {
+
+        if (afl->q_testcase_max_cache_count > afl->q_testcase_cache_count) {
+
+          afl->q_testcase_max_cache_entries =
+              afl->q_testcase_max_cache_count + 1;
+
+        } else {
+
+          afl->q_testcase_max_cache_entries = afl->q_testcase_cache_count + 1;
+
+        }
+
+      }
+
       do {
 
         // if the cache (MB) is not enough for the queue then this gets
diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c
index ae7d410b..3ce16cad 100644
--- a/src/afl-fuzz-state.c
+++ b/src/afl-fuzz-state.c
@@ -104,7 +104,7 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) {
   afl->skip_deterministic = 1;
   afl->use_splicing = 1;
   afl->q_testcase_max_cache_size = TESTCASE_CACHE_SIZE * 1048576UL;
-  afl->q_testcase_max_cache_entries = 4096;
+  afl->q_testcase_max_cache_entries = 64 * 1024;
 
 #ifdef HAVE_AFFINITY
   afl->cpu_aff = -1;                    /* Selected CPU core                */
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index 637e1985..70e21c0f 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -251,7 +251,7 @@ static int stricmp(char const *a, char const *b) {
 
 int main(int argc, char **argv_orig, char **envp) {
 
-  s32 opt, i, auto_sync = 0, user_set_cache = 0;
+  s32 opt, i, auto_sync = 0 /*, user_set_cache = 0*/;
   u64 prev_queued = 0;
   u32 sync_interval_cnt = 0, seek_to = 0, show_help = 0, map_size = MAP_SIZE;
   u8 *extras_dir[4];
@@ -1020,7 +1020,7 @@ int main(int argc, char **argv_orig, char **envp) {
     afl->q_testcase_max_cache_entries =
         (u32)atoi(afl->afl_env.afl_testcache_entries);
 
-    user_set_cache = 1;
+    // user_set_cache = 1;
 
   }
 
@@ -1363,46 +1363,47 @@ int main(int argc, char **argv_orig, char **envp) {
 
   perform_dry_run(afl);
 
-  if (!user_set_cache && afl->q_testcase_max_cache_size) {
+  /*
+    if (!user_set_cache && afl->q_testcase_max_cache_size) {
 
-    /* The user defined not a fixed number of entries for the cache.
-       Hence we autodetect a good value. After the dry run inputs are
-       trimmed and we know the average and max size of the input seeds.
-       We use this information to set a fitting size to max entries
-       based on the cache size. */
+      / * The user defined not a fixed number of entries for the cache.
+         Hence we autodetect a good value. After the dry run inputs are
+         trimmed and we know the average and max size of the input seeds.
+         We use this information to set a fitting size to max entries
+         based on the cache size. * /
 
-    struct queue_entry *q = afl->queue;
-    u64                 size = 0, count = 0, avg = 0, max = 0;
+      struct queue_entry *q = afl->queue;
+      u64                 size = 0, count = 0, avg = 0, max = 0;
 
-    while (q) {
+      while (q) {
 
-      ++count;
-      size += q->len;
-      if (max < q->len) { max = q->len; }
-      q = q->next;
+        ++count;
+        size += q->len;
+        if (max < q->len) { max = q->len; }
+        q = q->next;
 
-    }
+      }
 
-    if (count) {
+      if (count) {
 
-      avg = size / count;
-      avg = ((avg + max) / 2) + 1;
+        avg = size / count;
+        avg = ((avg + max) / 2) + 1;
 
-    }
+      }
 
-    if (avg < 10240) { avg = 10240; }
+      if (avg < 10240) { avg = 10240; }
 
-    afl->q_testcase_max_cache_entries = afl->q_testcase_max_cache_size / avg;
+      afl->q_testcase_max_cache_entries = afl->q_testcase_max_cache_size / avg;
 
-    if (afl->q_testcase_max_cache_entries > 32768)
-      afl->q_testcase_max_cache_entries = 32768;
+      if (afl->q_testcase_max_cache_entries > 32768)
+        afl->q_testcase_max_cache_entries = 32768;
 
-  }
+    }
+
+  */
 
   if (afl->q_testcase_max_cache_entries) {
 
-    OKF("Setting %u maximum entries for the testcase cache",
-        afl->q_testcase_max_cache_entries);
     afl->q_testcase_cache =
         ck_alloc(afl->q_testcase_max_cache_entries * sizeof(size_t));
     if (!afl->q_testcase_cache) { PFATAL("malloc failed for cache entries"); }