author     van Hauser <vh@thc.org>       2020-09-29 14:06:20 +0200
committer  GitHub <noreply@github.com>   2020-09-29 14:06:20 +0200
commit     fe08482c1b2269289bfedea9f0ef2b6721d18221 (patch)
tree       abe699ce381526ad0c0106628852593dc9eeebfe /src
parent     e69b25e34be8028921389bbb114135c3028d0a3d (diff)
parent     e87eca7fe8ec3ed0ba79e7722350ad502b67218b (diff)
download   afl++-fe08482c1b2269289bfedea9f0ef2b6721d18221.tar.gz
Merge pull request #568 from mboehme/dev
Patching and improving AFLFast schedules.
Diffstat (limited to 'src')
 -rw-r--r--  src/afl-fuzz-bitmap.c | 18
 -rw-r--r--  src/afl-fuzz-init.c   |  8
 -rw-r--r--  src/afl-fuzz-queue.c  | 84
 -rw-r--r--  src/afl-fuzz.c        |  7
 4 files changed, 74 insertions(+), 43 deletions(-)
diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c
index 1b9df624..64de86a2 100644
--- a/src/afl-fuzz-bitmap.c
+++ b/src/afl-fuzz-bitmap.c
@@ -555,19 +555,9 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
 
     cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
 
-    struct queue_entry *q = afl->queue;
-    while (q) {
-
-      if (q->exec_cksum == cksum) {
-
-        ++q->n_fuzz;
-        break;
-
-      }
-
-      q = q->next;
-
-    }
+    /* Saturated increment */
+    if (afl->n_fuzz[cksum % n_fuzz_size] < 0xFFFFFFFF)
+      afl->n_fuzz[cksum % n_fuzz_size]++;
 
   }
 
@@ -610,6 +600,8 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
       afl->queue_top->exec_cksum =
           hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
 
+    afl->n_fuzz[cksum % n_fuzz_size] = 1;
+
     /* Try to calibrate inline; this also calls update_bitmap_score() when
        successful. */
 
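What changed here: instead of walking the queue's linked list to find the entry whose exec_cksum matches (an O(queue length) scan on every interesting execution), the hit count now lives in a global fixed-size array indexed by cksum % n_fuzz_size and is updated in O(1) with a saturating increment. Below is a minimal sketch of the same idea, assuming a power-of-two table size; HIT_TABLE_SIZE, hit_counts, hits_init and hits_record are illustrative names, not AFL++ identifiers:

#include <stdint.h>
#include <stdlib.h>

#define HIT_TABLE_SIZE (1u << 21)  /* hypothetical size; AFL++ uses its own n_fuzz_size constant */

static uint32_t *hit_counts;

void hits_init(void) {

  /* counters start at zero; AFL++'s ck_alloc likewise returns zeroed memory */
  hit_counts = calloc(HIT_TABLE_SIZE, sizeof(uint32_t));
  if (!hit_counts) { abort(); }

}

/* Saturating increment: stops at 0xFFFFFFFF instead of wrapping.
   Paths whose checksums collide modulo the table size share a slot,
   so counts are approximate upper bounds rather than exact. */
void hits_record(uint64_t cksum) {

  uint32_t *slot = &hit_counts[cksum % HIT_TABLE_SIZE];
  if (*slot < 0xFFFFFFFF) { (*slot)++; }

}

The trade-off is that of a one-hash counting filter: memory stays bounded no matter how large the queue grows, and a collision merely makes a path look somewhat hotter than it is, which is tolerable for a scheduling heuristic.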
diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c
index cbac3822..b825837f 100644
--- a/src/afl-fuzz-init.c
+++ b/src/afl-fuzz-init.c
@@ -729,6 +729,14 @@ void read_testcases(afl_state_t *afl, u8 *directory) {
     add_to_queue(afl, fn2, st.st_size >= MAX_FILE ? MAX_FILE : st.st_size,
                  passed_det);
 
+    if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE)) {
+
+      u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+
+      afl->n_fuzz[cksum % n_fuzz_size] = 1;
+
+    }
+
   }
 
   free(nl);                                                  /* not tracked */
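Note why the counter is seeded to 1 rather than left at 0: the schedule code below feeds these counts through log2(), and log2(1) is 0 (a sensible baseline for a path seen once), while log2(0) is negative infinity, which would wreck the COE mean and make FAST's integer cast undefined. A two-line illustration:

#include <math.h>
#include <stdio.h>

int main(void) {

  printf("log2(0) = %f\n", log2(0.0));  /* -inf: would poison any mean */
  printf("log2(1) = %f\n", log2(1.0));  /* 0.0: baseline for a fresh path */
  return 0;

}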
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index 53c3e984..dfabba7b 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -25,6 +25,7 @@
 #include "afl-fuzz.h"
 #include <limits.h>
 #include <ctype.h>
+#include <math.h>
 
 /* Mark deterministic checks as done for a particular queue entry. We use the
    .state file to avoid repeating deterministic fuzzing when resuming aborted
@@ -218,7 +219,6 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
   q->len = len;
   q->depth = afl->cur_depth + 1;
   q->passed_det = passed_det;
-  q->n_fuzz = 1;
   q->trace_mini = NULL;
 
   if (q->depth > afl->max_depth) { afl->max_depth = q->depth; }
@@ -307,8 +307,10 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
   u64 fav_factor;
   u64 fuzz_p2;
 
-  if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE))
-    fuzz_p2 = next_pow2(q->n_fuzz);
+  if (unlikely(afl->schedule >= FAST && afl->schedule < RARE))
+    fuzz_p2 = 0; // Skip the fuzz_p2 comparison 
+  else if (unlikely(afl->schedule == RARE))
+    fuzz_p2 = next_pow2(afl->n_fuzz[q->exec_cksum % n_fuzz_size]);
   else
     fuzz_p2 = q->fuzz_level;
 
@@ -334,7 +336,7 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
         u64 top_rated_fav_factor;
         u64 top_rated_fuzz_p2;
         if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE))
-          top_rated_fuzz_p2 = next_pow2(afl->top_rated[i]->n_fuzz);
+          top_rated_fuzz_p2 = next_pow2(afl->n_fuzz[afl->top_rated[i]->exec_cksum % n_fuzz_size]);
         else
           top_rated_fuzz_p2 = afl->top_rated[i]->fuzz_level;
 
@@ -605,11 +607,10 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
 
   }
 
-  u64 fuzz = q->n_fuzz;
-  u64 fuzz_total;
+  u32 n_paths;
+  double factor = 1.0;
+  long double fuzz_mu;
 
-  u32 n_paths, fuzz_mu;
-  u32 factor = 1;
 
   switch (afl->schedule) {
 
@@ -624,60 +625,83 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
       break;
 
     case COE:
-      fuzz_total = 0;
+      fuzz_mu = 0.0;
       n_paths = 0;
 
+      // Don't modify perf_score for unfuzzed seeds
+      if (q->fuzz_level == 0) break;
+
       struct queue_entry *queue_it = afl->queue;
       while (queue_it) {
 
-        fuzz_total += queue_it->n_fuzz;
+        fuzz_mu += log2(afl->n_fuzz[queue_it->exec_cksum % n_fuzz_size]);
         n_paths++;
+
         queue_it = queue_it->next;
 
       }
 
       if (unlikely(!n_paths)) { FATAL("Queue state corrupt"); }
 
-      fuzz_mu = fuzz_total / n_paths;
-      if (fuzz <= fuzz_mu) {
+      fuzz_mu = fuzz_mu / n_paths;
 
-        if (q->fuzz_level < 16) {
+      if (log2(afl->n_fuzz[q->exec_cksum % n_fuzz_size]) > fuzz_mu) {
 
-          factor = ((u32)(1 << q->fuzz_level));
+        /* Never skip favourites */
+        if (!q->favored) factor = 0;
 
-        } else {
+        break;
 
-          factor = MAX_FACTOR;
+      }
 
-        }
+    // Fall through
+    case FAST:
 
-      } else {
+      // Don't modify unfuzzed seeds
+      if (q->fuzz_level == 0) break;
 
-        factor = 0;
+      switch ((u32)log2(afl->n_fuzz[q->exec_cksum % n_fuzz_size])) {
 
-      }
+        case 0 ... 1:
+          factor = 4;
+          break;
 
-      break;
+        case 2 ... 3:
+          factor = 3;
+          break;
 
-    case FAST:
-      if (q->fuzz_level < 16) {
+        case 4:
+          factor = 2;
+          break;
 
-        factor = ((u32)(1 << q->fuzz_level)) / (fuzz == 0 ? 1 : fuzz);
+        case 5:
+          break;
 
-      } else {
+        case 6:
+          if (!q->favored) factor = 0.8;
+          break;
 
-        factor = MAX_FACTOR / (fuzz == 0 ? 1 : next_pow2(fuzz));
+        case 7:
+          if (!q->favored) factor = 0.6;
+          break;
+
+        default:
+          if (!q->favored) factor = 0.4;
+          break;
 
       }
 
+      if (q->favored)
+        factor *= 1.15;
+
       break;
 
     case LIN:
-      factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz);
+      factor = q->fuzz_level / (afl->n_fuzz[q->exec_cksum % n_fuzz_size] + 1);
       break;
 
     case QUAD:
-      factor = q->fuzz_level * q->fuzz_level / (fuzz == 0 ? 1 : fuzz);
+      factor = q->fuzz_level * q->fuzz_level / (afl->n_fuzz[q->exec_cksum % n_fuzz_size] + 1);
       break;
 
     case MMOPT:
@@ -703,7 +727,7 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
       // the more often fuzz result paths are equal to this queue entry,
       // reduce its value
       perf_score *=
-          (1 - (double)((double)q->n_fuzz / (double)afl->fsrv.total_execs));
+          (1 - (double)((double)afl->n_fuzz[q->exec_cksum % n_fuzz_size] / (double)afl->fsrv.total_execs));
 
       break;
 
@@ -724,7 +748,7 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
 
     perf_score *= 2;
 
-  } else if (perf_score < 1) {
+  } else if (afl->schedule != COE && perf_score < 1) {
 
     // Add a lower bound to AFLFast's energy assignment strategies
     perf_score = 1;
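The rewritten FAST schedule drops the old exponential 2^fuzz_level factor in favour of buckets over log2 of the path's global hit count: rarely-hit paths get up to 4x energy, hot paths are damped down to 0.4x, and favoured entries are never zeroed and receive a flat 1.15x bonus. COE falls through into the same buckets, but only for entries whose log count does not exceed the queue-wide mean; non-favoured entries above the mean are skipped outright (factor 0). A standalone sketch of the bucketing, assuming the count is at least 1 as the initialisation above guarantees; fast_factor is an illustrative helper, not an AFL++ function, and the case ranges use the GCC/Clang extension the patch itself relies on:

#include <math.h>
#include <stdint.h>

double fast_factor(uint32_t hit_count /* >= 1 */, int favored) {

  double factor = 1.0;

  switch ((uint32_t)log2(hit_count)) {

    case 0 ... 1:                      /* hit 1-3 times: boost hard */
      factor = 4;
      break;

    case 2 ... 3:
      factor = 3;
      break;

    case 4:
      factor = 2;
      break;

    case 5:                            /* baseline: leave at 1.0 */
      break;

    case 6:
      if (!favored) factor = 0.8;
      break;

    case 7:
      if (!favored) factor = 0.6;
      break;

    default:                           /* very hot path: damp */
      if (!favored) factor = 0.4;
      break;

  }

  if (favored) factor *= 1.15;         /* never starve favourites */
  return factor;

}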
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index 28507857..889f753d 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -936,6 +936,13 @@ int main(int argc, char **argv_orig, char **envp) {
 
   }
 
+  /* Dynamically allocate memory for AFLFast schedules */
+  if (afl->schedule >= FAST && afl->schedule <= RARE) {
+
+    afl->n_fuzz = ck_alloc(n_fuzz_size * sizeof(u32));
+
+  }
+
   if (get_afl_env("AFL_NO_FORKSRV")) { afl->no_forkserver = 1; }
   if (get_afl_env("AFL_NO_CPU_RED")) { afl->no_cpu_meter_red = 1; }
   if (get_afl_env("AFL_NO_ARITH")) { afl->no_arith = 1; }