Diffstat (limited to 'src')
 -rw-r--r--  src/afl-cc.c           |  2
 -rw-r--r--  src/afl-fuzz-bitmap.c  | 25
 -rw-r--r--  src/afl-fuzz-init.c    |  8
 -rw-r--r--  src/afl-fuzz-one.c     | 58
 -rw-r--r--  src/afl-fuzz-queue.c   | 92
 -rw-r--r--  src/afl-fuzz-state.c   | 55
 -rw-r--r--  src/afl-fuzz-stats.c   |  2
 -rw-r--r--  src/afl-fuzz.c         | 33
 8 files changed, 151 insertions(+), 124 deletions(-)
diff --git a/src/afl-cc.c b/src/afl-cc.c
index c3b8959d..c516dc4c 100644
--- a/src/afl-cc.c
+++ b/src/afl-cc.c
@@ -1537,7 +1537,7 @@ int main(int argc, char **argv, char **envp) {
   if (debug) {
 
     SAYF(cMGN "[D]" cRST " cd '%s';", getthecwd());
-    for (i = 0; i < cc_par_cnt; i++)
+    for (i = 0; i < (s32)cc_par_cnt; i++)
       SAYF(" '%s'", cc_params[i]);
     SAYF("\n");
 
diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c
index 1b9df624..a22223b9 100644
--- a/src/afl-fuzz-bitmap.c
+++ b/src/afl-fuzz-bitmap.c
@@ -555,19 +555,9 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
 
     cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
 
-    struct queue_entry *q = afl->queue;
-    while (q) {
-
-      if (q->exec_cksum == cksum) {
-
-        ++q->n_fuzz;
-        break;
-
-      }
-
-      q = q->next;
-
-    }
+    /* Saturated increment */
+    if (afl->n_fuzz[cksum % N_FUZZ_SIZE] < 0xFFFFFFFF)
+      afl->n_fuzz[cksum % N_FUZZ_SIZE]++;
 
   }
 
@@ -607,9 +597,16 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
     if (cksum)
       afl->queue_top->exec_cksum = cksum;
     else
-      afl->queue_top->exec_cksum =
+      cksum = afl->queue_top->exec_cksum =
           hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
 
+    if (afl->schedule >= FAST && afl->schedule <= RARE) {
+
+      afl->queue_top->n_fuzz_entry = cksum % N_FUZZ_SIZE;
+      afl->n_fuzz[afl->queue_top->n_fuzz_entry] = 1;
+
+    }
+
     /* Try to calibrate inline; this also calls update_bitmap_score() when
        successful. */
 
diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c
index cbac3822..65478a78 100644
--- a/src/afl-fuzz-init.c
+++ b/src/afl-fuzz-init.c
@@ -729,6 +729,14 @@ void read_testcases(afl_state_t *afl, u8 *directory) {
     add_to_queue(afl, fn2, st.st_size >= MAX_FILE ? MAX_FILE : st.st_size,
                  passed_det);
 
+    if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE)) {
+
+      u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+      afl->queue_top->n_fuzz_entry = cksum % N_FUZZ_SIZE;
+      afl->n_fuzz[afl->queue_top->n_fuzz_entry] = 1;
+
+    }
+
   }
 
   free(nl);                                                  /* not tracked */
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index 5737c1f5..c04b492b 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -1696,50 +1696,47 @@ custom_mutator_stage:
 
           struct queue_entry *target;
           u32                 tid;
-          u8 *                new_buf;
+          u8 *                new_buf = NULL;
+          u32                 target_len = 0;
 
-        retry_external_pick:
-          /* Pick a random other queue entry for passing to external API */
+          /* check if splicing makes sense yet (enough entries) */
+          if (likely(afl->ready_for_splicing_count > 1)) {
 
-          do {
+            /* Pick a random other queue entry for passing to external API
+               that has the necessary length */
 
-            tid = rand_below(afl, afl->queued_paths);
+            do {
 
-          } while (tid == afl->current_entry && afl->queued_paths > 1);
-
-          afl->splicing_with = tid;
-          target = afl->queue_buf[tid];
+              tid = rand_below(afl, afl->queued_paths);
 
-          /* Make sure that the target has a reasonable length. */
+            } while (unlikely(tid == afl->current_entry &&
 
-          while (target && (target->len < 2 || target == afl->queue_cur) &&
-                 afl->queued_paths > 3) {
+                              afl->queue_buf[tid]->len >= 4));
 
-            target = target->next;
-            ++afl->splicing_with;
+            target = afl->queue_buf[tid];
+            afl->splicing_with = tid;
 
-          }
+            /* Read the additional testcase into a new buffer. */
+            fd = open(target->fname, O_RDONLY);
+            if (unlikely(fd < 0)) {
 
-          if (!target) { goto retry_external_pick; }
+              PFATAL("Unable to open '%s'", target->fname);
 
-          /* Read the additional testcase into a new buffer. */
-          fd = open(target->fname, O_RDONLY);
-          if (unlikely(fd < 0)) {
+            }
 
-            PFATAL("Unable to open '%s'", target->fname);
+            new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), target->len);
+            if (unlikely(!new_buf)) { PFATAL("alloc"); }
+            ck_read(fd, new_buf, target->len, target->fname);
+            close(fd);
+            target_len = target->len;
 
           }
 
-          new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), target->len);
-          if (unlikely(!new_buf)) { PFATAL("alloc"); }
-          ck_read(fd, new_buf, target->len, target->fname);
-          close(fd);
-
           u8 *mutated_buf = NULL;
 
           size_t mutated_size =
               el->afl_custom_fuzz(el->data, out_buf, len, &mutated_buf, new_buf,
-                                  target->len, max_seed_size);
+                                  target_len, max_seed_size);
 
           if (unlikely(!mutated_buf)) {
 
@@ -1887,7 +1884,7 @@ havoc_stage:
 
   for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    u32 use_stacking = 1 << (1 + rand_below(afl, HAVOC_STACK_POW2));
+    u32 use_stacking = 1 << (1 + rand_below(afl, afl->havoc_stack_pow2));
 
     afl->stage_cur_val = use_stacking;
 
@@ -2738,6 +2735,8 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
 
   if (!afl->non_instrumented_mode && !afl->queue_cur->trim_done) {
 
+    u32 old_len = afl->queue_cur->len;
+
     u8 res = trim_case(afl, afl->queue_cur, in_buf);
 
     if (res == FSRV_RUN_ERROR) {
@@ -2759,6 +2758,9 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
 
     len = afl->queue_cur->len;
 
+    /* maybe current entry is not ready for splicing anymore */
+    if (unlikely(len <= 4 && old_len > 4)) afl->ready_for_splicing_count--;
+
   }
 
   memcpy(out_buf, in_buf, len);
@@ -3968,7 +3970,7 @@ pacemaker_fuzzing:
       for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max;
            ++afl->stage_cur) {
 
-        u32 use_stacking = 1 << (1 + rand_below(afl, HAVOC_STACK_POW2));
+        u32 use_stacking = 1 << (1 + rand_below(afl, afl->havoc_stack_pow2));
 
         afl->stage_cur_val = use_stacking;
 
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index 336b7f4f..0d7d0314 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -25,6 +25,7 @@
 #include "afl-fuzz.h"
 #include <limits.h>
 #include <ctype.h>
+#include <math.h>
 
 /* Mark deterministic checks as done for a particular queue entry. We use the
    .state file to avoid repeating deterministic fuzzing when resuming aborted
@@ -218,7 +219,6 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
   q->len = len;
   q->depth = afl->cur_depth + 1;
   q->passed_det = passed_det;
-  q->n_fuzz = 1;
   q->trace_mini = NULL;
 
   if (q->depth > afl->max_depth) { afl->max_depth = q->depth; }
@@ -234,6 +234,8 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
 
   }
 
+  if (likely(q->len > 4)) afl->ready_for_splicing_count++;
+
   ++afl->queued_paths;
   ++afl->pending_not_fuzzed;
 
@@ -305,8 +307,10 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
   u64 fav_factor;
   u64 fuzz_p2;
 
-  if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE))
-    fuzz_p2 = next_pow2(q->n_fuzz);
+  if (unlikely(afl->schedule >= FAST && afl->schedule < RARE))
+    fuzz_p2 = 0;  // Skip the fuzz_p2 comparison
+  else if (unlikely(afl->schedule == RARE))
+    fuzz_p2 = next_pow2(afl->n_fuzz[q->n_fuzz_entry]);
   else
     fuzz_p2 = q->fuzz_level;
 
@@ -332,7 +336,8 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
         u64 top_rated_fav_factor;
         u64 top_rated_fuzz_p2;
         if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE))
-          top_rated_fuzz_p2 = next_pow2(afl->top_rated[i]->n_fuzz);
+          top_rated_fuzz_p2 =
+              next_pow2(afl->n_fuzz[afl->top_rated[i]->n_fuzz_entry]);
         else
           top_rated_fuzz_p2 = afl->top_rated[i]->fuzz_level;
 
@@ -603,11 +608,9 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
 
   }
 
-  u64 fuzz = q->n_fuzz;
-  u64 fuzz_total;
-
-  u32 n_paths, fuzz_mu;
-  u32 factor = 1;
+  u32         n_paths;
+  double      factor = 1.0;
+  long double fuzz_mu;
 
   switch (afl->schedule) {
 
@@ -622,60 +625,83 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
       break;
 
     case COE:
-      fuzz_total = 0;
+      fuzz_mu = 0.0;
       n_paths = 0;
 
+      // Don't modify perf_score for unfuzzed seeds
+      if (q->fuzz_level == 0) break;
+
       struct queue_entry *queue_it = afl->queue;
       while (queue_it) {
 
-        fuzz_total += queue_it->n_fuzz;
+        fuzz_mu += log2(afl->n_fuzz[q->n_fuzz_entry]);
         n_paths++;
+
         queue_it = queue_it->next;
 
       }
 
       if (unlikely(!n_paths)) { FATAL("Queue state corrupt"); }
 
-      fuzz_mu = fuzz_total / n_paths;
-      if (fuzz <= fuzz_mu) {
+      fuzz_mu = fuzz_mu / n_paths;
 
-        if (q->fuzz_level < 16) {
+      if (log2(afl->n_fuzz[q->n_fuzz_entry]) > fuzz_mu) {
 
-          factor = ((u32)(1 << q->fuzz_level));
+        /* Never skip favourites */
+        if (!q->favored) factor = 0;
 
-        } else {
+        break;
 
-          factor = MAX_FACTOR;
+      }
 
-        }
+    // Fall through
+    case FAST:
 
-      } else {
+      // Don't modify unfuzzed seeds
+      if (q->fuzz_level == 0) break;
 
-        factor = 0;
+      switch ((u32)log2(afl->n_fuzz[q->n_fuzz_entry])) {
 
-      }
+        case 0 ... 1:
+          factor = 4;
+          break;
 
-      break;
+        case 2 ... 3:
+          factor = 3;
+          break;
 
-    case FAST:
-      if (q->fuzz_level < 16) {
+        case 4:
+          factor = 2;
+          break;
+
+        case 5:
+          break;
 
-        factor = ((u32)(1 << q->fuzz_level)) / (fuzz == 0 ? 1 : fuzz);
+        case 6:
+          if (!q->favored) factor = 0.8;
+          break;
 
-      } else {
+        case 7:
+          if (!q->favored) factor = 0.6;
+          break;
 
-        factor = MAX_FACTOR / (fuzz == 0 ? 1 : next_pow2(fuzz));
+        default:
+          if (!q->favored) factor = 0.4;
+          break;
 
       }
 
+      if (q->favored) factor *= 1.15;
+
       break;
 
     case LIN:
-      factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz);
+      factor = q->fuzz_level / (afl->n_fuzz[q->n_fuzz_entry] + 1);
       break;
 
     case QUAD:
-      factor = q->fuzz_level * q->fuzz_level / (fuzz == 0 ? 1 : fuzz);
+      factor =
+          q->fuzz_level * q->fuzz_level / (afl->n_fuzz[q->n_fuzz_entry] + 1);
       break;
 
     case MMOPT:
@@ -700,8 +726,8 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
       perf_score += (q->tc_ref * 10);
       // the more often fuzz result paths are equal to this queue entry,
       // reduce its value
-      perf_score *=
-          (1 - (double)((double)q->n_fuzz / (double)afl->fsrv.total_execs));
+      perf_score *= (1 - (double)((double)afl->n_fuzz[q->n_fuzz_entry] /
+                                  (double)afl->fsrv.total_execs));
 
       break;
 
@@ -710,7 +736,7 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
 
   }
 
-  if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE)) {
+  if (unlikely(afl->schedule >= EXPLOIT && afl->schedule <= QUAD)) {
 
     if (factor > MAX_FACTOR) { factor = MAX_FACTOR; }
     perf_score *= factor / POWER_BETA;
@@ -722,7 +748,7 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
 
     perf_score *= 2;
 
-  } else if (perf_score < 1) {
+  } else if (afl->schedule != COE && perf_score < 1) {
 
     // Add a lower bound to AFLFast's energy assignment strategies
     perf_score = 1;
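
Side note (illustrative, not code from this commit): the calculate_score() rewrite above
replaces the old 2^fuzz_level factors with a bucketed lookup on log2 of the path's hit
count. The sketch below restates that FAST branch as a standalone helper, reusing the GCC
case-range extension the patch itself uses; fast_factor() is a made-up name.

  #include <math.h>
  #include <stdint.h>

  double fast_factor(uint32_t n_fuzz_val, int favored) {

    double factor = 1.0;

    switch ((uint32_t)log2(n_fuzz_val)) {

      case 0 ... 1: factor = 4; break;             /* path hit 1-3 times: boost hard */
      case 2 ... 3: factor = 3; break;             /* 4-15 hits                      */
      case 4:       factor = 2; break;             /* 16-31 hits                     */
      case 5:                   break;             /* 32-63 hits: neutral            */
      case 6: if (!favored) factor = 0.8; break;   /* 64-127 hits: throttle          */
      case 7: if (!favored) factor = 0.6; break;   /* 128-255 hits                   */
      default: if (!favored) factor = 0.4; break;  /* very common paths              */

    }

    if (favored) factor *= 1.15;                   /* favourites never get starved */
    return factor;

  }

For example, a non-favored seed whose path has been hit 20 times lands in bucket
(u32)log2(20) == 4 and gets factor 2; after 300 hits it falls into the default bucket and
gets factor 0.4, while a favored seed with the same count would keep 1.0 * 1.15.
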
diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c
index a7c7aff9..b7d44dbf 100644
--- a/src/afl-fuzz-state.c
+++ b/src/afl-fuzz-state.c
@@ -30,9 +30,9 @@ s8  interesting_8[] = {INTERESTING_8};
 s16 interesting_16[] = {INTERESTING_8, INTERESTING_16};
 s32 interesting_32[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32};
 
-char *power_names[POWER_SCHEDULES_NUM] = {"explore", "exploit", "fast",
-                                          "coe",     "lin",     "quad",
-                                          "rare",    "mmopt",   "seek"};
+char *power_names[POWER_SCHEDULES_NUM] = {"explore", "mmopt", "exploit",
+                                          "fast",    "coe",   "lin",
+                                          "quad",    "rare",  "seek"};
 
 /* Initialize MOpt "globals" for this afl state */
 
@@ -87,7 +87,7 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) {
   afl->w_end = 0.3;
   afl->g_max = 5000;
   afl->period_pilot_tmp = 5000.0;
-  afl->schedule = SEEK;                   /* Power schedule (default: SEEK) */
+  afl->schedule = EXPLORE;             /* Power schedule (default: EXPLORE) */
   afl->havoc_max_mult = HAVOC_MAX_MULT;
 
   afl->clear_screen = 1;                /* Window resized?                  */
@@ -95,6 +95,12 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) {
   afl->stage_name = "init";             /* Name of the current fuzz stage   */
   afl->splicing_with = -1;              /* Splicing with which test case?   */
   afl->cpu_to_bind = -1;
+  afl->havoc_stack_pow2 = HAVOC_STACK_POW2;
+  afl->cal_cycles = CAL_CYCLES;
+  afl->cal_cycles_long = CAL_CYCLES_LONG;
+  afl->hang_tmout = EXEC_TIMEOUT;
+  afl->stats_update_freq = 1;
+  afl->stats_avg_exec = -1;
 
 #ifdef HAVE_AFFINITY
   afl->cpu_aff = -1;                    /* Selected CPU core                */
@@ -115,46 +121,13 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) {
   // afl_state_t is not available in forkserver.c
   afl->fsrv.afl_ptr = (void *)afl;
   afl->fsrv.add_extra_func = (void (*)(void *, u8 *, u32)) & add_extra;
-
-  afl->cal_cycles = CAL_CYCLES;
-  afl->cal_cycles_long = CAL_CYCLES_LONG;
-
   afl->fsrv.exec_tmout = EXEC_TIMEOUT;
-  afl->hang_tmout = EXEC_TIMEOUT;
-
   afl->fsrv.mem_limit = MEM_LIMIT;
-
-  afl->stats_update_freq = 1;
-
   afl->fsrv.dev_urandom_fd = -1;
   afl->fsrv.dev_null_fd = -1;
-
   afl->fsrv.child_pid = -1;
   afl->fsrv.out_dir_fd = -1;
 
-  afl->cmplog_prev_timed_out = 0;
-
-  /* statis file */
-  afl->last_bitmap_cvg = 0;
-  afl->last_stability = 0;
-  afl->last_eps = 0;
-
-  /* plot file saves from last run */
-  afl->plot_prev_qp = 0;
-  afl->plot_prev_pf = 0;
-  afl->plot_prev_pnf = 0;
-  afl->plot_prev_ce = 0;
-  afl->plot_prev_md = 0;
-  afl->plot_prev_qc = 0;
-  afl->plot_prev_uc = 0;
-  afl->plot_prev_uh = 0;
-
-  afl->stats_last_stats_ms = 0;
-  afl->stats_last_plot_ms = 0;
-  afl->stats_last_ms = 0;
-  afl->stats_last_execs = 0;
-  afl->stats_avg_exec = -1;
-
   init_mopt_globals(afl);
 
   list_append(&afl_states, afl);
@@ -175,6 +148,14 @@ void read_afl_environment(afl_state_t *afl, char **envp) {
       WARNF("Potentially mistyped AFL environment variable: %s", env);
       issue_detected = 1;
 
+    } else if (strncmp(env, "USE_", 4) == 0) {
+
+      WARNF(
+          "Potentially mistyped AFL environment variable: %s, did you mean "
+          "AFL_%s?",
+          env, env);
+      issue_detected = 1;
+
     } else if (strncmp(env, "AFL_", 4) == 0) {
 
       int i = 0, match = 0;
diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
index 8a1c2cc6..0cd6f399 100644
--- a/src/afl-fuzz-stats.c
+++ b/src/afl-fuzz-stats.c
@@ -967,7 +967,7 @@ void show_stats(afl_state_t *afl) {
 #else
 
     SAYF("%s" cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST, spacing, cpu_color,
-         MIN(cur_utilization, 999));
+         MIN(cur_utilization, (u32)999));
 
 #endif                                                    /* ^HAVE_AFFINITY */
 
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index aa36a6c6..dc0eb4a7 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -90,13 +90,13 @@ static void usage(u8 *argv0, int more_help) {
 
       "Execution control settings:\n"
       "  -p schedule   - power schedules compute a seed's performance score:\n"
-      "                  <seek (default), explore, rare, exploit, mmopt, coe, "
+      "                  <explore(default), rare, exploit, seek, mmopt, coe, "
       "fast,\n"
       "                  lin, quad> -- see docs/power_schedules.md\n"
       "  -f file       - location read by the fuzzed program (default: stdin "
       "or @@)\n"
       "  -t msec       - timeout for each run (auto-scaled, 50-%d ms)\n"
-      "  -m megs       - memory limit for child process (%d MB)\n"
+      "  -m megs       - memory limit for child process (%d MB, 0 = no limit)\n"
       "  -Q            - use binary-only instrumentation (QEMU mode)\n"
       "  -U            - use unicorn-based instrumentation (Unicorn mode)\n"
       "  -W            - use qemu-based instrumentation with Wine (Wine "
@@ -251,7 +251,8 @@ int main(int argc, char **argv_orig, char **envp) {
   u64 prev_queued = 0;
   u32 sync_interval_cnt = 0, seek_to, show_help = 0, map_size = MAP_SIZE;
   u8 *extras_dir[4];
-  u8 mem_limit_given = 0, exit_1 = 0, debug = 0, extras_dir_cnt = 0, have_p = 0;
+  u8  mem_limit_given = 0, exit_1 = 0, debug = 0,
+     extras_dir_cnt = 0 /*, have_p = 0*/;
   char **use_argv;
 
   struct timeval  tv;
@@ -369,7 +370,7 @@ int main(int argc, char **argv_orig, char **envp) {
 
         }
 
-        have_p = 1;
+        // have_p = 1;
 
         break;
 
@@ -934,7 +935,7 @@ int main(int argc, char **argv_orig, char **envp) {
       OKF("Using seek power schedule (SEEK)");
       break;
     case EXPLORE:
-      OKF("Using exploration-based constant power schedule (EXPLORE, default)");
+      OKF("Using exploration-based constant power schedule (EXPLORE)");
       break;
     default:
       FATAL("Unknown power schedule");
@@ -942,6 +943,13 @@ int main(int argc, char **argv_orig, char **envp) {
 
   }
 
+  /* Dynamically allocate memory for AFLFast schedules */
+  if (afl->schedule >= FAST && afl->schedule <= RARE) {
+
+    afl->n_fuzz = ck_alloc(N_FUZZ_SIZE * sizeof(u32));
+
+  }
+
   if (get_afl_env("AFL_NO_FORKSRV")) { afl->no_forkserver = 1; }
   if (get_afl_env("AFL_NO_CPU_RED")) { afl->no_cpu_meter_red = 1; }
   if (get_afl_env("AFL_NO_ARITH")) { afl->no_arith = 1; }
@@ -1330,11 +1338,11 @@ int main(int argc, char **argv_orig, char **envp) {
       afl->cur_skipped_paths = 0;
       afl->queue_cur = afl->queue;
 
-      while (seek_to) {
+      if (seek_to) {
 
-        ++afl->current_entry;
-        --seek_to;
-        afl->queue_cur = afl->queue_cur->next;
+        afl->current_entry = seek_to;
+        afl->queue_cur = afl->queue_buf[seek_to];
+        seek_to = 0;
 
       }
 
@@ -1373,10 +1381,15 @@ int main(int argc, char **argv_orig, char **envp) {
               afl->expand_havoc = 2;
               break;
             case 2:
-              if (!have_p) afl->schedule = EXPLOIT;
+              // if (!have_p) afl->schedule = EXPLOIT;
+              afl->havoc_stack_pow2++;
               afl->expand_havoc = 3;
               break;
             case 3:
+              afl->havoc_stack_pow2++;
+              afl->expand_havoc = 4;
+              break;
+            case 4:
               // nothing else currently
               break;