Diffstat (limited to 'src')
-rw-r--r--  src/afl-forkserver.c     |   3
-rw-r--r--  src/afl-fuzz-queue.c     | 284
-rw-r--r--  src/afl-fuzz-redqueen.c  |  53
-rw-r--r--  src/afl-fuzz-run.c       |   2
-rw-r--r--  src/afl-fuzz-skipdet.c   |   9
-rw-r--r--  src/afl-fuzz-state.c     |  10
-rw-r--r--  src/afl-fuzz-stats.c     |  31
-rw-r--r--  src/afl-fuzz.c           |  69
-rw-r--r--  src/afl-performance.c    |  18
-rw-r--r--  src/afl-sharedmem.c      |   6
10 files changed, 290 insertions, 195 deletions
diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c
index a082982c..71d8570d 100644
--- a/src/afl-forkserver.c
+++ b/src/afl-forkserver.c
@@ -1655,7 +1655,8 @@ void afl_fsrv_kill(afl_forkserver_t *fsrv) {
   if (fsrv->fsrv_pid > 0) {
 
     kill(fsrv->fsrv_pid, fsrv->fsrv_kill_signal);
-    waitpid(fsrv->fsrv_pid, NULL, 0);
+    usleep(25);
+    waitpid(fsrv->fsrv_pid, NULL, WNOHANG);
 
   }
 
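Note: the forkserver shutdown above trades a blocking reap for a best-effort one: signal the child, give it a short grace period, then reap with WNOHANG so a forkserver that ignores its kill signal can no longer hang afl-fuzz on exit. A minimal standalone sketch of the pattern (kill_nonblocking is a name invented here, not an AFL++ function):

    #include <signal.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    /* Best-effort kill-and-reap: never blocks the caller. A child that
       survives the signal is reaped later (or by init) instead of
       stalling shutdown. */
    static void kill_nonblocking(pid_t pid, int sig) {

      if (pid <= 0) return;
      kill(pid, sig);
      usleep(25);                  /* brief grace period, as in the hunk */
      waitpid(pid, NULL, WNOHANG); /* reap if already exited; don't wait */

    }
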
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index 784b377a..f4cb930d 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -60,32 +60,6 @@ inline u32 select_next_queue_entry(afl_state_t *afl) {
 
 }
 
-double compute_weight(afl_state_t *afl, struct queue_entry *q,
-                      double avg_exec_us, double avg_bitmap_size,
-                      double avg_top_size) {
-
-  double weight = 1.0;
-
-  if (likely(afl->schedule >= FAST && afl->schedule <= RARE)) {
-
-    u32 hits = afl->n_fuzz[q->n_fuzz_entry];
-    if (likely(hits)) { weight /= (log10(hits) + 1); }
-
-  }
-
-  if (likely(afl->schedule < RARE)) { weight *= (avg_exec_us / q->exec_us); }
-  weight *= (log(q->bitmap_size) / avg_bitmap_size);
-  weight *= (1 + (q->tc_ref / avg_top_size));
-
-  if (unlikely(weight < 0.1)) { weight = 0.1; }
-  if (unlikely(q->favored)) { weight *= 5; }
-  if (unlikely(!q->was_fuzzed)) { weight *= 2; }
-  if (unlikely(q->fs_redundant)) { weight *= 0.8; }
-
-  return weight;
-
-}
-
 /* create the alias table that allows weighted random selection - expensive */
 
 void create_alias_table(afl_state_t *afl) {
@@ -117,7 +91,7 @@ void create_alias_table(afl_state_t *afl) {
 
     double avg_exec_us = 0.0;
     double avg_bitmap_size = 0.0;
-    double avg_top_size = 0.0;
+    double avg_len = 0.0;
     u32    active = 0;
 
     for (i = 0; i < n; i++) {
@@ -129,7 +103,7 @@ void create_alias_table(afl_state_t *afl) {
 
         avg_exec_us += q->exec_us;
         avg_bitmap_size += log(q->bitmap_size);
-        avg_top_size += q->tc_ref;
+        avg_len += q->len;
         ++active;
 
       }
@@ -138,7 +112,7 @@ void create_alias_table(afl_state_t *afl) {
 
     avg_exec_us /= active;
     avg_bitmap_size /= active;
-    avg_top_size /= active;
+    avg_len /= active;
 
     for (i = 0; i < n; i++) {
 
@@ -146,8 +120,59 @@ void create_alias_table(afl_state_t *afl) {
 
       if (likely(!q->disabled)) {
 
-        q->weight =
-            compute_weight(afl, q, avg_exec_us, avg_bitmap_size, avg_top_size);
+        double weight = 1.0;
+        {  // inlining this results in a compile error with LTO, weird
+
+          if (likely(afl->schedule >= FAST && afl->schedule <= RARE)) {
+
+            u32 hits = afl->n_fuzz[q->n_fuzz_entry];
+            if (likely(hits)) { weight /= (log10(hits) + 1); }
+
+          }
+
+          if (likely(afl->schedule < RARE)) {
+
+            double t = q->exec_us / avg_exec_us;
+            if (likely(t < 0.1)) {
+
+              // nothing
+
+            } else if (likely(t <= 0.25))
+
+              weight *= 0.9;
+            else if (likely(t <= 0.5)) {
+
+              // nothing
+
+            } else if (likely(t < 1.0))
+
+              weight *= 1.15;
+            else if (unlikely(t > 2.5 && t < 5.0))
+              weight *= 1.1;
+            // else nothing
+
+          }
+
+          double l = q->len / avg_len;
+          if (likely(l < 0.1))
+            weight *= 0.75;
+          else if (likely(l < 0.25))
+            weight *= 1.1;
+          else if (unlikely(l >= 10))
+            weight *= 1.1;
+
+          double bms = q->bitmap_size / avg_bitmap_size;
+          if (likely(bms < 0.5))
+            weight *= (1.0 + ((bms - 0.5) / 2));
+          else if (unlikely(bms > 1.33))
+            weight *= 1.1;
+
+          if (unlikely(!q->was_fuzzed)) { weight *= 2.5; }
+          if (unlikely(q->fs_redundant)) { weight *= 0.75; }
+
+        }
+
+        q->weight = weight;
         q->perf_score = calculate_score(afl, q);
         sum += q->weight;
 
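Note: read outside the diff, the inlined weighting amounts to the standalone function below. This is a sketch for reading only: entry_info is a stand-in for the struct queue_entry fields used above, and the schedule-dependent n_fuzz division (which needs the full afl state) is omitted. Note also that avg_bitmap_size arrives as an average over log(bitmap_size) values, as computed in the earlier hunk.

    struct entry_info {                /* stand-in for struct queue_entry */
      double   exec_us, len, bitmap_size;
      unsigned was_fuzzed, fs_redundant;
    };

    static double entry_weight(const struct entry_info *q, double avg_exec_us,
                               double avg_len, double avg_bitmap_size) {

      double weight = 1.0;

      /* piecewise speed factor: t < 1.0 means faster than average */
      double t = q->exec_us / avg_exec_us;
      if (t >= 0.1 && t <= 0.25)   weight *= 0.9;
      else if (t > 0.5 && t < 1.0) weight *= 1.15;
      else if (t > 2.5 && t < 5.0) weight *= 1.1;

      /* length factor: penalize very small inputs, mildly boost the rest */
      double l = q->len / avg_len;
      if (l < 0.1)                 weight *= 0.75;
      else if (l < 0.25)           weight *= 1.1;
      else if (l >= 10)            weight *= 1.1;

      /* coverage factor: scale down below-average bitmap sizes */
      double bms = q->bitmap_size / avg_bitmap_size;
      if (bms < 0.5)               weight *= 1.0 + (bms - 0.5) / 2;
      else if (bms > 1.33)         weight *= 1.1;

      if (!q->was_fuzzed)          weight *= 2.5;
      if (q->fs_redundant)         weight *= 0.75;
      return weight;

    }
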
@@ -596,6 +621,8 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
   q->trace_mini = NULL;
   q->testcase_buf = NULL;
   q->mother = afl->queue_cur;
+  q->weight = 1.0;
+  q->perf_score = 100;
 
 #ifdef INTROSPECTION
   q->bitsmap_size = afl->bitsmap_size;
@@ -1201,9 +1228,11 @@ inline void queue_testcase_retake(afl_state_t *afl, struct queue_entry *q,
 
     u32 len = q->len;
 
-    if (len != old_len) {
+    // only realloc if necessary or useful
+    // (a custom trim can make the testcase larger)
+    if (unlikely(len > old_len || len + 1024 < old_len)) {
 
-      afl->q_testcase_cache_size = afl->q_testcase_cache_size + len - old_len;
+      afl->q_testcase_cache_size += len - old_len;
       q->testcase_buf = (u8 *)realloc(q->testcase_buf, len);
 
       if (unlikely(!q->testcase_buf)) {
@@ -1232,41 +1261,48 @@ inline void queue_testcase_retake_mem(afl_state_t *afl, struct queue_entry *q,
 
   if (likely(q->testcase_buf)) {
 
-    u32 is_same = in == q->testcase_buf;
+    if (likely(in != q->testcase_buf)) {
 
-    if (likely(len != old_len)) {
+      // only realloc if necessary or if we save memory
+      if (unlikely(len > old_len || len + 1024 < old_len)) {
 
-      u8 *ptr = (u8 *)realloc(q->testcase_buf, len);
+        u8 *ptr = (u8 *)realloc(q->testcase_buf, len);
 
-      if (likely(ptr)) {
+        if (likely(ptr)) {
 
-        q->testcase_buf = ptr;
-        afl->q_testcase_cache_size = afl->q_testcase_cache_size + len - old_len;
+          q->testcase_buf = ptr;
+          afl->q_testcase_cache_size += len - old_len;
+
+        }
 
       }
 
-    }
+      memcpy(q->testcase_buf, in, len);
 
-    if (unlikely(!is_same)) { memcpy(q->testcase_buf, in, len); }
+    }
 
   }
 
 }
 
 /* Returns the testcase buf from the file behind this queue entry.
-  Increases the refcount. */
+   Increases the refcount. */
 
 inline u8 *queue_testcase_get(afl_state_t *afl, struct queue_entry *q) {
 
-  u32 len = q->len;
+  if (likely(q->testcase_buf)) { return q->testcase_buf; }
 
-  /* first handle if no testcase cache is configured */
+  u32    len = q->len;
+  double weight = q->weight;
 
-  if (unlikely(!afl->q_testcase_max_cache_size)) {
+  // first handle if no testcase cache is configured, or if the
+  // weighting of the testcase is below average.
+
+  if (unlikely(weight < 1.0 || !afl->q_testcase_max_cache_size)) {
 
     u8 *buf;
 
-    if (unlikely(q == afl->queue_cur)) {
+    if (likely(q == afl->queue_cur)) {
 
       buf = (u8 *)afl_realloc((void **)&afl->testcase_buf, len);
 
@@ -1292,118 +1328,113 @@ inline u8 *queue_testcase_get(afl_state_t *afl, struct queue_entry *q) {
 
   }
 
-  /* now handle the testcase cache */
+  /* now handle the testcase cache - we know this is an interesting entry */
 
-  if (unlikely(!q->testcase_buf)) {
+  /* Buf not cached, let's load it */
+  u32        tid = afl->q_testcase_max_cache_count;
+  static u32 do_once = 0;  // static on purpose: we would want this even when threaded. WIP
 
-    /* Buf not cached, let's load it */
-    u32        tid = afl->q_testcase_max_cache_count;
-    static u32 do_once = 0;  // because even threaded we would want this. WIP
+  while (unlikely(
+      (afl->q_testcase_cache_size + len >= afl->q_testcase_max_cache_size &&
+       afl->q_testcase_cache_count > 1) ||
+      afl->q_testcase_cache_count >= afl->q_testcase_max_cache_entries - 1)) {
 
-    while (unlikely(
-        (afl->q_testcase_cache_size + len >= afl->q_testcase_max_cache_size &&
-         afl->q_testcase_cache_count > 1) ||
-        afl->q_testcase_cache_count >= afl->q_testcase_max_cache_entries - 1)) {
+    /* We want to learn a maximum number of entries for the cache.
+       Very simple: once the cache is filled by size, that is the max. */
 
-      /* We want a max number of entries to the cache that we learn.
-         Very simple: once the cache is filled by size - that is the max. */
+    if (unlikely(
+            afl->q_testcase_cache_size + len >=
+                afl->q_testcase_max_cache_size &&
+            (afl->q_testcase_cache_count < afl->q_testcase_max_cache_entries &&
+             afl->q_testcase_max_cache_count <
+                 afl->q_testcase_max_cache_entries) &&
+            !do_once)) {
 
-      if (unlikely(afl->q_testcase_cache_size + len >=
-                       afl->q_testcase_max_cache_size &&
-                   (afl->q_testcase_cache_count <
-                        afl->q_testcase_max_cache_entries &&
-                    afl->q_testcase_max_cache_count <
-                        afl->q_testcase_max_cache_entries) &&
-                   !do_once)) {
+      if (afl->q_testcase_max_cache_count > afl->q_testcase_cache_count) {
 
-        if (afl->q_testcase_max_cache_count > afl->q_testcase_cache_count) {
+        afl->q_testcase_max_cache_entries = afl->q_testcase_max_cache_count + 1;
 
-          afl->q_testcase_max_cache_entries =
-              afl->q_testcase_max_cache_count + 1;
-
-        } else {
-
-          afl->q_testcase_max_cache_entries = afl->q_testcase_cache_count + 1;
-
-        }
+      } else {
 
-        do_once = 1;
-        // release unneeded memory
-        afl->q_testcase_cache = (struct queue_entry **)ck_realloc(
-            afl->q_testcase_cache,
-            (afl->q_testcase_max_cache_entries + 1) * sizeof(size_t));
+        afl->q_testcase_max_cache_entries = afl->q_testcase_cache_count + 1;
 
       }
 
-      /* Cache full. We neet to evict one or more to map one.
-         Get a random one which is not in use */
+      do_once = 1;
+      // release unneeded memory
+      afl->q_testcase_cache = (struct queue_entry **)ck_realloc(
+          afl->q_testcase_cache,
+          (afl->q_testcase_max_cache_entries + 1) * sizeof(size_t));
 
-      do {
+    }
 
-        // if the cache (MB) is not enough for the queue then this gets
-        // undesirable because q_testcase_max_cache_count grows sometimes
-        // although the number of items in the cache will not change hence
-        // more and more loops
-        tid = rand_below(afl, afl->q_testcase_max_cache_count);
+    /* Cache full. We need to evict one or more entries to map a new one.
+       Pick a random one that is not in use */
 
-      } while (afl->q_testcase_cache[tid] == NULL ||
+    do {
 
-               afl->q_testcase_cache[tid] == afl->queue_cur);
+      // if the cache (MB) is not large enough for the queue, this loop
+      // gets expensive: q_testcase_max_cache_count sometimes grows even
+      // though the number of items in the cache does not change, hence
+      // more and more iterations
+      tid = rand_below(afl, afl->q_testcase_max_cache_count);
 
-      struct queue_entry *old_cached = afl->q_testcase_cache[tid];
-      free(old_cached->testcase_buf);
-      old_cached->testcase_buf = NULL;
-      afl->q_testcase_cache_size -= old_cached->len;
-      afl->q_testcase_cache[tid] = NULL;
-      --afl->q_testcase_cache_count;
-      ++afl->q_testcase_evictions;
-      if (tid < afl->q_testcase_smallest_free)
-        afl->q_testcase_smallest_free = tid;
+    } while (afl->q_testcase_cache[tid] == NULL ||
 
-    }
+             afl->q_testcase_cache[tid] == afl->queue_cur);
 
-    if (unlikely(tid >= afl->q_testcase_max_cache_entries)) {
+    struct queue_entry *old_cached = afl->q_testcase_cache[tid];
+    free(old_cached->testcase_buf);
+    old_cached->testcase_buf = NULL;
+    afl->q_testcase_cache_size -= old_cached->len;
+    afl->q_testcase_cache[tid] = NULL;
+    --afl->q_testcase_cache_count;
+    ++afl->q_testcase_evictions;
+    if (tid < afl->q_testcase_smallest_free)
+      afl->q_testcase_smallest_free = tid;
 
-      // uh we were full, so now we have to search from start
-      tid = afl->q_testcase_smallest_free;
+  }
 
-    }
+  if (unlikely(tid >= afl->q_testcase_max_cache_entries)) {
 
-    // we need this while loop in case there were ever previous evictions but
-    // not in this call.
-    while (unlikely(afl->q_testcase_cache[tid] != NULL))
-      ++tid;
+    // uh we were full, so now we have to search from start
+    tid = afl->q_testcase_smallest_free;
 
-    /* Map the test case into memory. */
+  }
 
-    int fd = open((char *)q->fname, O_RDONLY);
+  // we need this while loop in case there were ever previous evictions but
+  // not in this call.
+  while (unlikely(afl->q_testcase_cache[tid] != NULL))
+    ++tid;
 
-    if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", (char *)q->fname); }
+  /* Map the test case into memory. */
 
-    q->testcase_buf = (u8 *)malloc(len);
+  int fd = open((char *)q->fname, O_RDONLY);
 
-    if (unlikely(!q->testcase_buf)) {
+  if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", (char *)q->fname); }
 
-      PFATAL("Unable to malloc '%s' with len %u", (char *)q->fname, len);
+  q->testcase_buf = (u8 *)malloc(len);
 
-    }
+  if (unlikely(!q->testcase_buf)) {
 
-    ck_read(fd, q->testcase_buf, len, q->fname);
-    close(fd);
+    PFATAL("Unable to malloc '%s' with len %u", (char *)q->fname, len);
+
+  }
 
-    /* Register testcase as cached */
-    afl->q_testcase_cache[tid] = q;
-    afl->q_testcase_cache_size += len;
-    ++afl->q_testcase_cache_count;
-    if (likely(tid >= afl->q_testcase_max_cache_count)) {
+  ck_read(fd, q->testcase_buf, len, q->fname);
+  close(fd);
 
-      afl->q_testcase_max_cache_count = tid + 1;
+  /* Register testcase as cached */
+  afl->q_testcase_cache[tid] = q;
+  afl->q_testcase_cache_size += len;
+  ++afl->q_testcase_cache_count;
+  if (likely(tid >= afl->q_testcase_max_cache_count)) {
 
-    } else if (unlikely(tid == afl->q_testcase_smallest_free)) {
+    afl->q_testcase_max_cache_count = tid + 1;
 
-      afl->q_testcase_smallest_free = tid + 1;
+  } else if (unlikely(tid == afl->q_testcase_smallest_free)) {
 
-    }
+    afl->q_testcase_smallest_free = tid + 1;
 
   }
 
@@ -1418,12 +1449,13 @@ inline void queue_testcase_store_mem(afl_state_t *afl, struct queue_entry *q,
 
   u32 len = q->len;
 
-  if (unlikely(afl->q_testcase_cache_size + len >=
+  if (unlikely(q->weight < 1.0 ||
+               afl->q_testcase_cache_size + len >=
                    afl->q_testcase_max_cache_size ||
                afl->q_testcase_cache_count >=
                    afl->q_testcase_max_cache_entries - 1)) {
 
-    // no space? will be loaded regularly later.
+    // no space or uninteresting? will be loaded regularly later.
     return;
 
   }
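Note: taken together, the queue.c changes gate the in-memory testcase cache on entry weight: entries with a below-average weight (< 1.0) are neither stored by queue_testcase_store_mem() nor cached by queue_testcase_get(), reserving cache memory for above-average entries. The admission rule reduces to one predicate; a sketch with simplified names:

    #include <stddef.h>

    /* Cache admission as implied by the two weight checks above.
       Returns 1 if the entry may occupy a cache slot. */
    static int cache_admit(double weight, size_t cache_size, size_t len,
                           size_t max_size, size_t count, size_t max_entries) {

      if (weight < 1.0) return 0;                 /* below-average entry  */
      if (cache_size + len >= max_size) return 0; /* out of space (bytes) */
      if (count >= max_entries - 1) return 0;     /* out of slots         */
      return 1;

    }
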
diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c
index 9316da71..954e5671 100644
--- a/src/afl-fuzz-redqueen.c
+++ b/src/afl-fuzz-redqueen.c
@@ -322,7 +322,7 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len,
 
   memcpy(backup, buf, len);
   memcpy(changed, buf, len);
-  if (afl->cmplog_random_colorization) {
+  if (likely(afl->cmplog_random_colorization)) {
 
     random_replace(afl, changed, len);
 
@@ -402,6 +402,7 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len,
 
   u32 i = 1;
   u32 positions = 0;
+
   while (i) {
 
   restart:
@@ -2937,7 +2938,8 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf,
 // afl->queue_cur->exec_cksum
 u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
 
-  u8 r = 1;
+  u64 cmplog_start_us = get_cur_time_us();
+  u8  r = 1;
   if (unlikely(!afl->pass_stats)) {
 
     afl->pass_stats = ck_alloc(sizeof(struct afl_pass_stat) * CMP_MAP_W);
@@ -2965,7 +2967,12 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
 
   if (!afl->queue_cur->taint || !afl->queue_cur->cmplog_colorinput) {
 
-    if (unlikely(colorization(afl, buf, len, &taint))) { return 1; }
+    if (unlikely(colorization(afl, buf, len, &taint))) {
+
+      update_cmplog_time(afl, &cmplog_start_us);
+      return 1;
+
+    }
 
     // no taint? still try, create a dummy to prevent again colorization
     if (!taint) {
@@ -2974,6 +2981,7 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
       fprintf(stderr, "TAINT FAILED\n");
 #endif
       afl->queue_cur->colorized = CMPLOG_LVL_MAX;
+      update_cmplog_time(afl, &cmplog_start_us);
       return 0;
 
     }
@@ -2994,17 +3002,20 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
 
   }
 
+  update_cmplog_time(afl, &cmplog_start_us);
+
   struct tainted *t = taint;
 
+#ifdef _DEBUG
   while (t) {
 
-#ifdef _DEBUG
     fprintf(stderr, "T: idx=%u len=%u\n", t->pos, t->len);
-#endif
     t = t->next;
 
   }
 
+#endif
+
 #if defined(_DEBUG) || defined(CMPLOG_INTROSPECTION)
   u64 start_time = get_cur_time();
   u32 cmp_locations = 0;
@@ -3025,6 +3036,7 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
 
     }
 
+    update_cmplog_time(afl, &cmplog_start_us);
     return 1;
 
   }
@@ -3048,6 +3060,7 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
 
     }
 
+    update_cmplog_time(afl, &cmplog_start_us);
     return 1;
 
   }
@@ -3066,6 +3079,7 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
   u64 orig_hit_cnt, new_hit_cnt;
   u64 orig_execs = afl->fsrv.total_execs;
   orig_hit_cnt = afl->queued_items + afl->saved_crashes;
+  update_cmplog_time(afl, &cmplog_start_us);
 
   afl->stage_name = "input-to-state";
   afl->stage_short = "its";
@@ -3142,33 +3156,35 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
 
     }
 
+    update_cmplog_time(afl, &cmplog_start_us);
+
   }
 
   r = 0;
 
 exit_its:
 
-  if (afl->cmplog_lvl == CMPLOG_LVL_MAX) {
+  // if (afl->cmplog_lvl == CMPLOG_LVL_MAX) {
 
-    afl->queue_cur->colorized = CMPLOG_LVL_MAX;
+  afl->queue_cur->colorized = CMPLOG_LVL_MAX;
 
-    if (afl->queue_cur->cmplog_colorinput) {
+  if (afl->queue_cur->cmplog_colorinput) {
 
-      ck_free(afl->queue_cur->cmplog_colorinput);
+    ck_free(afl->queue_cur->cmplog_colorinput);
 
-    }
+  }
 
-    while (taint) {
+  while (taint) {
 
-      t = taint->next;
-      ck_free(taint);
-      taint = t;
+    t = taint->next;
+    ck_free(taint);
+    taint = t;
 
-    }
+  }
 
-    afl->queue_cur->taint = NULL;
+  afl->queue_cur->taint = NULL;
 
-  } else {
+  /*} else {
 
     afl->queue_cur->colorized = LVL2;
 
@@ -3182,7 +3198,7 @@ exit_its:
 
     }
 
-  }
+  }*/
 
 #ifdef CMPLOG_COMBINE
   if (afl->queued_items + afl->saved_crashes > orig_hit_cnt + 1) {
@@ -3270,6 +3286,7 @@ exit_its:
 
 #endif
 
+  update_cmplog_time(afl, &cmplog_start_us);
   return r;
 
 }
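Note: the redqueen changes thread one timestamp through input_to_state_stage() and charge the elapsed wall time to the new cmplog counter at every exit path. The helper added in afl-fuzz-stats.c below both accumulates and re-arms the mark, so each stretch of work is counted exactly once. A self-contained sketch of that accumulate-and-rearm pattern (now_us() is a stand-in for get_cur_time_us()):

    #include <sys/time.h>

    static unsigned long long now_us(void) {

      struct timeval tv;
      gettimeofday(&tv, NULL);
      return (unsigned long long)tv.tv_sec * 1000000ULL + tv.tv_usec;

    }

    static unsigned long long total_us;  /* stands in for afl->cmplog_time_us */

    static void charge_time(unsigned long long *mark) {

      unsigned long long cur = now_us();
      total_us += cur - *mark;  /* charge the interval since the last mark */
      *mark = cur;              /* re-arm so the next call charges only
                                   what follows */

    }
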
diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c
index 4e2cceff..6a0da6ab 100644
--- a/src/afl-fuzz-run.c
+++ b/src/afl-fuzz-run.c
@@ -666,6 +666,8 @@ abort_calibration:
 
 void sync_fuzzers(afl_state_t *afl) {
 
+  if (unlikely(afl->afl_env.afl_no_sync)) { return; }
+
   DIR           *sd;
   struct dirent *sd_ent;
   u32            sync_cnt = 0, synced = 0, entries = 0;
diff --git a/src/afl-fuzz-skipdet.c b/src/afl-fuzz-skipdet.c
index e52d59a3..8a927292 100644
--- a/src/afl-fuzz-skipdet.c
+++ b/src/afl-fuzz-skipdet.c
@@ -33,15 +33,15 @@ u8 is_det_timeout(u64 cur_ms, u8 is_flip) {
 
 u8 should_det_fuzz(afl_state_t *afl, struct queue_entry *q) {
 
-  if (!afl->skipdet_g->virgin_det_bits) {
+  if (unlikely(!afl->skipdet_g->virgin_det_bits)) {
 
     afl->skipdet_g->virgin_det_bits =
         (u8 *)ck_alloc(sizeof(u8) * afl->fsrv.map_size);
 
   }
 
-  if (!q->favored || q->passed_det) return 0;
-  if (!q->trace_mini) return 0;
+  if (likely(!q->favored || q->passed_det)) return 0;
+  if (unlikely(!q->trace_mini)) return 0;
 
   if (!afl->skipdet_g->last_cov_undet)
     afl->skipdet_g->last_cov_undet = get_cur_time();
@@ -122,7 +122,8 @@ u8 skip_deterministic_stage(afl_state_t *afl, u8 *orig_buf, u8 *out_buf,
   afl->stage_cur = 0;
   orig_hit_cnt = afl->queued_items + afl->saved_crashes;
 
-  u8 *inf_eff_map = (u8 *)ck_alloc(sizeof(u8) * len);
+  static u8 *inf_eff_map;
+  inf_eff_map = (u8 *)ck_realloc(inf_eff_map, sizeof(u8) * len);
   memset(inf_eff_map, 1, sizeof(u8) * len);
 
   if (common_fuzz_stuff(afl, orig_buf, len)) { return 0; }
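Note: the skipdet hunk replaces a per-call ck_alloc() with a grow-only static buffer, so the effector-map allocation is reused across calls (the old buffer was also never freed on the early-return path). A sketch of the idiom, with plain libc calls standing in for ck_realloc():

    #include <stdlib.h>
    #include <string.h>

    /* Grow-only static scratch buffer: one allocation reused across calls.
       Like the original, this is not thread-safe. */
    static unsigned char *scratch;

    static unsigned char *get_scratch(size_t len) {

      unsigned char *p = realloc(scratch, len);
      if (!p) abort();          /* ck_realloc() likewise aborts on OOM */
      scratch = p;
      memset(scratch, 1, len);  /* skipdet starts as "every byte effective" */
      return scratch;

    }
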
diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c
index 333d57b2..fbe6d32a 100644
--- a/src/afl-fuzz-state.c
+++ b/src/afl-fuzz-state.c
@@ -279,6 +279,13 @@ void read_afl_environment(afl_state_t *afl, char **envp) {
             afl->afl_env.afl_final_sync =
                 get_afl_env(afl_environment_variables[i]) ? 1 : 0;
 
+          } else if (!strncmp(env, "AFL_NO_SYNC",
+
+                              afl_environment_variable_len)) {
+
+            afl->afl_env.afl_no_sync =
+                get_afl_env(afl_environment_variables[i]) ? 1 : 0;
+
           } else if (!strncmp(env, "AFL_CUSTOM_MUTATOR_ONLY",
 
                               afl_environment_variable_len)) {
@@ -762,8 +769,9 @@ void afl_states_stop(void) {
     if (el->fsrv.fsrv_pid > 0) {
 
       kill(el->fsrv.fsrv_pid, el->fsrv.fsrv_kill_signal);
+      usleep(100);
       /* Make sure the forkserver does not end up as zombie. */
-      waitpid(el->fsrv.fsrv_pid, NULL, 0);
+      waitpid(el->fsrv.fsrv_pid, NULL, WNOHANG);
 
     }
 
diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
index eafeebba..3a71e158 100644
--- a/src/afl-fuzz-stats.c
+++ b/src/afl-fuzz-stats.c
@@ -207,6 +207,12 @@ void load_stats_file(afl_state_t *afl) {
 
       }
 
+      if (starts_with("cmplog_time", keystring)) {
+
+        afl->cmplog_time_us = strtoull(lptr, &nptr, 10) * 1000000;
+
+      }
+
       if (starts_with("trim_time", keystring)) {
 
         afl->trim_time_us = strtoull(lptr, &nptr, 10) * 1000000;
@@ -322,8 +328,9 @@ void write_stats_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
   if (getrusage(RUSAGE_CHILDREN, &rus)) { rus.ru_maxrss = 0; }
 #endif
   u64 runtime_ms = afl->prev_run_time + cur_time - afl->start_time;
-  u64 overhead_ms =
-      (afl->calibration_time_us + afl->sync_time_us + afl->trim_time_us) / 1000;
+  u64 overhead_ms = (afl->calibration_time_us + afl->sync_time_us +
+                     afl->trim_time_us + afl->cmplog_time_us) /
+                    1000;
   if (!runtime_ms) { runtime_ms = 1; }
 
   fprintf(
@@ -337,6 +344,7 @@ void write_stats_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
       "time_wo_finds     : %llu\n"
       "fuzz_time         : %llu\n"
       "calibration_time  : %llu\n"
+      "cmplog_time       : %llu\n"
       "sync_time         : %llu\n"
       "trim_time         : %llu\n"
       "execs_done        : %llu\n"
@@ -385,8 +393,9 @@ void write_stats_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
                  ? 0
                  : (cur_time - afl->last_find_time) / 1000),
       (runtime_ms - MIN(runtime_ms, overhead_ms)) / 1000,
-      afl->calibration_time_us / 1000000, afl->sync_time_us / 1000000,
-      afl->trim_time_us / 1000000, afl->fsrv.total_execs,
+      afl->calibration_time_us / 1000000, afl->cmplog_time_us / 1000000,
+      afl->sync_time_us / 1000000, afl->trim_time_us / 1000000,
+      afl->fsrv.total_execs,
       afl->fsrv.total_execs / ((double)(runtime_ms) / 1000),
       afl->last_avg_execs_saved, afl->queued_items, afl->queued_favored,
       afl->queued_discovered, afl->queued_imported, afl->queued_variable,
@@ -2487,7 +2496,7 @@ void show_init_stats(afl_state_t *afl) {
 
 }
 
-void update_calibration_time(afl_state_t *afl, u64 *time) {
+inline void update_calibration_time(afl_state_t *afl, u64 *time) {
 
   u64 cur = get_cur_time_us();
   afl->calibration_time_us += cur - *time;
@@ -2495,7 +2504,7 @@ void update_calibration_time(afl_state_t *afl, u64 *time) {
 
 }
 
-void update_trim_time(afl_state_t *afl, u64 *time) {
+inline void update_trim_time(afl_state_t *afl, u64 *time) {
 
   u64 cur = get_cur_time_us();
   afl->trim_time_us += cur - *time;
@@ -2503,7 +2512,7 @@ void update_trim_time(afl_state_t *afl, u64 *time) {
 
 }
 
-void update_sync_time(afl_state_t *afl, u64 *time) {
+inline void update_sync_time(afl_state_t *afl, u64 *time) {
 
   u64 cur = get_cur_time_us();
   afl->sync_time_us += cur - *time;
@@ -2511,3 +2520,11 @@ void update_sync_time(afl_state_t *afl, u64 *time) {
 
 }
 
+inline void update_cmplog_time(afl_state_t *afl, u64 *time) {
+
+  u64 cur = get_cur_time_us();
+  afl->cmplog_time_us += cur - *time;
+  *time = cur;
+
+}
+
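Note: with cmplog time now in the books, the fuzz_time value in the stats file is derived as total runtime minus all accounted overheads. A sketch of the arithmetic used by write_stats_file() above (the MIN() guards against overheads rounding past the runtime):

    static unsigned long long fuzz_time_ms(unsigned long long runtime_ms,
                                           unsigned long long calib_us,
                                           unsigned long long cmplog_us,
                                           unsigned long long sync_us,
                                           unsigned long long trim_us) {

      unsigned long long overhead_ms =
          (calib_us + cmplog_us + sync_us + trim_us) / 1000;
      if (overhead_ms > runtime_ms) overhead_ms = runtime_ms;  /* MIN() */
      return runtime_ms - overhead_ms;  /* the stats file prints this
                                           divided down to seconds */

    }
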
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index 70ab983c..a7ddef6e 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -335,6 +335,7 @@ static void usage(u8 *argv0, int more_help) {
       "AFL_STATSD_PORT: change default statsd port (default: 8125)\n"
       "AFL_STATSD_TAGS_FLAVOR: set statsd tags format (default: disable tags)\n"
       "                        suported formats: dogstatsd, librato, signalfx, influxdb\n"
+      "AFL_NO_SYNC: disables all syncing\n"
       "AFL_SYNC_TIME: sync time between fuzzing instances (in minutes)\n"
       "AFL_FINAL_SYNC: sync a final time when exiting (will delay the exit!)\n"
       "AFL_NO_CRASH_README: do not create a README in the crashes directory\n"
@@ -914,8 +915,15 @@ int main(int argc, char **argv_orig, char **envp) {
 
         u8 suffix = 'M';
 
-        if (mem_limit_given) { FATAL("Multiple -m options not supported"); }
-        mem_limit_given = 1;
+        if (mem_limit_given) {
+
+          WARNF("Overriding previous -m option.");
+
+        } else {
+
+          mem_limit_given = 1;
+
+        }
 
         if (!optarg) { FATAL("Wrong usage of -m"); }
 
@@ -1461,15 +1469,16 @@ int main(int argc, char **argv_orig, char **envp) {
 
   #endif
 
-  configure_afl_kill_signals(&afl->fsrv, afl->afl_env.afl_child_kill_signal,
-                             afl->afl_env.afl_fsrv_kill_signal,
-                             (afl->fsrv.qemu_mode || afl->unicorn_mode
+  configure_afl_kill_signals(
+      &afl->fsrv, afl->afl_env.afl_child_kill_signal,
+      afl->afl_env.afl_fsrv_kill_signal,
+      (afl->fsrv.qemu_mode || afl->unicorn_mode || afl->fsrv.use_fauxsrv
   #ifdef __linux__
-                              || afl->fsrv.nyx_mode
+       || afl->fsrv.nyx_mode
   #endif
-                              )
-                                 ? SIGKILL
-                                 : SIGTERM);
+       )
+          ? SIGKILL
+          : SIGTERM);
 
   setup_signal_handlers();
   check_asan_opts(afl);
@@ -2586,7 +2595,7 @@ int main(int argc, char **argv_orig, char **envp) {
                     (!afl->queue_cycle && afl->afl_env.afl_import_first)) &&
                    afl->sync_id)) {
 
-        if (!afl->queue_cycle && afl->afl_env.afl_import_first) {
+        if (unlikely(!afl->queue_cycle && afl->afl_env.afl_import_first)) {
 
           OKF("Syncing queues from other fuzzer instances first ...");
 
@@ -2597,6 +2606,12 @@ int main(int argc, char **argv_orig, char **envp) {
       }
 
       ++afl->queue_cycle;
+      if (afl->afl_env.afl_no_ui) {
+
+        ACTF("Entering queue cycle %llu\n", afl->queue_cycle);
+
+      }
+
       runs_in_current_cycle = (u32)-1;
       afl->cur_skipped_items = 0;
 
@@ -2605,7 +2620,7 @@ int main(int argc, char **argv_orig, char **envp) {
       // queue is fully cycled.
       time_t     cursec = time(NULL);
       struct tm *curdate = localtime(&cursec);
-      if (likely(!afl->afl_env.afl_pizza_mode)) {
+      if (unlikely(!afl->afl_env.afl_pizza_mode)) {
 
         if (unlikely(curdate->tm_mon == 3 && curdate->tm_mday == 1)) {
 
@@ -2650,13 +2665,6 @@ int main(int argc, char **argv_orig, char **envp) {
 
       }
 
-      if (unlikely(afl->not_on_tty)) {
-
-        ACTF("Entering queue cycle %llu.", afl->queue_cycle);
-        fflush(stdout);
-
-      }
-
       /* If we had a full queue cycle with no new finds, try
          recombination strategies next. */
 
@@ -2942,35 +2950,26 @@ int main(int argc, char **argv_orig, char **envp) {
 
     if (likely(!afl->stop_soon && afl->sync_id)) {
 
-      if (likely(afl->skip_deterministic)) {
+      if (unlikely(afl->is_main_node)) {
 
-        if (unlikely(afl->is_main_node)) {
+        if (unlikely(cur_time > (afl->sync_time >> 1) + afl->last_sync_time)) {
 
-          if (unlikely(cur_time >
-                       (afl->sync_time >> 1) + afl->last_sync_time)) {
+          if (!(sync_interval_cnt++ % (SYNC_INTERVAL / 3))) {
 
-            if (!(sync_interval_cnt++ % (SYNC_INTERVAL / 3))) {
-
-              sync_fuzzers(afl);
-
-            }
+            sync_fuzzers(afl);
 
           }
 
-        } else {
+        }
 
-          if (unlikely(cur_time > afl->sync_time + afl->last_sync_time)) {
+      } else {
 
-            if (!(sync_interval_cnt++ % SYNC_INTERVAL)) { sync_fuzzers(afl); }
+        if (unlikely(cur_time > afl->sync_time + afl->last_sync_time)) {
 
-          }
+          if (!(sync_interval_cnt++ % SYNC_INTERVAL)) { sync_fuzzers(afl); }
 
         }
 
-      } else {
-
-        sync_fuzzers(afl);
-
       }
 
     }
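Note: the restructured block drops the skip_deterministic condition, so the sync cadence now depends only on the node's role: a main node checks after half the sync interval and fires on every (SYNC_INTERVAL / 3)-th pass, while secondaries use the full interval on both counts. As a predicate (a sketch; as above, the counter increments only once the time gate passes):

    static int should_sync(int is_main, unsigned long long cur_ms,
                           unsigned long long last_sync_ms,
                           unsigned long long sync_time_ms,
                           unsigned *interval_cnt, unsigned sync_interval) {

      unsigned long long wait = is_main ? sync_time_ms / 2 : sync_time_ms;
      unsigned           mod  = is_main ? sync_interval / 3 : sync_interval;

      if (cur_ms <= last_sync_ms + wait) return 0;  /* time gate */
      return ((*interval_cnt)++ % mod) == 0;        /* every mod-th pass */

    }
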
diff --git a/src/afl-performance.c b/src/afl-performance.c
index 6c6e3c8b..e8ece6b5 100644
--- a/src/afl-performance.c
+++ b/src/afl-performance.c
@@ -95,6 +95,24 @@ inline u64 hash64(u8 *key, u32 len, u64 seed) {
 
 }
 
+/* Hash a file */
+
+u64 get_binary_hash(u8 *fn) {
+
+  int fd = open(fn, O_RDONLY);
+  if (fd < 0) { PFATAL("Unable to open '%s'", fn); }
+  struct stat st;
+  if (fstat(fd, &st) < 0) { PFATAL("Unable to fstat '%s'", fn); }
+  u32 f_len = st.st_size;
+  u8 *f_data = mmap(0, f_len, PROT_READ, MAP_PRIVATE, fd, 0);
+  if (f_data == MAP_FAILED) { PFATAL("Unable to mmap file '%s'", fn); }
+  close(fd);
+  u64 hash = hash64(f_data, f_len, 0);
+  if (munmap(f_data, f_len)) { PFATAL("munmap() failed"); }
+  return hash;
+
+}
+
 // Public domain SHA1 implementation copied from:
 // https://github.com/x42/liboauth/blob/7001b8256cd654952ec2515b055d2c5b243be600/src/sha1.c
 
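Note: the new get_binary_hash() maps the whole file and runs it through hash64(); an empty file would make the mmap() fail here, since zero-length mappings are invalid. A hypothetical use, e.g. detecting that a target binary changed between runs (the path and stored_hash are illustrative only):

    u64 stored_hash = /* e.g. read back from a state file */ 0;
    u64 cur_hash = get_binary_hash((u8 *)"/path/to/target");
    if (stored_hash && cur_hash != stored_hash) {

      WARNF("target binary changed since the last run");

    }
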
diff --git a/src/afl-sharedmem.c b/src/afl-sharedmem.c
index 8f685633..1dea83f9 100644
--- a/src/afl-sharedmem.c
+++ b/src/afl-sharedmem.c
@@ -239,15 +239,15 @@ u8 *afl_shm_init(sharedmem_t *shm, size_t map_size,
     if (shm->cmplog_g_shm_fd == -1) { PFATAL("shm_open() failed"); }
 
     /* configure the size of the shared memory segment */
-    if (ftruncate(shm->cmplog_g_shm_fd, map_size)) {
+    if (ftruncate(shm->cmplog_g_shm_fd, sizeof(struct cmp_map))) {
 
       PFATAL("setup_shm(): cmplog ftruncate() failed");
 
     }
 
     /* map the shared memory segment to the address space of the process */
-    shm->cmp_map = mmap(0, map_size, PROT_READ | PROT_WRITE, MAP_SHARED,
-                        shm->cmplog_g_shm_fd, 0);
+    shm->cmp_map = mmap(0, sizeof(struct cmp_map), PROT_READ | PROT_WRITE,
+                        MAP_SHARED, shm->cmplog_g_shm_fd, 0);
     if (shm->cmp_map == MAP_FAILED) {
 
       close(shm->cmplog_g_shm_fd);
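Note: the sharedmem fix restores the invariant that a shared-memory segment is sized by the object actually mapped into it (sizeof(struct cmp_map)) rather than by the unrelated coverage map_size; with a small coverage map, the old code under-sized the cmplog region. A generic sketch of the pattern:

    #include <sys/mman.h>
    #include <unistd.h>

    /* Size a POSIX shm segment by the object being mapped, never by an
       unrelated length, then map exactly that many bytes. */
    static void *map_shm_object(int shm_fd, size_t obj_size) {

      if (ftruncate(shm_fd, obj_size)) return NULL;  /* size the segment */
      void *p = mmap(0, obj_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                     shm_fd, 0);
      return (p == MAP_FAILED) ? NULL : p;

    }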