Diffstat (limited to 'src/afl-fuzz-one.c')
-rw-r--r--    src/afl-fuzz-one.c    2077
1 file changed, 1018 insertions(+), 1059 deletions(-)
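
This commit is part of the large de-globalization refactor: the file-scope globals that fuzz_one() used to read and write (queue_cur, stage_max, extras_cnt, the stage counters, ...) move into an afl_state_t object, and every helper gains an explicit afl parameter. A minimal sketch of the pattern follows; the fields shown are inferred from the call sites in this diff, not copied from the real include/afl-fuzz.h:

    /* Sketch only -- the real afl_state_t carries far more state. */
    typedef struct afl_state {

      struct queue_entry *queue, *queue_cur;  /* was: file-scope globals   */
      u32 queued_paths, stage_cur, stage_max; /* stage bookkeeping         */
      u64 unique_crashes;
      u8 *stage_name, *stage_short;           /* UI strings, now per-state */
      u8  dumb_mode, stop_soon;

    } afl_state_t;

    u8 fuzz_one_original(afl_state_t *afl);   /* was: fuzz_one_original(char **argv) */

One consequence visible throughout: the random helper UR() and the executor common_fuzz_stuff() now take afl as their first argument, since the RNG state and the fork server handle (afl->fsrv) live in the state object as well.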
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index c6dbb858..19cdddb9 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -27,21 +27,21 @@
 
 /* MOpt */
 
-int select_algorithm(void) {
+int select_algorithm(afl_state_t *afl) {
 
   int i_puppet, j_puppet;
 
-  double sele = ((double)(UR(10000)) * 0.0001);
+  double sele = ((double)(UR(afl, 10000)) * 0.0001);
   j_puppet = 0;
   for (i_puppet = 0; i_puppet < operator_num; ++i_puppet) {
 
     if (unlikely(i_puppet == 0)) {
 
-      if (sele < probability_now[swarm_now][i_puppet]) break;
+      if (sele < afl->probability_now[afl->swarm_now][i_puppet]) break;
 
     } else {
 
-      if (sele < probability_now[swarm_now][i_puppet]) {
+      if (sele < afl->probability_now[afl->swarm_now][i_puppet]) {
 
         j_puppet = 1;
         break;
@@ -52,7 +52,7 @@ int select_algorithm(void) {
 
   }
 
-  if (j_puppet == 1 && sele < probability_now[swarm_now][i_puppet - 1])
+  if (j_puppet == 1 && sele < afl->probability_now[afl->swarm_now][i_puppet - 1])
     FATAL("error select_algorithm");
   return i_puppet;
 
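For readers new to MOpt: probability_now[swarm][k] holds a cumulative distribution over the mutation operators, so the loop above is roulette-wheel selection. A condensed restatement (not the literal code):

    /* Draw sele uniformly in [0, 1) and return the first operator whose
       cumulative probability exceeds it. */
    double sele = (double)UR(afl, 10000) * 0.0001;
    for (int k = 0; k < operator_num; ++k)
      if (sele < afl->probability_now[afl->swarm_now][k]) return k;
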
@@ -61,14 +61,14 @@ int select_algorithm(void) {
 /* Helper to choose random block len for block operations in fuzz_one().
    Doesn't return zero, provided that max_len is > 0. */
 
-static u32 choose_block_len(u32 limit) {
+static u32 choose_block_len(afl_state_t *afl, u32 limit) {
 
   u32 min_value, max_value;
-  u32 rlim = MIN(queue_cycle, 3);
+  u32 rlim = MIN(afl->queue_cycle, 3);
 
-  if (!run_over10m) rlim = 1;
+  if (!afl->run_over10m) rlim = 1;
 
-  switch (UR(rlim)) {
+  switch (UR(afl, rlim)) {
 
     case 0:
       min_value = 1;
@@ -82,7 +82,7 @@ static u32 choose_block_len(u32 limit) {
 
     default:
 
-      if (UR(10)) {
+      if (UR(afl, 10)) {
 
         min_value = HAVOC_BLK_MEDIUM;
         max_value = HAVOC_BLK_LARGE;
@@ -98,7 +98,7 @@ static u32 choose_block_len(u32 limit) {
 
   if (min_value >= limit) min_value = 1;
 
-  return min_value + UR(MIN(max_value, limit) - min_value + 1);
+  return min_value + UR(afl, MIN(max_value, limit) - min_value + 1);
 
 }
 
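The havoc cases later in this diff call choose_block_len() in exactly this shape, e.g. block deletion:

    del_len  = choose_block_len(afl, temp_len - 1);  /* 1 .. temp_len-1       */
    del_from = UR(afl, temp_len - del_len + 1);      /* block stays in bounds */
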
@@ -334,7 +334,7 @@ static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {
    function is a tad too long... returns 0 if fuzzed successfully, 1 if
    skipped or bailed out. */
 
-u8 fuzz_one_original(char** argv) {
+u8 fuzz_one_original(afl_state_t *afl) {
 
   s32 len, fd, temp_len, i, j;
   u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
@@ -351,43 +351,44 @@ u8 fuzz_one_original(char** argv) {
   /* In IGNORE_FINDS mode, skip any entries that weren't in the
      initial data set. */
 
-  if (queue_cur->depth > 1) return 1;
+  if (afl->queue_cur->depth > 1) return 1;
 
 #else
 
-  if (mutator && mutator->afl_custom_queue_get) {
+  if (afl->mutator && afl->mutator->afl_custom_queue_get) {
 
     /* The custom mutator will decide to skip this test case or not. */
 
-    if (!mutator->afl_custom_queue_get(queue_cur->fname)) return 1;
+    if (!afl->mutator->afl_custom_queue_get(afl, afl->queue_cur->fname))
+      return 1;
 
   }
 
-  if (pending_favored) {
+  if (afl->pending_favored) {
 
     /* If we have any favored, non-fuzzed new arrivals in the queue,
        possibly skip to them at the expense of already-fuzzed or non-favored
        cases. */
 
-    if (((queue_cur->was_fuzzed > 0 || queue_cur->fuzz_level > 0) ||
-         !queue_cur->favored) &&
-        UR(100) < SKIP_TO_NEW_PROB)
+    if (((afl->queue_cur->was_fuzzed > 0 || afl->queue_cur->fuzz_level > 0) ||
+         !afl->queue_cur->favored) &&
+        UR(afl, 100) < SKIP_TO_NEW_PROB)
       return 1;
 
-  } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
+  } else if (!afl->dumb_mode && !afl->queue_cur->favored && afl->queued_paths > 10) {
 
     /* Otherwise, still possibly skip non-favored cases, albeit less often.
        The odds of skipping stuff are higher for already-fuzzed inputs and
        lower for never-fuzzed entries. */
 
-    if (queue_cycle > 1 &&
-        (queue_cur->fuzz_level == 0 || queue_cur->was_fuzzed)) {
+    if (afl->queue_cycle > 1 &&
+        (afl->queue_cur->fuzz_level == 0 || afl->queue_cur->was_fuzzed)) {
 
-      if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
+      if (UR(afl, 100) < SKIP_NFAV_NEW_PROB) return 1;
 
     } else {
 
-      if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
+      if (UR(afl, 100) < SKIP_NFAV_OLD_PROB) return 1;
 
     }
 
@@ -395,26 +396,26 @@ u8 fuzz_one_original(char** argv) {
 
 #endif                                                     /* ^IGNORE_FINDS */
 
-  if (not_on_tty) {
+  if (afl->not_on_tty) {
 
     ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
-         current_entry, queued_paths, unique_crashes);
+         afl->current_entry, afl->queued_paths, afl->unique_crashes);
     fflush(stdout);
 
   }
 
   /* Map the test case into memory. */
 
-  fd = open(queue_cur->fname, O_RDONLY);
+  fd = open(afl->queue_cur->fname, O_RDONLY);
 
-  if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
+  if (fd < 0) PFATAL("Unable to open '%s'", afl->queue_cur->fname);
 
-  len = queue_cur->len;
+  len = afl->queue_cur->len;
 
   orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 
   if (orig_in == MAP_FAILED)
-    PFATAL("Unable to mmap '%s' with len %d", queue_cur->fname, len);
+    PFATAL("Unable to mmap '%s' with len %d", afl->queue_cur->fname, len);
 
   close(fd);
 
@@ -424,29 +425,29 @@ u8 fuzz_one_original(char** argv) {
 
   out_buf = ck_alloc_nozero(len);
 
-  subseq_tmouts = 0;
+  afl->subseq_tmouts = 0;
 
-  cur_depth = queue_cur->depth;
+  afl->cur_depth = afl->queue_cur->depth;
 
   /*******************************************
    * CALIBRATION (only if failed earlier on) *
    *******************************************/
 
-  if (queue_cur->cal_failed) {
+  if (afl->queue_cur->cal_failed) {
 
     u8 res = FAULT_TMOUT;
 
-    if (queue_cur->cal_failed < CAL_CHANCES) {
+    if (afl->queue_cur->cal_failed < CAL_CHANCES) {
 
-      res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
+      res = calibrate_case(afl, afl->queue_cur, in_buf, afl->queue_cycle - 1, 0);
 
       if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
     }
 
-    if (stop_soon || res != crash_mode) {
+    if (afl->stop_soon || res != afl->crash_mode) {
 
-      ++cur_skipped_paths;
+      ++afl->cur_skipped_paths;
       goto abandon_entry;
 
     }
@@ -457,24 +458,24 @@ u8 fuzz_one_original(char** argv) {
    * TRIMMING *
    ************/
 
-  if (!dumb_mode && !queue_cur->trim_done && !disable_trim) {
+  if (!afl->dumb_mode && !afl->queue_cur->trim_done && !afl->disable_trim) {
 
-    u8 res = trim_case(argv, queue_cur, in_buf);
+    u8 res = trim_case(afl, afl->queue_cur, in_buf);
 
     if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
-    if (stop_soon) {
+    if (afl->stop_soon) {
 
-      ++cur_skipped_paths;
+      ++afl->cur_skipped_paths;
       goto abandon_entry;
 
     }
 
     /* Don't retry trimming, even if it failed. */
 
-    queue_cur->trim_done = 1;
+    afl->queue_cur->trim_done = 1;
 
-    len = queue_cur->len;
+    len = afl->queue_cur->len;
 
   }
 
@@ -484,15 +485,15 @@ u8 fuzz_one_original(char** argv) {
    * PERFORMANCE SCORE *
    *********************/
 
-  orig_perf = perf_score = calculate_score(queue_cur);
+  orig_perf = perf_score = calculate_score(afl, afl->queue_cur);
 
   if (perf_score == 0) goto abandon_entry;
 
-  if (use_radamsa > 1) goto radamsa_stage;
+  if (afl->use_radamsa > 1) goto radamsa_stage;
 
-  if (cmplog_mode) {
+  if (afl->shm.cmplog_mode) {
 
-    if (input_to_state_stage(argv, in_buf, out_buf, len, queue_cur->exec_cksum))
+    if (input_to_state_stage(afl, in_buf, out_buf, len, afl->queue_cur->exec_cksum))
       goto abandon_entry;
 
   }
@@ -502,12 +503,12 @@ u8 fuzz_one_original(char** argv) {
      if it has gone through deterministic testing in earlier, resumed runs
      (passed_det). */
 
-  if (skip_deterministic ||
-      ((!queue_cur->passed_det) &&
-       perf_score < (queue_cur->depth * 30 <= havoc_max_mult * 100
-                         ? queue_cur->depth * 30
-                         : havoc_max_mult * 100)) ||
-      queue_cur->passed_det) {
+  if (afl->skip_deterministic ||
+      ((!afl->queue_cur->passed_det) &&
+       perf_score < (afl->queue_cur->depth * 30 <= afl->havoc_max_mult * 100
+                         ? afl->queue_cur->depth * 30
+                         : afl->havoc_max_mult * 100)) ||
+      afl->queue_cur->passed_det) {
 
     goto custom_mutator_stage;
 
@@ -516,7 +517,7 @@ u8 fuzz_one_original(char** argv) {
   /* Skip deterministic fuzzing if exec path checksum puts this out of scope
      for this master instance. */
 
-  if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) {
+  if (afl->master_max && (afl->queue_cur->exec_cksum % afl->master_max) != afl->master_id - 1) {
 
     goto custom_mutator_stage;
 
@@ -539,25 +540,25 @@ u8 fuzz_one_original(char** argv) {
 
   /* Single walking bit. */
 
-  stage_short = "flip1";
-  stage_max = len << 3;
-  stage_name = "bitflip 1/1";
+  afl->stage_short = "flip1";
+  afl->stage_max = len << 3;
+  afl->stage_name = "bitflip 1/1";
 
-  stage_val_type = STAGE_VAL_NONE;
+  afl->stage_val_type = STAGE_VAL_NONE;
 
-  orig_hit_cnt = queued_paths + unique_crashes;
+  orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  prev_cksum = queue_cur->exec_cksum;
+  prev_cksum = afl->queue_cur->exec_cksum;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    stage_cur_byte = stage_cur >> 3;
+    afl->stage_cur_byte = afl->stage_cur >> 3;
 
-    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur);
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur);
 
    /* While flipping the least significant bit in every byte, pull off an extra
        trick to detect possible syntax tokens. In essence, the idea is that if
@@ -586,20 +587,20 @@ u8 fuzz_one_original(char** argv) {
 
       */
 
-    if (!dumb_mode && (stage_cur & 7) == 7) {
+    if (!afl->dumb_mode && (afl->stage_cur & 7) == 7) {
 
-      u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      u32 cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
 
-      if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
+      if (afl->stage_cur == afl->stage_max - 1 && cksum == prev_cksum) {
 
         /* If at end of file and we are still collecting a string, grab the
            final character and force output. */
 
-        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[afl->stage_cur >> 3];
         ++a_len;
 
         if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-          maybe_add_auto(a_collect, a_len);
+          maybe_add_auto(afl, a_collect, a_len);
 
       } else if (cksum != prev_cksum) {
 
@@ -607,7 +608,7 @@ u8 fuzz_one_original(char** argv) {
            worthwhile queued up, and collect that if the answer is yes. */
 
         if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-          maybe_add_auto(a_collect, a_len);
+          maybe_add_auto(afl, a_collect, a_len);
 
         a_len = 0;
         prev_cksum = cksum;
@@ -617,9 +618,9 @@ u8 fuzz_one_original(char** argv) {
       /* Continue collecting string, but only if the bit flip actually made
          any difference - we don't want no-op tokens. */
 
-      if (cksum != queue_cur->exec_cksum) {
+      if (cksum != afl->queue_cur->exec_cksum) {
 
-        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[afl->stage_cur >> 3];
         ++a_len;
 
       }
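
Pieced together across the three hunks above, the auto-extra collection amounts to the following (a restatement for orientation, omitting the end-of-buffer flush handled in the first hunk):

    u32 cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);

    if (cksum != prev_cksum) {
      /* Execution path changed: flush the candidate token, if long enough. */
      if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
        maybe_add_auto(afl, a_collect, a_len);
      a_len = 0;
      prev_cksum = cksum;
    }

    if (cksum != afl->queue_cur->exec_cksum) {
      /* Flip had an effect: extend the candidate with the current byte. */
      if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[afl->stage_cur >> 3];
      ++a_len;
    }
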
@@ -628,68 +629,68 @@ u8 fuzz_one_original(char** argv) {
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP1] += stage_max;
+  afl->stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP1] += afl->stage_max;
 
   /* Two walking bits. */
 
-  stage_name = "bitflip 2/1";
-  stage_short = "flip2";
-  stage_max = (len << 3) - 1;
+  afl->stage_name = "bitflip 2/1";
+  afl->stage_short = "flip2";
+  afl->stage_max = (len << 3) - 1;
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    stage_cur_byte = stage_cur >> 3;
+    afl->stage_cur_byte = afl->stage_cur >> 3;
 
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, afl->stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur + 1);
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, afl->stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur + 1);
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP2] += stage_max;
+  afl->stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP2] += afl->stage_max;
 
   /* Four walking bits. */
 
-  stage_name = "bitflip 4/1";
-  stage_short = "flip4";
-  stage_max = (len << 3) - 3;
+  afl->stage_name = "bitflip 4/1";
+  afl->stage_short = "flip4";
+  afl->stage_max = (len << 3) - 3;
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    stage_cur_byte = stage_cur >> 3;
+    afl->stage_cur_byte = afl->stage_cur >> 3;
 
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
-    FLIP_BIT(out_buf, stage_cur + 2);
-    FLIP_BIT(out_buf, stage_cur + 3);
+    FLIP_BIT(out_buf, afl->stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur + 1);
+    FLIP_BIT(out_buf, afl->stage_cur + 2);
+    FLIP_BIT(out_buf, afl->stage_cur + 3);
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
-    FLIP_BIT(out_buf, stage_cur + 2);
-    FLIP_BIT(out_buf, stage_cur + 3);
+    FLIP_BIT(out_buf, afl->stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur + 1);
+    FLIP_BIT(out_buf, afl->stage_cur + 2);
+    FLIP_BIT(out_buf, afl->stage_cur + 3);
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP4] += stage_max;
+  afl->stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP4] += afl->stage_max;
 
   /* Effector map setup. These macros calculate:
 
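The macro definitions themselves fall between hunks; for reference, in the upstream source they amount to the following (EFF_MAP_SCALE2 is 3, i.e. one effector bucket per 8 input bytes):

    #define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)            /* byte -> bucket */
    #define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) /* remainder      */
    #define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))      /* map len for _l */
    #define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)
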
@@ -719,47 +720,47 @@ u8 fuzz_one_original(char** argv) {
 
   /* Walking byte. */
 
-  stage_name = "bitflip 8/8";
-  stage_short = "flip8";
-  stage_max = len;
+  afl->stage_name = "bitflip 8/8";
+  afl->stage_short = "flip8";
+  afl->stage_max = len;
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    stage_cur_byte = stage_cur;
+    afl->stage_cur_byte = afl->stage_cur;
 
-    out_buf[stage_cur] ^= 0xFF;
+    out_buf[afl->stage_cur] ^= 0xFF;
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
     /* We also use this stage to pull off a simple trick: we identify
        bytes that seem to have no effect on the current execution path
        even when fully flipped - and we skip them during more expensive
        deterministic stages, such as arithmetics or known ints. */
 
-    if (!eff_map[EFF_APOS(stage_cur)]) {
+    if (!eff_map[EFF_APOS(afl->stage_cur)]) {
 
       u32 cksum;
 
       /* If in dumb mode or if the file is very short, just flag everything
          without wasting time on checksums. */
 
-      if (!dumb_mode && len >= EFF_MIN_LEN)
-        cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      if (!afl->dumb_mode && len >= EFF_MIN_LEN)
+        cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
       else
-        cksum = ~queue_cur->exec_cksum;
+        cksum = ~afl->queue_cur->exec_cksum;
 
-      if (cksum != queue_cur->exec_cksum) {
+      if (cksum != afl->queue_cur->exec_cksum) {
 
-        eff_map[EFF_APOS(stage_cur)] = 1;
+        eff_map[EFF_APOS(afl->stage_cur)] = 1;
         ++eff_cnt;
 
       }
 
     }
 
-    out_buf[stage_cur] ^= 0xFF;
+    out_buf[afl->stage_cur] ^= 0xFF;
 
   }
 
@@ -772,29 +773,29 @@ u8 fuzz_one_original(char** argv) {
 
     memset(eff_map, 1, EFF_ALEN(len));
 
-    blocks_eff_select += EFF_ALEN(len);
+    afl->blocks_eff_select += EFF_ALEN(len);
 
   } else {
 
-    blocks_eff_select += eff_cnt;
+    afl->blocks_eff_select += eff_cnt;
 
   }
 
-  blocks_eff_total += EFF_ALEN(len);
+  afl->blocks_eff_total += EFF_ALEN(len);
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP8] += stage_max;
+  afl->stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP8] += afl->stage_max;
 
   /* Two walking bytes. */
 
   if (len < 2) goto skip_bitflip;
 
-  stage_name = "bitflip 16/8";
-  stage_short = "flip16";
-  stage_cur = 0;
-  stage_max = len - 1;
+  afl->stage_name = "bitflip 16/8";
+  afl->stage_short = "flip16";
+  afl->stage_cur = 0;
+  afl->stage_max = len - 1;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -804,35 +805,35 @@ u8 fuzz_one_original(char** argv) {
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-      --stage_max;
+      --afl->stage_max;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     *(u16*)(out_buf + i) ^= 0xFFFF;
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-    ++stage_cur;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+    ++afl->stage_cur;
 
     *(u16*)(out_buf + i) ^= 0xFFFF;
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP16] += stage_max;
+  afl->stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP16] += afl->stage_max;
 
   if (len < 4) goto skip_bitflip;
 
   /* Four walking bytes. */
 
-  stage_name = "bitflip 32/8";
-  stage_short = "flip32";
-  stage_cur = 0;
-  stage_max = len - 3;
+  afl->stage_name = "bitflip 32/8";
+  afl->stage_short = "flip32";
+  afl->stage_cur = 0;
+  afl->stage_max = len - 3;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -842,30 +843,30 @@ u8 fuzz_one_original(char** argv) {
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-      --stage_max;
+      --afl->stage_max;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-    ++stage_cur;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+    ++afl->stage_cur;
 
     *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP32] += stage_max;
+  afl->stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP32] += afl->stage_max;
 
 skip_bitflip:
 
-  if (no_arith) goto skip_arith;
+  if (afl->no_arith) goto skip_arith;
 
   /**********************
    * ARITHMETIC INC/DEC *
@@ -873,12 +874,12 @@ skip_bitflip:
 
   /* 8-bit arithmetics. */
 
-  stage_name = "arith 8/8";
-  stage_short = "arith8";
-  stage_cur = 0;
-  stage_max = 2 * len * ARITH_MAX;
+  afl->stage_name = "arith 8/8";
+  afl->stage_short = "arith8";
+  afl->stage_cur = 0;
+  afl->stage_max = 2 * len * ARITH_MAX;
 
-  stage_val_type = STAGE_VAL_LE;
+  afl->stage_val_type = STAGE_VAL_LE;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -890,12 +891,12 @@ skip_bitflip:
 
     if (!eff_map[EFF_APOS(i)]) {
 
-      stage_max -= 2 * ARITH_MAX;
+      afl->stage_max -= 2 * ARITH_MAX;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 1; j <= ARITH_MAX; ++j) {
 
@@ -906,29 +907,29 @@ skip_bitflip:
 
       if (!could_be_bitflip(r)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         out_buf[i] = orig + j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       r = orig ^ (orig - j);
 
       if (!could_be_bitflip(r)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         out_buf[i] = orig - j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       out_buf[i] = orig;
 
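The r = orig ^ (orig + j) checks above (and the orig - j counterparts) deduplicate work across deterministic stages: if the arithmetic result differs from the original by a pattern the bitflip stages already produced, it is skipped, so each candidate value is executed at most once. A concrete instance:

    u8 orig = 0x00, j = 1;
    u8 r = orig ^ (u8)(orig + j);  /* r == 0x01: a single-bit difference,  */
                                   /* so could_be_bitflip(r) is true and   */
                                   /* the flip 1/1 stage already tried it. */
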
@@ -936,19 +937,19 @@ skip_bitflip:
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_ARITH8] += stage_max;
+  afl->stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_ARITH8] += afl->stage_max;
 
   /* 16-bit arithmetics, both endians. */
 
   if (len < 2) goto skip_arith;
 
-  stage_name = "arith 16/8";
-  stage_short = "arith16";
-  stage_cur = 0;
-  stage_max = 4 * (len - 1) * ARITH_MAX;
+  afl->stage_name = "arith 16/8";
+  afl->stage_short = "arith16";
+  afl->stage_cur = 0;
+  afl->stage_max = 4 * (len - 1) * ARITH_MAX;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -960,12 +961,12 @@ skip_bitflip:
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-      stage_max -= 4 * ARITH_MAX;
+      afl->stage_max -= 4 * ARITH_MAX;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 1; j <= ARITH_MAX; ++j) {
 
@@ -978,59 +979,59 @@ skip_bitflip:
          & 0xff overflow checks) and if it couldn't be a product of
          a bitflip. */
 
-      stage_val_type = STAGE_VAL_LE;
+      afl->stage_val_type = STAGE_VAL_LE;
 
       if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         *(u16*)(out_buf + i) = orig + j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         *(u16*)(out_buf + i) = orig - j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       /* Big endian comes next. Same deal. */
 
-      stage_val_type = STAGE_VAL_BE;
+      afl->stage_val_type = STAGE_VAL_BE;
 
       if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((orig >> 8) < j && !could_be_bitflip(r4)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       *(u16*)(out_buf + i) = orig;
 
@@ -1038,19 +1039,19 @@ skip_bitflip:
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_ARITH16] += stage_max;
+  afl->stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_ARITH16] += afl->stage_max;
 
   /* 32-bit arithmetics, both endians. */
 
   if (len < 4) goto skip_arith;
 
-  stage_name = "arith 32/8";
-  stage_short = "arith32";
-  stage_cur = 0;
-  stage_max = 4 * (len - 3) * ARITH_MAX;
+  afl->stage_name = "arith 32/8";
+  afl->stage_short = "arith32";
+  afl->stage_cur = 0;
+  afl->stage_max = 4 * (len - 3) * ARITH_MAX;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1063,12 +1064,12 @@ skip_bitflip:
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-      stage_max -= 4 * ARITH_MAX;
+      afl->stage_max -= 4 * ARITH_MAX;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 1; j <= ARITH_MAX; ++j) {
 
@@ -1079,59 +1080,59 @@ skip_bitflip:
       /* Little endian first. Same deal as with 16-bit: we only want to
          try if the operation would have effect on more than two bytes. */
 
-      stage_val_type = STAGE_VAL_LE;
+      afl->stage_val_type = STAGE_VAL_LE;
 
       if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         *(u32*)(out_buf + i) = orig + j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         *(u32*)(out_buf + i) = orig - j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       /* Big endian next. */
 
-      stage_val_type = STAGE_VAL_BE;
+      afl->stage_val_type = STAGE_VAL_BE;
 
       if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       *(u32*)(out_buf + i) = orig;
 
@@ -1139,10 +1140,10 @@ skip_bitflip:
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_ARITH32] += stage_max;
+  afl->stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_ARITH32] += afl->stage_max;
 
 skip_arith:
 
@@ -1150,12 +1151,12 @@ skip_arith:
    * INTERESTING VALUES *
    **********************/
 
-  stage_name = "interest 8/8";
-  stage_short = "int8";
-  stage_cur = 0;
-  stage_max = len * sizeof(interesting_8);
+  afl->stage_name = "interest 8/8";
+  afl->stage_short = "int8";
+  afl->stage_cur = 0;
+  afl->stage_max = len * sizeof(interesting_8);
 
-  stage_val_type = STAGE_VAL_LE;
+  afl->stage_val_type = STAGE_VAL_LE;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1169,12 +1170,12 @@ skip_arith:
 
     if (!eff_map[EFF_APOS(i)]) {
 
-      stage_max -= sizeof(interesting_8);
+      afl->stage_max -= sizeof(interesting_8);
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 0; j < sizeof(interesting_8); ++j) {
 
@@ -1183,36 +1184,36 @@ skip_arith:
       if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
           could_be_arith(orig, (u8)interesting_8[j], 1)) {
 
-        --stage_max;
+        --afl->stage_max;
         continue;
 
       }
 
-      stage_cur_val = interesting_8[j];
+      afl->stage_cur_val = interesting_8[j];
       out_buf[i] = interesting_8[j];
 
-      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
       out_buf[i] = orig;
-      ++stage_cur;
+      ++afl->stage_cur;
 
     }
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_INTEREST8] += stage_max;
+  afl->stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_INTEREST8] += afl->stage_max;
 
   /* Setting 16-bit integers, both endians. */
 
-  if (no_arith || len < 2) goto skip_interest;
+  if (afl->no_arith || len < 2) goto skip_interest;
 
-  stage_name = "interest 16/8";
-  stage_short = "int16";
-  stage_cur = 0;
-  stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
+  afl->stage_name = "interest 16/8";
+  afl->stage_short = "int16";
+  afl->stage_cur = 0;
+  afl->stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1224,16 +1225,16 @@ skip_arith:
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-      stage_max -= sizeof(interesting_16);
+      afl->stage_max -= sizeof(interesting_16);
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
 
-      stage_cur_val = interesting_16[j];
+      afl->stage_cur_val = interesting_16[j];
 
       /* Skip if this could be a product of a bitflip, arithmetics,
          or single-byte interesting value insertion. */
@@ -1242,31 +1243,31 @@ skip_arith:
           !could_be_arith(orig, (u16)interesting_16[j], 2) &&
           !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
 
-        stage_val_type = STAGE_VAL_LE;
+        afl->stage_val_type = STAGE_VAL_LE;
 
         *(u16*)(out_buf + i) = interesting_16[j];
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
           !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
           !could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
           !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
 
-        stage_val_type = STAGE_VAL_BE;
+        afl->stage_val_type = STAGE_VAL_BE;
 
         *(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
     }
 
@@ -1274,19 +1275,19 @@ skip_arith:
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_INTEREST16] += stage_max;
+  afl->stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_INTEREST16] += afl->stage_max;
 
   if (len < 4) goto skip_interest;
 
   /* Setting 32-bit integers, both endians. */
 
-  stage_name = "interest 32/8";
-  stage_short = "int32";
-  stage_cur = 0;
-  stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
+  afl->stage_name = "interest 32/8";
+  afl->stage_short = "int32";
+  afl->stage_cur = 0;
+  afl->stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1299,16 +1300,16 @@ skip_arith:
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-      stage_max -= sizeof(interesting_32) >> 1;
+      afl->stage_max -= sizeof(interesting_32) >> 1;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
 
-      stage_cur_val = interesting_32[j];
+      afl->stage_cur_val = interesting_32[j];
 
       /* Skip if this could be a product of a bitflip, arithmetics,
          or word interesting value insertion. */
@@ -1317,31 +1318,31 @@ skip_arith:
           !could_be_arith(orig, interesting_32[j], 4) &&
           !could_be_interest(orig, interesting_32[j], 4, 0)) {
 
-        stage_val_type = STAGE_VAL_LE;
+        afl->stage_val_type = STAGE_VAL_LE;
 
         *(u32*)(out_buf + i) = interesting_32[j];
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
           !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
           !could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
           !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
 
-        stage_val_type = STAGE_VAL_BE;
+        afl->stage_val_type = STAGE_VAL_BE;
 
         *(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
     }
 
@@ -1349,10 +1350,10 @@ skip_arith:
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_INTEREST32] += stage_max;
+  afl->stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_INTEREST32] += afl->stage_max;
 
 skip_interest:
 
@@ -1360,16 +1361,16 @@ skip_interest:
    * DICTIONARY STUFF *
    ********************/
 
-  if (!extras_cnt) goto skip_user_extras;
+  if (!afl->extras_cnt) goto skip_user_extras;
 
   /* Overwrite with user-supplied extras. */
 
-  stage_name = "user extras (over)";
-  stage_short = "ext_UO";
-  stage_cur = 0;
-  stage_max = extras_cnt * len;
+  afl->stage_name = "user extras (over)";
+  afl->stage_short = "ext_UO";
+  afl->stage_cur = 0;
+  afl->stage_max = afl->extras_cnt * len;
 
-  stage_val_type = STAGE_VAL_NONE;
+  afl->stage_val_type = STAGE_VAL_NONE;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1377,36 +1378,36 @@ skip_interest:
 
     u32 last_len = 0;
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     /* Extras are sorted by size, from smallest to largest. This means
        that we don't have to worry about restoring the buffer in
        between writes at a particular offset determined by the outer
        loop. */
 
-    for (j = 0; j < extras_cnt; ++j) {
+    for (j = 0; j < afl->extras_cnt; ++j) {
 
-      /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
+      /* Skip extras probabilistically if afl->extras_cnt > MAX_DET_EXTRAS. Also
          skip them if there's no room to insert the payload, if the token
          is redundant, or if its entire span has no bytes set in the effector
          map. */
 
-      if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
-          extras[j].len > len - i ||
-          !memcmp(extras[j].data, out_buf + i, extras[j].len) ||
-          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
+      if ((afl->extras_cnt > MAX_DET_EXTRAS && UR(afl, afl->extras_cnt) >= MAX_DET_EXTRAS) ||
+          afl->extras[j].len > len - i ||
+          !memcmp(afl->extras[j].data, out_buf + i, afl->extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, afl->extras[j].len))) {
 
-        --stage_max;
+        --afl->stage_max;
         continue;
 
       }
 
-      last_len = extras[j].len;
-      memcpy(out_buf + i, extras[j].data, last_len);
+      last_len = afl->extras[j].len;
+      memcpy(out_buf + i, afl->extras[j].data, last_len);
 
-      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-      ++stage_cur;
+      ++afl->stage_cur;
 
     }
 
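The UR(afl, afl->extras_cnt) >= MAX_DET_EXTRAS guard keeps deterministic dictionary work bounded for large dictionaries: each extra is tried with probability MAX_DET_EXTRAS / extras_cnt, so the expected number attempted per offset stays constant:

    /* E[tries per offset] = extras_cnt * (MAX_DET_EXTRAS / extras_cnt)
                           = MAX_DET_EXTRAS, independent of dictionary size. */
    if (afl->extras_cnt > MAX_DET_EXTRAS &&
        UR(afl, afl->extras_cnt) >= MAX_DET_EXTRAS)
      continue;                              /* probabilistic skip */
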
@@ -1415,17 +1416,17 @@ skip_interest:
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_EXTRAS_UO] += stage_max;
+  afl->stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_EXTRAS_UO] += afl->stage_max;
 
   /* Insertion of user-supplied extras. */
 
-  stage_name = "user extras (insert)";
-  stage_short = "ext_UI";
-  stage_cur = 0;
-  stage_max = extras_cnt * (len + 1);
+  afl->stage_name = "user extras (insert)";
+  afl->stage_short = "ext_UI";
+  afl->stage_cur = 0;
+  afl->stage_max = afl->extras_cnt * (len + 1);
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1433,31 +1434,31 @@ skip_interest:
 
   for (i = 0; i <= len; ++i) {
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
-    for (j = 0; j < extras_cnt; ++j) {
+    for (j = 0; j < afl->extras_cnt; ++j) {
 
-      if (len + extras[j].len > MAX_FILE) {
+      if (len + afl->extras[j].len > MAX_FILE) {
 
-        --stage_max;
+        --afl->stage_max;
         continue;
 
       }
 
       /* Insert token */
-      memcpy(ex_tmp + i, extras[j].data, extras[j].len);
+      memcpy(ex_tmp + i, afl->extras[j].data, afl->extras[j].len);
 
       /* Copy tail */
-      memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
+      memcpy(ex_tmp + i + afl->extras[j].len, out_buf + i, len - i);
 
-      if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
+      if (common_fuzz_stuff(afl, ex_tmp, len + afl->extras[j].len)) {
 
         ck_free(ex_tmp);
         goto abandon_entry;
 
       }
 
-      ++stage_cur;
+      ++afl->stage_cur;
 
     }
 
@@ -1468,21 +1469,21 @@ skip_interest:
 
   ck_free(ex_tmp);
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_EXTRAS_UI] += stage_max;
+  afl->stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_EXTRAS_UI] += afl->stage_max;
 
 skip_user_extras:
 
-  if (!a_extras_cnt) goto skip_extras;
+  if (!afl->a_extras_cnt) goto skip_extras;
 
-  stage_name = "auto extras (over)";
-  stage_short = "ext_AO";
-  stage_cur = 0;
-  stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
+  afl->stage_name = "auto extras (over)";
+  afl->stage_short = "ext_AO";
+  afl->stage_cur = 0;
+  afl->stage_max = MIN(afl->a_extras_cnt, USE_AUTO_EXTRAS) * len;
 
-  stage_val_type = STAGE_VAL_NONE;
+  afl->stage_val_type = STAGE_VAL_NONE;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1490,28 +1491,28 @@ skip_user_extras:
 
     u32 last_len = 0;
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
-    for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
+    for (j = 0; j < MIN(afl->a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
 
       /* See the comment in the earlier code; extras are sorted by size. */
 
-      if (a_extras[j].len > len - i ||
-          !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
+      if (afl->a_extras[j].len > len - i ||
+          !memcmp(afl->a_extras[j].data, out_buf + i, afl->a_extras[j].len) ||
           !memchr(eff_map + EFF_APOS(i), 1,
-                  EFF_SPAN_ALEN(i, a_extras[j].len))) {
+                  EFF_SPAN_ALEN(i, afl->a_extras[j].len))) {
 
-        --stage_max;
+        --afl->stage_max;
         continue;
 
       }
 
-      last_len = a_extras[j].len;
-      memcpy(out_buf + i, a_extras[j].data, last_len);
+      last_len = afl->a_extras[j].len;
+      memcpy(out_buf + i, afl->a_extras[j].data, last_len);
 
-      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-      ++stage_cur;
+      ++afl->stage_cur;
 
     }
 
@@ -1520,10 +1521,10 @@ skip_user_extras:
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_EXTRAS_AO] += stage_max;
+  afl->stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_EXTRAS_AO] += afl->stage_max;
 
 skip_extras:
 
@@ -1531,28 +1532,28 @@ skip_extras:
      we're properly done with deterministic steps and can mark it as such
      in the .state/ directory. */
 
-  if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
+  if (!afl->queue_cur->passed_det) mark_as_det_done(afl, afl->queue_cur);
 
 custom_mutator_stage:
   /*******************
    * CUSTOM MUTATORS *
    *******************/
 
-  if (!mutator) goto havoc_stage;
-  if (!mutator->afl_custom_fuzz) goto havoc_stage;
+  if (!afl->mutator) goto havoc_stage;
+  if (!afl->mutator->afl_custom_fuzz) goto havoc_stage;
 
-  stage_name = "custom mutator";
-  stage_short = "custom";
-  stage_max = HAVOC_CYCLES * perf_score / havoc_div / 100;
-  stage_val_type = STAGE_VAL_NONE;
+  afl->stage_name = "custom mutator";
+  afl->stage_short = "custom";
+  afl->stage_max = HAVOC_CYCLES * perf_score / afl->havoc_div / 100;
+  afl->stage_val_type = STAGE_VAL_NONE;
 
-  if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
+  if (afl->stage_max < HAVOC_MIN) afl->stage_max = HAVOC_MIN;
 
   const u32 max_seed_size = MAX_FILE;
 
-  orig_hit_cnt = queued_paths + unique_crashes;
-
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
+
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
     struct queue_entry* target;
     u32                 tid;
@@ -1562,11 +1563,11 @@ custom_mutator_stage:
     /* Pick a random other queue entry for passing to external API */
     do {
 
-      tid = UR(queued_paths);
+      tid = UR(afl, afl->queued_paths);
 
-    } while (tid == current_entry && queued_paths > 1);
+    } while (tid == afl->current_entry && afl->queued_paths > 1);
 
-    target = queue;
+    target = afl->queue;
 
     while (tid >= 100) {
 
@@ -1580,11 +1581,11 @@ custom_mutator_stage:
 
     /* Make sure that the target has a reasonable length. */
 
-    while (target && (target->len < 2 || target == queue_cur) &&
-           queued_paths > 1) {
+    while (target && (target->len < 2 || target == afl->queue_cur) &&
+           afl->queued_paths > 1) {
 
       target = target->next;
-      ++splicing_with;
+      ++afl->splicing_with;
 
     }
 
@@ -1596,15 +1597,16 @@ custom_mutator_stage:
     new_buf = ck_alloc_nozero(target->len);
     ck_read(fd, new_buf, target->len, target->fname);
     close(fd);
-
-    size_t mutated_size = mutator->afl_custom_fuzz(&out_buf, len, new_buf,
-                                                   target->len, max_seed_size);
+
+    size_t mutated_size = afl->mutator->afl_custom_fuzz(afl, &out_buf, len,
+                                                   new_buf, target->len,
+                                                   max_seed_size);
 
     ck_free(new_buf);
 
     if (mutated_size > 0) {
 
-      if (common_fuzz_stuff(argv, out_buf, (u32)mutated_size)) {
+      if (common_fuzz_stuff(afl, out_buf, (u32)mutated_size)) {
 
         goto abandon_entry;
 
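The custom-mutator callback gains the state pointer as well. Its shape, as inferred from this call site alone (the authoritative typedef lives in the headers, not in this diff):

    /* Assumed signature, reconstructed from
       afl_custom_fuzz(afl, &out_buf, len, new_buf, target->len, max_seed_size) */
    size_t (*afl_custom_fuzz)(afl_state_t *afl, u8 **buf, size_t buf_size,
                              u8 *add_buf, size_t add_buf_size, size_t max_size);
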
@@ -1613,16 +1615,16 @@ custom_mutator_stage:
       /* If we're finding new stuff, let's run for a bit longer, limits
          permitting. */
 
-      if (queued_paths != havoc_queued) {
+      if (afl->queued_paths != havoc_queued) {
 
-        if (perf_score <= havoc_max_mult * 100) {
+        if (perf_score <= afl->havoc_max_mult * 100) {
 
-          stage_max *= 2;
+          afl->stage_max *= 2;
           perf_score *= 2;
 
         }
 
-        havoc_queued = queued_paths;
+        havoc_queued = afl->queued_paths;
 
       }
 
@@ -1633,12 +1635,12 @@ custom_mutator_stage:
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_CUSTOM_MUTATOR] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_CUSTOM_MUTATOR] += stage_max;
+  afl->stage_finds[STAGE_CUSTOM_MUTATOR] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_CUSTOM_MUTATOR] += afl->stage_max;
 
-  if (custom_only) {
+  if (afl->custom_only) {
 
     /* Skip other stages */
     ret_val = 0;
@@ -1652,45 +1654,43 @@ custom_mutator_stage:
 
 havoc_stage:
 
-  stage_cur_byte = -1;
+  afl->stage_cur_byte = -1;
 
   /* The havoc stage mutation code is also invoked when splicing files; if the
      splice_cycle variable is set, generate different descriptions and such. */
 
   if (!splice_cycle) {
 
-    stage_name = "havoc";
-    stage_short = "havoc";
-    stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
-                havoc_div / 100;
+    afl->stage_name = "havoc";
+    afl->stage_short = "havoc";
+    afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
+                afl->havoc_div / 100;
 
   } else {
 
-    static u8 tmp[32];
-
     perf_score = orig_perf;
 
-    sprintf(tmp, "splice %u", splice_cycle);
-    stage_name = tmp;
-    stage_short = "splice";
-    stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+    snprintf(afl->stage_name_buf64, 64, "splice %u", splice_cycle);
+    afl->stage_name = afl->stage_name_buf64;
+    afl->stage_short = "splice";
+    afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100;
 
   }
 
-  if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
+  if (afl->stage_max < HAVOC_MIN) afl->stage_max = HAVOC_MIN;
 
   temp_len = len;
 
-  orig_hit_cnt = queued_paths + unique_crashes;
+  orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  havoc_queued = queued_paths;
+  havoc_queued = afl->queued_paths;
 
-  u8 stacked_custom = (mutator && mutator->afl_custom_havoc_mutation);
-  u8 stacked_custom_prob = 6;  // like one of the default mutations in havoc
+  u8 stacked_custom = (afl->mutator && afl->mutator->afl_custom_havoc_mutation);
+  u8 stacked_custom_prob = 6; // like one of the default mutations in havoc
 
-  if (stacked_custom && mutator->afl_custom_havoc_mutation_probability) {
+  if (stacked_custom && afl->mutator->afl_custom_havoc_mutation_probability) {
 
-    stacked_custom_prob = mutator->afl_custom_havoc_mutation_probability();
+    stacked_custom_prob = afl->mutator->afl_custom_havoc_mutation_probability(afl);
     if (stacked_custom_prob > 100)
       FATAL(
           "The probability returned by afl_custom_havoc_mutation_propability "
@@ -1701,35 +1701,35 @@ havoc_stage:
   /* We essentially just do several thousand runs (depending on perf_score)
      where we take the input file and make random stacked tweaks. */
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
+    u32 use_stacking = 1 << (1 + UR(afl, HAVOC_STACK_POW2));
 
-    stage_cur_val = use_stacking;
+    afl->stage_cur_val = use_stacking;
 
     for (i = 0; i < use_stacking; ++i) {
-
-      if (stacked_custom && UR(100) < stacked_custom_prob) {
-
-        temp_len =
-            mutator->afl_custom_havoc_mutation(&out_buf, temp_len, MAX_FILE);
-
+
+      if (stacked_custom && UR(afl, 100) < stacked_custom_prob) {
+
+        temp_len = afl->mutator->afl_custom_havoc_mutation(afl, &out_buf,
+                                                           temp_len, MAX_FILE);
+
       }
 
-      switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) {
+      switch (UR(afl, 15 + ((afl->extras_cnt + afl->a_extras_cnt) ? 2 : 0))) {
 
         case 0:
 
           /* Flip a single bit somewhere. Spooky! */
 
-          FLIP_BIT(out_buf, UR(temp_len << 3));
+          FLIP_BIT(out_buf, UR(afl, temp_len << 3));
           break;
 
         case 1:
 
           /* Set byte to interesting value. */
 
-          out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
+          out_buf[UR(afl, temp_len)] = interesting_8[UR(afl, sizeof(interesting_8))];
           break;
 
         case 2:
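
On use_stacking: UR(afl, HAVOC_STACK_POW2) yields an exponent, so the number of stacked tweaks per havoc iteration is a power of two, uniform over exponents and therefore biased toward small stacks by value (2..128 with the default HAVOC_STACK_POW2 of 7 in config.h):

    u32 use_stacking = 1 << (1 + UR(afl, HAVOC_STACK_POW2));
    /* one of { 2, 4, 8, 16, 32, 64, 128 }, each with probability 1/7 */
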
@@ -1738,15 +1738,15 @@ havoc_stage:
 
           if (temp_len < 2) break;
 
-          if (UR(2)) {
+          if (UR(afl, 2)) {
 
-            *(u16*)(out_buf + UR(temp_len - 1)) =
-                interesting_16[UR(sizeof(interesting_16) >> 1)];
+            *(u16*)(out_buf + UR(afl, temp_len - 1)) =
+                interesting_16[UR(afl, sizeof(interesting_16) >> 1)];
 
           } else {
 
-            *(u16*)(out_buf + UR(temp_len - 1)) =
-                SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]);
+            *(u16*)(out_buf + UR(afl, temp_len - 1)) =
+                SWAP16(interesting_16[UR(afl, sizeof(interesting_16) >> 1)]);
 
           }
 
@@ -1758,15 +1758,15 @@ havoc_stage:
 
           if (temp_len < 4) break;
 
-          if (UR(2)) {
+          if (UR(afl, 2)) {
 
-            *(u32*)(out_buf + UR(temp_len - 3)) =
-                interesting_32[UR(sizeof(interesting_32) >> 2)];
+            *(u32*)(out_buf + UR(afl, temp_len - 3)) =
+                interesting_32[UR(afl, sizeof(interesting_32) >> 2)];
 
           } else {
 
-            *(u32*)(out_buf + UR(temp_len - 3)) =
-                SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]);
+            *(u32*)(out_buf + UR(afl, temp_len - 3)) =
+                SWAP32(interesting_32[UR(afl, sizeof(interesting_32) >> 2)]);
 
           }
 
@@ -1776,14 +1776,14 @@ havoc_stage:
 
           /* Randomly subtract from byte. */
 
-          out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
+          out_buf[UR(afl, temp_len)] -= 1 + UR(afl, ARITH_MAX);
           break;
 
         case 5:
 
           /* Randomly add to byte. */
 
-          out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
+          out_buf[UR(afl, temp_len)] += 1 + UR(afl, ARITH_MAX);
           break;
 
         case 6:
@@ -1792,16 +1792,16 @@ havoc_stage:
 
           if (temp_len < 2) break;
 
-          if (UR(2)) {
+          if (UR(afl, 2)) {
 
-            u32 pos = UR(temp_len - 1);
+            u32 pos = UR(afl, temp_len - 1);
 
-            *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+            *(u16*)(out_buf + pos) -= 1 + UR(afl, ARITH_MAX);
 
           } else {
 
-            u32 pos = UR(temp_len - 1);
-            u16 num = 1 + UR(ARITH_MAX);
+            u32 pos = UR(afl, temp_len - 1);
+            u16 num = 1 + UR(afl, ARITH_MAX);
 
             *(u16*)(out_buf + pos) =
                 SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
@@ -1816,16 +1816,16 @@ havoc_stage:
 
           if (temp_len < 2) break;
 
-          if (UR(2)) {
+          if (UR(afl, 2)) {
 
-            u32 pos = UR(temp_len - 1);
+            u32 pos = UR(afl, temp_len - 1);
 
-            *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
+            *(u16*)(out_buf + pos) += 1 + UR(afl, ARITH_MAX);
 
           } else {
 
-            u32 pos = UR(temp_len - 1);
-            u16 num = 1 + UR(ARITH_MAX);
+            u32 pos = UR(afl, temp_len - 1);
+            u16 num = 1 + UR(afl, ARITH_MAX);
 
             *(u16*)(out_buf + pos) =
                 SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
@@ -1840,16 +1840,16 @@ havoc_stage:
 
           if (temp_len < 4) break;
 
-          if (UR(2)) {
+          if (UR(afl, 2)) {
 
-            u32 pos = UR(temp_len - 3);
+            u32 pos = UR(afl, temp_len - 3);
 
-            *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+            *(u32*)(out_buf + pos) -= 1 + UR(afl, ARITH_MAX);
 
           } else {
 
-            u32 pos = UR(temp_len - 3);
-            u32 num = 1 + UR(ARITH_MAX);
+            u32 pos = UR(afl, temp_len - 3);
+            u32 num = 1 + UR(afl, ARITH_MAX);
 
             *(u32*)(out_buf + pos) =
                 SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
@@ -1864,16 +1864,16 @@ havoc_stage:
 
           if (temp_len < 4) break;
 
-          if (UR(2)) {
+          if (UR(afl, 2)) {
 
-            u32 pos = UR(temp_len - 3);
+            u32 pos = UR(afl, temp_len - 3);
 
-            *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
+            *(u32*)(out_buf + pos) += 1 + UR(afl, ARITH_MAX);
 
           } else {
 
-            u32 pos = UR(temp_len - 3);
-            u32 num = 1 + UR(ARITH_MAX);
+            u32 pos = UR(afl, temp_len - 3);
+            u32 num = 1 + UR(afl, ARITH_MAX);
 
             *(u32*)(out_buf + pos) =
                 SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
@@ -1888,7 +1888,7 @@ havoc_stage:
              why not. We use XOR with 1-255 to eliminate the
              possibility of a no-op. */
 
-          out_buf[UR(temp_len)] ^= 1 + UR(255);
+          out_buf[UR(afl, temp_len)] ^= 1 + UR(afl, 255);
           break;
 
         case 11 ... 12: {
@@ -1903,9 +1903,9 @@ havoc_stage:
 
           /* Don't delete too much. */
 
-          del_len = choose_block_len(temp_len - 1);
+          del_len = choose_block_len(afl, temp_len - 1);
 
-          del_from = UR(temp_len - del_len + 1);
+          del_from = UR(afl, temp_len - del_len + 1);
 
           memmove(out_buf + del_from, out_buf + del_from + del_len,
                   temp_len - del_from - del_len);
@@ -1922,23 +1922,23 @@ havoc_stage:
 
             /* Clone bytes (75%) or insert a block of constant bytes (25%). */
 
-            u8  actually_clone = UR(4);
+            u8  actually_clone = UR(afl, 4);
             u32 clone_from, clone_to, clone_len;
             u8* new_buf;
 
             if (actually_clone) {
 
-              clone_len = choose_block_len(temp_len);
-              clone_from = UR(temp_len - clone_len + 1);
+              clone_len = choose_block_len(afl, temp_len);
+              clone_from = UR(afl, temp_len - clone_len + 1);
 
             } else {
 
-              clone_len = choose_block_len(HAVOC_BLK_XL);
+              clone_len = choose_block_len(afl, HAVOC_BLK_XL);
               clone_from = 0;
 
             }
 
-            clone_to = UR(temp_len);
+            clone_to = UR(afl, temp_len);
 
             new_buf = ck_alloc_nozero(temp_len + clone_len);
 
@@ -1952,7 +1952,7 @@ havoc_stage:
               memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
             else
               memset(new_buf + clone_to,
-                     UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
+                     UR(afl, 2) ? UR(afl, 256) : out_buf[UR(afl, temp_len)],
+                     clone_len);
 
             /* Tail */
             memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
@@ -1975,19 +1975,19 @@ havoc_stage:
 
           if (temp_len < 2) break;
 
-          copy_len = choose_block_len(temp_len - 1);
+          copy_len = choose_block_len(afl, temp_len - 1);
 
-          copy_from = UR(temp_len - copy_len + 1);
-          copy_to = UR(temp_len - copy_len + 1);
+          copy_from = UR(afl, temp_len - copy_len + 1);
+          copy_to = UR(afl, temp_len - copy_len + 1);
 
-          if (UR(4)) {
+          if (UR(afl, 4)) {
 
             if (copy_from != copy_to)
               memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
 
           } else
 
-            memset(out_buf + copy_to, UR(2) ? UR(256) : out_buf[UR(temp_len)],
+            memset(out_buf + copy_to,
+                   UR(afl, 2) ? UR(afl, 256) : out_buf[UR(afl, temp_len)],
                    copy_len);
 
           break;
@@ -2001,32 +2001,32 @@ havoc_stage:
 
           /* Overwrite bytes with an extra. */
 
-          if (!extras_cnt || (a_extras_cnt && UR(2))) {
+          if (!afl->extras_cnt || (afl->a_extras_cnt && UR(afl, 2))) {
 
             /* No user-specified extras or odds in our favor. Let's use an
                auto-detected one. */
 
-            u32 use_extra = UR(a_extras_cnt);
-            u32 extra_len = a_extras[use_extra].len;
+            u32 use_extra = UR(afl, afl->a_extras_cnt);
+            u32 extra_len = afl->a_extras[use_extra].len;
             u32 insert_at;
 
             if (extra_len > temp_len) break;
 
-            insert_at = UR(temp_len - extra_len + 1);
-            memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len);
+            insert_at = UR(afl, temp_len - extra_len + 1);
+            memcpy(out_buf + insert_at, afl->a_extras[use_extra].data, extra_len);
 
           } else {
 
             /* No auto extras or odds in our favor. Use the dictionary. */
 
-            u32 use_extra = UR(extras_cnt);
-            u32 extra_len = extras[use_extra].len;
+            u32 use_extra = UR(afl, afl->extras_cnt);
+            u32 extra_len = afl->extras[use_extra].len;
             u32 insert_at;
 
             if (extra_len > temp_len) break;
 
-            insert_at = UR(temp_len - extra_len + 1);
-            memcpy(out_buf + insert_at, extras[use_extra].data, extra_len);
+            insert_at = UR(afl, temp_len - extra_len + 1);
+            memcpy(out_buf + insert_at, afl->extras[use_extra].data, extra_len);
 
           }
 
@@ -2036,16 +2036,16 @@ havoc_stage:
 
         case 16: {
 
-          u32 use_extra, extra_len, insert_at = UR(temp_len + 1);
+          u32 use_extra, extra_len, insert_at = UR(afl, temp_len + 1);
           u8* new_buf;
 
           /* Insert an extra. Do the same dice-rolling stuff as for the
              previous case. */
 
-          if (!extras_cnt || (a_extras_cnt && UR(2))) {
+          if (!afl->extras_cnt || (afl->a_extras_cnt && UR(afl, 2))) {
 
-            use_extra = UR(a_extras_cnt);
-            extra_len = a_extras[use_extra].len;
+            use_extra = UR(afl, afl->a_extras_cnt);
+            extra_len = afl->a_extras[use_extra].len;
 
             if (temp_len + extra_len >= MAX_FILE) break;
 
@@ -2055,12 +2055,12 @@ havoc_stage:
             memcpy(new_buf, out_buf, insert_at);
 
             /* Inserted part */
-            memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len);
+            memcpy(new_buf + insert_at, afl->a_extras[use_extra].data, extra_len);
 
           } else {
 
-            use_extra = UR(extras_cnt);
-            extra_len = extras[use_extra].len;
+            use_extra = UR(afl, afl->extras_cnt);
+            extra_len = afl->extras[use_extra].len;
 
             if (temp_len + extra_len >= MAX_FILE) break;
 
@@ -2070,7 +2070,7 @@ havoc_stage:
             memcpy(new_buf, out_buf, insert_at);
 
             /* Inserted part */
-            memcpy(new_buf + insert_at, extras[use_extra].data, extra_len);
+            memcpy(new_buf + insert_at, afl->extras[use_extra].data, extra_len);
 
           }
 
@@ -2090,7 +2090,7 @@ havoc_stage:
 
     }
 
-    if (common_fuzz_stuff(argv, out_buf, temp_len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, temp_len)) goto abandon_entry;
 
     /* out_buf might have been mangled a bit, so let's restore it to its
        original size and shape. */
@@ -2102,32 +2102,32 @@ havoc_stage:
     /* If we're finding new stuff, let's run for a bit longer, limits
        permitting. */
 
-    if (queued_paths != havoc_queued) {
+    if (afl->queued_paths != havoc_queued) {
 
-      if (perf_score <= havoc_max_mult * 100) {
+      if (perf_score <= afl->havoc_max_mult * 100) {
 
-        stage_max *= 2;
+        afl->stage_max *= 2;
         perf_score *= 2;
 
       }
 
-      havoc_queued = queued_paths;
+      havoc_queued = afl->queued_paths;
 
     }
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
   if (!splice_cycle) {
 
-    stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt;
-    stage_cycles[STAGE_HAVOC] += stage_max;
+    afl->stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt;
+    afl->stage_cycles[STAGE_HAVOC] += afl->stage_max;
 
   } else {
 
-    stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt;
-    stage_cycles[STAGE_SPLICE] += stage_max;
+    afl->stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt;
+    afl->stage_cycles[STAGE_SPLICE] += afl->stage_max;
 
   }
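/* A note on the adaptive budget above: each time the havoc loop turns up
   a new path, stage_max and perf_score both double, but only while
   perf_score stays at or below havoc_max_mult * 100, so the stage cannot
   extend itself forever. A sketch of the bound, assuming the stock
   HAVOC_MAX_MULT of 16 and a base score of 100: */

   u32 doublings = 0, p = 100;
   while (p <= 16 * 100) { p *= 2; ++doublings; }   /* doublings == 5 */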
 
@@ -2144,8 +2144,8 @@ havoc_stage:
 
 retry_splicing:
 
-  if (use_splicing && splice_cycle++ < SPLICE_CYCLES && queued_paths > 1 &&
-      queue_cur->len > 1) {
+  if (afl->use_splicing && splice_cycle++ < SPLICE_CYCLES &&
+      afl->queued_paths > 1 && afl->queue_cur->len > 1) {
 
     struct queue_entry* target;
     u32                 tid, split_at;
@@ -2159,7 +2159,7 @@ retry_splicing:
 
       ck_free(in_buf);
       in_buf = orig_in;
-      len = queue_cur->len;
+      len = afl->queue_cur->len;
 
     }
 
@@ -2167,12 +2167,12 @@ retry_splicing:
 
     do {
 
-      tid = UR(queued_paths);
+      tid = UR(afl, afl->queued_paths);
 
-    } while (tid == current_entry);
+    } while (tid == afl->current_entry);
 
-    splicing_with = tid;
-    target = queue;
+    afl->splicing_with = tid;
+    target = afl->queue;
 
     while (tid >= 100) {
 
@@ -2186,10 +2186,10 @@ retry_splicing:
 
     /* Make sure that the target has a reasonable length. */
 
-    while (target && (target->len < 2 || target == queue_cur)) {
+    while (target && (target->len < 2 || target == afl->queue_cur)) {
 
       target = target->next;
-      ++splicing_with;
+      ++afl->splicing_with;
 
     }
 
@@ -2222,7 +2222,7 @@ retry_splicing:
 
     /* Split somewhere between the first and last differing byte. */
 
-    split_at = f_diff + UR(l_diff - f_diff);
+    split_at = f_diff + UR(afl, l_diff - f_diff);
 
     /* Do the thing. */
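/* A sketch of the elided splice body (as in stock AFL): keep the head of
   the current case up to split_at, take the tail from the target case
   already sitting in new_buf, then send the result through havoc. */

   len = target->len;
   memcpy(new_buf, in_buf, split_at);   /* head of the current case */
   in_buf = new_buf;                    /* tail of the target case  */
   ck_free(out_buf);
   out_buf = ck_alloc_nozero(len);
   memcpy(out_buf, in_buf, len);
   goto havoc_stage;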
 
@@ -2248,28 +2248,28 @@ retry_splicing:
 
 radamsa_stage:
 
-  if (!use_radamsa || !radamsa_mutate_ptr) goto abandon_entry;
+  if (!afl->use_radamsa || !afl->radamsa_mutate_ptr) goto abandon_entry;
 
-  stage_name = "radamsa";
-  stage_short = "radamsa";
-  stage_max = (HAVOC_CYCLES * perf_score / havoc_div / 100) << use_radamsa;
+  afl->stage_name = "radamsa";
+  afl->stage_short = "radamsa";
+  afl->stage_max =
+      (HAVOC_CYCLES * perf_score / afl->havoc_div / 100) << afl->use_radamsa;
 
-  if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
+  if (afl->stage_max < HAVOC_MIN) afl->stage_max = HAVOC_MIN;
 
-  orig_hit_cnt = queued_paths + unique_crashes;
+  orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
   /* Read the additional testcase into a new buffer. */
   u8* save_buf = ck_alloc_nozero(len);
   memcpy(save_buf, out_buf, len);
 
-  u32 max_len = len + choose_block_len(HAVOC_BLK_XL);
+  u32 max_len = len + choose_block_len(afl, HAVOC_BLK_XL);
   u8* new_buf = ck_alloc_nozero(max_len);
   u8* tmp_buf;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
     u32 new_len =
-        radamsa_mutate_ptr(save_buf, len, new_buf, max_len, get_rand_seed());
+        afl->radamsa_mutate_ptr(save_buf, len, new_buf, max_len,
+                                get_rand_seed(afl));
 
     if (new_len) {
 
@@ -2283,7 +2283,7 @@ radamsa_stage:
 
     }
 
-    if (common_fuzz_stuff(argv, tmp_buf, temp_len)) {
+    if (common_fuzz_stuff(afl, tmp_buf, temp_len)) {
 
       ck_free(save_buf);
       ck_free(new_buf);
@@ -2296,10 +2296,10 @@ radamsa_stage:
   ck_free(save_buf);
   ck_free(new_buf);
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_RADAMSA] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_RADAMSA] += stage_max;
+  afl->stage_finds[STAGE_RADAMSA] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_RADAMSA] += afl->stage_max;
 
   ret_val = 0;
   goto abandon_entry;
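/* How the hook above is presumably wired at startup, inferred only from
   the call site; the typedef name, library path and symbol name below are
   assumptions, not part of this patch. libradamsa exposes a mutator that
   fills an output buffer and returns the number of bytes produced. */

   #include <dlfcn.h>

   typedef u32 (*radamsa_mutate_t)(u8 *in, u32 in_len, u8 *out, u32 out_max,
                                   u32 seed);

   void *lib = dlopen("libradamsa.so", RTLD_NOW);    /* assumed path   */
   if (lib)
     afl->radamsa_mutate_ptr =
         (radamsa_mutate_t)dlsym(lib, "radamsa");    /* assumed symbol */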
@@ -2307,23 +2307,23 @@ radamsa_stage:
 /* we are through with this queue entry - for this iteration */
 abandon_entry:
 
-  splicing_with = -1;
+  afl->splicing_with = -1;
 
-  /* Update pending_not_fuzzed count if we made it through the calibration
+  /* Update afl->pending_not_fuzzed count if we made it through the calibration
      cycle and have not seen this entry before. */
 
-  if (!stop_soon && !queue_cur->cal_failed &&
-      (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) {
+  if (!afl->stop_soon && !afl->queue_cur->cal_failed &&
+      (afl->queue_cur->was_fuzzed == 0 || afl->queue_cur->fuzz_level == 0)) {
 
-    --pending_not_fuzzed;
-    queue_cur->was_fuzzed = 1;
-    if (queue_cur->favored) --pending_favored;
+    --afl->pending_not_fuzzed;
+    afl->queue_cur->was_fuzzed = 1;
+    if (afl->queue_cur->favored) --afl->pending_favored;
 
   }
 
-  ++queue_cur->fuzz_level;
+  ++afl->queue_cur->fuzz_level;
 
-  munmap(orig_in, queue_cur->len);
+  munmap(orig_in, afl->queue_cur->len);
 
   if (in_buf != orig_in) ck_free(in_buf);
   ck_free(out_buf);
@@ -2335,55 +2335,14 @@ abandon_entry:
 
 }
 
-struct MOpt_globals_t {
-
-  u64*      finds;
-  u64*      finds_v2;
-  u64*      cycles;
-  u64*      cycles_v2;
-  u64*      cycles_v3;
-  u32       is_pilot_mode;
-  u64*      pTime;
-  const u64 period;
-  char*     havoc_stagename;
-  char*     splice_stageformat;
-  char*     havoc_stagenameshort;
-  char*     splice_stagenameshort;
-
-} MOpt_globals_pilot = {stage_finds_puppet[0],
-
-                        stage_finds_puppet_v2[0],
-                        stage_cycles_puppet[0],
-                        stage_cycles_puppet_v2[0],
-                        stage_cycles_puppet_v3[0],
-                        1,
-                        &tmp_pilot_time,
-                        period_pilot,
-                        "MOpt-havoc",
-                        "MOpt-splice %u",
-                        "MOpt_havoc",
-                        "MOpt_splice"},
-  MOpt_globals_core = {core_operator_finds_puppet,
-                       core_operator_finds_puppet_v2,
-                       core_operator_cycles_puppet,
-                       core_operator_cycles_puppet_v2,
-                       core_operator_cycles_puppet_v3,
-                       0,
-                       &tmp_core_time,
-                       period_core,
-                       "MOpt-core-havoc",
-                       "MOpt-core-splice %u",
-                       "MOpt_core_havoc",
-                       "MOpt_core_splice"};
-
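/* The struct removed above lives on as the MOpt_globals_t typedef used in
   the new signature below; it presumably moved to a shared header that is
   outside this diff. Assumed shape, mirroring the removed fields: */

   typedef struct MOpt_globals {

     u64 *     finds;
     u64 *     finds_v2;
     u64 *     cycles;
     u64 *     cycles_v2;
     u64 *     cycles_v3;
     u32       is_pilot_mode;
     u64 *     pTime;
     const u64 period;
     char *    havoc_stagename;
     char *    splice_stageformat;
     char *    havoc_stagenameshort;
     char *    splice_stagenameshort;

   } MOpt_globals_t;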
 /* MOpt mode */
-u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
+u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
 
   if (!MOpt_globals.is_pilot_mode) {
 
     if (swarm_num == 1) {
 
-      key_module = 2;
+      afl->key_module = 2;
       return 0;
 
     }
@@ -2405,33 +2364,33 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
   /* In IGNORE_FINDS mode, skip any entries that weren't in the
      initial data set. */
 
-  if (queue_cur->depth > 1) return 1;
+  if (afl->queue_cur->depth > 1) return 1;
 
 #else
 
-  if (pending_favored) {
+  if (afl->pending_favored) {
 
     /* If we have any favored, non-fuzzed new arrivals in the queue,
        possibly skip to them at the expense of already-fuzzed or non-favored
        cases. */
 
-    if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
-        UR(100) < SKIP_TO_NEW_PROB)
+    if ((afl->queue_cur->was_fuzzed || !afl->queue_cur->favored) &&
+        UR(afl, 100) < SKIP_TO_NEW_PROB)
       return 1;
 
-  } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
+  } else if (!afl->dumb_mode && !afl->queue_cur->favored &&
+             afl->queued_paths > 10) {
 
     /* Otherwise, still possibly skip non-favored cases, albeit less often.
        The odds of skipping stuff are higher for already-fuzzed inputs and
        lower for never-fuzzed entries. */
 
-    if (queue_cycle > 1 && !queue_cur->was_fuzzed) {
+    if (afl->queue_cycle > 1 && !afl->queue_cur->was_fuzzed) {
 
-      if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
+      if (UR(afl, 100) < SKIP_NFAV_NEW_PROB) return 1;
 
     } else {
 
-      if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
+      if (UR(afl, 100) < SKIP_NFAV_OLD_PROB) return 1;
 
     }
 
@@ -2439,25 +2398,25 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
 
 #endif                                                     /* ^IGNORE_FINDS */
 
-  if (not_on_tty) {
+  if (afl->not_on_tty) {
 
     ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
-         current_entry, queued_paths, unique_crashes);
+         afl->current_entry, afl->queued_paths, afl->unique_crashes);
     fflush(stdout);
 
   }
 
   /* Map the test case into memory. */
 
-  fd = open(queue_cur->fname, O_RDONLY);
+  fd = open(afl->queue_cur->fname, O_RDONLY);
 
-  if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
+  if (fd < 0) PFATAL("Unable to open '%s'", afl->queue_cur->fname);
 
-  len = queue_cur->len;
+  len = afl->queue_cur->len;
 
   orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 
-  if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname);
+  if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", afl->queue_cur->fname);
 
   close(fd);
 
@@ -2467,29 +2426,29 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
 
   out_buf = ck_alloc_nozero(len);
 
-  subseq_tmouts = 0;
+  afl->subseq_tmouts = 0;
 
-  cur_depth = queue_cur->depth;
+  afl->cur_depth = afl->queue_cur->depth;
 
   /*******************************************
    * CALIBRATION (only if failed earlier on) *
    *******************************************/
 
-  if (queue_cur->cal_failed) {
+  if (afl->queue_cur->cal_failed) {
 
     u8 res = FAULT_TMOUT;
 
-    if (queue_cur->cal_failed < CAL_CHANCES) {
+    if (afl->queue_cur->cal_failed < CAL_CHANCES) {
 
-      res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
+      res = calibrate_case(afl, afl->queue_cur, in_buf, afl->queue_cycle - 1, 0);
 
       if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
     }
 
-    if (stop_soon || res != crash_mode) {
+    if (afl->stop_soon || res != afl->crash_mode) {
 
-      ++cur_skipped_paths;
+      ++afl->cur_skipped_paths;
       goto abandon_entry;
 
     }
@@ -2500,24 +2459,24 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
    * TRIMMING *
    ************/
 
-  if (!dumb_mode && !queue_cur->trim_done) {
+  if (!afl->dumb_mode && !afl->queue_cur->trim_done) {
 
-    u8 res = trim_case(argv, queue_cur, in_buf);
+    u8 res = trim_case(afl, afl->queue_cur, in_buf);
 
     if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
-    if (stop_soon) {
+    if (afl->stop_soon) {
 
-      ++cur_skipped_paths;
+      ++afl->cur_skipped_paths;
       goto abandon_entry;
 
     }
 
     /* Don't retry trimming, even if it failed. */
 
-    queue_cur->trim_done = 1;
+    afl->queue_cur->trim_done = 1;
 
-    len = queue_cur->len;
+    len = afl->queue_cur->len;
 
   }
 
@@ -2527,28 +2486,28 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
    * PERFORMANCE SCORE *
    *********************/
 
-  orig_perf = perf_score = calculate_score(queue_cur);
+  orig_perf = perf_score = calculate_score(afl, afl->queue_cur);
 
   /* Skip right away if -d is given, if we have done deterministic fuzzing on
      this entry ourselves (was_fuzzed), or if it has gone through deterministic
      testing in earlier, resumed runs (passed_det). */
 
-  if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
+  if (afl->skip_deterministic || afl->queue_cur->was_fuzzed ||
+      afl->queue_cur->passed_det)
     goto havoc_stage;
 
   /* Skip deterministic fuzzing if exec path checksum puts this out of scope
      for this master instance. */
 
-  if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
+  if (afl->master_max &&
+      (afl->queue_cur->exec_cksum % afl->master_max) != afl->master_id - 1)
     goto havoc_stage;
 
   cur_ms_lv = get_cur_time();
-  if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) ||
-                            (last_crash_time != 0 &&
-                             cur_ms_lv - last_crash_time < limit_time_puppet) ||
-                            last_path_time == 0))) {
+  if (!(afl->key_puppet == 0 &&
+        ((cur_ms_lv - afl->last_path_time < afl->limit_time_puppet) ||
+         (afl->last_crash_time != 0 &&
+          cur_ms_lv - afl->last_crash_time < afl->limit_time_puppet) ||
+         afl->last_path_time == 0))) {
 
-    key_puppet = 1;
+    afl->key_puppet = 1;
     goto pacemaker_fuzzing;
 
   }
@@ -2570,25 +2529,25 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
 
   /* Single walking bit. */
 
-  stage_short = "flip1";
-  stage_max = len << 3;
-  stage_name = "bitflip 1/1";
+  afl->stage_short = "flip1";
+  afl->stage_max = len << 3;
+  afl->stage_name = "bitflip 1/1";
 
-  stage_val_type = STAGE_VAL_NONE;
+  afl->stage_val_type = STAGE_VAL_NONE;
 
-  orig_hit_cnt = queued_paths + unique_crashes;
+  orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  prev_cksum = queue_cur->exec_cksum;
+  prev_cksum = afl->queue_cur->exec_cksum;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    stage_cur_byte = stage_cur >> 3;
+    afl->stage_cur_byte = afl->stage_cur >> 3;
 
-    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur);
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur);
 
     /* While flipping the least significant bit in every byte, pull off an extra
        trick to detect possible syntax tokens. In essence, the idea is that if
@@ -2617,20 +2576,20 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
 
       */
 
-    if (!dumb_mode && (stage_cur & 7) == 7) {
+    if (!afl->dumb_mode && (afl->stage_cur & 7) == 7) {
 
-      u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      u32 cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
 
-      if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
+      if (afl->stage_cur == afl->stage_max - 1 && cksum == prev_cksum) {
 
         /* If at end of file and we are still collecting a string, grab the
            final character and force output. */
 
-        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        if (a_len < MAX_AUTO_EXTRA)
+          a_collect[a_len] = out_buf[afl->stage_cur >> 3];
         ++a_len;
 
         if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-          maybe_add_auto(a_collect, a_len);
+          maybe_add_auto(afl, a_collect, a_len);
 
       } else if (cksum != prev_cksum) {
 
@@ -2638,7 +2597,7 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
            worthwhile queued up, and collect that if the answer is yes. */
 
         if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-          maybe_add_auto(a_collect, a_len);
+          maybe_add_auto(afl, a_collect, a_len);
 
         a_len = 0;
         prev_cksum = cksum;
@@ -2648,79 +2607,79 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
       /* Continue collecting string, but only if the bit flip actually made
          any difference - we don't want no-op tokens. */
 
-      if (cksum != queue_cur->exec_cksum) {
+      if (cksum != afl->queue_cur->exec_cksum) {
 
-        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        if (a_len < MAX_AUTO_EXTRA)
+          a_collect[a_len] = out_buf[afl->stage_cur >> 3];
         ++a_len;
 
       }
 
-    }                                            /* if (stage_cur & 7) == 7 */
+    }                                            /* if (afl->stage_cur & 7) == 7 */
 
-  }                                                        /* for stage_cur */
+  }                                                        /* for afl->stage_cur */
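/* A concrete example of the token trick above: for an input that starts
   with the magic "IMG", flipping the lowest bit of 'I', 'M' or 'G'
   derails the parser at the same spot every time, so the coverage
   checksum is identical across those flips and the bytes accumulate in
   a_collect[]. The first flip past the magic yields a different checksum,
   at which point the collected "IMG" (length 3, inside the stock
   MIN_AUTO_EXTRA..MAX_AUTO_EXTRA window of 3..32) is handed to
   maybe_add_auto() as a dictionary candidate. */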
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP1] += stage_max;
+  afl->stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP1] += afl->stage_max;
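/* For reference, the FLIP_BIT() macro exercised by all of these walking
   stages (shown as defined in stock AFL; the definition itself is outside
   this diff): */

   #define FLIP_BIT(_ar, _b)                    \
     do {                                       \
                                                \
       u8* _arf = (u8*)(_ar);                   \
       u32 _bf = (_b);                          \
       _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7));  \
                                                \
     } while (0)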
 
   /* Two walking bits. */
 
-  stage_name = "bitflip 2/1";
-  stage_short = "flip2";
-  stage_max = (len << 3) - 1;
+  afl->stage_name = "bitflip 2/1";
+  afl->stage_short = "flip2";
+  afl->stage_max = (len << 3) - 1;
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    stage_cur_byte = stage_cur >> 3;
+    afl->stage_cur_byte = afl->stage_cur >> 3;
 
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, afl->stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur + 1);
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, afl->stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur + 1);
 
-  }                                                        /* for stage_cur */
+  }                                                        /* for afl->stage_cur */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP2] += stage_max;
+  afl->stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP2] += afl->stage_max;
 
   /* Four walking bits. */
 
-  stage_name = "bitflip 4/1";
-  stage_short = "flip4";
-  stage_max = (len << 3) - 3;
+  afl->stage_name = "bitflip 4/1";
+  afl->stage_short = "flip4";
+  afl->stage_max = (len << 3) - 3;
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    stage_cur_byte = stage_cur >> 3;
+    afl->stage_cur_byte = afl->stage_cur >> 3;
 
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
-    FLIP_BIT(out_buf, stage_cur + 2);
-    FLIP_BIT(out_buf, stage_cur + 3);
+    FLIP_BIT(out_buf, afl->stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur + 1);
+    FLIP_BIT(out_buf, afl->stage_cur + 2);
+    FLIP_BIT(out_buf, afl->stage_cur + 3);
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
-    FLIP_BIT(out_buf, stage_cur + 2);
-    FLIP_BIT(out_buf, stage_cur + 3);
+    FLIP_BIT(out_buf, afl->stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur + 1);
+    FLIP_BIT(out_buf, afl->stage_cur + 2);
+    FLIP_BIT(out_buf, afl->stage_cur + 3);
 
-  }                                                        /* for stage_cur */
+  }                                                        /* for afl->stage_cur */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP4] += stage_max;
+  afl->stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP4] += afl->stage_max;
 
   /* Effector map setup. These macros calculate:
 
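/* The definitions behind the truncated comment above, as in stock AFL
   (EFF_MAP_SCALE2 is 3, i.e. one effector byte covers 8 input bytes):

      EFF_APOS      - position of a particular file offset in the map,
      EFF_ALEN      - length of a map with a particular number of bytes,
      EFF_SPAN_ALEN - map span for a sequence of bytes. */

   #define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
   #define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
   #define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
   #define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1)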
@@ -2750,49 +2709,49 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
 
   /* Walking byte. */
 
-  stage_name = "bitflip 8/8";
-  stage_short = "flip8";
-  stage_max = len;
+  afl->stage_name = "bitflip 8/8";
+  afl->stage_short = "flip8";
+  afl->stage_max = len;
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    stage_cur_byte = stage_cur;
+    afl->stage_cur_byte = afl->stage_cur;
 
-    out_buf[stage_cur] ^= 0xFF;
+    out_buf[afl->stage_cur] ^= 0xFF;
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
     /* We also use this stage to pull off a simple trick: we identify
        bytes that seem to have no effect on the current execution path
        even when fully flipped - and we skip them during more expensive
        deterministic stages, such as arithmetics or known ints. */
 
-    if (!eff_map[EFF_APOS(stage_cur)]) {
+    if (!eff_map[EFF_APOS(afl->stage_cur)]) {
 
       u32 cksum;
 
       /* If in dumb mode or if the file is very short, just flag everything
          without wasting time on checksums. */
 
-      if (!dumb_mode && len >= EFF_MIN_LEN)
-        cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      if (!afl->dumb_mode && len >= EFF_MIN_LEN)
+        cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
       else
-        cksum = ~queue_cur->exec_cksum;
+        cksum = ~afl->queue_cur->exec_cksum;
 
-      if (cksum != queue_cur->exec_cksum) {
+      if (cksum != afl->queue_cur->exec_cksum) {
 
-        eff_map[EFF_APOS(stage_cur)] = 1;
+        eff_map[EFF_APOS(afl->stage_cur)] = 1;
         ++eff_cnt;
 
       }
 
     }
 
-    out_buf[stage_cur] ^= 0xFF;
+    out_buf[afl->stage_cur] ^= 0xFF;
 
-  }                                                        /* for stage_cur */
+  }                                                        /* for afl->stage_cur */
 
   /* If the effector map is more than EFF_MAX_PERC dense, just flag the
      whole thing as worth fuzzing, since we wouldn't be saving much time
@@ -2803,29 +2762,29 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
 
     memset(eff_map, 1, EFF_ALEN(len));
 
-    blocks_eff_select += EFF_ALEN(len);
+    afl->blocks_eff_select += EFF_ALEN(len);
 
   } else {
 
-    blocks_eff_select += eff_cnt;
+    afl->blocks_eff_select += eff_cnt;
 
   }
 
-  blocks_eff_total += EFF_ALEN(len);
+  afl->blocks_eff_total += EFF_ALEN(len);
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP8] += stage_max;
+  afl->stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP8] += afl->stage_max;
 
   /* Two walking bytes. */
 
   if (len < 2) goto skip_bitflip;
 
-  stage_name = "bitflip 16/8";
-  stage_short = "flip16";
-  stage_cur = 0;
-  stage_max = len - 1;
+  afl->stage_name = "bitflip 16/8";
+  afl->stage_short = "flip16";
+  afl->stage_cur = 0;
+  afl->stage_max = len - 1;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -2835,35 +2794,35 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-      --stage_max;
+      --afl->stage_max;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     *(u16*)(out_buf + i) ^= 0xFFFF;
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-    ++stage_cur;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+    ++afl->stage_cur;
 
     *(u16*)(out_buf + i) ^= 0xFFFF;
 
   }                                                   /* for i = 0; i < len */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP16] += stage_max;
+  afl->stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP16] += afl->stage_max;
 
   if (len < 4) goto skip_bitflip;
 
   /* Four walking bytes. */
 
-  stage_name = "bitflip 32/8";
-  stage_short = "flip32";
-  stage_cur = 0;
-  stage_max = len - 3;
+  afl->stage_name = "bitflip 32/8";
+  afl->stage_short = "flip32";
+  afl->stage_cur = 0;
+  afl->stage_max = len - 3;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -2873,30 +2832,30 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-      --stage_max;
+      --afl->stage_max;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-    ++stage_cur;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+    ++afl->stage_cur;
 
     *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
   }                                               /* for i = 0; i < len - 3 */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP32] += stage_max;
+  afl->stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP32] += afl->stage_max;
 
 skip_bitflip:
 
-  if (no_arith) goto skip_arith;
+  if (afl->no_arith) goto skip_arith;
 
   /**********************
    * ARITHMETIC INC/DEC *
@@ -2904,12 +2863,12 @@ skip_bitflip:
 
   /* 8-bit arithmetics. */
 
-  stage_name = "arith 8/8";
-  stage_short = "arith8";
-  stage_cur = 0;
-  stage_max = 2 * len * ARITH_MAX;
+  afl->stage_name = "arith 8/8";
+  afl->stage_short = "arith8";
+  afl->stage_cur = 0;
+  afl->stage_max = 2 * len * ARITH_MAX;
 
-  stage_val_type = STAGE_VAL_LE;
+  afl->stage_val_type = STAGE_VAL_LE;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -2921,12 +2880,12 @@ skip_bitflip:
 
     if (!eff_map[EFF_APOS(i)]) {
 
-      stage_max -= 2 * ARITH_MAX;
+      afl->stage_max -= 2 * ARITH_MAX;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 1; j <= ARITH_MAX; ++j) {
 
@@ -2937,29 +2896,29 @@ skip_bitflip:
 
       if (!could_be_bitflip(r)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         out_buf[i] = orig + j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       r = orig ^ (orig - j);
 
       if (!could_be_bitflip(r)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         out_buf[i] = orig - j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       out_buf[i] = orig;
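/* The skip test used throughout this stage, as implemented in stock AFL
   (the definition sits outside this diff): a XOR delta between the old
   and new value can be the product of the earlier walking-bitflip stages
   iff it is a run of 1, 2 or 4 bits at any offset, or a byte-aligned run
   of 8, 16 or 32 bits. */

   static u8 could_be_bitflip(u32 xor_val) {

     u32 sh = 0;
     if (!xor_val) return 1;

     /* Shift left until first bit set. */
     while (!(xor_val & 1)) { ++sh; xor_val >>= 1; }

     /* 1-, 2-, and 4-bit patterns are OK anywhere. */
     if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;

     /* 8-, 16-, and 32-bit patterns are OK only if shift factor is
        divisible by 8, since that's the stepover for these ops. */
     if (sh & 7) return 0;

     if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff)
       return 1;

     return 0;

   }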
 
@@ -2967,19 +2926,19 @@ skip_bitflip:
 
   }                                                   /* for i = 0; i < len */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_ARITH8] += stage_max;
+  afl->stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_ARITH8] += afl->stage_max;
 
   /* 16-bit arithmetics, both endians. */
 
   if (len < 2) goto skip_arith;
 
-  stage_name = "arith 16/8";
-  stage_short = "arith16";
-  stage_cur = 0;
-  stage_max = 4 * (len - 1) * ARITH_MAX;
+  afl->stage_name = "arith 16/8";
+  afl->stage_short = "arith16";
+  afl->stage_cur = 0;
+  afl->stage_max = 4 * (len - 1) * ARITH_MAX;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -2991,12 +2950,12 @@ skip_bitflip:
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-      stage_max -= 4 * ARITH_MAX;
+      afl->stage_max -= 4 * ARITH_MAX;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 1; j <= ARITH_MAX; ++j) {
 
@@ -3009,59 +2968,59 @@ skip_bitflip:
          & 0xff overflow checks) and if it couldn't be a product of
          a bitflip. */
 
-      stage_val_type = STAGE_VAL_LE;
+      afl->stage_val_type = STAGE_VAL_LE;
 
       if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         *(u16*)(out_buf + i) = orig + j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         *(u16*)(out_buf + i) = orig - j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       /* Big endian comes next. Same deal. */
 
-      stage_val_type = STAGE_VAL_BE;
+      afl->stage_val_type = STAGE_VAL_BE;
 
       if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((orig >> 8) < j && !could_be_bitflip(r4)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       *(u16*)(out_buf + i) = orig;
 
@@ -3069,19 +3028,19 @@ skip_bitflip:
 
   }                                               /* for i = 0; i < len - 1 */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_ARITH16] += stage_max;
+  afl->stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_ARITH16] += afl->stage_max;
 
   /* 32-bit arithmetics, both endians. */
 
   if (len < 4) goto skip_arith;
 
-  stage_name = "arith 32/8";
-  stage_short = "arith32";
-  stage_cur = 0;
-  stage_max = 4 * (len - 3) * ARITH_MAX;
+  afl->stage_name = "arith 32/8";
+  afl->stage_short = "arith32";
+  afl->stage_cur = 0;
+  afl->stage_max = 4 * (len - 3) * ARITH_MAX;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -3094,12 +3053,12 @@ skip_bitflip:
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-      stage_max -= 4 * ARITH_MAX;
+      afl->stage_max -= 4 * ARITH_MAX;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 1; j <= ARITH_MAX; ++j) {
 
@@ -3110,59 +3069,59 @@ skip_bitflip:
       /* Little endian first. Same deal as with 16-bit: we only want to
          try if the operation would have effect on more than two bytes. */
 
-      stage_val_type = STAGE_VAL_LE;
+      afl->stage_val_type = STAGE_VAL_LE;
 
       if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         *(u32*)(out_buf + i) = orig + j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         *(u32*)(out_buf + i) = orig - j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       /* Big endian next. */
 
-      stage_val_type = STAGE_VAL_BE;
+      afl->stage_val_type = STAGE_VAL_BE;
 
       if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       *(u32*)(out_buf + i) = orig;
 
@@ -3170,10 +3129,10 @@ skip_bitflip:
 
   }                                               /* for i = 0; i < len - 3 */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_ARITH32] += stage_max;
+  afl->stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_ARITH32] += afl->stage_max;
 
 skip_arith:
 
@@ -3181,12 +3140,12 @@ skip_arith:
    * INTERESTING VALUES *
    **********************/
 
-  stage_name = "interest 8/8";
-  stage_short = "int8";
-  stage_cur = 0;
-  stage_max = len * sizeof(interesting_8);
+  afl->stage_name = "interest 8/8";
+  afl->stage_short = "int8";
+  afl->stage_cur = 0;
+  afl->stage_max = len * sizeof(interesting_8);
 
-  stage_val_type = STAGE_VAL_LE;
+  afl->stage_val_type = STAGE_VAL_LE;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -3200,12 +3159,12 @@ skip_arith:
 
     if (!eff_map[EFF_APOS(i)]) {
 
-      stage_max -= sizeof(interesting_8);
+      afl->stage_max -= sizeof(interesting_8);
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 0; j < sizeof(interesting_8); ++j) {
 
@@ -3214,36 +3173,36 @@ skip_arith:
       if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
           could_be_arith(orig, (u8)interesting_8[j], 1)) {
 
-        --stage_max;
+        --afl->stage_max;
         continue;
 
       }
 
-      stage_cur_val = interesting_8[j];
+      afl->stage_cur_val = interesting_8[j];
       out_buf[i] = interesting_8[j];
 
-      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
       out_buf[i] = orig;
-      ++stage_cur;
+      ++afl->stage_cur;
 
     }
 
   }                                                   /* for i = 0; i < len */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_INTEREST8] += stage_max;
+  afl->stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_INTEREST8] += afl->stage_max;
 
   /* Setting 16-bit integers, both endians. */
 
-  if (no_arith || len < 2) goto skip_interest;
+  if (afl->no_arith || len < 2) goto skip_interest;
 
-  stage_name = "interest 16/8";
-  stage_short = "int16";
-  stage_cur = 0;
-  stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
+  afl->stage_name = "interest 16/8";
+  afl->stage_short = "int16";
+  afl->stage_cur = 0;
+  afl->stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -3255,16 +3214,16 @@ skip_arith:
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-      stage_max -= sizeof(interesting_16);
+      afl->stage_max -= sizeof(interesting_16);
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
 
-      stage_cur_val = interesting_16[j];
+      afl->stage_cur_val = interesting_16[j];
 
       /* Skip if this could be a product of a bitflip, arithmetics,
          or single-byte interesting value insertion. */
@@ -3273,31 +3232,31 @@ skip_arith:
           !could_be_arith(orig, (u16)interesting_16[j], 2) &&
           !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
 
-        stage_val_type = STAGE_VAL_LE;
+        afl->stage_val_type = STAGE_VAL_LE;
 
         *(u16*)(out_buf + i) = interesting_16[j];
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
           !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
           !could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
           !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
 
-        stage_val_type = STAGE_VAL_BE;
+        afl->stage_val_type = STAGE_VAL_BE;
 
         *(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
     }
 
@@ -3305,19 +3264,19 @@ skip_arith:
 
   }                                               /* for i = 0; i < len - 1 */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_INTEREST16] += stage_max;
+  afl->stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_INTEREST16] += afl->stage_max;
 
   if (len < 4) goto skip_interest;
 
   /* Setting 32-bit integers, both endians. */
 
-  stage_name = "interest 32/8";
-  stage_short = "int32";
-  stage_cur = 0;
-  stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
+  afl->stage_name = "interest 32/8";
+  afl->stage_short = "int32";
+  afl->stage_cur = 0;
+  afl->stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -3330,16 +3289,16 @@ skip_arith:
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-      stage_max -= sizeof(interesting_32) >> 1;
+      afl->stage_max -= sizeof(interesting_32) >> 1;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
 
-      stage_cur_val = interesting_32[j];
+      afl->stage_cur_val = interesting_32[j];
 
       /* Skip if this could be a product of a bitflip, arithmetics,
          or word interesting value insertion. */
@@ -3348,31 +3307,31 @@ skip_arith:
           !could_be_arith(orig, interesting_32[j], 4) &&
           !could_be_interest(orig, interesting_32[j], 4, 0)) {
 
-        stage_val_type = STAGE_VAL_LE;
+        afl->stage_val_type = STAGE_VAL_LE;
 
         *(u32*)(out_buf + i) = interesting_32[j];
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
           !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
           !could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
           !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
 
-        stage_val_type = STAGE_VAL_BE;
+        afl->stage_val_type = STAGE_VAL_BE;
 
         *(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
     }
 
@@ -3380,10 +3339,10 @@ skip_arith:
 
   }                                               /* for i = 0; i < len - 3 */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_INTEREST32] += stage_max;
+  afl->stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_INTEREST32] += afl->stage_max;
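/* The interesting_* tables driving these three stages hold boundary
   values picked to hit classic edge cases (values as in stock AFL's
   config.h; the 16- and 32-bit tables prepend the narrower ones, so each
   8-bit value is also tried at the wider widths): */

   static s8  interesting_8[]  = {-128, -1, 0, 1, 16, 32, 64, 100, 127};
   static s16 interesting_16[] = {-128, -1, 0, 1, 16, 32, 64, 100, 127,
                                  -32768, -129, 128, 255, 256, 512,
                                  1000, 1024, 4096, 32767};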
 
 skip_interest:
 
@@ -3391,16 +3350,16 @@ skip_interest:
    * DICTIONARY STUFF *
    ********************/
 
-  if (!extras_cnt) goto skip_user_extras;
+  if (!afl->extras_cnt) goto skip_user_extras;
 
   /* Overwrite with user-supplied extras. */
 
-  stage_name = "user extras (over)";
-  stage_short = "ext_UO";
-  stage_cur = 0;
-  stage_max = extras_cnt * len;
+  afl->stage_name = "user extras (over)";
+  afl->stage_short = "ext_UO";
+  afl->stage_cur = 0;
+  afl->stage_max = afl->extras_cnt * len;
 
-  stage_val_type = STAGE_VAL_NONE;
+  afl->stage_val_type = STAGE_VAL_NONE;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -3408,36 +3367,36 @@ skip_interest:
 
     u32 last_len = 0;
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     /* Extras are sorted by size, from smallest to largest. This means
        that we don't have to worry about restoring the buffer in
        between writes at a particular offset determined by the outer
        loop. */
 
-    for (j = 0; j < extras_cnt; ++j) {
+    for (j = 0; j < afl->extras_cnt; ++j) {
 
-      /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
+      /* Skip extras probabilistically if afl->extras_cnt > MAX_DET_EXTRAS. Also
          skip them if there's no room to insert the payload, if the token
          is redundant, or if its entire span has no bytes set in the effector
          map. */
 
-      if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
-          extras[j].len > len - i ||
-          !memcmp(extras[j].data, out_buf + i, extras[j].len) ||
-          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
+      if ((afl->extras_cnt > MAX_DET_EXTRAS &&
+           UR(afl, afl->extras_cnt) >= MAX_DET_EXTRAS) ||
+          afl->extras[j].len > len - i ||
+          !memcmp(afl->extras[j].data, out_buf + i, afl->extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1,
+                  EFF_SPAN_ALEN(i, afl->extras[j].len))) {
 
-        --stage_max;
+        --afl->stage_max;
         continue;
 
       }
 
-      last_len = extras[j].len;
-      memcpy(out_buf + i, extras[j].data, last_len);
+      last_len = afl->extras[j].len;
+      memcpy(out_buf + i, afl->extras[j].data, last_len);
 
-      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-      ++stage_cur;
+      ++afl->stage_cur;
 
     }
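/* Why the probabilistic gate above keeps this stage cheap: once
   extras_cnt exceeds MAX_DET_EXTRAS (200 in stock config.h), each token
   survives the UR(afl, afl->extras_cnt) >= MAX_DET_EXTRAS check with
   probability MAX_DET_EXTRAS / extras_cnt, so the expected number of
   tokens actually executed per file position stays constant:

     E[tried] = extras_cnt * (MAX_DET_EXTRAS / extras_cnt) = MAX_DET_EXTRAS */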
 
@@ -3446,17 +3405,17 @@ skip_interest:
 
   }                                                   /* for i = 0; i < len */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_EXTRAS_UO] += stage_max;
+  afl->stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_EXTRAS_UO] += afl->stage_max;
 
   /* Insertion of user-supplied extras. */
 
-  stage_name = "user extras (insert)";
-  stage_short = "ext_UI";
-  stage_cur = 0;
-  stage_max = extras_cnt * (len + 1);
+  afl->stage_name = "user extras (insert)";
+  afl->stage_short = "ext_UI";
+  afl->stage_cur = 0;
+  afl->stage_max = afl->extras_cnt * (len + 1);
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -3464,31 +3423,31 @@ skip_interest:
 
   for (i = 0; i <= len; ++i) {
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
-    for (j = 0; j < extras_cnt; ++j) {
+    for (j = 0; j < afl->extras_cnt; ++j) {
 
-      if (len + extras[j].len > MAX_FILE) {
+      if (len + afl->extras[j].len > MAX_FILE) {
 
-        --stage_max;
+        --afl->stage_max;
         continue;
 
       }
 
       /* Insert token */
-      memcpy(ex_tmp + i, extras[j].data, extras[j].len);
+      memcpy(ex_tmp + i, afl->extras[j].data, afl->extras[j].len);
 
       /* Copy tail */
-      memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
+      memcpy(ex_tmp + i + afl->extras[j].len, out_buf + i, len - i);
 
-      if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
+      if (common_fuzz_stuff(afl, ex_tmp, len + afl->extras[j].len)) {
 
         ck_free(ex_tmp);
         goto abandon_entry;
 
       }
 
-      ++stage_cur;
+      ++afl->stage_cur;
 
     }
 
@@ -3499,21 +3458,21 @@ skip_interest:
 
   ck_free(ex_tmp);
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_EXTRAS_UI] += stage_max;
+  afl->stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_EXTRAS_UI] += afl->stage_max;
 
 skip_user_extras:
 
-  if (!a_extras_cnt) goto skip_extras;
+  if (!afl->a_extras_cnt) goto skip_extras;
 
-  stage_name = "auto extras (over)";
-  stage_short = "ext_AO";
-  stage_cur = 0;
-  stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
+  afl->stage_name = "auto extras (over)";
+  afl->stage_short = "ext_AO";
+  afl->stage_cur = 0;
+  afl->stage_max = MIN(afl->a_extras_cnt, USE_AUTO_EXTRAS) * len;
 
-  stage_val_type = STAGE_VAL_NONE;
+  afl->stage_val_type = STAGE_VAL_NONE;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -3521,28 +3480,28 @@ skip_user_extras:
 
     u32 last_len = 0;
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
-    for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
+    for (j = 0; j < MIN(afl->a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
 
       /* See the comment in the earlier code; extras are sorted by size. */
 
-      if (a_extras[j].len > len - i ||
-          !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
+      if (afl->a_extras[j].len > len - i ||
+          !memcmp(afl->a_extras[j].data, out_buf + i, afl->a_extras[j].len) ||
           !memchr(eff_map + EFF_APOS(i), 1,
-                  EFF_SPAN_ALEN(i, a_extras[j].len))) {
+                  EFF_SPAN_ALEN(i, afl->a_extras[j].len))) {
 
-        --stage_max;
+        --afl->stage_max;
         continue;
 
       }
 
-      last_len = a_extras[j].len;
-      memcpy(out_buf + i, a_extras[j].data, last_len);
+      last_len = afl->a_extras[j].len;
+      memcpy(out_buf + i, afl->a_extras[j].data, last_len);
 
-      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-      ++stage_cur;
+      ++afl->stage_cur;
 
     }
 
@@ -3551,10 +3510,10 @@ skip_user_extras:
 
   }                                                   /* for i = 0; i < len */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_EXTRAS_AO] += stage_max;
+  afl->stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_EXTRAS_AO] += afl->stage_max;
 
 skip_extras:
 
@@ -3562,7 +3521,7 @@ skip_extras:
      we're properly done with deterministic steps and can mark it as such
      in the .state/ directory. */
 
-  if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
+  if (!afl->queue_cur->passed_det) mark_as_det_done(afl, afl->queue_cur);
 
   /****************
    * RANDOM HAVOC *
@@ -3571,50 +3530,48 @@ skip_extras:
 havoc_stage:
 pacemaker_fuzzing:
 
-  stage_cur_byte = -1;
+  afl->stage_cur_byte = -1;
 
   /* The havoc stage mutation code is also invoked when splicing files; if the
      splice_cycle variable is set, generate different descriptions and such. */
 
   if (!splice_cycle) {
 
-    stage_name = MOpt_globals.havoc_stagename;
-    stage_short = MOpt_globals.havoc_stagenameshort;
-    stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
-                havoc_div / 100;
+    afl->stage_name = MOpt_globals.havoc_stagename;
+    afl->stage_short = MOpt_globals.havoc_stagenameshort;
+    afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+                     perf_score / afl->havoc_div / 100;
 
   } else {
 
-    static u8 tmp[32];
-
     perf_score = orig_perf;
 
-    sprintf(tmp, MOpt_globals.splice_stageformat, splice_cycle);
-    stage_name = tmp;
-    stage_short = MOpt_globals.splice_stagenameshort;
-    stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+    snprintf(afl->stage_name_buf64, 64, MOpt_globals.splice_stageformat,
+             splice_cycle);
+    afl->stage_name = afl->stage_name_buf64;
+    afl->stage_short = MOpt_globals.splice_stagenameshort;
+    afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100;
 
   }
 
   s32 temp_len_puppet;
   cur_ms_lv = get_cur_time();
 
-  // for (; swarm_now < swarm_num; ++swarm_now)
+  // for (; afl->swarm_now < swarm_num; ++afl->swarm_now)
   {
 
-    if (key_puppet == 1) {
+    if (afl->key_puppet == 1) {
 
-      if (unlikely(orig_hit_cnt_puppet == 0)) {
+      if (unlikely(afl->orig_hit_cnt_puppet == 0)) {
 
-        orig_hit_cnt_puppet = queued_paths + unique_crashes;
-        last_limit_time_start = get_cur_time();
-        SPLICE_CYCLES_puppet =
-            (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+        afl->orig_hit_cnt_puppet = afl->queued_paths + afl->unique_crashes;
+        afl->last_limit_time_start = get_cur_time();
+        afl->SPLICE_CYCLES_puppet =
+            (UR(afl, SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
              SPLICE_CYCLES_puppet_low);
 
       }
 
-    }                                                 /* if key_puppet == 1 */
+    }                                                 /* if afl->key_puppet == 1 */
 
     {
 
@@ -3622,7 +3579,7 @@ pacemaker_fuzzing:
     havoc_stage_puppet:
 #endif
 
-      stage_cur_byte = -1;
+      afl->stage_cur_byte = -1;
 
       /* The havoc stage mutation code is also invoked when splicing files; if
          the splice_cycle variable is set, generate different descriptions and
@@ -3630,35 +3587,34 @@ pacemaker_fuzzing:
 
       if (!splice_cycle) {
 
-        stage_name = MOpt_globals.havoc_stagename;
-        stage_short = MOpt_globals.havoc_stagenameshort;
-        stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-                    perf_score / havoc_div / 100;
+        afl->stage_name = MOpt_globals.havoc_stagename;
+        afl->stage_short = MOpt_globals.havoc_stagenameshort;
+        afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+                         perf_score / afl->havoc_div / 100;
 
       } else {
 
-        static u8 tmp[32];
         perf_score = orig_perf;
-        sprintf(tmp, MOpt_globals.splice_stageformat, splice_cycle);
-        stage_name = tmp;
-        stage_short = MOpt_globals.splice_stagenameshort;
-        stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+        snprintf(afl->stage_name_buf64, 64, MOpt_globals.splice_stageformat,
+                 splice_cycle);
+        afl->stage_name = afl->stage_name_buf64;
+        afl->stage_short = MOpt_globals.splice_stagenameshort;
+        afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100;
 
       }
 
-      if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
+      if (afl->stage_max < HAVOC_MIN) afl->stage_max = HAVOC_MIN;
 
       temp_len = len;
 
-      orig_hit_cnt = queued_paths + unique_crashes;
+      orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-      havoc_queued = queued_paths;
+      havoc_queued = afl->queued_paths;
 
-      for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+      for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-        u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
+        u32 use_stacking = 1 << (1 + UR(afl, HAVOC_STACK_POW2));
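         /* the shift above yields 2, 4, 8, ... up to 128 stacked mutations
            (stock HAVOC_STACK_POW2 is 7), each power of two equally likely */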
 
-        stage_cur_val = use_stacking;
+        afl->stage_cur_val = use_stacking;
 
         for (i = 0; i < operator_num; ++i) {
 
@@ -3668,17 +3624,17 @@ pacemaker_fuzzing:
 
         for (i = 0; i < use_stacking; ++i) {
 
-          switch (select_algorithm()) {
+          switch (select_algorithm(afl)) {
 
             case 0:
               /* Flip a single bit somewhere. Spooky! */
-              FLIP_BIT(out_buf, UR(temp_len << 3));
+              FLIP_BIT(out_buf, UR(afl, temp_len << 3));
               MOpt_globals.cycles_v2[STAGE_FLIP1] += 1;
               break;
 
             case 1:
               if (temp_len < 2) break;
-              temp_len_puppet = UR((temp_len << 3) - 1);
+              temp_len_puppet = UR(afl, (temp_len << 3) - 1);
               FLIP_BIT(out_buf, temp_len_puppet);
               FLIP_BIT(out_buf, temp_len_puppet + 1);
               MOpt_globals.cycles_v2[STAGE_FLIP2] += 1;
@@ -3686,7 +3642,7 @@ pacemaker_fuzzing:
 
             case 2:
               if (temp_len < 2) break;
-              temp_len_puppet = UR((temp_len << 3) - 3);
+              temp_len_puppet = UR(afl, (temp_len << 3) - 3);
               FLIP_BIT(out_buf, temp_len_puppet);
               FLIP_BIT(out_buf, temp_len_puppet + 1);
               FLIP_BIT(out_buf, temp_len_puppet + 2);
@@ -3696,55 +3652,55 @@ pacemaker_fuzzing:
 
             case 3:
               if (temp_len < 4) break;
-              out_buf[UR(temp_len)] ^= 0xFF;
+              out_buf[UR(afl, temp_len)] ^= 0xFF;
               MOpt_globals.cycles_v2[STAGE_FLIP8] += 1;
               break;
 
             case 4:
               if (temp_len < 8) break;
-              *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF;
+              *(u16*)(out_buf + UR(afl, temp_len - 1)) ^= 0xFFFF;
               MOpt_globals.cycles_v2[STAGE_FLIP16] += 1;
               break;
 
             case 5:
               if (temp_len < 8) break;
-              *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF;
+              *(u32*)(out_buf + UR(afl, temp_len - 3)) ^= 0xFFFFFFFF;
               MOpt_globals.cycles_v2[STAGE_FLIP32] += 1;
               break;
 
             case 6:
-              out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
-              out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
+              out_buf[UR(afl, temp_len)] -= 1 + UR(afl, ARITH_MAX);
+              out_buf[UR(afl, temp_len)] += 1 + UR(afl, ARITH_MAX);
               MOpt_globals.cycles_v2[STAGE_ARITH8] += 1;
               break;
 
             case 7:
               /* Randomly subtract from word, random endian. */
               if (temp_len < 8) break;
-              if (UR(2)) {
+              if (UR(afl, 2)) {
 
-                u32 pos = UR(temp_len - 1);
-                *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+                u32 pos = UR(afl, temp_len - 1);
+                *(u16*)(out_buf + pos) -= 1 + UR(afl, ARITH_MAX);
 
               } else {
 
-                u32 pos = UR(temp_len - 1);
-                u16 num = 1 + UR(ARITH_MAX);
+                u32 pos = UR(afl, temp_len - 1);
+                u16 num = 1 + UR(afl, ARITH_MAX);
                 *(u16*)(out_buf + pos) =
                     SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
 
               }
 
               /* Randomly add to word, random endian. */
-              if (UR(2)) {
+              if (UR(afl, 2)) {
 
-                u32 pos = UR(temp_len - 1);
-                *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
+                u32 pos = UR(afl, temp_len - 1);
+                *(u16*)(out_buf + pos) += 1 + UR(afl, ARITH_MAX);
 
               } else {
 
-                u32 pos = UR(temp_len - 1);
-                u16 num = 1 + UR(ARITH_MAX);
+                u32 pos = UR(afl, temp_len - 1);
+                u16 num = 1 + UR(afl, ARITH_MAX);
                 *(u16*)(out_buf + pos) =
                     SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
 
@@ -3756,15 +3712,15 @@ pacemaker_fuzzing:
             case 8:
               /* Randomly subtract from dword, random endian. */
               if (temp_len < 8) break;
-              if (UR(2)) {
+              if (UR(afl, 2)) {
 
-                u32 pos = UR(temp_len - 3);
-                *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+                u32 pos = UR(afl, temp_len - 3);
+                *(u32*)(out_buf + pos) -= 1 + UR(afl, ARITH_MAX);
 
               } else {
 
-                u32 pos = UR(temp_len - 3);
-                u32 num = 1 + UR(ARITH_MAX);
+                u32 pos = UR(afl, temp_len - 3);
+                u32 num = 1 + UR(afl, ARITH_MAX);
                 *(u32*)(out_buf + pos) =
                     SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
 
@@ -3772,15 +3728,15 @@ pacemaker_fuzzing:
 
               /* Randomly add to dword, random endian. */
               // if (temp_len < 4) break;
-              if (UR(2)) {
+              if (UR(afl, 2)) {
 
-                u32 pos = UR(temp_len - 3);
-                *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
+                u32 pos = UR(afl, temp_len - 3);
+                *(u32*)(out_buf + pos) += 1 + UR(afl, ARITH_MAX);
 
               } else {
 
-                u32 pos = UR(temp_len - 3);
-                u32 num = 1 + UR(ARITH_MAX);
+                u32 pos = UR(afl, temp_len - 3);
+                u32 num = 1 + UR(afl, ARITH_MAX);
                 *(u32*)(out_buf + pos) =
                     SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
 
@@ -3792,22 +3748,22 @@ pacemaker_fuzzing:
             case 9:
               /* Set byte to interesting value. */
               if (temp_len < 4) break;
-              out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
+              out_buf[UR(afl, temp_len)] =
+                  interesting_8[UR(afl, sizeof(interesting_8))];
               MOpt_globals.cycles_v2[STAGE_INTEREST8] += 1;
               break;
 
             case 10:
               /* Set word to interesting value, randomly choosing endian. */
               if (temp_len < 8) break;
-              if (UR(2)) {
+              if (UR(afl, 2)) {
 
-                *(u16*)(out_buf + UR(temp_len - 1)) =
-                    interesting_16[UR(sizeof(interesting_16) >> 1)];
+                *(u16*)(out_buf + UR(afl, temp_len - 1)) =
+                    interesting_16[UR(afl, sizeof(interesting_16) >> 1)];
 
               } else {
 
-                *(u16*)(out_buf + UR(temp_len - 1)) =
-                    SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]);
+                *(u16*)(out_buf + UR(afl, temp_len - 1)) =
+                    SWAP16(interesting_16[UR(afl, sizeof(interesting_16) >> 1)]);
 
               }
 
@@ -3819,15 +3775,15 @@ pacemaker_fuzzing:
 
               if (temp_len < 8) break;
 
-              if (UR(2)) {
+              if (UR(afl, 2)) {
 
-                *(u32*)(out_buf + UR(temp_len - 3)) =
-                    interesting_32[UR(sizeof(interesting_32) >> 2)];
+                *(u32*)(out_buf + UR(afl, temp_len - 3)) =
+                    interesting_32[UR(afl, sizeof(interesting_32) >> 2)];
 
               } else {
 
-                *(u32*)(out_buf + UR(temp_len - 3)) =
-                    SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]);
+                *(u32*)(out_buf + UR(afl, temp_len - 3)) =
+                    SWAP32(interesting_32[UR(afl, sizeof(interesting_32) >> 2)]);
 
               }
 
@@ -3840,7 +3796,7 @@ pacemaker_fuzzing:
                  why not. We use XOR with 1-255 to eliminate the
                  possibility of a no-op. */
 
-              out_buf[UR(temp_len)] ^= 1 + UR(255);
+              out_buf[UR(afl, temp_len)] ^= 1 + UR(afl, 255);
               MOpt_globals.cycles_v2[STAGE_RANDOMBYTE] += 1;
               break;
 
@@ -3856,9 +3812,9 @@ pacemaker_fuzzing:
 
               /* Don't delete too much. */
 
-              del_len = choose_block_len(temp_len - 1);
+              del_len = choose_block_len(afl, temp_len - 1);
 
-              del_from = UR(temp_len - del_len + 1);
+              del_from = UR(afl, temp_len - del_len + 1);
 
               memmove(out_buf + del_from, out_buf + del_from + del_len,
                       temp_len - del_from - del_len);
@@ -3876,23 +3832,23 @@ pacemaker_fuzzing:
                 /* Clone bytes (75%) or insert a block of constant bytes (25%).
                  */
 
-                u8  actually_clone = UR(4);
+                u8  actually_clone = UR(afl, 4);
                 u32 clone_from, clone_to, clone_len;
                 u8* new_buf;
 
                 if (actually_clone) {
 
-                  clone_len = choose_block_len(temp_len);
-                  clone_from = UR(temp_len - clone_len + 1);
+                  clone_len = choose_block_len(afl, temp_len);
+                  clone_from = UR(afl, temp_len - clone_len + 1);
 
                 } else {
 
-                  clone_len = choose_block_len(HAVOC_BLK_XL);
+                  clone_len = choose_block_len(afl, HAVOC_BLK_XL);
                   clone_from = 0;
 
                 }
 
-                clone_to = UR(temp_len);
+                clone_to = UR(afl, temp_len);
 
                 new_buf = ck_alloc_nozero(temp_len + clone_len);
 
@@ -3906,7 +3862,7 @@ pacemaker_fuzzing:
                   memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
                 else
                   memset(new_buf + clone_to,
-                         UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
+                         UR(afl, 2) ? UR(afl, 256)
+                                    : out_buf[UR(afl, temp_len)],
+                         clone_len);
 
                 /* Tail */
                 memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
@@ -3930,12 +3886,12 @@ pacemaker_fuzzing:
 
               if (temp_len < 2) break;
 
-              copy_len = choose_block_len(temp_len - 1);
+              copy_len = choose_block_len(afl, temp_len - 1);
 
-              copy_from = UR(temp_len - copy_len + 1);
-              copy_to = UR(temp_len - copy_len + 1);
+              copy_from = UR(afl, temp_len - copy_len + 1);
+              copy_to = UR(afl, temp_len - copy_len + 1);
 
-              if (UR(4)) {
+              if (UR(afl, 4)) {
 
                 if (copy_from != copy_to)
                   memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
@@ -3943,7 +3899,7 @@ pacemaker_fuzzing:
               } else
 
                 memset(out_buf + copy_to,
-                       UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
+                       UR(afl, 2) ? UR(afl, 256) : out_buf[UR(afl, temp_len)],
+                       copy_len);
               MOpt_globals.cycles_v2[STAGE_OverWrite75] += 1;
               break;
 
@@ -3955,9 +3911,9 @@ pacemaker_fuzzing:
 
         *MOpt_globals.pTime += 1;
 
-        u64 temp_total_found = queued_paths + unique_crashes;
+        u64 temp_total_found = afl->queued_paths + afl->unique_crashes;
 
-        if (common_fuzz_stuff(argv, out_buf, temp_len))
+        if (common_fuzz_stuff(afl, out_buf, temp_len))
           goto abandon_entry_puppet;
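         /* common_fuzz_stuff() writes the mutated buffer to the target,
            runs it and handles the result; a non-zero return means bail
            out (stop requested or the test case was skipped) */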
 
         /* out_buf might have been mangled a bit, so let's restore it to its
@@ -3970,24 +3926,24 @@ pacemaker_fuzzing:
         /* If we're finding new stuff, let's run for a bit longer, limits
            permitting. */
 
-        if (queued_paths != havoc_queued) {
+        if (afl->queued_paths != havoc_queued) {
 
-          if (perf_score <= havoc_max_mult * 100) {
+          if (perf_score <= afl->havoc_max_mult * 100) {
 
-            stage_max *= 2;
+            afl->stage_max *= 2;
             perf_score *= 2;
 
           }
 
-          havoc_queued = queued_paths;
+          havoc_queued = afl->queued_paths;
 
         }
 
-        if (unlikely(queued_paths + unique_crashes > temp_total_found)) {
+        if (unlikely(afl->queued_paths + afl->unique_crashes > temp_total_found)) {
 
           u64 temp_temp_puppet =
-              queued_paths + unique_crashes - temp_total_found;
-          total_puppet_find = total_puppet_find + temp_temp_puppet;
+              afl->queued_paths + afl->unique_crashes - temp_total_found;
+          afl->total_puppet_find = afl->total_puppet_find + temp_temp_puppet;
           for (i = 0; i < operator_num; ++i) {
 
             if (MOpt_globals.cycles_v2[i] > MOpt_globals.cycles_v3[i])
@@ -3997,21 +3953,21 @@ pacemaker_fuzzing:
 
         }                                                             /* if */
 
-      }        /* for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { */
+      }    /* for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max ...) */
 
-      new_hit_cnt = queued_paths + unique_crashes;
+      new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
       if (MOpt_globals.is_pilot_mode) {
 
         if (!splice_cycle) {
 
-          stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt;
-          stage_cycles[STAGE_HAVOC] += stage_max;
+          afl->stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt;
+          afl->stage_cycles[STAGE_HAVOC] += afl->stage_max;
 
         } else {
 
-          stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt;
-          stage_cycles[STAGE_SPLICE] += stage_max;
+          afl->stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt;
+          afl->stage_cycles[STAGE_SPLICE] += afl->stage_max;
 
         }
 
@@ -4025,8 +3981,8 @@ pacemaker_fuzzing:
 
     retry_splicing_puppet:
 
-      if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet &&
-          queued_paths > 1 && queue_cur->len > 1) {
+      if (afl->use_splicing && splice_cycle++ < afl->SPLICE_CYCLES_puppet &&
+          afl->queued_paths > 1 && afl->queue_cur->len > 1) {
 
         struct queue_entry* target;
         u32                 tid, split_at;
@@ -4040,7 +3996,7 @@ pacemaker_fuzzing:
 
           ck_free(in_buf);
           in_buf = orig_in;
-          len = queue_cur->len;
+          len = afl->queue_cur->len;
 
         }
 
@@ -4049,12 +4005,12 @@ pacemaker_fuzzing:
 
         do {
 
-          tid = UR(queued_paths);
+          tid = UR(afl, afl->queued_paths);
 
-        } while (tid == current_entry);
+        } while (tid == afl->current_entry);
 
-        splicing_with = tid;
-        target = queue;
+        afl->splicing_with = tid;
+        target = afl->queue;
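         /* the queue keeps a next_100 shortcut pointer on every 100th
            entry, so locating entry #tid costs at most ~100 link hops plus
            the shortcut hops */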
 
         while (tid >= 100) {
 
@@ -4068,10 +4024,10 @@ pacemaker_fuzzing:
 
         /* Make sure that the target has a reasonable length. */
 
-        while (target && (target->len < 2 || target == queue_cur)) {
+        while (target && (target->len < 2 || target == afl->queue_cur)) {
 
           target = target->next;
-          ++splicing_with;
+          ++afl->splicing_with;
 
         }
 
@@ -4104,7 +4060,7 @@ pacemaker_fuzzing:
 
         /* Split somewhere between the first and last differing byte. */
 
-        split_at = f_diff + UR(l_diff - f_diff);
+        split_at = f_diff + UR(afl, l_diff - f_diff);
 
         /* Do the thing. */
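         /* (concretely: keep the first split_at bytes of the current input,
            take the rest from the splice target, and fall through into a
            fresh havoc run on the combined buffer) */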
 
@@ -4126,40 +4082,40 @@ pacemaker_fuzzing:
     abandon_entry:
     abandon_entry_puppet:
 
-      if (splice_cycle >= SPLICE_CYCLES_puppet)
-        SPLICE_CYCLES_puppet =
-            (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+      if (splice_cycle >= afl->SPLICE_CYCLES_puppet)
+        afl->SPLICE_CYCLES_puppet =
+            (UR(afl, SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
              SPLICE_CYCLES_puppet_low);
 
-      splicing_with = -1;
+      afl->splicing_with = -1;
 
-      /* Update pending_not_fuzzed count if we made it through the calibration
-         cycle and have not seen this entry before. */
+      /* Update afl->pending_not_fuzzed count if we made it through the
+         calibration cycle and have not seen this entry before. */
 
-      // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) {
+      // if (!afl->stop_soon && !afl->queue_cur->cal_failed &&
+      //     !afl->queue_cur->was_fuzzed) {
 
-      //   queue_cur->was_fuzzed = 1;
-      //   --pending_not_fuzzed;
-      //   if (queue_cur->favored) --pending_favored;
+      //   afl->queue_cur->was_fuzzed = 1;
+      //   --afl->pending_not_fuzzed;
+      //   if (afl->queue_cur->favored) --afl->pending_favored;
       // }
 
-      munmap(orig_in, queue_cur->len);
+      munmap(orig_in, afl->queue_cur->len);
 
       if (in_buf != orig_in) ck_free(in_buf);
       ck_free(out_buf);
       ck_free(eff_map);
 
-      if (key_puppet == 1) {
+      if (afl->key_puppet == 1) {
 
-        if (unlikely(queued_paths + unique_crashes >
-                     ((queued_paths + unique_crashes) * limit_time_bound +
-                      orig_hit_cnt_puppet))) {
+        if (unlikely(afl->queued_paths + afl->unique_crashes >
+                     ((afl->queued_paths + afl->unique_crashes) *
+                      limit_time_bound + afl->orig_hit_cnt_puppet))) {
 
-          key_puppet = 0;
+          afl->key_puppet = 0;
           cur_ms_lv = get_cur_time();
-          new_hit_cnt = queued_paths + unique_crashes;
-          orig_hit_cnt_puppet = 0;
-          last_limit_time_start = 0;
+          new_hit_cnt = afl->queued_paths + afl->unique_crashes;
+          afl->orig_hit_cnt_puppet = 0;
+          afl->last_limit_time_start = 0;
 
         }
 
@@ -4167,16 +4123,16 @@ pacemaker_fuzzing:
 
       if (unlikely(*MOpt_globals.pTime > MOpt_globals.period)) {
 
-        total_pacemaker_time += *MOpt_globals.pTime;
+        afl->total_pacemaker_time += *MOpt_globals.pTime;
         *MOpt_globals.pTime = 0;
-        temp_puppet_find = total_puppet_find;
-        new_hit_cnt = queued_paths + unique_crashes;
+        afl->temp_puppet_find = afl->total_puppet_find;
+        new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
         if (MOpt_globals.is_pilot_mode) {
 
-          swarm_fitness[swarm_now] =
-              (double)(total_puppet_find - temp_puppet_find) /
-              ((double)(tmp_pilot_time) / period_pilot_tmp);
+          afl->swarm_fitness[afl->swarm_now] =
+              (double)(afl->total_puppet_find - afl->temp_puppet_find) /
+              ((double)(afl->tmp_pilot_time) / afl->period_pilot_tmp);
 
         }
 
@@ -4192,10 +4148,10 @@ pacemaker_fuzzing:
                   (double)(MOpt_globals.finds_v2[i] - MOpt_globals.finds[i]) /
                   (double)(MOpt_globals.cycles_v2[i] - MOpt_globals.cycles[i]);
 
-            if (eff_best[swarm_now][i] < temp_eff) {
+            if (afl->eff_best[afl->swarm_now][i] < temp_eff) {
 
-              eff_best[swarm_now][i] = temp_eff;
-              L_best[swarm_now][i] = x_now[swarm_now][i];
+              afl->eff_best[afl->swarm_now][i] = temp_eff;
+              afl->L_best[afl->swarm_now][i] = afl->x_now[afl->swarm_now][i];
 
             }
 
@@ -4209,50 +4165,50 @@ pacemaker_fuzzing:
 
         if (MOpt_globals.is_pilot_mode) {
 
-          swarm_now = swarm_now + 1;
-          if (swarm_now == swarm_num) {
+          afl->swarm_now = afl->swarm_now + 1;
+          if (afl->swarm_now == swarm_num) {
 
-            key_module = 1;
+            afl->key_module = 1;
             for (i = 0; i < operator_num; ++i) {
 
-              core_operator_cycles_puppet_v2[i] =
-                  core_operator_cycles_puppet[i];
-              core_operator_cycles_puppet_v3[i] =
-                  core_operator_cycles_puppet[i];
-              core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i];
+              afl->core_operator_cycles_puppet_v2[i] =
+                  afl->core_operator_cycles_puppet[i];
+              afl->core_operator_cycles_puppet_v3[i] =
+                  afl->core_operator_cycles_puppet[i];
+              afl->core_operator_finds_puppet_v2[i] =
+                  afl->core_operator_finds_puppet[i];
 
             }
 
             double swarm_eff = 0.0;
-            swarm_now = 0;
+            afl->swarm_now = 0;
             for (i = 0; i < swarm_num; ++i) {
 
-              if (swarm_fitness[i] > swarm_eff) {
+              if (afl->swarm_fitness[i] > swarm_eff) {
 
-                swarm_eff = swarm_fitness[i];
-                swarm_now = i;
+                swarm_eff = afl->swarm_fitness[i];
+                afl->swarm_now = i;
 
               }
 
             }
 
-            if (swarm_now < 0 || swarm_now > swarm_num - 1)
-              PFATAL("swarm_now error number  %d", swarm_now);
+            if (afl->swarm_now < 0 || afl->swarm_now > swarm_num - 1)
+              PFATAL("swarm_now error number %d", afl->swarm_now);
 
-          }                                    /* if swarm_now == swarm_num */
+          }                               /* if afl->swarm_now == swarm_num */
 
-          /* adjust pointers dependent on 'swarm_now' */
-          MOpt_globals_pilot.finds = stage_finds_puppet[swarm_now];
-          MOpt_globals_pilot.finds_v2 = stage_finds_puppet_v2[swarm_now];
-          MOpt_globals_pilot.cycles = stage_cycles_puppet[swarm_now];
-          MOpt_globals_pilot.cycles_v2 = stage_cycles_puppet_v2[swarm_now];
-          MOpt_globals_pilot.cycles_v3 = stage_cycles_puppet_v3[swarm_now];
+          /* adjust pointers dependent on 'afl->swarm_now' */
+          afl->mopt_globals_pilot.finds =
+              afl->stage_finds_puppet[afl->swarm_now];
+          afl->mopt_globals_pilot.finds_v2 =
+              afl->stage_finds_puppet_v2[afl->swarm_now];
+          afl->mopt_globals_pilot.cycles =
+              afl->stage_cycles_puppet[afl->swarm_now];
+          afl->mopt_globals_pilot.cycles_v2 =
+              afl->stage_cycles_puppet_v2[afl->swarm_now];
+          afl->mopt_globals_pilot.cycles_v3 =
+              afl->stage_cycles_puppet_v3[afl->swarm_now];
 
         } else {
 
-          key_module = 2;
+          afl->key_module = 2;
 
-          old_hit_count = new_hit_cnt;
+          afl->old_hit_count = new_hit_cnt;
 
         }                                                  /* if pilot_mode */
 
@@ -4268,37 +4224,41 @@ pacemaker_fuzzing:
 
 #undef FLIP_BIT
 
-#define pilot_fuzzing(a) common_fuzzing((a), MOpt_globals_pilot)
+u8 core_fuzzing(afl_state_t *afl) {
+
+  return mopt_common_fuzzing(afl, afl->mopt_globals_core);
+
+}
 
-#define core_fuzzing(a) common_fuzzing((a), MOpt_globals_core)
+u8 pilot_fuzzing(afl_state_t *afl) {
+
+  return mopt_common_fuzzing(afl, afl->mopt_globals_pilot);
+
+}
 
-void pso_updating(void) {
+void pso_updating(afl_state_t *afl) {
 
-  g_now += 1;
-  if (g_now > g_max) g_now = 0;
-  w_now = (w_init - w_end) * (g_max - g_now) / (g_max) + w_end;
+  afl->g_now += 1;
+  if (afl->g_now > afl->g_max) afl->g_now = 0;
+  afl->w_now =
+      (afl->w_init - afl->w_end) * (afl->g_max - afl->g_now) / afl->g_max +
+      afl->w_end;
   int tmp_swarm, i, j;
   u64 temp_operator_finds_puppet = 0;
   for (i = 0; i < operator_num; ++i) {
 
-    operator_finds_puppet[i] = core_operator_finds_puppet[i];
+    afl->operator_finds_puppet[i] = afl->core_operator_finds_puppet[i];
 
     for (j = 0; j < swarm_num; ++j) {
 
-      operator_finds_puppet[i] =
-          operator_finds_puppet[i] + stage_finds_puppet[j][i];
+      afl->operator_finds_puppet[i] =
+          afl->operator_finds_puppet[i] + afl->stage_finds_puppet[j][i];
 
     }
 
     temp_operator_finds_puppet =
-        temp_operator_finds_puppet + operator_finds_puppet[i];
+        temp_operator_finds_puppet + afl->operator_finds_puppet[i];
 
   }
 
   for (i = 0; i < operator_num; ++i) {
 
-    if (operator_finds_puppet[i])
-      G_best[i] = (double)((double)(operator_finds_puppet[i]) /
-                           (double)(temp_operator_finds_puppet));
+    if (afl->operator_finds_puppet[i])
+      afl->G_best[i] = (double)((double)(afl->operator_finds_puppet[i]) /
+                                (double)(temp_operator_finds_puppet));
 
   }
@@ -4308,39 +4268,39 @@ void pso_updating(void) {
     double x_temp = 0.0;
     for (i = 0; i < operator_num; ++i) {
 
-      probability_now[tmp_swarm][i] = 0.0;
-      v_now[tmp_swarm][i] =
-          w_now * v_now[tmp_swarm][i] +
-          RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) +
-          RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
-      x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
-      if (x_now[tmp_swarm][i] > v_max)
-        x_now[tmp_swarm][i] = v_max;
-      else if (x_now[tmp_swarm][i] < v_min)
-        x_now[tmp_swarm][i] = v_min;
-      x_temp += x_now[tmp_swarm][i];
+      afl->probability_now[tmp_swarm][i] = 0.0;
+      afl->v_now[tmp_swarm][i] =
+          afl->w_now * afl->v_now[tmp_swarm][i] +
+          RAND_C * (afl->L_best[tmp_swarm][i] - afl->x_now[tmp_swarm][i]) +
+          RAND_C * (afl->G_best[i] - afl->x_now[tmp_swarm][i]);
+      afl->x_now[tmp_swarm][i] += afl->v_now[tmp_swarm][i];
+      if (afl->x_now[tmp_swarm][i] > v_max)
+        afl->x_now[tmp_swarm][i] = v_max;
+      else if (afl->x_now[tmp_swarm][i] < v_min)
+        afl->x_now[tmp_swarm][i] = v_min;
+      x_temp += afl->x_now[tmp_swarm][i];
 
     }
 
     for (i = 0; i < operator_num; ++i) {
 
-      x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
+      afl->x_now[tmp_swarm][i] = afl->x_now[tmp_swarm][i] / x_temp;
       if (likely(i != 0))
-        probability_now[tmp_swarm][i] =
-            probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
+        afl->probability_now[tmp_swarm][i] =
+            afl->probability_now[tmp_swarm][i - 1] + afl->x_now[tmp_swarm][i];
       else
-        probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
+        afl->probability_now[tmp_swarm][i] = afl->x_now[tmp_swarm][i];
 
     }
 
-    if (probability_now[tmp_swarm][operator_num - 1] < 0.99 ||
-        probability_now[tmp_swarm][operator_num - 1] > 1.01)
+    if (afl->probability_now[tmp_swarm][operator_num - 1] < 0.99 ||
+        afl->probability_now[tmp_swarm][operator_num - 1] > 1.01)
       FATAL("ERROR probability");
 
   }
 
-  swarm_now = 0;
-  key_module = 0;
+  afl->swarm_now = 0;
+  afl->key_module = 0;
 
 }
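
For reference, the update applied above is one textbook particle-swarm step
per operator, followed by renormalizing each swarm's position vector into the
cumulative distribution that select_algorithm() consumes. A condensed,
self-contained sketch of that inner loop (local names are hypothetical
stand-ins for the afl->... fields; the two random factors play the role of
the RAND_C draws):

  #include <stdlib.h>

  #define OPS 16 /* stand-in for operator_num */

  static void pso_update_one(double w_now, double v[OPS], double x[OPS],
                             const double l_best[OPS],
                             const double g_best[OPS], double prob[OPS],
                             double v_min, double v_max) {

    double sum = 0.0;

    for (int i = 0; i < OPS; ++i) {

      double r1 = rand() / (double)RAND_MAX; /* stands in for RAND_C */
      double r2 = rand() / (double)RAND_MAX;

      /* inertia plus pull towards the local and the global best */
      v[i] = w_now * v[i] + r1 * (l_best[i] - x[i]) + r2 * (g_best[i] - x[i]);
      x[i] += v[i];
      if (x[i] > v_max) x[i] = v_max;
      if (x[i] < v_min) x[i] = v_min;
      sum += x[i];

    }

    /* renormalize into a cumulative distribution so that one uniform draw
       can select an operator */
    for (int i = 0; i < OPS; ++i) {

      x[i] /= sum;
      prob[i] = i ? prob[i - 1] + x[i] : x[i];

    }

  }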
 
@@ -4348,18 +4308,18 @@ void pso_updating(void) {
    to fuzz_one_original. All documentation references to fuzz_one therefore
    mean fuzz_one_original */
 
-u8 fuzz_one(char** argv) {
+u8 fuzz_one(afl_state_t *afl) {
 
   int key_val_lv = 0;
 
 #ifdef _AFL_DOCUMENT_MUTATIONS
-  if (do_document == 0) {
+  if (afl->do_document == 0) {
 
-    char* fn = alloc_printf("%s/mutations", out_dir);
+    char* fn = alloc_printf("%s/mutations", afl->out_dir);
     if (fn) {
 
-      do_document = mkdir(fn, 0700);  // if it exists we do not care
-      do_document = 1;
+      afl->do_document = mkdir(fn, 0700);  // if it exists we do not care
+      afl->do_document = 1;
       ck_free(fn);
 
     } else
@@ -4368,29 +4328,28 @@ u8 fuzz_one(char** argv) {
 
   } else {
 
-    do_document = 2;
-    stop_soon = 2;
+    afl->do_document = 2;
+    afl->stop_soon = 2;
 
   }
 
 #endif
 
-  if (limit_time_sig == 0) {
+  if (afl->limit_time_sig == 0) {
 
-    key_val_lv = fuzz_one_original(argv);
+    key_val_lv = fuzz_one_original(afl);
 
   } else {
 
-    if (key_module == 0)
-      key_val_lv = pilot_fuzzing(argv);
-    else if (key_module == 1)
-      key_val_lv = core_fuzzing(argv);
-    else if (key_module == 2)
-      pso_updating();
+    if (afl->key_module == 0)
+      key_val_lv = pilot_fuzzing(afl);
+    else if (afl->key_module == 1)
+      key_val_lv = core_fuzzing(afl);
+    else if (afl->key_module == 2)
+      pso_updating(afl);
 
   }
 
   return key_val_lv;
 
 }
-
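
For readers tracking the MOpt control flow: the dispatcher above cycles
through three key_module states, and the assignments scattered through this
file close the loop:

  /*
   *  0 (pilot_fuzzing)  --- all swarms evaluated --->  key_module = 1
   *  1 (core_fuzzing)   --- pacemaker period hit --->  key_module = 2
   *  2 (pso_updating)   --- weights recomputed   --->  key_module = 0
   */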