path: root/src/afl-fuzz-one.c
author     Andrea Fioraldi <andreafioraldi@gmail.com>  2019-09-02 18:49:43 +0200
committer  Andrea Fioraldi <andreafioraldi@gmail.com>  2019-09-02 18:49:43 +0200
commit     b24639d0113e15933e749ea0f96abe3f25a134a0 (patch)
tree       4272020625c80c0d6982d3787bebc573c0da01b8 /src/afl-fuzz-one.c
parent     2ae4ca91b48407add0e940ee13bd8b385e319a7a (diff)
download   afl++-b24639d0113e15933e749ea0f96abe3f25a134a0.tar.gz
run code formatter
Diffstat (limited to 'src/afl-fuzz-one.c')
-rw-r--r--  src/afl-fuzz-one.c  5533
1 file changed, 2919 insertions(+), 2614 deletions(-)
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index 59370c3d..1b7abedd 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -28,22 +28,31 @@ int select_algorithm(void) {
 
   int i_puppet, j_puppet;
 
-  double sele = ((double)(UR(10000))*0.0001);
+  double sele = ((double)(UR(10000)) * 0.0001);
   j_puppet = 0;
   for (i_puppet = 0; i_puppet < operator_num; ++i_puppet) {
-      if (unlikely(i_puppet == 0)) {
-          if (sele < probability_now[swarm_now][i_puppet])
-            break;
-      } else {
-          if (sele < probability_now[swarm_now][i_puppet]) {
-              j_puppet =1;
-              break;
-          }
+
+    if (unlikely(i_puppet == 0)) {
+
+      if (sele < probability_now[swarm_now][i_puppet]) break;
+
+    } else {
+
+      if (sele < probability_now[swarm_now][i_puppet]) {
+
+        j_puppet = 1;
+        break;
+
       }
+
+    }
+
   }
-  if (j_puppet ==1 && sele < probability_now[swarm_now][i_puppet-1])
+
+  if (j_puppet == 1 && sele < probability_now[swarm_now][i_puppet - 1])
     FATAL("error select_algorithm");
   return i_puppet;
+
 }
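
For readers new to MOpt, select_algorithm() is a roulette-wheel draw: sele is uniform over [0, 1) and the loop walks the per-swarm cumulative probability vector probability_now[swarm_now][] until it reaches the first bucket whose upper bound exceeds sele. A minimal standalone sketch of that selection idea (the cumulative array and helper name below are illustrative, not part of this commit):

#include <stdio.h>
#include <stdlib.h>

/* Roulette-wheel pick over a cumulative probability vector; in afl-fuzz the
   per-swarm vector probability_now[swarm_now][] plays this role. */
static int pick_operator(const double* cumulative, int n, double sele) {

  int i;
  for (i = 0; i < n; ++i)
    if (sele < cumulative[i]) break;   /* first bucket whose upper bound exceeds sele */
  return i < n ? i : n - 1;

}

int main(void) {

  const double cumulative[] = {0.10, 0.35, 0.70, 1.00};   /* hypothetical 4 operators */
  double       sele = (double)(rand() % 10000) * 0.0001;  /* mirrors UR(10000) * 0.0001 */

  printf("sele=%.4f -> operator %d\n", sele, pick_operator(cumulative, 4, sele));
  return 0;

}

The FATAL() check in the real function then guards against an inconsistent cumulative vector, i.e. a draw that already fell below the previous bucket's bound.
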
 
 /* Helper to choose random block len for block operations in fuzz_one().
@@ -58,27 +67,29 @@ static u32 choose_block_len(u32 limit) {
 
   switch (UR(rlim)) {
 
-    case 0:  min_value = 1;
-             max_value = HAVOC_BLK_SMALL;
-             break;
+    case 0:
+      min_value = 1;
+      max_value = HAVOC_BLK_SMALL;
+      break;
 
-    case 1:  min_value = HAVOC_BLK_SMALL;
-             max_value = HAVOC_BLK_MEDIUM;
-             break;
+    case 1:
+      min_value = HAVOC_BLK_SMALL;
+      max_value = HAVOC_BLK_MEDIUM;
+      break;
 
-    default: 
+    default:
 
-             if (UR(10)) {
+      if (UR(10)) {
 
-               min_value = HAVOC_BLK_MEDIUM;
-               max_value = HAVOC_BLK_LARGE;
+        min_value = HAVOC_BLK_MEDIUM;
+        max_value = HAVOC_BLK_LARGE;
 
-             } else {
+      } else {
 
-               min_value = HAVOC_BLK_LARGE;
-               max_value = HAVOC_BLK_XL;
+        min_value = HAVOC_BLK_LARGE;
+        max_value = HAVOC_BLK_XL;
 
-             }
+      }
 
   }
 
@@ -88,7 +99,6 @@ static u32 choose_block_len(u32 limit) {
 
 }
 
-
 /* Helper function to see if a particular change (xor_val = old ^ new) could
    be a product of deterministic bit flips with the lengths and stepovers
    attempted by afl-fuzz. This is used to avoid dupes in some of the
@@ -104,7 +114,12 @@ static u8 could_be_bitflip(u32 xor_val) {
 
   /* Shift left until first bit set. */
 
-  while (!(xor_val & 1)) { ++sh; xor_val >>= 1; }
+  while (!(xor_val & 1)) {
+
+    ++sh;
+    xor_val >>= 1;
+
+  }
 
   /* 1-, 2-, and 4-bit patterns are OK anywhere. */
 
@@ -115,14 +130,12 @@ static u8 could_be_bitflip(u32 xor_val) {
 
   if (sh & 7) return 0;
 
-  if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff)
-    return 1;
+  if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff) return 1;
 
   return 0;
 
 }
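
For intuition about could_be_bitflip(): a change counts as "reachable by the deterministic bitflip stages" if, after discarding trailing zero bits, the XOR pattern is 1, 2, or 4 bits wide (walking bit flips run at every bit offset), or a byte-aligned run of 8, 16, or 32 set bits (walking byte flips). A small self-contained harness restating that logic, with a few expected outcomes; the zero guard and the 1/3/15 constants sit in parts of the function outside this hunk and are restated here as in upstream afl-fuzz:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same decision procedure as could_be_bitflip(), restated standalone. */
static int bitflip_reachable(uint32_t xor_val) {

  uint32_t sh = 0;

  if (!xor_val) return 1;                        /* identical values: trivially reachable */

  while (!(xor_val & 1)) { ++sh; xor_val >>= 1; }

  if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;   /* 1-, 2-, 4-bit flips */
  if (sh & 7) return 0;                          /* wider runs must be byte aligned */
  return xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff;

}

int main(void) {

  assert(bitflip_reachable(0x00000020) == 1);    /* single bit anywhere */
  assert(bitflip_reachable(0x00000180) == 1);    /* 2-bit run straddling a byte boundary */
  assert(bitflip_reachable(0x0000ff00) == 1);    /* byte-aligned 8-bit run */
  assert(bitflip_reachable(0x00001fe0) == 0);    /* 8-bit run, but not byte aligned */
  puts("ok");
  return 0;

}
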
 
-
 /* Helper function to see if a particular value is reachable through
    arithmetic operations. Used for similar purposes. */
 
@@ -136,10 +149,15 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
 
   for (i = 0; i < blen; ++i) {
 
-    u8 a = old_val >> (8 * i),
-       b = new_val >> (8 * i);
+    u8 a = old_val >> (8 * i), b = new_val >> (8 * i);
+
+    if (a != b) {
+
+      ++diffs;
+      ov = a;
+      nv = b;
 
-    if (a != b) { ++diffs; ov = a; nv = b; }
+    }
 
   }
 
@@ -147,8 +165,7 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
 
   if (diffs == 1) {
 
-    if ((u8)(ov - nv) <= ARITH_MAX ||
-        (u8)(nv - ov) <= ARITH_MAX) return 1;
+    if ((u8)(ov - nv) <= ARITH_MAX || (u8)(nv - ov) <= ARITH_MAX) return 1;
 
   }
 
@@ -160,10 +177,15 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
 
   for (i = 0; i < blen / 2; ++i) {
 
-    u16 a = old_val >> (16 * i),
-        b = new_val >> (16 * i);
+    u16 a = old_val >> (16 * i), b = new_val >> (16 * i);
+
+    if (a != b) {
 
-    if (a != b) { ++diffs; ov = a; nv = b; }
+      ++diffs;
+      ov = a;
+      nv = b;
+
+    }
 
   }
 
@@ -171,13 +193,12 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
 
   if (diffs == 1) {
 
-    if ((u16)(ov - nv) <= ARITH_MAX ||
-        (u16)(nv - ov) <= ARITH_MAX) return 1;
+    if ((u16)(ov - nv) <= ARITH_MAX || (u16)(nv - ov) <= ARITH_MAX) return 1;
 
-    ov = SWAP16(ov); nv = SWAP16(nv);
+    ov = SWAP16(ov);
+    nv = SWAP16(nv);
 
-    if ((u16)(ov - nv) <= ARITH_MAX ||
-        (u16)(nv - ov) <= ARITH_MAX) return 1;
+    if ((u16)(ov - nv) <= ARITH_MAX || (u16)(nv - ov) <= ARITH_MAX) return 1;
 
   }
 
@@ -186,13 +207,15 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
   if (blen == 4) {
 
     if ((u32)(old_val - new_val) <= ARITH_MAX ||
-        (u32)(new_val - old_val) <= ARITH_MAX) return 1;
+        (u32)(new_val - old_val) <= ARITH_MAX)
+      return 1;
 
     new_val = SWAP32(new_val);
     old_val = SWAP32(old_val);
 
     if ((u32)(old_val - new_val) <= ARITH_MAX ||
-        (u32)(new_val - old_val) <= ARITH_MAX) return 1;
+        (u32)(new_val - old_val) <= ARITH_MAX)
+      return 1;
 
   }
 
@@ -200,8 +223,7 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
 
 }
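
The 8-bit branch of could_be_arith() boils down to: if exactly one byte differs, is the new byte within ARITH_MAX of the old one in either direction, with u8 wraparound? A tiny illustration (ARITH_MAX is assumed to be 35, its config.h default; the helper name is made up):

#include <stdio.h>
#include <stdint.h>

#define ARITH_MAX 35   /* assumed default from config.h */

/* Single-byte version of the check in could_be_arith(): reachable iff
   new = old +/- k for some k <= ARITH_MAX, modulo 256. */
static int arith8_reachable(uint8_t ov, uint8_t nv) {

  return (uint8_t)(ov - nv) <= ARITH_MAX || (uint8_t)(nv - ov) <= ARITH_MAX;

}

int main(void) {

  printf("%d\n", arith8_reachable(0x10, 0x32));   /* 1: 0x32 = 0x10 + 34 */
  printf("%d\n", arith8_reachable(0x10, 0x3f));   /* 0: 47 away in both directions, > 35 */
  printf("%d\n", arith8_reachable(0x05, 0xf0));   /* 1: 0xf0 = 0x05 - 21 (mod 256) */
  return 0;

}

The 16- and 32-bit branches repeat the same test after a byte swap, so big-endian arithmetic mutations are covered as well.
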
 
-
-/* Last but not least, a similar helper to see if insertion of an 
+/* Last but not least, a similar helper to see if insertion of an
    interesting integer is redundant given the insertions done for
    shorter blen. The last param (check_le) is set if the caller
    already executed LE insertion for current blen and wants to see
@@ -220,8 +242,8 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) {
 
     for (j = 0; j < sizeof(interesting_8); ++j) {
 
-      u32 tval = (old_val & ~(0xff << (i * 8))) |
-                 (((u8)interesting_8[j]) << (i * 8));
+      u32 tval =
+          (old_val & ~(0xff << (i * 8))) | (((u8)interesting_8[j]) << (i * 8));
 
       if (new_val == tval) return 1;
 
@@ -274,11 +296,10 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) {
 
 }
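
The byte-substitution test in could_be_interest() (the tval expression above) masks out one byte of old_val and ORs in a candidate interesting constant at the same position. A short illustration of that expression in isolation (0xff, i.e. -1, is one of the stock interesting_8 entries; the rest of the table is not repeated here):

#include <stdio.h>
#include <stdint.h>

int main(void) {

  uint32_t old_val = 0x11223344;
  uint8_t  v = 0xff;               /* one of the interesting_8 constants (-1) */
  int      i = 2;                  /* byte position being substituted */

  /* Same construction as the tval expression in could_be_interest(). */
  uint32_t tval = (old_val & ~(0xffu << (i * 8))) | ((uint32_t)v << (i * 8));

  printf("0x%08x\n", tval);        /* prints 0x11ff3344 */
  return 0;

}
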
 
-
 #ifndef IGNORE_FINDS
 
-/* Helper function to compare buffers; returns first and last differing offset. We
-   use this to find reasonable locations for splicing two files. */
+/* Helper function to compare buffers; returns first and last differing offset.
+   We use this to find reasonable locations for splicing two files. */
 
 static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {
 
@@ -313,11 +334,11 @@ static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {
 u8 fuzz_one_original(char** argv) {
 
   s32 len, fd, temp_len, i, j;
-  u8  *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
-  u64 havoc_queued = 0,  orig_hit_cnt, new_hit_cnt;
+  u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
+  u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt;
   u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
 
-  u8  ret_val = 1, doing_det = 0;
+  u8 ret_val = 1, doing_det = 0;
 
   u8  a_collect[MAX_AUTO_EXTRA];
   u32 a_len = 0;
@@ -337,8 +358,10 @@ u8 fuzz_one_original(char** argv) {
        possibly skip to them at the expense of already-fuzzed or non-favored
        cases. */
 
-    if (((queue_cur->was_fuzzed > 0 || queue_cur->fuzz_level > 0) || !queue_cur->favored) &&
-        UR(100) < SKIP_TO_NEW_PROB) return 1;
+    if (((queue_cur->was_fuzzed > 0 || queue_cur->fuzz_level > 0) ||
+         !queue_cur->favored) &&
+        UR(100) < SKIP_TO_NEW_PROB)
+      return 1;
 
   } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
 
@@ -346,7 +369,8 @@ u8 fuzz_one_original(char** argv) {
        The odds of skipping stuff are higher for already-fuzzed inputs and
        lower for never-fuzzed entries. */
 
-    if (queue_cycle > 1 && (queue_cur->fuzz_level == 0 || queue_cur->was_fuzzed)) {
+    if (queue_cycle > 1 &&
+        (queue_cur->fuzz_level == 0 || queue_cur->was_fuzzed)) {
 
       if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
 
@@ -361,9 +385,11 @@ u8 fuzz_one_original(char** argv) {
 #endif /* ^IGNORE_FINDS */
 
   if (not_on_tty) {
+
     ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
          current_entry, queued_paths, unique_crashes);
     fflush(stdout);
+
   }
 
   /* Map the test case into memory. */
@@ -376,7 +402,8 @@ u8 fuzz_one_original(char** argv) {
 
   orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 
-  if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s' with len %d", queue_cur->fname, len);
+  if (orig_in == MAP_FAILED)
+    PFATAL("Unable to mmap '%s' with len %d", queue_cur->fname, len);
 
   close(fd);
 
@@ -402,14 +429,15 @@ u8 fuzz_one_original(char** argv) {
 
       res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
 
-      if (res == FAULT_ERROR)
-        FATAL("Unable to execute target application");
+      if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
     }
 
     if (stop_soon || res != crash_mode) {
+
       ++cur_skipped_paths;
       goto abandon_entry;
+
     }
 
   }
@@ -422,12 +450,13 @@ u8 fuzz_one_original(char** argv) {
 
     u8 res = trim_case(argv, queue_cur, in_buf);
 
-    if (res == FAULT_ERROR)
-      FATAL("Unable to execute target application");
+    if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
     if (stop_soon) {
+
       ++cur_skipped_paths;
       goto abandon_entry;
+
     }
 
     /* Don't retry trimming, even if it failed. */
@@ -449,49 +478,56 @@ u8 fuzz_one_original(char** argv) {
   if (perf_score == 0) goto abandon_entry;
 
   if (custom_mutator) {
+
     stage_short = "custom";
     stage_name = "custom mutator";
     stage_max = len << 3;
     stage_val_type = STAGE_VAL_NONE;
 
-    const u32 max_seed_size = 4096*4096;
-    u8* mutated_buf = ck_alloc(max_seed_size);
+    const u32 max_seed_size = 4096 * 4096;
+    u8*       mutated_buf = ck_alloc(max_seed_size);
 
     orig_hit_cnt = queued_paths + unique_crashes;
 
-    for (stage_cur = 0 ; stage_cur < stage_max ; ++stage_cur) {
-      size_t orig_size = (size_t) len;
-      size_t mutated_size = custom_mutator(out_buf, orig_size, mutated_buf, max_seed_size, UR(UINT32_MAX));
+    for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+      size_t orig_size = (size_t)len;
+      size_t mutated_size = custom_mutator(out_buf, orig_size, mutated_buf,
+                                           max_seed_size, UR(UINT32_MAX));
       if (mutated_size > 0) {
+
         out_buf = ck_realloc(out_buf, mutated_size);
         memcpy(out_buf, mutated_buf, mutated_size);
-        if (common_fuzz_stuff(argv, out_buf, (u32) mutated_size)) {
+        if (common_fuzz_stuff(argv, out_buf, (u32)mutated_size)) {
+
           goto abandon_entry;
+
         }
+
       }
+
     }
 
     ck_free(mutated_buf);
     new_hit_cnt = queued_paths + unique_crashes;
 
-    stage_finds[STAGE_CUSTOM_MUTATOR]  += new_hit_cnt - orig_hit_cnt;
+    stage_finds[STAGE_CUSTOM_MUTATOR] += new_hit_cnt - orig_hit_cnt;
     stage_cycles[STAGE_CUSTOM_MUTATOR] += stage_max;
     goto abandon_entry;
-  }
 
+  }
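
The custom-mutator loop above drives an external hook through the call custom_mutator(out_buf, orig_size, mutated_buf, max_seed_size, UR(UINT32_MAX)) and treats a non-zero return as the length of the mutated buffer. A minimal sketch of a function matching that call shape; the name example_mutator and the exact prototype are inferred from the call site, not taken from this commit:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Toy mutator: copy the input and flip one byte chosen by the seed.
   Returning 0 signals "no mutation produced" to the caller above. */
size_t example_mutator(uint8_t* data, size_t size, uint8_t* mutated_out,
                       size_t max_size, unsigned int seed) {

  if (!size || size > max_size) return 0;

  memcpy(mutated_out, data, size);
  mutated_out[seed % size] ^= 0xff;

  return size;

}
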
 
   /* Skip right away if -d is given, if it has not been chosen sufficiently
      often to warrant the expensive deterministic stage (fuzz_level), or
      if it has gone through deterministic testing in earlier, resumed runs
      (passed_det). */
 
-  if (skip_deterministic
-     || ((!queue_cur->passed_det)
-        && perf_score < (
-              queue_cur->depth * 30 <= havoc_max_mult * 100
-              ? queue_cur->depth * 30
-              : havoc_max_mult * 100))
-     || queue_cur->passed_det)
+  if (skip_deterministic ||
+      ((!queue_cur->passed_det) &&
+       perf_score < (queue_cur->depth * 30 <= havoc_max_mult * 100
+                         ? queue_cur->depth * 30
+                         : havoc_max_mult * 100)) ||
+      queue_cur->passed_det)
 #ifdef USE_PYTHON
     goto python_stage;
 #else
@@ -514,17 +550,20 @@ u8 fuzz_one_original(char** argv) {
    * SIMPLE BITFLIP (+dictionary construction) *
    *********************************************/
 
-#define FLIP_BIT(_ar, _b) do { \
-    u8* _arf = (u8*)(_ar); \
-    u32 _bf = (_b); \
-    _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
+#define FLIP_BIT(_ar, _b)                   \
+  do {                                      \
+                                            \
+    u8* _arf = (u8*)(_ar);                  \
+    u32 _bf = (_b);                         \
+    _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \
+                                            \
   } while (0)
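
FLIP_BIT(_ar, _b) treats the buffer as a bit string: _b >> 3 selects the byte, and the mask 128 >> (_b & 7) selects the bit within it, numbering from the most significant bit. A two-line standalone illustration (not part of the commit):

#include <assert.h>
#include <stdint.h>

int main(void) {

  uint8_t buf[2] = {0x00, 0x00};

  /* Bit 10 lives in byte 10 >> 3 = 1; mask 128 >> (10 & 7) = 0x20. */
  buf[10 >> 3] ^= (uint8_t)(128 >> (10 & 7));
  assert(buf[1] == 0x20);

  return 0;

}
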
 
   /* Single walking bit. */
 
   stage_short = "flip1";
-  stage_max   = len << 3;
-  stage_name  = "bitflip 1/1";
+  stage_max = len << 3;
+  stage_name = "bitflip 1/1";
 
   stage_val_type = STAGE_VAL_NONE;
 
@@ -556,7 +595,7 @@ u8 fuzz_one_original(char** argv) {
 
        We do this here, rather than as a separate stage, because it's a nice
        way to keep the operation approximately "free" (i.e., no extra execs).
-       
+
        Empirically, performing the check when flipping the least significant bit
        is advantageous, compared to doing it at the time of more disruptive
        changes, where the program flow may be affected in more violent ways.
@@ -602,7 +641,7 @@ u8 fuzz_one_original(char** argv) {
 
       if (cksum != queue_cur->exec_cksum) {
 
-        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];        
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
         ++a_len;
 
       }
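
The token-collection bookkeeping around this check works roughly as follows: while walking the least-significant-bit flips, bytes whose flip changes the execution checksum are appended to a_collect[], and a run is flushed as a candidate dictionary token whenever the checksum pattern changes (the flush itself, via maybe_add_auto(), sits outside this hunk). A simplified, hedged restatement of the grouping idea with the execution checksum stubbed out; the MIN/MAX_AUTO_EXTRA values are assumed config.h defaults:

#include <stdio.h>
#include <string.h>

#define MIN_AUTO_EXTRA 3    /* assumed config.h defaults */
#define MAX_AUTO_EXTRA 32

/* Stand-in for "path checksum when byte i has its LSB flipped"; in afl-fuzz
   this comes from actually executing the target. Here only the upper-case
   "IHDR" bytes steer execution, so flipping any of them yields one distinct path. */
static unsigned flip_cksum(const char* buf, int i, unsigned baseline) {

  return (buf[i] >= 'A' && buf[i] <= 'Z') ? baseline + 1 : baseline;

}

int main(void) {

  const char* buf = "xxxxIHDRxxxx";
  unsigned    baseline = 1;                  /* checksum of the unmodified input */
  char        a_collect[MAX_AUTO_EXTRA];
  int         a_len = 0;
  unsigned    prev = baseline;

  for (int i = 0; i < (int)strlen(buf); ++i) {

    unsigned ck = flip_cksum(buf, i, baseline);

    if (ck != prev) {                        /* run boundary: flush candidate token */

      if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
        printf("candidate token: %.*s\n", a_len, a_collect);
      a_len = 0;
      prev = ck;

    }

    if (ck != baseline && a_len < MAX_AUTO_EXTRA) a_collect[a_len++] = buf[i];

  }

  return 0;                                  /* prints: candidate token: IHDR */

}
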
@@ -613,14 +652,14 @@ u8 fuzz_one_original(char** argv) {
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_FLIP1]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_FLIP1] += stage_max;
 
   /* Two walking bits. */
 
-  stage_name  = "bitflip 2/1";
+  stage_name = "bitflip 2/1";
   stage_short = "flip2";
-  stage_max   = (len << 3) - 1;
+  stage_max = (len << 3) - 1;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -640,14 +679,14 @@ u8 fuzz_one_original(char** argv) {
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_FLIP2]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_FLIP2] += stage_max;
 
   /* Four walking bits. */
 
-  stage_name  = "bitflip 4/1";
+  stage_name = "bitflip 4/1";
   stage_short = "flip4";
-  stage_max   = (len << 3) - 3;
+  stage_max = (len << 3) - 3;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -671,7 +710,7 @@ u8 fuzz_one_original(char** argv) {
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_FLIP4]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_FLIP4] += stage_max;
 
   /* Effector map setup. These macros calculate:
@@ -682,27 +721,29 @@ u8 fuzz_one_original(char** argv) {
 
    */
 
-#define EFF_APOS(_p)          ((_p) >> EFF_MAP_SCALE2)
-#define EFF_REM(_x)           ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
-#define EFF_ALEN(_l)          (EFF_APOS(_l) + !!EFF_REM(_l))
-#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)
+#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
+#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
+#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
+#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1)
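
With the default EFF_MAP_SCALE2 of 3 (one effector-map entry per 8 bytes; the value is assumed from config.h, not shown in this diff), these macros behave as in the small worked example below:

#include <assert.h>

#define EFF_MAP_SCALE2 3   /* assumed default from config.h */

#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1)

int main(void) {

  assert(EFF_APOS(20) == 2);          /* byte 20 maps to map entry 2 */
  assert(EFF_ALEN(20) == 3);          /* a 20-byte file needs 3 map entries */
  assert(EFF_ALEN(16) == 2);          /* exact multiple of 8: no rounding up */
  assert(EFF_SPAN_ALEN(6, 4) == 2);   /* bytes 6..9 straddle entries 0 and 1 */

  return 0;

}
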
 
   /* Initialize effector map for the next step (see comments below). Always
      flag first and last byte as doing something. */
 
-  eff_map    = ck_alloc(EFF_ALEN(len));
+  eff_map = ck_alloc(EFF_ALEN(len));
   eff_map[0] = 1;
 
   if (EFF_APOS(len - 1) != 0) {
+
     eff_map[EFF_APOS(len - 1)] = 1;
     ++eff_cnt;
+
   }
 
   /* Walking byte. */
 
-  stage_name  = "bitflip 8/8";
+  stage_name = "bitflip 8/8";
   stage_short = "flip8";
-  stage_max   = len;
+  stage_max = len;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -732,8 +773,10 @@ u8 fuzz_one_original(char** argv) {
         cksum = ~queue_cur->exec_cksum;
 
       if (cksum != queue_cur->exec_cksum) {
+
         eff_map[EFF_APOS(stage_cur)] = 1;
         ++eff_cnt;
+
       }
 
     }
@@ -763,17 +806,17 @@ u8 fuzz_one_original(char** argv) {
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_FLIP8]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_FLIP8] += stage_max;
 
   /* Two walking bytes. */
 
   if (len < 2) goto skip_bitflip;
 
-  stage_name  = "bitflip 16/8";
+  stage_name = "bitflip 16/8";
   stage_short = "flip16";
-  stage_cur   = 0;
-  stage_max   = len - 1;
+  stage_cur = 0;
+  stage_max = len - 1;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -782,8 +825,10 @@ u8 fuzz_one_original(char** argv) {
     /* Let's consult the effector map... */
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
+
       --stage_max;
       continue;
+
     }
 
     stage_cur_byte = i;
@@ -795,22 +840,21 @@ u8 fuzz_one_original(char** argv) {
 
     *(u16*)(out_buf + i) ^= 0xFFFF;
 
-
   }
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_FLIP16]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_FLIP16] += stage_max;
 
   if (len < 4) goto skip_bitflip;
 
   /* Four walking bytes. */
 
-  stage_name  = "bitflip 32/8";
+  stage_name = "bitflip 32/8";
   stage_short = "flip32";
-  stage_cur   = 0;
-  stage_max   = len - 3;
+  stage_cur = 0;
+  stage_max = len - 3;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -819,8 +863,10 @@ u8 fuzz_one_original(char** argv) {
     /* Let's consult the effector map... */
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
+
       --stage_max;
       continue;
+
     }
 
     stage_cur_byte = i;
@@ -836,7 +882,7 @@ u8 fuzz_one_original(char** argv) {
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_FLIP32]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_FLIP32] += stage_max;
 
 skip_bitflip:
@@ -849,10 +895,10 @@ skip_bitflip:
 
   /* 8-bit arithmetics. */
 
-  stage_name  = "arith 8/8";
+  stage_name = "arith 8/8";
   stage_short = "arith8";
-  stage_cur   = 0;
-  stage_max   = 2 * len * ARITH_MAX;
+  stage_cur = 0;
+  stage_max = 2 * len * ARITH_MAX;
 
   stage_val_type = STAGE_VAL_LE;
 
@@ -865,8 +911,10 @@ skip_bitflip:
     /* Let's consult the effector map... */
 
     if (!eff_map[EFF_APOS(i)]) {
+
       stage_max -= 2 * ARITH_MAX;
       continue;
+
     }
 
     stage_cur_byte = i;
@@ -886,9 +934,11 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
-      r =  orig ^ (orig - j);
+      r = orig ^ (orig - j);
 
       if (!could_be_bitflip(r)) {
 
@@ -898,7 +948,9 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       out_buf[i] = orig;
 
@@ -908,17 +960,17 @@ skip_bitflip:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_ARITH8]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_ARITH8] += stage_max;
 
   /* 16-bit arithmetics, both endians. */
 
   if (len < 2) goto skip_arith;
 
-  stage_name  = "arith 16/8";
+  stage_name = "arith 16/8";
   stage_short = "arith16";
-  stage_cur   = 0;
-  stage_max   = 4 * (len - 1) * ARITH_MAX;
+  stage_cur = 0;
+  stage_max = 4 * (len - 1) * ARITH_MAX;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -929,25 +981,26 @@ skip_bitflip:
     /* Let's consult the effector map... */
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
+
       stage_max -= 4 * ARITH_MAX;
       continue;
+
     }
 
     stage_cur_byte = i;
 
     for (j = 1; j <= ARITH_MAX; ++j) {
 
-      u16 r1 = orig ^ (orig + j),
-          r2 = orig ^ (orig - j),
+      u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
           r3 = orig ^ SWAP16(SWAP16(orig) + j),
           r4 = orig ^ SWAP16(SWAP16(orig) - j);
 
       /* Try little endian addition and subtraction first. Do it only
-         if the operation would affect more than one byte (hence the 
+         if the operation would affect more than one byte (hence the
          & 0xff overflow checks) and if it couldn't be a product of
          a bitflip. */
 
-      stage_val_type = STAGE_VAL_LE; 
+      stage_val_type = STAGE_VAL_LE;
 
       if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
 
@@ -956,8 +1009,10 @@ skip_bitflip:
 
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
- 
-      } else --stage_max;
+
+      } else
+
+        --stage_max;
 
       if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
 
@@ -967,13 +1022,14 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       /* Big endian comes next. Same deal. */
 
       stage_val_type = STAGE_VAL_BE;
 
-
       if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
 
         stage_cur_val = j;
@@ -982,7 +1038,9 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       if ((orig >> 8) < j && !could_be_bitflip(r4)) {
 
@@ -992,7 +1050,9 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       *(u16*)(out_buf + i) = orig;
 
@@ -1002,17 +1062,17 @@ skip_bitflip:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_ARITH16]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_ARITH16] += stage_max;
 
   /* 32-bit arithmetics, both endians. */
 
   if (len < 4) goto skip_arith;
 
-  stage_name  = "arith 32/8";
+  stage_name = "arith 32/8";
   stage_short = "arith32";
-  stage_cur   = 0;
-  stage_max   = 4 * (len - 3) * ARITH_MAX;
+  stage_cur = 0;
+  stage_max = 4 * (len - 3) * ARITH_MAX;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1024,16 +1084,17 @@ skip_bitflip:
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
+
       stage_max -= 4 * ARITH_MAX;
       continue;
+
     }
 
     stage_cur_byte = i;
 
     for (j = 1; j <= ARITH_MAX; ++j) {
 
-      u32 r1 = orig ^ (orig + j),
-          r2 = orig ^ (orig - j),
+      u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
           r3 = orig ^ SWAP32(SWAP32(orig) + j),
           r4 = orig ^ SWAP32(SWAP32(orig) - j);
 
@@ -1050,7 +1111,9 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
 
@@ -1060,7 +1123,9 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       /* Big endian next. */
 
@@ -1074,7 +1139,9 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
 
@@ -1084,7 +1151,9 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       *(u32*)(out_buf + i) = orig;
 
@@ -1094,7 +1163,7 @@ skip_bitflip:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_ARITH32]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_ARITH32] += stage_max;
 
 skip_arith:
@@ -1103,10 +1172,10 @@ skip_arith:
    * INTERESTING VALUES *
    **********************/
 
-  stage_name  = "interest 8/8";
+  stage_name = "interest 8/8";
   stage_short = "int8";
-  stage_cur   = 0;
-  stage_max   = len * sizeof(interesting_8);
+  stage_cur = 0;
+  stage_max = len * sizeof(interesting_8);
 
   stage_val_type = STAGE_VAL_LE;
 
@@ -1121,8 +1190,10 @@ skip_arith:
     /* Let's consult the effector map... */
 
     if (!eff_map[EFF_APOS(i)]) {
+
       stage_max -= sizeof(interesting_8);
       continue;
+
     }
 
     stage_cur_byte = i;
@@ -1133,8 +1204,10 @@ skip_arith:
 
       if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
           could_be_arith(orig, (u8)interesting_8[j], 1)) {
+
         --stage_max;
         continue;
+
       }
 
       stage_cur_val = interesting_8[j];
@@ -1151,17 +1224,17 @@ skip_arith:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_INTEREST8]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_INTEREST8] += stage_max;
 
   /* Setting 16-bit integers, both endians. */
 
   if (no_arith || len < 2) goto skip_interest;
 
-  stage_name  = "interest 16/8";
+  stage_name = "interest 16/8";
   stage_short = "int16";
-  stage_cur   = 0;
-  stage_max   = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
+  stage_cur = 0;
+  stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1172,8 +1245,10 @@ skip_arith:
     /* Let's consult the effector map... */
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
+
       stage_max -= sizeof(interesting_16);
       continue;
+
     }
 
     stage_cur_byte = i;
@@ -1196,7 +1271,9 @@ skip_arith:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
           !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
@@ -1209,7 +1286,9 @@ skip_arith:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
     }
 
@@ -1219,17 +1298,17 @@ skip_arith:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_INTEREST16]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_INTEREST16] += stage_max;
 
   if (len < 4) goto skip_interest;
 
   /* Setting 32-bit integers, both endians. */
 
-  stage_name  = "interest 32/8";
+  stage_name = "interest 32/8";
   stage_short = "int32";
-  stage_cur   = 0;
-  stage_max   = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
+  stage_cur = 0;
+  stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1241,8 +1320,10 @@ skip_arith:
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
+
       stage_max -= sizeof(interesting_32) >> 1;
       continue;
+
     }
 
     stage_cur_byte = i;
@@ -1265,7 +1346,9 @@ skip_arith:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
           !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
@@ -1278,7 +1361,9 @@ skip_arith:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
     }
 
@@ -1288,7 +1373,7 @@ skip_arith:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_INTEREST32]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_INTEREST32] += stage_max;
 
 skip_interest:
@@ -1301,10 +1386,10 @@ skip_interest:
 
   /* Overwrite with user-supplied extras. */
 
-  stage_name  = "user extras (over)";
+  stage_name = "user extras (over)";
   stage_short = "ext_UO";
-  stage_cur   = 0;
-  stage_max   = extras_cnt * len;
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
 
   stage_val_type = STAGE_VAL_NONE;
 
@@ -1354,15 +1439,15 @@ skip_interest:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_UO]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_EXTRAS_UO] += stage_max;
 
   /* Insertion of user-supplied extras. */
 
-  stage_name  = "user extras (insert)";
+  stage_name = "user extras (insert)";
   stage_short = "ext_UI";
-  stage_cur   = 0;
-  stage_max   = extras_cnt * len;
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1375,8 +1460,10 @@ skip_interest:
     for (j = 0; j < extras_cnt; ++j) {
 
       if (len + extras[j].len > MAX_FILE) {
-        --stage_max; 
+
+        --stage_max;
         continue;
+
       }
 
       /* Insert token */
@@ -1386,8 +1473,10 @@ skip_interest:
       memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
 
       if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
+
         ck_free(ex_tmp);
         goto abandon_entry;
+
       }
 
       ++stage_cur;
@@ -1403,17 +1492,17 @@ skip_interest:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_UI]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_EXTRAS_UI] += stage_max;
 
 skip_user_extras:
 
   if (!a_extras_cnt) goto skip_extras;
 
-  stage_name  = "auto extras (over)";
+  stage_name = "auto extras (over)";
   stage_short = "ext_AO";
-  stage_cur   = 0;
-  stage_max   = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
+  stage_cur = 0;
+  stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
 
   stage_val_type = STAGE_VAL_NONE;
 
@@ -1431,7 +1520,8 @@ skip_user_extras:
 
       if (a_extras[j].len > len - i ||
           !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
-          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) {
+          !memchr(eff_map + EFF_APOS(i), 1,
+                  EFF_SPAN_ALEN(i, a_extras[j].len))) {
 
         --stage_max;
         continue;
@@ -1454,7 +1544,7 @@ skip_user_extras:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_AO]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_EXTRAS_AO] += stage_max;
 
 skip_extras:
@@ -1473,36 +1563,51 @@ python_stage:
 
   if (!py_module) goto havoc_stage;
 
-  stage_name  = "python";
+  stage_name = "python";
   stage_short = "python";
-  stage_max   = HAVOC_CYCLES * perf_score / havoc_div / 100;
+  stage_max = HAVOC_CYCLES * perf_score / havoc_div / 100;
 
   if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
 
   orig_hit_cnt = queued_paths + unique_crashes;
 
-  char* retbuf = NULL;
+  char*  retbuf = NULL;
   size_t retlen = 0;
 
   for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
     struct queue_entry* target;
-    u32 tid;
-    u8* new_buf;
+    u32                 tid;
+    u8*                 new_buf;
 
-retry_external_pick:
+  retry_external_pick:
     /* Pick a random other queue entry for passing to external API */
-    do { tid = UR(queued_paths); } while (tid == current_entry && queued_paths > 1);
+    do {
+
+      tid = UR(queued_paths);
+
+    } while (tid == current_entry && queued_paths > 1);
 
     target = queue;
 
-    while (tid >= 100) { target = target->next_100; tid -= 100; }
-    while (tid--) target = target->next;
+    while (tid >= 100) {
+
+      target = target->next_100;
+      tid -= 100;
+
+    }
+
+    while (tid--)
+      target = target->next;
 
     /* Make sure that the target has a reasonable length. */
 
-    while (target && (target->len < 2 || target == queue_cur) && queued_paths > 1) {
+    while (target && (target->len < 2 || target == queue_cur) &&
+           queued_paths > 1) {
+
       target = target->next;
       ++splicing_with;
+
     }
 
     if (!target) goto retry_external_pick;
@@ -1519,12 +1624,14 @@ retry_external_pick:
     ck_free(new_buf);
 
     if (retbuf) {
-      if (!retlen)
-        goto abandon_entry;
+
+      if (!retlen) goto abandon_entry;
 
       if (common_fuzz_stuff(argv, retbuf, retlen)) {
+
         free(retbuf);
         goto abandon_entry;
+
       }
 
       /* Reset retbuf/retlen */
@@ -1536,26 +1643,35 @@ retry_external_pick:
          permitting. */
 
       if (queued_paths != havoc_queued) {
+
         if (perf_score <= havoc_max_mult * 100) {
-          stage_max  *= 2;
+
+          stage_max *= 2;
           perf_score *= 2;
+
         }
 
         havoc_queued = queued_paths;
+
       }
+
     }
+
   }
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_PYTHON]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_PYTHON] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_PYTHON] += stage_max;
 
   if (python_only) {
+
     /* Skip other stages */
     ret_val = 0;
     goto abandon_entry;
+
   }
+
 #endif
 
   /****************
@@ -1571,10 +1687,10 @@ havoc_stage:
 
   if (!splice_cycle) {
 
-    stage_name  = "havoc";
+    stage_name = "havoc";
     stage_short = "havoc";
-    stage_max   = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-                  perf_score / havoc_div / 100;
+    stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
+                havoc_div / 100;
 
   } else {
 
@@ -1583,9 +1699,9 @@ havoc_stage:
     perf_score = orig_perf;
 
     sprintf(tmp, "splice %u", splice_cycle);
-    stage_name  = tmp;
+    stage_name = tmp;
     stage_short = "splice";
-    stage_max   = SPLICE_HAVOC * perf_score / havoc_div / 100;
+    stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
 
   }
 
@@ -1605,7 +1721,7 @@ havoc_stage:
     u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
 
     stage_cur_val = use_stacking;
- 
+
     for (i = 0; i < use_stacking; ++i) {
 
       switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) {
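
A few lines above, use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)) decides how many mutations get stacked per havoc iteration: with HAVOC_STACK_POW2 assumed at its config.h default of 7, that is a power of two between 2 and 128, each draw equally likely. A quick standalone way to see the resulting distribution (not part of the commit):

#include <stdio.h>
#include <stdlib.h>

#define HAVOC_STACK_POW2 7   /* assumed default from config.h */

int main(void) {

  int counts[HAVOC_STACK_POW2 + 1] = {0};

  /* Sample the stacking count the same way the havoc loop does. */
  for (int n = 0; n < 70000; ++n) {

    int shift = 1 + rand() % HAVOC_STACK_POW2;   /* mirrors 1 + UR(HAVOC_STACK_POW2) */
    counts[shift]++;

  }

  for (int b = 1; b <= HAVOC_STACK_POW2; ++b)
    printf("%3d stacked mutations: ~%d draws\n", 1 << b, counts[b]);

  return 0;

}
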
@@ -1617,7 +1733,7 @@ havoc_stage:
           FLIP_BIT(out_buf, UR(temp_len << 3));
           break;
 
-        case 1: 
+        case 1:
 
           /* Set byte to interesting value. */
 
@@ -1633,12 +1749,12 @@ havoc_stage:
           if (UR(2)) {
 
             *(u16*)(out_buf + UR(temp_len - 1)) =
-              interesting_16[UR(sizeof(interesting_16) >> 1)];
+                interesting_16[UR(sizeof(interesting_16) >> 1)];
 
           } else {
 
-            *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16(
-              interesting_16[UR(sizeof(interesting_16) >> 1)]);
+            *(u16*)(out_buf + UR(temp_len - 1)) =
+                SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]);
 
           }
 
@@ -1651,14 +1767,14 @@ havoc_stage:
           if (temp_len < 4) break;
 
           if (UR(2)) {
-  
+
             *(u32*)(out_buf + UR(temp_len - 3)) =
-              interesting_32[UR(sizeof(interesting_32) >> 2)];
+                interesting_32[UR(sizeof(interesting_32) >> 2)];
 
           } else {
 
-            *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32(
-              interesting_32[UR(sizeof(interesting_32) >> 2)]);
+            *(u32*)(out_buf + UR(temp_len - 3)) =
+                SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]);
 
           }
 
@@ -1696,7 +1812,7 @@ havoc_stage:
             u16 num = 1 + UR(ARITH_MAX);
 
             *(u16*)(out_buf + pos) =
-              SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
+                SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
 
           }
 
@@ -1720,7 +1836,7 @@ havoc_stage:
             u16 num = 1 + UR(ARITH_MAX);
 
             *(u16*)(out_buf + pos) =
-              SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
+                SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
 
           }
 
@@ -1744,7 +1860,7 @@ havoc_stage:
             u32 num = 1 + UR(ARITH_MAX);
 
             *(u32*)(out_buf + pos) =
-              SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
+                SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
 
           }
 
@@ -1768,7 +1884,7 @@ havoc_stage:
             u32 num = 1 + UR(ARITH_MAX);
 
             *(u32*)(out_buf + pos) =
-              SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
+                SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
 
           }
 
@@ -1785,28 +1901,28 @@ havoc_stage:
 
         case 11 ... 12: {
 
-            /* Delete bytes. We're making this a bit more likely
-               than insertion (the next option) in hopes of keeping
-               files reasonably small. */
+          /* Delete bytes. We're making this a bit more likely
+             than insertion (the next option) in hopes of keeping
+             files reasonably small. */
 
-            u32 del_from, del_len;
+          u32 del_from, del_len;
 
-            if (temp_len < 2) break;
+          if (temp_len < 2) break;
 
-            /* Don't delete too much. */
+          /* Don't delete too much. */
 
-            del_len = choose_block_len(temp_len - 1);
+          del_len = choose_block_len(temp_len - 1);
 
-            del_from = UR(temp_len - del_len + 1);
+          del_from = UR(temp_len - del_len + 1);
 
-            memmove(out_buf + del_from, out_buf + del_from + del_len,
-                    temp_len - del_from - del_len);
+          memmove(out_buf + del_from, out_buf + del_from + del_len,
+                  temp_len - del_from - del_len);
 
-            temp_len -= del_len;
+          temp_len -= del_len;
 
-            break;
+          break;
 
-          }
+        }
 
         case 13:
 
@@ -1820,7 +1936,7 @@ havoc_stage:
 
             if (actually_clone) {
 
-              clone_len  = choose_block_len(temp_len);
+              clone_len = choose_block_len(temp_len);
               clone_from = UR(temp_len - clone_len + 1);
 
             } else {
@@ -1830,7 +1946,7 @@ havoc_stage:
 
             }
 
-            clone_to   = UR(temp_len);
+            clone_to = UR(temp_len);
 
             new_buf = ck_alloc_nozero(temp_len + clone_len);
 
@@ -1860,128 +1976,129 @@ havoc_stage:
 
         case 14: {
 
-            /* Overwrite bytes with a randomly selected chunk (75%) or fixed
-               bytes (25%). */
+          /* Overwrite bytes with a randomly selected chunk (75%) or fixed
+             bytes (25%). */
 
-            u32 copy_from, copy_to, copy_len;
+          u32 copy_from, copy_to, copy_len;
 
-            if (temp_len < 2) break;
+          if (temp_len < 2) break;
 
-            copy_len  = choose_block_len(temp_len - 1);
+          copy_len = choose_block_len(temp_len - 1);
 
-            copy_from = UR(temp_len - copy_len + 1);
-            copy_to   = UR(temp_len - copy_len + 1);
+          copy_from = UR(temp_len - copy_len + 1);
+          copy_to = UR(temp_len - copy_len + 1);
 
-            if (UR(4)) {
+          if (UR(4)) {
 
-              if (copy_from != copy_to)
-                memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
+            if (copy_from != copy_to)
+              memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
 
-            } else memset(out_buf + copy_to,
-                          UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
+          } else
 
-            break;
+            memset(out_buf + copy_to, UR(2) ? UR(256) : out_buf[UR(temp_len)],
+                   copy_len);
 
-          }
+          break;
+
+        }
 
-        /* Values 15 and 16 can be selected only if there are any extras
-           present in the dictionaries. */
+          /* Values 15 and 16 can be selected only if there are any extras
+             present in the dictionaries. */
 
         case 15: {
 
-            /* Overwrite bytes with an extra. */
+          /* Overwrite bytes with an extra. */
 
-            if (!extras_cnt || (a_extras_cnt && UR(2))) {
+          if (!extras_cnt || (a_extras_cnt && UR(2))) {
 
-              /* No user-specified extras or odds in our favor. Let's use an
-                 auto-detected one. */
+            /* No user-specified extras or odds in our favor. Let's use an
+               auto-detected one. */
 
-              u32 use_extra = UR(a_extras_cnt);
-              u32 extra_len = a_extras[use_extra].len;
-              u32 insert_at;
+            u32 use_extra = UR(a_extras_cnt);
+            u32 extra_len = a_extras[use_extra].len;
+            u32 insert_at;
 
-              if (extra_len > temp_len) break;
+            if (extra_len > temp_len) break;
 
-              insert_at = UR(temp_len - extra_len + 1);
-              memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len);
+            insert_at = UR(temp_len - extra_len + 1);
+            memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len);
 
-            } else {
+          } else {
 
-              /* No auto extras or odds in our favor. Use the dictionary. */
+            /* No auto extras or odds in our favor. Use the dictionary. */
 
-              u32 use_extra = UR(extras_cnt);
-              u32 extra_len = extras[use_extra].len;
-              u32 insert_at;
+            u32 use_extra = UR(extras_cnt);
+            u32 extra_len = extras[use_extra].len;
+            u32 insert_at;
 
-              if (extra_len > temp_len) break;
+            if (extra_len > temp_len) break;
 
-              insert_at = UR(temp_len - extra_len + 1);
-              memcpy(out_buf + insert_at, extras[use_extra].data, extra_len);
+            insert_at = UR(temp_len - extra_len + 1);
+            memcpy(out_buf + insert_at, extras[use_extra].data, extra_len);
 
-            }
+          }
 
-            break;
+          break;
 
-          }
+        }
 
         case 16: {
 
-            u32 use_extra, extra_len, insert_at = UR(temp_len + 1);
-            u8* new_buf;
+          u32 use_extra, extra_len, insert_at = UR(temp_len + 1);
+          u8* new_buf;
 
-            /* Insert an extra. Do the same dice-rolling stuff as for the
-               previous case. */
+          /* Insert an extra. Do the same dice-rolling stuff as for the
+             previous case. */
 
-            if (!extras_cnt || (a_extras_cnt && UR(2))) {
+          if (!extras_cnt || (a_extras_cnt && UR(2))) {
 
-              use_extra = UR(a_extras_cnt);
-              extra_len = a_extras[use_extra].len;
+            use_extra = UR(a_extras_cnt);
+            extra_len = a_extras[use_extra].len;
 
-              if (temp_len + extra_len >= MAX_FILE) break;
+            if (temp_len + extra_len >= MAX_FILE) break;
 
-              new_buf = ck_alloc_nozero(temp_len + extra_len);
+            new_buf = ck_alloc_nozero(temp_len + extra_len);
 
-              /* Head */
-              memcpy(new_buf, out_buf, insert_at);
+            /* Head */
+            memcpy(new_buf, out_buf, insert_at);
 
-              /* Inserted part */
-              memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len);
+            /* Inserted part */
+            memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len);
 
-	    } else {
+          } else {
 
-              use_extra = UR(extras_cnt);
-              extra_len = extras[use_extra].len;
+            use_extra = UR(extras_cnt);
+            extra_len = extras[use_extra].len;
 
-              if (temp_len + extra_len >= MAX_FILE) break;
+            if (temp_len + extra_len >= MAX_FILE) break;
 
-              new_buf = ck_alloc_nozero(temp_len + extra_len);
+            new_buf = ck_alloc_nozero(temp_len + extra_len);
 
-              /* Head */
-              memcpy(new_buf, out_buf, insert_at);
+            /* Head */
+            memcpy(new_buf, out_buf, insert_at);
 
-              /* Inserted part */
-              memcpy(new_buf + insert_at, extras[use_extra].data, extra_len);
+            /* Inserted part */
+            memcpy(new_buf + insert_at, extras[use_extra].data, extra_len);
 
-            }
+          }
 
-            /* Tail */
-            memcpy(new_buf + insert_at + extra_len, out_buf + insert_at,
-                   temp_len - insert_at);
+          /* Tail */
+          memcpy(new_buf + insert_at + extra_len, out_buf + insert_at,
+                 temp_len - insert_at);
 
-            ck_free(out_buf);
-            out_buf   = new_buf;
-            temp_len += extra_len;
+          ck_free(out_buf);
+          out_buf = new_buf;
+          temp_len += extra_len;
 
-            break;
+          break;
 
-          }
+        }
 
       }
 
     }
 
-    if (common_fuzz_stuff(argv, out_buf, temp_len))
-      goto abandon_entry;
+    if (common_fuzz_stuff(argv, out_buf, temp_len)) goto abandon_entry;
 
     /* out_buf might have been mangled a bit, so let's restore it to its
        original size and shape. */
@@ -1996,8 +2113,10 @@ havoc_stage:
     if (queued_paths != havoc_queued) {
 
       if (perf_score <= havoc_max_mult * 100) {
-        stage_max  *= 2;
+
+        stage_max *= 2;
         perf_score *= 2;
+
       }
 
       havoc_queued = queued_paths;
@@ -2009,11 +2128,15 @@ havoc_stage:
   new_hit_cnt = queued_paths + unique_crashes;
 
   if (!splice_cycle) {
-    stage_finds[STAGE_HAVOC]  += new_hit_cnt - orig_hit_cnt;
+
+    stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt;
     stage_cycles[STAGE_HAVOC] += stage_max;
+
   } else {
-    stage_finds[STAGE_SPLICE]  += new_hit_cnt - orig_hit_cnt;
+
+    stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt;
     stage_cycles[STAGE_SPLICE] += stage_max;
+
   }
 
 #ifndef IGNORE_FINDS
@@ -2029,38 +2152,53 @@ havoc_stage:
 
 retry_splicing:
 
-  if (use_splicing && splice_cycle++ < SPLICE_CYCLES &&
-      queued_paths > 1 && queue_cur->len > 1) {
+  if (use_splicing && splice_cycle++ < SPLICE_CYCLES && queued_paths > 1 &&
+      queue_cur->len > 1) {
 
     struct queue_entry* target;
-    u32 tid, split_at;
-    u8* new_buf;
-    s32 f_diff, l_diff;
+    u32                 tid, split_at;
+    u8*                 new_buf;
+    s32                 f_diff, l_diff;
 
     /* First of all, if we've modified in_buf for havoc, let's clean that
        up... */
 
     if (in_buf != orig_in) {
+
       ck_free(in_buf);
       in_buf = orig_in;
       len = queue_cur->len;
+
     }
 
     /* Pick a random queue entry and seek to it. Don't splice with yourself. */
 
-    do { tid = UR(queued_paths); } while (tid == current_entry);
+    do {
+
+      tid = UR(queued_paths);
+
+    } while (tid == current_entry);
 
     splicing_with = tid;
     target = queue;
 
-    while (tid >= 100) { target = target->next_100; tid -= 100; }
-    while (tid--) target = target->next;
+    while (tid >= 100) {
+
+      target = target->next_100;
+      tid -= 100;
+
+    }
+
+    while (tid--)
+      target = target->next;
 
     /* Make sure that the target has a reasonable length. */
 
     while (target && (target->len < 2 || target == queue_cur)) {
+
       target = target->next;
       ++splicing_with;
+
     }
 
     if (!target) goto retry_splicing;
@@ -2084,8 +2222,10 @@ retry_splicing:
     locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
 
     if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
+
       ck_free(new_buf);
       goto retry_splicing;
+
     }
 
     /* Split somewhere between the first and last differing byte. */
@@ -2102,11 +2242,11 @@ retry_splicing:
     out_buf = ck_alloc_nozero(len);
     memcpy(out_buf, in_buf, len);
 
-#ifdef USE_PYTHON
+#  ifdef USE_PYTHON
     goto python_stage;
-#else
+#  else
     goto havoc_stage;
-#endif
+#  endif
 
   }
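
The splice step above boils down to: locate_diffs() reports the first and last bytes where the two inputs disagree (f_diff, l_diff), the guard rejects degenerate pairs, and a split point is then chosen strictly between them; as in upstream afl-fuzz, the spliced input keeps the head of the current entry and the tail of the donor (the split_at computation itself falls between the hunks shown here). A hedged standalone sketch of that combination, with illustrative names:

#include <stdlib.h>
#include <string.h>

/* Sketch of the splice: callers guarantee 0 <= f_diff < l_diff and that l_diff
   lies within both buffers, as established by locate_diffs() and the checks above. */
static unsigned char* splice_bufs(const unsigned char* cur,
                                  const unsigned char* donor, size_t donor_len,
                                  long f_diff, long l_diff, size_t* out_len) {

  size_t         split_at = (size_t)f_diff + (size_t)(rand() % (l_diff - f_diff));
  unsigned char* out = malloc(donor_len);

  if (!out) return NULL;

  memcpy(out, donor, donor_len);   /* take the donor entry wholesale... */
  memcpy(out, cur, split_at);      /* ...then put the current input's head back */

  *out_len = donor_len;
  return out;

}
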
 
@@ -2121,10 +2261,13 @@ abandon_entry:
   /* Update pending_not_fuzzed count if we made it through the calibration
      cycle and have not seen this entry before. */
 
-  if (!stop_soon && !queue_cur->cal_failed && (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) {
+  if (!stop_soon && !queue_cur->cal_failed &&
+      (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) {
+
     --pending_not_fuzzed;
     queue_cur->was_fuzzed = 1;
     if (queue_cur->favored) --pending_favored;
+
   }
 
   ++queue_cur->fuzz_level;
@@ -2144,3576 +2287,3738 @@ abandon_entry:
 /* MOpt mode */
 u8 pilot_fuzzing(char** argv) {
 
-	s32 len, fd, temp_len, i, j;
-	u8  *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
-	u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv;
-	u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
+  s32 len, fd, temp_len, i, j;
+  u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
+  u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv;
+  u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
 
-	u8  ret_val = 1, doing_det = 0;
+  u8 ret_val = 1, doing_det = 0;
 
-	u8  a_collect[MAX_AUTO_EXTRA];
-	u32 a_len = 0;
+  u8  a_collect[MAX_AUTO_EXTRA];
+  u32 a_len = 0;
 
 #ifdef IGNORE_FINDS
 
-	/* In IGNORE_FINDS mode, skip any entries that weren't in the
-	   initial data set. */
+  /* In IGNORE_FINDS mode, skip any entries that weren't in the
+     initial data set. */
 
-	if (queue_cur->depth > 1) return 1;
+  if (queue_cur->depth > 1) return 1;
 
 #else
 
-	if (pending_favored) {
+  if (pending_favored) {
 
-		/* If we have any favored, non-fuzzed new arrivals in the queue,
-		   possibly skip to them at the expense of already-fuzzed or non-favored
-		   cases. */
+    /* If we have any favored, non-fuzzed new arrivals in the queue,
+       possibly skip to them at the expense of already-fuzzed or non-favored
+       cases. */
 
-		if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
-			UR(100) < SKIP_TO_NEW_PROB) return 1;
+    if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
+        UR(100) < SKIP_TO_NEW_PROB)
+      return 1;
 
-	}
-	else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
+  } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
 
-		/* Otherwise, still possibly skip non-favored cases, albeit less often.
-		   The odds of skipping stuff are higher for already-fuzzed inputs and
-		   lower for never-fuzzed entries. */
+    /* Otherwise, still possibly skip non-favored cases, albeit less often.
+       The odds of skipping stuff are higher for already-fuzzed inputs and
+       lower for never-fuzzed entries. */
 
-		if (queue_cycle > 1 && !queue_cur->was_fuzzed) {
+    if (queue_cycle > 1 && !queue_cur->was_fuzzed) {
 
-			if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
+      if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
 
-		}
-		else {
+    } else {
 
-			if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
+      if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
 
-		}
+    }
 
-	}
+  }
 
 #endif /* ^IGNORE_FINDS */
 
-	if (not_on_tty) {
-		ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
-			current_entry, queued_paths, unique_crashes);
-		fflush(stdout);
-	}
+  if (not_on_tty) {
 
-	/* Map the test case into memory. */
+    ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
+         current_entry, queued_paths, unique_crashes);
+    fflush(stdout);
 
-	fd = open(queue_cur->fname, O_RDONLY);
+  }
 
-	if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
+  /* Map the test case into memory. */
 
-	len = queue_cur->len;
+  fd = open(queue_cur->fname, O_RDONLY);
 
-	orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+  if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
 
-	if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname);
+  len = queue_cur->len;
 
-	close(fd);
+  orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 
-	/* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
-	   single byte anyway, so it wouldn't give us any performance or memory usage
-	   benefits. */
+  if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname);
 
-	out_buf = ck_alloc_nozero(len);
+  close(fd);
 
-	subseq_tmouts = 0;
+  /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
+     single byte anyway, so it wouldn't give us any performance or memory usage
+     benefits. */
 
-	cur_depth = queue_cur->depth;
+  out_buf = ck_alloc_nozero(len);
 
-	/*******************************************
-	 * CALIBRATION (only if failed earlier on) *
-	 *******************************************/
+  subseq_tmouts = 0;
 
-	if (queue_cur->cal_failed) {
+  cur_depth = queue_cur->depth;
 
-		u8 res = FAULT_TMOUT;
+  /*******************************************
+   * CALIBRATION (only if failed earlier on) *
+   *******************************************/
 
-		if (queue_cur->cal_failed < CAL_CHANCES) {
+  if (queue_cur->cal_failed) {
 
-			res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
+    u8 res = FAULT_TMOUT;
 
-			if (res == FAULT_ERROR)
-				FATAL("Unable to execute target application");
+    if (queue_cur->cal_failed < CAL_CHANCES) {
 
-		}
+      res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
 
-		if (stop_soon || res != crash_mode) {
-			++cur_skipped_paths;
-			goto abandon_entry;
-		}
+      if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
-	}
+    }
 
-	/************
-	 * TRIMMING *
-	 ************/
+    if (stop_soon || res != crash_mode) {
 
-	if (!dumb_mode && !queue_cur->trim_done) {
+      ++cur_skipped_paths;
+      goto abandon_entry;
 
-		u8 res = trim_case(argv, queue_cur, in_buf);
+    }
 
-		if (res == FAULT_ERROR)
-			FATAL("Unable to execute target application");
+  }
 
-		if (stop_soon) {
-			++cur_skipped_paths;
-			goto abandon_entry;
-		}
+  /************
+   * TRIMMING *
+   ************/
 
-		/* Don't retry trimming, even if it failed. */
+  if (!dumb_mode && !queue_cur->trim_done) {
 
-		queue_cur->trim_done = 1;
+    u8 res = trim_case(argv, queue_cur, in_buf);
+
+    if (res == FAULT_ERROR) FATAL("Unable to execute target application");
+
+    if (stop_soon) {
 
-		len = queue_cur->len;
+      ++cur_skipped_paths;
+      goto abandon_entry;
+
+    }
+
+    /* Don't retry trimming, even if it failed. */
 
-	}
+    queue_cur->trim_done = 1;
 
-	memcpy(out_buf, in_buf, len);
+    len = queue_cur->len;
 
-	/*********************
-	 * PERFORMANCE SCORE *
-	 *********************/
+  }
 
-	orig_perf = perf_score = calculate_score(queue_cur);
+  memcpy(out_buf, in_buf, len);
 
-	/* Skip right away if -d is given, if we have done deterministic fuzzing on
-	   this entry ourselves (was_fuzzed), or if it has gone through deterministic
-	   testing in earlier, resumed runs (passed_det). */
+  /*********************
+   * PERFORMANCE SCORE *
+   *********************/
 
-	if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
-		goto havoc_stage;
+  orig_perf = perf_score = calculate_score(queue_cur);
 
-	/* Skip deterministic fuzzing if exec path checksum puts this out of scope
-	   for this master instance. */
+  /* Skip right away if -d is given, if we have done deterministic fuzzing on
+     this entry ourselves (was_fuzzed), or if it has gone through deterministic
+     testing in earlier, resumed runs (passed_det). */
 
-	if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
-		goto havoc_stage;
+  if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
+    goto havoc_stage;
 
+  /* Skip deterministic fuzzing if exec path checksum puts this out of scope
+     for this master instance. */
 
-	cur_ms_lv = get_cur_time();
-	if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) ||
-		(last_crash_time != 0 && cur_ms_lv - last_crash_time < limit_time_puppet) || last_path_time == 0)))
-	{
-		key_puppet = 1;
-		goto pacemaker_fuzzing;
-	}
+  if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
+    goto havoc_stage;
 
-	doing_det = 1;
+  cur_ms_lv = get_cur_time();
+  if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) ||
+                            (last_crash_time != 0 &&
+                             cur_ms_lv - last_crash_time < limit_time_puppet) ||
+                            last_path_time == 0))) {
 
-		/*********************************************
-		 * SIMPLE BITFLIP (+dictionary construction) *
-		 *********************************************/
+    key_puppet = 1;
+    goto pacemaker_fuzzing;
 
-#define FLIP_BIT(_ar, _b) do { \
-    u8* _arf = (u8*)(_ar); \
-    u32 _bf = (_b); \
-    _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
+  }
+
+  doing_det = 1;
+
+  /*********************************************
+   * SIMPLE BITFLIP (+dictionary construction) *
+   *********************************************/
+
+#define FLIP_BIT(_ar, _b)                   \
+  do {                                      \
+                                            \
+    u8* _arf = (u8*)(_ar);                  \
+    u32 _bf = (_b);                         \
+    _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \
+                                            \
   } while (0)
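
As a quick aside for readers tracing the reformatted macro: a minimal, self-contained sketch of FLIP_BIT's addressing (bit 0 is the most significant bit of byte 0); the buffer and values below are illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    #define FLIP_BIT(_ar, _b)                     \
      do {                                        \
        uint8_t* _arf = (uint8_t*)(_ar);          \
        uint32_t _bf = (_b);                      \
        _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
      } while (0)

    int main(void) {
      uint8_t buf[2] = {0x00, 0x00};
      FLIP_BIT(buf, 0);  /* flips the MSB of byte 0: buf[0] becomes 0x80 */
      FLIP_BIT(buf, 15); /* flips the LSB of byte 1: buf[1] becomes 0x01 */
      printf("%02x %02x\n", buf[0], buf[1]); /* prints: 80 01 */
      return 0;
    }
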
 
-		 /* Single walking bit. */
+  /* Single walking bit. */
 
-		stage_short = "flip1";
-		stage_max = len << 3;
-		stage_name = "bitflip 1/1";
+  stage_short = "flip1";
+  stage_max = len << 3;
+  stage_name = "bitflip 1/1";
 
+  stage_val_type = STAGE_VAL_NONE;
 
+  orig_hit_cnt = queued_paths + unique_crashes;
 
+  prev_cksum = queue_cur->exec_cksum;
 
-		stage_val_type = STAGE_VAL_NONE;
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
-		orig_hit_cnt = queued_paths + unique_crashes;
+    stage_cur_byte = stage_cur >> 3;
 
-		prev_cksum = queue_cur->exec_cksum;
+    FLIP_BIT(out_buf, stage_cur);
 
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-			stage_cur_byte = stage_cur >> 3;
+    FLIP_BIT(out_buf, stage_cur);
 
-			FLIP_BIT(out_buf, stage_cur);
+    /* While flipping the least significant bit in every byte, pull off an extra
+       trick to detect possible syntax tokens. In essence, the idea is that if
+       you have a binary blob like this:
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+       xxxxxxxxIHDRxxxxxxxx
 
-			FLIP_BIT(out_buf, stage_cur);
+       ...and changing the leading and trailing bytes causes variable or no
+       changes in program flow, but touching any character in the "IHDR" string
+       always produces the same, distinctive path, it's highly likely that
+       "IHDR" is an atomically-checked magic value of special significance to
+       the fuzzed format.
 
-			/* While flipping the least significant bit in every byte, pull of an extra
-			   trick to detect possible syntax tokens. In essence, the idea is that if
-			   you have a binary blob like this:
+       We do this here, rather than as a separate stage, because it's a nice
+       way to keep the operation approximately "free" (i.e., no extra execs).
 
-			   xxxxxxxxIHDRxxxxxxxx
+       Empirically, performing the check when flipping the least significant bit
+       is advantageous, compared to doing it at the time of more disruptive
+       changes, where the program flow may be affected in more violent ways.
 
-			   ...and changing the leading and trailing bytes causes variable or no
-			   changes in program flow, but touching any character in the "IHDR" string
-			   always produces the same, distinctive path, it's highly likely that
-			   "IHDR" is an atomically-checked magic value of special significance to
-			   the fuzzed format.
+       The caveat is that we won't generate dictionaries in the -d mode or -S
+       mode - but that's probably a fair trade-off.
 
-			   We do this here, rather than as a separate stage, because it's a nice
-			   way to keep the operation approximately "free" (i.e., no extra execs).
+       This won't work particularly well with paths that exhibit variable
+       behavior, but fails gracefully, so we'll carry out the checks anyway.
 
-			   Empirically, performing the check when flipping the least significant bit
-			   is advantageous, compared to doing it at the time of more disruptive
-			   changes, where the program flow may be affected in more violent ways.
+      */
 
-			   The caveat is that we won't generate dictionaries in the -d mode or -S
-			   mode - but that's probably a fair trade-off.
+    if (!dumb_mode && (stage_cur & 7) == 7) {
 
-			   This won't work particularly well with paths that exhibit variable
-			   behavior, but fails gracefully, so we'll carry out the checks anyway.
+      u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
 
-			  */
+      if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
 
-			if (!dumb_mode && (stage_cur & 7) == 7) {
+        /* If at end of file and we are still collecting a string, grab the
+           final character and force output. */
 
-				u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        ++a_len;
 
-				if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
+        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
+          maybe_add_auto(a_collect, a_len);
 
-					/* If at end of file and we are still collecting a string, grab the
-					   final character and force output. */
+      } else if (cksum != prev_cksum) {
 
-					if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-					++a_len;
+        /* Otherwise, if the checksum has changed, see if we have something
+           worthwhile queued up, and collect that if the answer is yes. */
 
-					if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-						maybe_add_auto(a_collect, a_len);
+        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
+          maybe_add_auto(a_collect, a_len);
 
-				}
-				else if (cksum != prev_cksum) {
+        a_len = 0;
+        prev_cksum = cksum;
+
+      }
+
+      /* Continue collecting string, but only if the bit flip actually made
+         any difference - we don't want no-op tokens. */
+
+      if (cksum != queue_cur->exec_cksum) {
+
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        ++a_len;
+
+      }
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP1] += stage_max;
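
A toy model of the token-harvesting trick described in the comment above; the names and bounds here (MIN_TOKEN, MAX_TOKEN, collect_tokens) are stand-ins for MIN_AUTO_EXTRA, MAX_AUTO_EXTRA and maybe_add_auto(), not the real code.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical bounds standing in for MIN_AUTO_EXTRA / MAX_AUTO_EXTRA. */
    #define MIN_TOKEN 3
    #define MAX_TOKEN 32

    /* cksums[i] is the execution-path checksum observed when the low bit of
       buf[i] is flipped; base is the checksum of the unmodified input. Runs
       of consecutive bytes that all funnel into the same *changed* path are
       reported as candidate dictionary tokens. */
    static void collect_tokens(const uint8_t* buf, const uint32_t* cksums,
                               uint32_t len, uint32_t base) {
      char     tok[MAX_TOKEN + 1];
      uint32_t tok_len = 0, prev = base;

      for (uint32_t i = 0; i <= len; i++) {
        /* Flush on a checksum change or once we run off the end. */
        if (i == len || cksums[i] != prev) {
          if (tok_len >= MIN_TOKEN && tok_len <= MAX_TOKEN) {
            tok[tok_len] = 0;
            printf("candidate token: \"%s\"\n", tok);
          }
          tok_len = 0;
          if (i == len) break;
          prev = cksums[i];
        }
        /* Only collect bytes whose flip actually changed the path. */
        if (cksums[i] != base && tok_len < MAX_TOKEN) tok[tok_len++] = (char)buf[i];
      }
    }

    int main(void) {
      /* The four "IHDR" bytes all lead to the same distinctive path (7). */
      const uint8_t  buf[]    = "xxxIHDRxxx";
      const uint32_t cksums[] = {1, 2, 3, 7, 7, 7, 7, 4, 5, 6};
      collect_tokens(buf, cksums, 10, 0); /* prints: candidate token: "IHDR" */
      return 0;
    }
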
+
+  /* Two walking bits. */
+
+  stage_name = "bitflip 2/1";
+  stage_short = "flip2";
+  stage_max = (len << 3) - 1;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    stage_cur_byte = stage_cur >> 3;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
 
-					/* Otherwise, if the checksum has changed, see if we have something
-					   worthwhile queued up, and collect that if the answer is yes. */
+  }
 
-					if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-						maybe_add_auto(a_collect, a_len);
+  new_hit_cnt = queued_paths + unique_crashes;
 
-					a_len = 0;
-					prev_cksum = cksum;
+  stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP2] += stage_max;
 
-				}
+  /* Four walking bits. */
 
-				/* Continue collecting string, but only if the bit flip actually made
-				   any difference - we don't want no-op tokens. */
+  stage_name = "bitflip 4/1";
+  stage_short = "flip4";
+  stage_max = (len << 3) - 3;
 
-				if (cksum != queue_cur->exec_cksum) {
+  orig_hit_cnt = new_hit_cnt;
 
-					if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-					++a_len;
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    stage_cur_byte = stage_cur >> 3;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, stage_cur + 2);
+    FLIP_BIT(out_buf, stage_cur + 3);
 
-				}
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-			}
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, stage_cur + 2);
+    FLIP_BIT(out_buf, stage_cur + 3);
 
-		}
+  }
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-		stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP1] += stage_max;
+  stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP4] += stage_max;
 
-		/* Two walking bits. */
+  /* Effector map setup. These macros calculate:
 
-		stage_name = "bitflip 2/1";
-		stage_short = "flip2";
-		stage_max = (len << 3) - 1;
+     EFF_APOS      - position of a particular file offset in the map.
+     EFF_ALEN      - length of a map with a particular number of bytes.
+     EFF_SPAN_ALEN - map span for a sequence of bytes.
 
-		orig_hit_cnt = new_hit_cnt;
+   */
 
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
+#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
+#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
+#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1)
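
A small driver showing how these macros bucket file offsets; the EFF_MAP_SCALE2 value of 3 (8-byte buckets) is the stock config.h setting and is assumed here.

    #include <stdio.h>

    /* Assumed stock config.h granularity: 2^3 = 8 bytes per map entry. */
    #define EFF_MAP_SCALE2 3

    #define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
    #define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
    #define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
    #define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1)

    int main(void) {
      int len = 100; /* hypothetical input length */
      printf("offset 17 lives in map slot %d\n", EFF_APOS(17));           /* 2  */
      printf("a %d-byte file needs %d map entries\n", len, EFF_ALEN(len)); /* 13 */
      printf("a 5-byte token at offset 6 spans %d entries\n",
             EFF_SPAN_ALEN(6, 5));                                         /* 2  */
      return 0;
    }
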
 
-			stage_cur_byte = stage_cur >> 3;
+  /* Initialize effector map for the next step (see comments below). Always
+     flag first and last byte as doing something. */
 
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
+  eff_map = ck_alloc(EFF_ALEN(len));
+  eff_map[0] = 1;
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+  if (EFF_APOS(len - 1) != 0) {
 
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
+    eff_map[EFF_APOS(len - 1)] = 1;
+    ++eff_cnt;
 
-		}
+  }
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  /* Walking byte. */
 
-		stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP2] += stage_max;
+  stage_name = "bitflip 8/8";
+  stage_short = "flip8";
+  stage_max = len;
 
+  orig_hit_cnt = new_hit_cnt;
 
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
-		/* Four walking bits. */
+    stage_cur_byte = stage_cur;
 
-		stage_name = "bitflip 4/1";
-		stage_short = "flip4";
-		stage_max = (len << 3) - 3;
+    out_buf[stage_cur] ^= 0xFF;
 
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
+    /* We also use this stage to pull off a simple trick: we identify
+       bytes that seem to have no effect on the current execution path
+       even when fully flipped - and we skip them during more expensive
+       deterministic stages, such as arithmetics or known ints. */
 
+    if (!eff_map[EFF_APOS(stage_cur)]) {
 
+      u32 cksum;
 
-		orig_hit_cnt = new_hit_cnt;
+      /* If in dumb mode or if the file is very short, just flag everything
+         without wasting time on checksums. */
 
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+      if (!dumb_mode && len >= EFF_MIN_LEN)
+        cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      else
+        cksum = ~queue_cur->exec_cksum;
 
-			stage_cur_byte = stage_cur >> 3;
+      if (cksum != queue_cur->exec_cksum) {
 
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
-			FLIP_BIT(out_buf, stage_cur + 2);
-			FLIP_BIT(out_buf, stage_cur + 3);
+        eff_map[EFF_APOS(stage_cur)] = 1;
+        ++eff_cnt;
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      }
 
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
-			FLIP_BIT(out_buf, stage_cur + 2);
-			FLIP_BIT(out_buf, stage_cur + 3);
+    }
 
-		}
+    out_buf[stage_cur] ^= 0xFF;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  }
 
-		stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP4] += stage_max;
+  /* If the effector map is more than EFF_MAX_PERC dense, just flag the
+     whole thing as worth fuzzing, since we wouldn't be saving much time
+     anyway. */
 
+  if (eff_cnt != EFF_ALEN(len) &&
+      eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
 
+    memset(eff_map, 1, EFF_ALEN(len));
 
+    blocks_eff_select += EFF_ALEN(len);
 
-		/* Effector map setup. These macros calculate:
+  } else {
 
-		   EFF_APOS      - position of a particular file offset in the map.
-		   EFF_ALEN      - length of a map with a particular number of bytes.
-		   EFF_SPAN_ALEN - map span for a sequence of bytes.
+    blocks_eff_select += eff_cnt;
 
-		 */
+  }
 
-#define EFF_APOS(_p)          ((_p) >> EFF_MAP_SCALE2)
-#define EFF_REM(_x)           ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
-#define EFF_ALEN(_l)          (EFF_APOS(_l) + !!EFF_REM(_l))
-#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)
+  blocks_eff_total += EFF_ALEN(len);
 
-		 /* Initialize effector map for the next step (see comments below). Always
-			flag first and last byte as doing something. */
+  new_hit_cnt = queued_paths + unique_crashes;
 
-		eff_map = ck_alloc(EFF_ALEN(len));
-		eff_map[0] = 1;
+  stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP8] += stage_max;
 
-		if (EFF_APOS(len - 1) != 0) {
-			eff_map[EFF_APOS(len - 1)] = 1;
-			++eff_cnt;
-		}
+  /* Two walking bytes. */
 
-		/* Walking byte. */
+  if (len < 2) goto skip_bitflip;
 
-		stage_name = "bitflip 8/8";
-		stage_short = "flip8";
-		stage_max = len;
+  stage_name = "bitflip 16/8";
+  stage_short = "flip16";
+  stage_cur = 0;
+  stage_max = len - 1;
 
+  orig_hit_cnt = new_hit_cnt;
 
+  for (i = 0; i < len - 1; ++i) {
 
-		orig_hit_cnt = new_hit_cnt;
+    /* Let's consult the effector map... */
 
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-			stage_cur_byte = stage_cur;
+      --stage_max;
+      continue;
 
-			out_buf[stage_cur] ^= 0xFF;
+    }
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    stage_cur_byte = i;
 
-			/* We also use this stage to pull off a simple trick: we identify
-			   bytes that seem to have no effect on the current execution path
-			   even when fully flipped - and we skip them during more expensive
-			   deterministic stages, such as arithmetics or known ints. */
+    *(u16*)(out_buf + i) ^= 0xFFFF;
 
-			if (!eff_map[EFF_APOS(stage_cur)]) {
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    ++stage_cur;
 
-				u32 cksum;
+    *(u16*)(out_buf + i) ^= 0xFFFF;
 
-				/* If in dumb mode or if the file is very short, just flag everything
-				   without wasting time on checksums. */
+  }
 
-				if (!dumb_mode && len >= EFF_MIN_LEN)
-					cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
-				else
-					cksum = ~queue_cur->exec_cksum;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-				if (cksum != queue_cur->exec_cksum) {
-					eff_map[EFF_APOS(stage_cur)] = 1;
-					++eff_cnt;
-				}
+  stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP16] += stage_max;
 
-			}
+  if (len < 4) goto skip_bitflip;
 
-			out_buf[stage_cur] ^= 0xFF;
+  /* Four walking bytes. */
 
-		}
+  stage_name = "bitflip 32/8";
+  stage_short = "flip32";
+  stage_cur = 0;
+  stage_max = len - 3;
 
-		/* If the effector map is more than EFF_MAX_PERC dense, just flag the
-		   whole thing as worth fuzzing, since we wouldn't be saving much time
-		   anyway. */
+  orig_hit_cnt = new_hit_cnt;
 
-		if (eff_cnt != EFF_ALEN(len) &&
-			eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
+  for (i = 0; i < len - 3; ++i) {
 
-			memset(eff_map, 1, EFF_ALEN(len));
+    /* Let's consult the effector map... */
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-			blocks_eff_select += EFF_ALEN(len);
+      --stage_max;
+      continue;
 
-		}
-		else {
+    }
 
-			blocks_eff_select += eff_cnt;
+    stage_cur_byte = i;
 
-		}
+    *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
-		blocks_eff_total += EFF_ALEN(len);
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    ++stage_cur;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+    *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
-		stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP8] += stage_max;
+  }
 
+  new_hit_cnt = queued_paths + unique_crashes;
 
+  stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP32] += stage_max;
 
+skip_bitflip:
 
+  if (no_arith) goto skip_arith;
 
-		/* Two walking bytes. */
+  /**********************
+   * ARITHMETIC INC/DEC *
+   **********************/
 
-		if (len < 2) goto skip_bitflip;
+  /* 8-bit arithmetics. */
 
-		stage_name = "bitflip 16/8";
-		stage_short = "flip16";
-		stage_cur = 0;
-		stage_max = len - 1;
+  stage_name = "arith 8/8";
+  stage_short = "arith8";
+  stage_cur = 0;
+  stage_max = 2 * len * ARITH_MAX;
 
+  stage_val_type = STAGE_VAL_LE;
 
+  orig_hit_cnt = new_hit_cnt;
 
-		orig_hit_cnt = new_hit_cnt;
+  for (i = 0; i < len; ++i) {
 
-		for (i = 0; i < len - 1; ++i) {
+    u8 orig = out_buf[i];
 
-			/* Let's consult the effector map... */
+    /* Let's consult the effector map... */
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				--stage_max;
-				continue;
-			}
+    if (!eff_map[EFF_APOS(i)]) {
 
-			stage_cur_byte = i;
+      stage_max -= 2 * ARITH_MAX;
+      continue;
 
-			*(u16*)(out_buf + i) ^= 0xFFFF;
+    }
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-			++stage_cur;
+    stage_cur_byte = i;
 
-			*(u16*)(out_buf + i) ^= 0xFFFF;
+    for (j = 1; j <= ARITH_MAX; ++j) {
 
+      u8 r = orig ^ (orig + j);
 
-		}
+      /* Do arithmetic operations only if the result couldn't be a product
+         of a bitflip. */
 
-		new_hit_cnt = queued_paths + unique_crashes;
+      if (!could_be_bitflip(r)) {
 
-		stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP16] += stage_max;
+        stage_cur_val = j;
+        out_buf[i] = orig + j;
 
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
+      } else
 
+        --stage_max;
 
-		if (len < 4) goto skip_bitflip;
+      r = orig ^ (orig - j);
 
-		/* Four walking bytes. */
+      if (!could_be_bitflip(r)) {
 
-		stage_name = "bitflip 32/8";
-		stage_short = "flip32";
-		stage_cur = 0;
-		stage_max = len - 3;
+        stage_cur_val = -j;
+        out_buf[i] = orig - j;
 
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
+      } else
 
-		orig_hit_cnt = new_hit_cnt;
+        --stage_max;
 
-		for (i = 0; i < len - 3; ++i) {
+      out_buf[i] = orig;
 
-			/* Let's consult the effector map... */
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				--stage_max;
-				continue;
-			}
+    }
 
-			stage_cur_byte = i;
+  }
 
-			*(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-			++stage_cur;
+  stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH8] += stage_max;
 
-			*(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+  /* 16-bit arithmetics, both endians. */
 
-		}
+  if (len < 2) goto skip_arith;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  stage_name = "arith 16/8";
+  stage_short = "arith16";
+  stage_cur = 0;
+  stage_max = 4 * (len - 1) * ARITH_MAX;
 
-		stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP32] += stage_max;
+  orig_hit_cnt = new_hit_cnt;
 
+  for (i = 0; i < len - 1; ++i) {
 
+    u16 orig = *(u16*)(out_buf + i);
 
+    /* Let's consult the effector map... */
 
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
+      stage_max -= 4 * ARITH_MAX;
+      continue;
 
-	skip_bitflip:
+    }
 
-		if (no_arith) goto skip_arith;
+    stage_cur_byte = i;
 
-		/**********************
-		 * ARITHMETIC INC/DEC *
-		 **********************/
+    for (j = 1; j <= ARITH_MAX; ++j) {
 
-		 /* 8-bit arithmetics. */
+      u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
+          r3 = orig ^ SWAP16(SWAP16(orig) + j),
+          r4 = orig ^ SWAP16(SWAP16(orig) - j);
 
-		stage_name = "arith 8/8";
-		stage_short = "arith8";
-		stage_cur = 0;
-		stage_max = 2 * len * ARITH_MAX;
+      /* Try little endian addition and subtraction first. Do it only
+         if the operation would affect more than one byte (hence the
+         & 0xff overflow checks) and if it couldn't be a product of
+         a bitflip. */
 
+      stage_val_type = STAGE_VAL_LE;
 
+      if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
 
+        stage_cur_val = j;
+        *(u16*)(out_buf + i) = orig + j;
 
-		stage_val_type = STAGE_VAL_LE;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-		orig_hit_cnt = new_hit_cnt;
+      } else
 
-		for (i = 0; i < len; ++i) {
+        --stage_max;
 
-			u8 orig = out_buf[i];
+      if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
 
-			/* Let's consult the effector map... */
+        stage_cur_val = -j;
+        *(u16*)(out_buf + i) = orig - j;
 
-			if (!eff_map[EFF_APOS(i)]) {
-				stage_max -= 2 * ARITH_MAX;
-				continue;
-			}
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-			stage_cur_byte = i;
+      } else
 
-			for (j = 1; j <= ARITH_MAX; ++j) {
+        --stage_max;
 
-				u8 r = orig ^ (orig + j);
+      /* Big endian comes next. Same deal. */
 
-				/* Do arithmetic operations only if the result couldn't be a product
-				   of a bitflip. */
+      stage_val_type = STAGE_VAL_BE;
 
-				if (!could_be_bitflip(r)) {
+      if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
 
-					stage_cur_val = j;
-					out_buf[i] = orig + j;
+        stage_cur_val = j;
+        *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				} else --stage_max;
+      } else
 
-				r = orig ^ (orig - j);
+        --stage_max;
 
-				if (!could_be_bitflip(r)) {
+      if ((orig >> 8) < j && !could_be_bitflip(r4)) {
 
-					stage_cur_val = -j;
-					out_buf[i] = orig - j;
+        stage_cur_val = -j;
+        *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				} else --stage_max;
+      } else
 
-				out_buf[i] = orig;
+        --stage_max;
 
-			}
+      *(u16*)(out_buf + i) = orig;
 
-		}
+    }
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  }
 
-		stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH8] += stage_max;
+  new_hit_cnt = queued_paths + unique_crashes;
 
+  stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH16] += stage_max;
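
The carry checks in the stage above are what keep the 16-bit pass from redoing work the 8-bit pass already produced; a tiny, self-contained illustration of that predicate with arbitrary values.

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint16_t orig = 0x12ff; /* arbitrary little-endian word from the input */
      unsigned j = 1;

      /* Little endian: +j only matters here if it carries out of the low byte. */
      int le_worth_trying = ((orig & 0xff) + j) > 0xff; /* 0xff + 1 carries -> 1 */

      /* Big endian: after the byte swap, the original high byte is the one
         that has to carry for the change to span two bytes. */
      int be_worth_trying = ((orig >> 8) + j) > 0xff;   /* 0x12 + 1 doesn't -> 0 */

      printf("LE +%u spans two bytes: %d\n", j, le_worth_trying);
      printf("BE +%u spans two bytes: %d\n", j, be_worth_trying);
      return 0;
    }
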
 
+  /* 32-bit arithmetics, both endians. */
 
+  if (len < 4) goto skip_arith;
 
+  stage_name = "arith 32/8";
+  stage_short = "arith32";
+  stage_cur = 0;
+  stage_max = 4 * (len - 3) * ARITH_MAX;
 
-		/* 16-bit arithmetics, both endians. */
+  orig_hit_cnt = new_hit_cnt;
 
-		if (len < 2) goto skip_arith;
+  for (i = 0; i < len - 3; ++i) {
 
-		stage_name = "arith 16/8";
-		stage_short = "arith16";
-		stage_cur = 0;
-		stage_max = 4 * (len - 1) * ARITH_MAX;
+    u32 orig = *(u32*)(out_buf + i);
 
+    /* Let's consult the effector map... */
 
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
+      stage_max -= 4 * ARITH_MAX;
+      continue;
 
-		orig_hit_cnt = new_hit_cnt;
+    }
 
-		for (i = 0; i < len - 1; ++i) {
+    stage_cur_byte = i;
 
-			u16 orig = *(u16*)(out_buf + i);
+    for (j = 1; j <= ARITH_MAX; ++j) {
 
-			/* Let's consult the effector map... */
+      u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
+          r3 = orig ^ SWAP32(SWAP32(orig) + j),
+          r4 = orig ^ SWAP32(SWAP32(orig) - j);
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				stage_max -= 4 * ARITH_MAX;
-				continue;
-			}
+      /* Little endian first. Same deal as with 16-bit: we only want to
+         try if the operation would have effect on more than two bytes. */
 
-			stage_cur_byte = i;
+      stage_val_type = STAGE_VAL_LE;
 
-			for (j = 1; j <= ARITH_MAX; ++j) {
+      if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
 
-				u16 r1 = orig ^ (orig + j),
-					r2 = orig ^ (orig - j),
-					r3 = orig ^ SWAP16(SWAP16(orig) + j),
-					r4 = orig ^ SWAP16(SWAP16(orig) - j);
+        stage_cur_val = j;
+        *(u32*)(out_buf + i) = orig + j;
 
-				/* Try little endian addition and subtraction first. Do it only
-				   if the operation would affect more than one byte (hence the
-				   & 0xff overflow checks) and if it couldn't be a product of
-				   a bitflip. */
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				stage_val_type = STAGE_VAL_LE;
+      } else
 
-				if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
+        --stage_max;
 
-					stage_cur_val = j;
-					*(u16*)(out_buf + i) = orig + j;
+      if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+        stage_cur_val = -j;
+        *(u32*)(out_buf + i) = orig - j;
 
-				} else --stage_max;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
+      } else
 
-					stage_cur_val = -j;
-					*(u16*)(out_buf + i) = orig - j;
+        --stage_max;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+      /* Big endian next. */
 
-				} else --stage_max;
+      stage_val_type = STAGE_VAL_BE;
 
-				/* Big endian comes next. Same deal. */
+      if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
 
-				stage_val_type = STAGE_VAL_BE;
+        stage_cur_val = j;
+        *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
 
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
+      } else
 
-					stage_cur_val = j;
-					*(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
+        --stage_max;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+      if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
 
-				} else --stage_max;
+        stage_cur_val = -j;
+        *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
 
-				if ((orig >> 8) < j && !could_be_bitflip(r4)) {
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-					stage_cur_val = -j;
-					*(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
+      } else
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+        --stage_max;
 
-				} else --stage_max;
+      *(u32*)(out_buf + i) = orig;
 
-				*(u16*)(out_buf + i) = orig;
+    }
 
-			}
+  }
 
-		}
+  new_hit_cnt = queued_paths + unique_crashes;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH32] += stage_max;
 
-		stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH16] += stage_max;
+skip_arith:
 
+  /**********************
+   * INTERESTING VALUES *
+   **********************/
 
+  stage_name = "interest 8/8";
+  stage_short = "int8";
+  stage_cur = 0;
+  stage_max = len * sizeof(interesting_8);
 
+  stage_val_type = STAGE_VAL_LE;
 
-		/* 32-bit arithmetics, both endians. */
+  orig_hit_cnt = new_hit_cnt;
 
-		if (len < 4) goto skip_arith;
+  /* Setting 8-bit integers. */
 
-		stage_name = "arith 32/8";
-		stage_short = "arith32";
-		stage_cur = 0;
-		stage_max = 4 * (len - 3) * ARITH_MAX;
+  for (i = 0; i < len; ++i) {
 
+    u8 orig = out_buf[i];
 
+    /* Let's consult the effector map... */
 
-		orig_hit_cnt = new_hit_cnt;
+    if (!eff_map[EFF_APOS(i)]) {
 
-		for (i = 0; i < len - 3; ++i) {
+      stage_max -= sizeof(interesting_8);
+      continue;
 
-			u32 orig = *(u32*)(out_buf + i);
+    }
 
-			/* Let's consult the effector map... */
+    stage_cur_byte = i;
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				stage_max -= 4 * ARITH_MAX;
-				continue;
-			}
+    for (j = 0; j < sizeof(interesting_8); ++j) {
 
-			stage_cur_byte = i;
+      /* Skip if the value could be a product of bitflips or arithmetics. */
 
-			for (j = 1; j <= ARITH_MAX; ++j) {
+      if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
+          could_be_arith(orig, (u8)interesting_8[j], 1)) {
 
-				u32 r1 = orig ^ (orig + j),
-					r2 = orig ^ (orig - j),
-					r3 = orig ^ SWAP32(SWAP32(orig) + j),
-					r4 = orig ^ SWAP32(SWAP32(orig) - j);
+        --stage_max;
+        continue;
 
-				/* Little endian first. Same deal as with 16-bit: we only want to
-				   try if the operation would have effect on more than two bytes. */
+      }
 
-				stage_val_type = STAGE_VAL_LE;
+      stage_cur_val = interesting_8[j];
+      out_buf[i] = interesting_8[j];
 
-				if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-					stage_cur_val = j;
-					*(u32*)(out_buf + i) = orig + j;
+      out_buf[i] = orig;
+      ++stage_cur;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+    }
 
-				} else --stage_max;
+  }
 
-				if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
+  new_hit_cnt = queued_paths + unique_crashes;
 
-					stage_cur_val = -j;
-					*(u32*)(out_buf + i) = orig - j;
+  stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST8] += stage_max;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+  /* Setting 16-bit integers, both endians. */
 
-				} else --stage_max;
+  if (no_arith || len < 2) goto skip_interest;
 
-				/* Big endian next. */
+  stage_name = "interest 16/8";
+  stage_short = "int16";
+  stage_cur = 0;
+  stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
 
-				stage_val_type = STAGE_VAL_BE;
+  orig_hit_cnt = new_hit_cnt;
 
-				if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
+  for (i = 0; i < len - 1; ++i) {
 
-					stage_cur_val = j;
-					*(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
+    u16 orig = *(u16*)(out_buf + i);
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+    /* Let's consult the effector map... */
 
-				} else --stage_max;
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-				if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
+      stage_max -= sizeof(interesting_16);
+      continue;
 
-					stage_cur_val = -j;
-					*(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
+    }
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+    stage_cur_byte = i;
 
-				} else --stage_max;
+    for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
 
-				*(u32*)(out_buf + i) = orig;
+      stage_cur_val = interesting_16[j];
 
-			}
+      /* Skip if this could be a product of a bitflip, arithmetics,
+         or single-byte interesting value insertion. */
 
-		}
+      if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) &&
+          !could_be_arith(orig, (u16)interesting_16[j], 2) &&
+          !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
 
-		new_hit_cnt = queued_paths + unique_crashes;
+        stage_val_type = STAGE_VAL_LE;
 
-		stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH32] += stage_max;
+        *(u16*)(out_buf + i) = interesting_16[j];
 
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
+      } else
 
+        --stage_max;
 
-	skip_arith:
+      if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
+          !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
+          !could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
+          !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
 
-		/**********************
-		 * INTERESTING VALUES *
-		 **********************/
+        stage_val_type = STAGE_VAL_BE;
 
-		stage_name = "interest 8/8";
-		stage_short = "int8";
-		stage_cur = 0;
-		stage_max = len * sizeof(interesting_8);
+        *(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
+      } else
 
+        --stage_max;
 
-		stage_val_type = STAGE_VAL_LE;
+    }
 
-		orig_hit_cnt = new_hit_cnt;
+    *(u16*)(out_buf + i) = orig;
 
-		/* Setting 8-bit integers. */
+  }
 
-		for (i = 0; i < len; ++i) {
+  new_hit_cnt = queued_paths + unique_crashes;
 
-			u8 orig = out_buf[i];
+  stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST16] += stage_max;
 
-			/* Let's consult the effector map... */
+  if (len < 4) goto skip_interest;
 
-			if (!eff_map[EFF_APOS(i)]) {
-				stage_max -= sizeof(interesting_8);
-				continue;
-			}
+  /* Setting 32-bit integers, both endians. */
 
-			stage_cur_byte = i;
+  stage_name = "interest 32/8";
+  stage_short = "int32";
+  stage_cur = 0;
+  stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
 
-			for (j = 0; j < sizeof(interesting_8); ++j) {
+  orig_hit_cnt = new_hit_cnt;
 
-				/* Skip if the value could be a product of bitflips or arithmetics. */
+  for (i = 0; i < len - 3; ++i) {
 
-				if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
-					could_be_arith(orig, (u8)interesting_8[j], 1)) {
-					--stage_max;
-					continue;
-				}
+    u32 orig = *(u32*)(out_buf + i);
 
-				stage_cur_val = interesting_8[j];
-				out_buf[i] = interesting_8[j];
+    /* Let's consult the effector map... */
 
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-				out_buf[i] = orig;
-				++stage_cur;
+      stage_max -= sizeof(interesting_32) >> 1;
+      continue;
 
-			}
+    }
 
-		}
+    stage_cur_byte = i;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+    for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
 
-		stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST8] += stage_max;
+      stage_cur_val = interesting_32[j];
 
+      /* Skip if this could be a product of a bitflip, arithmetics,
+         or word interesting value insertion. */
 
+      if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) &&
+          !could_be_arith(orig, interesting_32[j], 4) &&
+          !could_be_interest(orig, interesting_32[j], 4, 0)) {
 
+        stage_val_type = STAGE_VAL_LE;
 
-		/* Setting 16-bit integers, both endians. */
+        *(u32*)(out_buf + i) = interesting_32[j];
 
-		if (no_arith || len < 2) goto skip_interest;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-		stage_name = "interest 16/8";
-		stage_short = "int16";
-		stage_cur = 0;
-		stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
+      } else
 
+        --stage_max;
 
+      if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
+          !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
+          !could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
+          !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
 
-		orig_hit_cnt = new_hit_cnt;
+        stage_val_type = STAGE_VAL_BE;
 
-		for (i = 0; i < len - 1; ++i) {
+        *(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-			u16 orig = *(u16*)(out_buf + i);
+      } else
 
-			/* Let's consult the effector map... */
+        --stage_max;
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				stage_max -= sizeof(interesting_16);
-				continue;
-			}
+    }
 
-			stage_cur_byte = i;
+    *(u32*)(out_buf + i) = orig;
 
-			for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
+  }
 
-				stage_cur_val = interesting_16[j];
+  new_hit_cnt = queued_paths + unique_crashes;
 
-				/* Skip if this could be a product of a bitflip, arithmetics,
-				   or single-byte interesting value insertion. */
+  stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST32] += stage_max;
 
-				if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) &&
-					!could_be_arith(orig, (u16)interesting_16[j], 2) &&
-					!could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
+skip_interest:
 
-					stage_val_type = STAGE_VAL_LE;
+  /********************
+   * DICTIONARY STUFF *
+   ********************/
 
-					*(u16*)(out_buf + i) = interesting_16[j];
+  if (!extras_cnt) goto skip_user_extras;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+  /* Overwrite with user-supplied extras. */
 
-				} else --stage_max;
+  stage_name = "user extras (over)";
+  stage_short = "ext_UO";
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
 
-				if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
-					!could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
-					!could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
-					!could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
+  stage_val_type = STAGE_VAL_NONE;
 
-					stage_val_type = STAGE_VAL_BE;
+  orig_hit_cnt = new_hit_cnt;
 
-					*(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+  for (i = 0; i < len; ++i) {
 
-				} else --stage_max;
+    u32 last_len = 0;
 
-			}
+    stage_cur_byte = i;
 
-			*(u16*)(out_buf + i) = orig;
+    /* Extras are sorted by size, from smallest to largest. This means
+       that we don't have to worry about restoring the buffer in
+       between writes at a particular offset determined by the outer
+       loop. */
 
-		}
+    for (j = 0; j < extras_cnt; ++j) {
 
-		new_hit_cnt = queued_paths + unique_crashes;
+      /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
+         skip them if there's no room to insert the payload, if the token
+         is redundant, or if its entire span has no bytes set in the effector
+         map. */
 
-		stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST16] += stage_max;
+      if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
+          extras[j].len > len - i ||
+          !memcmp(extras[j].data, out_buf + i, extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
 
+        --stage_max;
+        continue;
 
+      }
 
+      last_len = extras[j].len;
+      memcpy(out_buf + i, extras[j].data, last_len);
 
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-		if (len < 4) goto skip_interest;
+      ++stage_cur;
 
-		/* Setting 32-bit integers, both endians. */
+    }
 
-		stage_name = "interest 32/8";
-		stage_short = "int32";
-		stage_cur = 0;
-		stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
+    /* Restore all the clobbered memory. */
+    memcpy(out_buf + i, in_buf + i, last_len);
 
+  }
 
-		orig_hit_cnt = new_hit_cnt;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-		for (i = 0; i < len - 3; ++i) {
+  stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_UO] += stage_max;
 
-			u32 orig = *(u32*)(out_buf + i);
+  /* Insertion of user-supplied extras. */
 
-			/* Let's consult the effector map... */
+  stage_name = "user extras (insert)";
+  stage_short = "ext_UI";
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				stage_max -= sizeof(interesting_32) >> 1;
-				continue;
-			}
+  orig_hit_cnt = new_hit_cnt;
 
-			stage_cur_byte = i;
+  ex_tmp = ck_alloc(len + MAX_DICT_FILE);
 
-			for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
+  for (i = 0; i <= len; ++i) {
 
-				stage_cur_val = interesting_32[j];
+    stage_cur_byte = i;
 
-				/* Skip if this could be a product of a bitflip, arithmetics,
-				   or word interesting value insertion. */
+    for (j = 0; j < extras_cnt; ++j) {
 
-				if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) &&
-					!could_be_arith(orig, interesting_32[j], 4) &&
-					!could_be_interest(orig, interesting_32[j], 4, 0)) {
+      if (len + extras[j].len > MAX_FILE) {
 
-					stage_val_type = STAGE_VAL_LE;
+        --stage_max;
+        continue;
 
-					*(u32*)(out_buf + i) = interesting_32[j];
+      }
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+      /* Insert token */
+      memcpy(ex_tmp + i, extras[j].data, extras[j].len);
 
-				} else --stage_max;
+      /* Copy tail */
+      memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
 
-				if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
-					!could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
-					!could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
-					!could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
+      if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
 
-					stage_val_type = STAGE_VAL_BE;
+        ck_free(ex_tmp);
+        goto abandon_entry;
 
-					*(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+      }
 
-				} else --stage_max;
+      ++stage_cur;
 
-			}
+    }
 
-			*(u32*)(out_buf + i) = orig;
+    /* Copy head */
+    ex_tmp[i] = out_buf[i];
 
-		}
+  }
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  ck_free(ex_tmp);
 
-		stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST32] += stage_max;
+  new_hit_cnt = queued_paths + unique_crashes;
 
+  stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_UI] += stage_max;
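
The insertion stage above always builds the scratch buffer as head + token + tail; a self-contained sketch of that layout (the helper name insert_token is illustrative, not a real afl-fuzz function).

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Build head (first i bytes), then the token, then the tail. */
    static uint8_t* insert_token(const uint8_t* in, size_t len, size_t i,
                                 const uint8_t* tok, size_t tok_len,
                                 size_t* out_len) {
      uint8_t* out = malloc(len + tok_len);
      if (!out) return NULL;
      memcpy(out, in, i);                         /* head  */
      memcpy(out + i, tok, tok_len);              /* token */
      memcpy(out + i + tok_len, in + i, len - i); /* tail  */
      *out_len = len + tok_len;
      return out;
    }

    int main(void) {
      const uint8_t in[] = "AAAABBBB";
      size_t out_len;
      uint8_t* out = insert_token(in, 8, 4, (const uint8_t*)"IHDR", 4, &out_len);
      if (out) {
        fwrite(out, 1, out_len, stdout); /* prints: AAAAIHDRBBBB */
        putchar('\n');
        free(out);
      }
      return 0;
    }
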
 
+skip_user_extras:
 
+  if (!a_extras_cnt) goto skip_extras;
 
+  stage_name = "auto extras (over)";
+  stage_short = "ext_AO";
+  stage_cur = 0;
+  stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
 
-	skip_interest:
+  stage_val_type = STAGE_VAL_NONE;
 
-		/********************
-		 * DICTIONARY STUFF *
-		 ********************/
+  orig_hit_cnt = new_hit_cnt;
 
-		if (!extras_cnt) goto skip_user_extras;
+  for (i = 0; i < len; ++i) {
 
-		/* Overwrite with user-supplied extras. */
+    u32 last_len = 0;
 
-		stage_name = "user extras (over)";
-		stage_short = "ext_UO";
-		stage_cur = 0;
-		stage_max = extras_cnt * len;
+    stage_cur_byte = i;
 
+    for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
 
+      /* See the comment in the earlier code; extras are sorted by size. */
 
+      if (a_extras[j].len > len - i ||
+          !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1,
+                  EFF_SPAN_ALEN(i, a_extras[j].len))) {
 
-		stage_val_type = STAGE_VAL_NONE;
+        --stage_max;
+        continue;
 
-		orig_hit_cnt = new_hit_cnt;
+      }
 
-		for (i = 0; i < len; ++i) {
+      last_len = a_extras[j].len;
+      memcpy(out_buf + i, a_extras[j].data, last_len);
 
-			u32 last_len = 0;
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-			stage_cur_byte = i;
+      ++stage_cur;
 
-			/* Extras are sorted by size, from smallest to largest. This means
-			   that we don't have to worry about restoring the buffer in
-			   between writes at a particular offset determined by the outer
-			   loop. */
+    }
 
-			for (j = 0; j < extras_cnt; ++j) {
+    /* Restore all the clobbered memory. */
+    memcpy(out_buf + i, in_buf + i, last_len);
 
-				/* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
-				   skip them if there's no room to insert the payload, if the token
-				   is redundant, or if its entire span has no bytes set in the effector
-				   map. */
+  }
 
-				if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
-					extras[j].len > len - i ||
-					!memcmp(extras[j].data, out_buf + i, extras[j].len) ||
-					!memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
+  new_hit_cnt = queued_paths + unique_crashes;
 
-					--stage_max;
-					continue;
+  stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_AO] += stage_max;
 
-				}
+skip_extras:
 
-				last_len = extras[j].len;
-				memcpy(out_buf + i, extras[j].data, last_len);
+  /* If we made it here without jumping to havoc_stage or abandon_entry,
+     we're properly done with deterministic steps and can mark it as such
+     in the .state/ directory. */
 
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+  if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
 
-				++stage_cur;
+  /****************
+   * RANDOM HAVOC *
+   ****************/
 
-			}
+havoc_stage:
+pacemaker_fuzzing:
 
-			/* Restore all the clobbered memory. */
-			memcpy(out_buf + i, in_buf + i, last_len);
+  stage_cur_byte = -1;
 
-		}
+  /* The havoc stage mutation code is also invoked when splicing files; if the
+     splice_cycle variable is set, generate different descriptions and such. */
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  if (!splice_cycle) {
 
-		stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_UO] += stage_max;
+    stage_name = "MOpt-havoc";
+    stage_short = "MOpt_havoc";
+    stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
+                havoc_div / 100;
 
-		/* Insertion of user-supplied extras. */
+  } else {
 
-		stage_name = "user extras (insert)";
-		stage_short = "ext_UI";
-		stage_cur = 0;
-		stage_max = extras_cnt * len;
+    static u8 tmp[32];
 
+    perf_score = orig_perf;
 
+    sprintf(tmp, "MOpt-splice %u", splice_cycle);
+    stage_name = tmp;
+    stage_short = "MOpt_splice";
+    stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
 
+  }
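
A rough model of how the MOpt havoc/splice iteration budget above scales with perf_score; the HAVOC_* constants are the stock config.h values and are an assumption here, as is havoc_div = 1.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed stock config.h values, not taken from this diff. */
    #define HAVOC_CYCLES_INIT 1024
    #define HAVOC_CYCLES 256
    #define HAVOC_MIN 16

    static uint32_t havoc_budget(int doing_det, uint32_t perf_score,
                                 uint32_t havoc_div) {
      uint32_t stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
                           perf_score / havoc_div / 100;
      return stage_max < HAVOC_MIN ? HAVOC_MIN : stage_max;
    }

    int main(void) {
      /* A favored entry during the deterministic pass vs. a sluggish one later. */
      printf("det pass, score 100: %u iterations\n", havoc_budget(1, 100, 1)); /* 1024 */
      printf("later pass, score 10: %u iterations\n", havoc_budget(0, 10, 1)); /* 25   */
      return 0;
    }
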
 
-		orig_hit_cnt = new_hit_cnt;
+  s32 temp_len_puppet;
+  cur_ms_lv = get_cur_time();
 
-		ex_tmp = ck_alloc(len + MAX_DICT_FILE);
+  {
 
-		for (i = 0; i <= len; ++i) {
+    if (key_puppet == 1) {
 
-			stage_cur_byte = i;
+      if (unlikely(orig_hit_cnt_puppet == 0)) {
 
-			for (j = 0; j < extras_cnt; ++j) {
+        orig_hit_cnt_puppet = queued_paths + unique_crashes;
+        last_limit_time_start = get_cur_time();
+        SPLICE_CYCLES_puppet =
+            (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+             SPLICE_CYCLES_puppet_low);
 
-				if (len + extras[j].len > MAX_FILE) {
-					--stage_max;
-					continue;
-				}
+      }
 
-				/* Insert token */
-				memcpy(ex_tmp + i, extras[j].data, extras[j].len);
+    }
 
-				/* Copy tail */
-				memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
+    {
 
-				if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
-					ck_free(ex_tmp);
-					goto abandon_entry;
-				}
+#ifndef IGNORE_FINDS
+    havoc_stage_puppet:
+#endif
 
-				++stage_cur;
+      stage_cur_byte = -1;
 
-			}
+      /* The havoc stage mutation code is also invoked when splicing files; if
+         the splice_cycle variable is set, generate different descriptions and
+         such. */
 
-			/* Copy head */
-			ex_tmp[i] = out_buf[i];
+      if (!splice_cycle) {
 
-		}
+        stage_name = "MOpt avoc";
+        stage_short = "MOpt_havoc";
+        stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+                    perf_score / havoc_div / 100;
 
-		ck_free(ex_tmp);
+      } else {
 
-		new_hit_cnt = queued_paths + unique_crashes;
+        static u8 tmp[32];
+        perf_score = orig_perf;
+        sprintf(tmp, "MOpt splice %u", splice_cycle);
+        stage_name = tmp;
+        stage_short = "MOpt_splice";
+        stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
 
-		stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_UI] += stage_max;
+      }
 
-	skip_user_extras:
+      if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
 
-		if (!a_extras_cnt) goto skip_extras;
+      temp_len = len;
 
-		stage_name = "auto extras (over)";
-		stage_short = "ext_AO";
-		stage_cur = 0;
-		stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
+      orig_hit_cnt = queued_paths + unique_crashes;
 
+      havoc_queued = queued_paths;
 
-		stage_val_type = STAGE_VAL_NONE;
+      for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
-		orig_hit_cnt = new_hit_cnt;
+        u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
 
-		for (i = 0; i < len; ++i) {
+        stage_cur_val = use_stacking;
 
-			u32 last_len = 0;
+        for (i = 0; i < operator_num; ++i) {
 
-			stage_cur_byte = i;
+          stage_cycles_puppet_v3[swarm_now][i] =
+              stage_cycles_puppet_v2[swarm_now][i];
 
-			for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
+        }
 
-				/* See the comment in the earlier code; extras are sorted by size. */
+        for (i = 0; i < use_stacking; ++i) {
 
-				if (a_extras[j].len > len - i ||
-					!memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
-					!memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) {
+          switch (select_algorithm()) {
 
-					--stage_max;
-					continue;
+            case 0:
+              /* Flip a single bit somewhere. Spooky! */
+              FLIP_BIT(out_buf, UR(temp_len << 3));
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP1] += 1;
+              break;
 
-				}
+            case 1:
+              if (temp_len < 2) break;
+              temp_len_puppet = UR(temp_len << 3);
+              FLIP_BIT(out_buf, temp_len_puppet);
+              FLIP_BIT(out_buf, temp_len_puppet + 1);
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP2] += 1;
+              break;
 
-				last_len = a_extras[j].len;
-				memcpy(out_buf + i, a_extras[j].data, last_len);
+            case 2:
+              if (temp_len < 2) break;
+              temp_len_puppet = UR(temp_len << 3);
+              FLIP_BIT(out_buf, temp_len_puppet);
+              FLIP_BIT(out_buf, temp_len_puppet + 1);
+              FLIP_BIT(out_buf, temp_len_puppet + 2);
+              FLIP_BIT(out_buf, temp_len_puppet + 3);
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP4] += 1;
+              break;
 
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+            case 3:
+              if (temp_len < 4) break;
+              out_buf[UR(temp_len)] ^= 0xFF;
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP8] += 1;
+              break;
 
-				++stage_cur;
+            case 4:
+              if (temp_len < 8) break;
+              *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF;
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP16] += 1;
+              break;
 
-			}
+            case 5:
+              if (temp_len < 8) break;
+              *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF;
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP32] += 1;
+              break;
 
-			/* Restore all the clobbered memory. */
-			memcpy(out_buf + i, in_buf + i, last_len);
+            case 6:
+              out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
+              out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
+              stage_cycles_puppet_v2[swarm_now][STAGE_ARITH8] += 1;
+              break;
 
-		}
+            case 7:
+              /* Randomly subtract from word, random endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
 
-		new_hit_cnt = queued_paths + unique_crashes;
+                u32 pos = UR(temp_len - 1);
+                *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
 
-		stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_AO] += stage_max;
+              } else {
 
-	skip_extras:
+                u32 pos = UR(temp_len - 1);
+                u16 num = 1 + UR(ARITH_MAX);
+                *(u16*)(out_buf + pos) =
+                    SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
 
-		/* If we made this to here without jumping to havoc_stage or abandon_entry,
-		   we're properly done with deterministic steps and can mark it as such
-		   in the .state/ directory. */
+              }
 
-		if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
+              /* Randomly add to word, random endian. */
+              if (UR(2)) {
 
-		/****************
-		 * RANDOM HAVOC *
-		 ****************/
+                u32 pos = UR(temp_len - 1);
+                *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
 
-	havoc_stage:
-	pacemaker_fuzzing:
+              } else {
 
+                u32 pos = UR(temp_len - 1);
+                u16 num = 1 + UR(ARITH_MAX);
+                *(u16*)(out_buf + pos) =
+                    SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
 
-		stage_cur_byte = -1;
+              }
 
-		/* The havoc stage mutation code is also invoked when splicing files; if the
-		   splice_cycle variable is set, generate different descriptions and such. */
+              stage_cycles_puppet_v2[swarm_now][STAGE_ARITH16] += 1;
+              break;
 
-		if (!splice_cycle) {
+            case 8:
+              /* Randomly subtract from dword, random endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
 
-			stage_name = "MOpt-havoc";
-			stage_short = "MOpt_havoc";
-			stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-				perf_score / havoc_div / 100;
+                u32 pos = UR(temp_len - 3);
+                *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
 
-		}
-		else {
+              } else {
 
-			static u8 tmp[32];
+                u32 pos = UR(temp_len - 3);
+                u32 num = 1 + UR(ARITH_MAX);
+                *(u32*)(out_buf + pos) =
+                    SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
 
-			perf_score = orig_perf;
+              }
 
-			sprintf(tmp, "MOpt-splice %u", splice_cycle);
-			stage_name = tmp;
-			stage_short = "MOpt_splice";
-			stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+              /* Randomly add to dword, random endian. */
+              // if (temp_len < 4) break;
+              if (UR(2)) {
 
-		}
+                u32 pos = UR(temp_len - 3);
+                *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
 
-		s32 temp_len_puppet;
-		cur_ms_lv = get_cur_time();
+              } else {
 
-		{
+                u32 pos = UR(temp_len - 3);
+                u32 num = 1 + UR(ARITH_MAX);
+                *(u32*)(out_buf + pos) =
+                    SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
 
+              }
 
-			if (key_puppet == 1)
-			{
-				if (unlikely(orig_hit_cnt_puppet == 0))
-				{
-					orig_hit_cnt_puppet = queued_paths + unique_crashes;
-					last_limit_time_start = get_cur_time();
-					SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low);
-				}
-			}
+              stage_cycles_puppet_v2[swarm_now][STAGE_ARITH32] += 1;
+              break;
 
+            case 9:
+              /* Set byte to interesting value. */
+              if (temp_len < 4) break;
+              out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
+              stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST8] += 1;
+              break;
 
-			{
-#ifndef IGNORE_FINDS
-			havoc_stage_puppet:
-#endif
+            case 10:
+              /* Set word to interesting value, randomly choosing endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
 
-				stage_cur_byte = -1;
+                *(u16*)(out_buf + UR(temp_len - 1)) =
+                    interesting_16[UR(sizeof(interesting_16) >> 1)];
 
-				/* The havoc stage mutation code is also invoked when splicing files; if the
-				   splice_cycle variable is set, generate different descriptions and such. */
+              } else {
 
-				if (!splice_cycle) {
+                *(u16*)(out_buf + UR(temp_len - 1)) =
+                    SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]);
 
-					stage_name = "MOpt avoc";
-					stage_short = "MOpt_havoc";
-					stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-						perf_score / havoc_div / 100;
+              }
 
-				}
-				else {
-					static u8 tmp[32];
-					perf_score = orig_perf;
-					sprintf(tmp, "MOpt splice %u", splice_cycle);
-					stage_name = tmp;
-					stage_short = "MOpt_splice";
-					stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
-				}
+              stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST16] += 1;
+              break;
 
+            case 11:
+              /* Set dword to interesting value, randomly choosing endian. */
 
+              if (temp_len < 8) break;
 
-				if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
-
-				temp_len = len;
-
-				orig_hit_cnt = queued_paths + unique_crashes;
-
-				havoc_queued = queued_paths;
-
-
-
-				for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-					u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
-
-					stage_cur_val = use_stacking;
-
-
-					for (i = 0; i < operator_num; ++i)
-					{
-						stage_cycles_puppet_v3[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i];
-					}
-
-
-					for (i = 0; i < use_stacking; ++i) {
-
-						switch (select_algorithm()) {
-
-						case 0:
-							/* Flip a single bit somewhere. Spooky! */
-							FLIP_BIT(out_buf, UR(temp_len << 3));
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP1] += 1;
-							break;
-
-
-						case 1:
-							if (temp_len < 2) break;
-							temp_len_puppet = UR(temp_len << 3);
-							FLIP_BIT(out_buf, temp_len_puppet);
-							FLIP_BIT(out_buf, temp_len_puppet + 1);
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP2] += 1;
-							break;
-
-						case 2:
-							if (temp_len < 2) break;
-							temp_len_puppet = UR(temp_len << 3);
-							FLIP_BIT(out_buf, temp_len_puppet);
-							FLIP_BIT(out_buf, temp_len_puppet + 1);
-							FLIP_BIT(out_buf, temp_len_puppet + 2);
-							FLIP_BIT(out_buf, temp_len_puppet + 3);
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP4] += 1;
-							break;
-
-						case 3:
-							if (temp_len < 4) break;
-							out_buf[UR(temp_len)] ^= 0xFF;
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP8] += 1;
-							break;
-
-						case 4:
-							if (temp_len < 8) break;
-							*(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF;
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP16] += 1;
-							break;
-
-						case 5:
-							if (temp_len < 8) break;
-							*(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF;
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP32] += 1;
-							break;
-
-						case 6:
-							out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
-							out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
-							stage_cycles_puppet_v2[swarm_now][STAGE_ARITH8] += 1;
-							break;
-
-						case 7:
-							/* Randomly subtract from word, random endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 1);
-								*(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
-							}
-							else {
-								u32 pos = UR(temp_len - 1);
-								u16 num = 1 + UR(ARITH_MAX);
-								*(u16*)(out_buf + pos) =
-									SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
-							}
-							/* Randomly add to word, random endian. */
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 1);
-								*(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
-							}
-							else {
-								u32 pos = UR(temp_len - 1);
-								u16 num = 1 + UR(ARITH_MAX);
-								*(u16*)(out_buf + pos) =
-									SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
-							}
-							stage_cycles_puppet_v2[swarm_now][STAGE_ARITH16] += 1;
-							break;
-
-
-						case 8:
-							/* Randomly subtract from dword, random endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 3);
-								*(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
-							}
-							else {
-								u32 pos = UR(temp_len - 3);
-								u32 num = 1 + UR(ARITH_MAX);
-								*(u32*)(out_buf + pos) =
-									SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
-							}
-							/* Randomly add to dword, random endian. */
-							//if (temp_len < 4) break;
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 3);
-								*(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
-							}
-							else {
-								u32 pos = UR(temp_len - 3);
-								u32 num = 1 + UR(ARITH_MAX);
-								*(u32*)(out_buf + pos) =
-									SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
-							}
-							stage_cycles_puppet_v2[swarm_now][STAGE_ARITH32] += 1;
-							break;
-
-
-						case 9:
-							/* Set byte to interesting value. */
-							if (temp_len < 4) break;
-							out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
-							stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST8] += 1;
-							break;
-
-						case 10:
-							/* Set word to interesting value, randomly choosing endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								*(u16*)(out_buf + UR(temp_len - 1)) =
-									interesting_16[UR(sizeof(interesting_16) >> 1)];
-							}
-							else {
-								*(u16*)(out_buf + UR(temp_len - 1)) = SWAP16(
-									interesting_16[UR(sizeof(interesting_16) >> 1)]);
-							}
-							stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST16] += 1;
-							break;
-
-
-						case 11:
-							/* Set dword to interesting value, randomly choosing endian. */
-
-							if (temp_len < 8) break;
-
-							if (UR(2)) {
-								*(u32*)(out_buf + UR(temp_len - 3)) =
-									interesting_32[UR(sizeof(interesting_32) >> 2)];
-							}
-							else {
-								*(u32*)(out_buf + UR(temp_len - 3)) = SWAP32(
-									interesting_32[UR(sizeof(interesting_32) >> 2)]);
-							}
-							stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST32] += 1;
-							break;
+              if (UR(2)) {
 
+                *(u32*)(out_buf + UR(temp_len - 3)) =
+                    interesting_32[UR(sizeof(interesting_32) >> 2)];
 
-						case 12:
+              } else {
 
-							/* Just set a random byte to a random value. Because,
-							   why not. We use XOR with 1-255 to eliminate the
-							   possibility of a no-op. */
+                *(u32*)(out_buf + UR(temp_len - 3)) =
+                    SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]);
 
-							out_buf[UR(temp_len)] ^= 1 + UR(255);
-							stage_cycles_puppet_v2[swarm_now][STAGE_RANDOMBYTE] += 1;
-							break;
+              }
 
+              stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST32] += 1;
+              break;
 
+            case 12:
 
-						case 13: {
+              /* Just set a random byte to a random value. Because,
+                 why not. We use XOR with 1-255 to eliminate the
+                 possibility of a no-op. */
 
-							/* Delete bytes. We're making this a bit more likely
-							   than insertion (the next option) in hopes of keeping
-							   files reasonably small. */
+              out_buf[UR(temp_len)] ^= 1 + UR(255);
+              stage_cycles_puppet_v2[swarm_now][STAGE_RANDOMBYTE] += 1;
+              break;
 
-							u32 del_from, del_len;
+            case 13: {
 
-							if (temp_len < 2) break;
+              /* Delete bytes. We're making this a bit more likely
+                 than insertion (the next option) in hopes of keeping
+                 files reasonably small. */
 
-							/* Don't delete too much. */
+              u32 del_from, del_len;
 
-							del_len = choose_block_len(temp_len - 1);
+              if (temp_len < 2) break;
 
-							del_from = UR(temp_len - del_len + 1);
+              /* Don't delete too much. */
 
-							memmove(out_buf + del_from, out_buf + del_from + del_len,
-								temp_len - del_from - del_len);
+              del_len = choose_block_len(temp_len - 1);
 
-							temp_len -= del_len;
-							stage_cycles_puppet_v2[swarm_now][STAGE_DELETEBYTE] += 1;
-							break;
+              del_from = UR(temp_len - del_len + 1);
 
-						}
+              memmove(out_buf + del_from, out_buf + del_from + del_len,
+                      temp_len - del_from - del_len);
 
-						case 14:
+              temp_len -= del_len;
+              stage_cycles_puppet_v2[swarm_now][STAGE_DELETEBYTE] += 1;
+              break;
 
-							if (temp_len + HAVOC_BLK_XL < MAX_FILE) {
+            }
 
-								/* Clone bytes (75%) or insert a block of constant bytes (25%). */
+            case 14:
 
-								u8  actually_clone = UR(4);
-								u32 clone_from, clone_to, clone_len;
-								u8* new_buf;
+              if (temp_len + HAVOC_BLK_XL < MAX_FILE) {
 
-								if (actually_clone) {
+                /* Clone bytes (75%) or insert a block of constant bytes (25%).
+                 */
 
-									clone_len = choose_block_len(temp_len);
-									clone_from = UR(temp_len - clone_len + 1);
+                u8  actually_clone = UR(4);
+                u32 clone_from, clone_to, clone_len;
+                u8* new_buf;
 
-								}
-								else {
+                if (actually_clone) {
 
-									clone_len = choose_block_len(HAVOC_BLK_XL);
-									clone_from = 0;
+                  clone_len = choose_block_len(temp_len);
+                  clone_from = UR(temp_len - clone_len + 1);
 
-								}
+                } else {
 
-								clone_to = UR(temp_len);
+                  clone_len = choose_block_len(HAVOC_BLK_XL);
+                  clone_from = 0;
 
-								new_buf = ck_alloc_nozero(temp_len + clone_len);
+                }
 
-								/* Head */
+                clone_to = UR(temp_len);
 
-								memcpy(new_buf, out_buf, clone_to);
+                new_buf = ck_alloc_nozero(temp_len + clone_len);
 
-								/* Inserted part */
+                /* Head */
 
-								if (actually_clone)
-									memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
-								else
-									memset(new_buf + clone_to,
-										UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
+                memcpy(new_buf, out_buf, clone_to);
 
-								/* Tail */
-								memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
-									temp_len - clone_to);
+                /* Inserted part */
 
-								ck_free(out_buf);
-								out_buf = new_buf;
-								temp_len += clone_len;
-								stage_cycles_puppet_v2[swarm_now][STAGE_Clone75] += 1;
-							}
+                if (actually_clone)
+                  memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
+                else
+                  memset(new_buf + clone_to,
+                         UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
 
-							break;
+                /* Tail */
+                memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
+                       temp_len - clone_to);
 
-						case 15: {
+                ck_free(out_buf);
+                out_buf = new_buf;
+                temp_len += clone_len;
+                stage_cycles_puppet_v2[swarm_now][STAGE_Clone75] += 1;
 
-							/* Overwrite bytes with a randomly selected chunk (75%) or fixed
-							   bytes (25%). */
+              }
 
-							u32 copy_from, copy_to, copy_len;
+              break;
 
-							if (temp_len < 2) break;
+            case 15: {
 
-							copy_len = choose_block_len(temp_len - 1);
+              /* Overwrite bytes with a randomly selected chunk (75%) or fixed
+                 bytes (25%). */
 
-							copy_from = UR(temp_len - copy_len + 1);
-							copy_to = UR(temp_len - copy_len + 1);
+              u32 copy_from, copy_to, copy_len;
 
-							if (UR(4)) {
+              if (temp_len < 2) break;
 
-								if (copy_from != copy_to)
-									memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
+              copy_len = choose_block_len(temp_len - 1);
 
-							}
-							else memset(out_buf + copy_to,
-								UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
-							stage_cycles_puppet_v2[swarm_now][STAGE_OverWrite75] += 1;
-							break;
+              copy_from = UR(temp_len - copy_len + 1);
+              copy_to = UR(temp_len - copy_len + 1);
 
-						}
+              if (UR(4)) {
 
+                if (copy_from != copy_to)
+                  memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
 
-						}
+              } else
 
-					}
+                memset(out_buf + copy_to,
+                       UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
+              stage_cycles_puppet_v2[swarm_now][STAGE_OverWrite75] += 1;
+              break;
 
+            }
 
-					tmp_pilot_time += 1;
+          }
 
+        }
 
+        tmp_pilot_time += 1;
 
+        u64 temp_total_found = queued_paths + unique_crashes;
 
-					u64 temp_total_found = queued_paths + unique_crashes;
+        if (common_fuzz_stuff(argv, out_buf, temp_len))
+          goto abandon_entry_puppet;
 
+        /* out_buf might have been mangled a bit, so let's restore it to its
+           original size and shape. */
 
+        if (temp_len < len) out_buf = ck_realloc(out_buf, len);
+        temp_len = len;
+        memcpy(out_buf, in_buf, len);
 
+        /* If we're finding new stuff, let's run for a bit longer, limits
+           permitting. */
 
-					if (common_fuzz_stuff(argv, out_buf, temp_len))
-						goto abandon_entry_puppet;
+        if (queued_paths != havoc_queued) {
 
-					/* out_buf might have been mangled a bit, so let's restore it to its
-					   original size and shape. */
+          if (perf_score <= havoc_max_mult * 100) {
 
-					if (temp_len < len) out_buf = ck_realloc(out_buf, len);
-					temp_len = len;
-					memcpy(out_buf, in_buf, len);
+            stage_max *= 2;
+            perf_score *= 2;
 
-					/* If we're finding new stuff, let's run for a bit longer, limits
-					   permitting. */
+          }
 
-					if (queued_paths != havoc_queued) {
+          havoc_queued = queued_paths;
 
-						if (perf_score <= havoc_max_mult * 100) {
-							stage_max *= 2;
-							perf_score *= 2;
-						}
+        }
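+
+        /* Each batch of new paths at most doubles stage_max and perf_score,
+           and the doubling stops once perf_score exceeds
+           havoc_max_mult * 100, so a productive havoc run is extended but
+           stays bounded. */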
 
-						havoc_queued = queued_paths;
+        if (unlikely(queued_paths + unique_crashes > temp_total_found)) {
 
-					}
+          u64 temp_temp_puppet =
+              queued_paths + unique_crashes - temp_total_found;
+          total_puppet_find = total_puppet_find + temp_temp_puppet;
+          for (i = 0; i < 16; ++i) {
 
-					if (unlikely(queued_paths + unique_crashes > temp_total_found))
-					{
-						u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found;
-						total_puppet_find = total_puppet_find + temp_temp_puppet;
-						for (i = 0; i < 16; ++i)
-						{
-							if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet_v3[swarm_now][i])
-								stage_finds_puppet_v2[swarm_now][i] += temp_temp_puppet;
-						}
-					}
+            if (stage_cycles_puppet_v2[swarm_now][i] >
+                stage_cycles_puppet_v3[swarm_now][i])
+              stage_finds_puppet_v2[swarm_now][i] += temp_temp_puppet;
 
-				}
-				new_hit_cnt = queued_paths + unique_crashes;
+          }
 
-				if (!splice_cycle) {
-          stage_finds[STAGE_HAVOC]  += new_hit_cnt - orig_hit_cnt;
-          stage_cycles[STAGE_HAVOC] += stage_max;
-        } else {
-          stage_finds[STAGE_SPLICE]  += new_hit_cnt - orig_hit_cnt;
-          stage_cycles[STAGE_SPLICE] += stage_max;
         }
 
+      }
+
+      new_hit_cnt = queued_paths + unique_crashes;
+
+      if (!splice_cycle) {
+
+        stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt;
+        stage_cycles[STAGE_HAVOC] += stage_max;
+
+      } else {
+
+        stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt;
+        stage_cycles[STAGE_SPLICE] += stage_max;
+
+      }
+
 #ifndef IGNORE_FINDS
 
-				/************
-				 * SPLICING *
-				 ************/
+      /************
+       * SPLICING *
+       ************/
 
+    retry_splicing_puppet:
 
-			retry_splicing_puppet:
+      if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet &&
+          queued_paths > 1 && queue_cur->len > 1) {
 
-				if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet &&
-					queued_paths > 1 && queue_cur->len > 1) {
+        struct queue_entry* target;
+        u32                 tid, split_at;
+        u8*                 new_buf;
+        s32                 f_diff, l_diff;
 
-					struct queue_entry* target;
-					u32 tid, split_at;
-					u8* new_buf;
-					s32 f_diff, l_diff;
+        /* First of all, if we've modified in_buf for havoc, let's clean that
+           up... */
 
-					/* First of all, if we've modified in_buf for havoc, let's clean that
-					   up... */
+        if (in_buf != orig_in) {
 
-					if (in_buf != orig_in) {
-						ck_free(in_buf);
-						in_buf = orig_in;
-						len = queue_cur->len;
-					}
+          ck_free(in_buf);
+          in_buf = orig_in;
+          len = queue_cur->len;
 
-					/* Pick a random queue entry and seek to it. Don't splice with yourself. */
+        }
+
+        /* Pick a random queue entry and seek to it. Don't splice with yourself.
+         */
+
+        do {
 
-					do { tid = UR(queued_paths); } while (tid == current_entry);
+          tid = UR(queued_paths);
 
-					splicing_with = tid;
-					target = queue;
+        } while (tid == current_entry);
 
-					while (tid >= 100) { target = target->next_100; tid -= 100; }
-					while (tid--) target = target->next;
+        splicing_with = tid;
+        target = queue;
 
-					/* Make sure that the target has a reasonable length. */
+        while (tid >= 100) {
 
-					while (target && (target->len < 2 || target == queue_cur)) {
-						target = target->next;
-						++splicing_with;
-					}
+          target = target->next_100;
+          tid -= 100;
 
-					if (!target) goto retry_splicing_puppet;
+        }
+
+        while (tid--)
+          target = target->next;
 
-					/* Read the testcase into a new buffer. */
+        /* Make sure that the target has a reasonable length. */
 
-					fd = open(target->fname, O_RDONLY);
+        while (target && (target->len < 2 || target == queue_cur)) {
 
-					if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
+          target = target->next;
+          ++splicing_with;
 
-					new_buf = ck_alloc_nozero(target->len);
+        }
 
-					ck_read(fd, new_buf, target->len, target->fname);
+        if (!target) goto retry_splicing_puppet;
 
-					close(fd);
+        /* Read the testcase into a new buffer. */
 
-					/* Find a suitable splicin g location, somewhere between the first and
-					   the last differing byte. Bail out if the difference is just a single
-					   byte or so. */
+        fd = open(target->fname, O_RDONLY);
 
-					locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
+        if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
 
-					if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
-						ck_free(new_buf);
-						goto retry_splicing_puppet;
-					}
+        new_buf = ck_alloc_nozero(target->len);
 
-					/* Split somewhere between the first and last differing byte. */
+        ck_read(fd, new_buf, target->len, target->fname);
 
-					split_at = f_diff + UR(l_diff - f_diff);
+        close(fd);
 
-					/* Do the thing. */
+        /* Find a suitable splicing location, somewhere between the first and
+           the last differing byte. Bail out if the difference is just a single
+           byte or so. */
 
-					len = target->len;
-					memcpy(new_buf, in_buf, split_at);
-					in_buf = new_buf;
-					ck_free(out_buf);
-					out_buf = ck_alloc_nozero(len);
-					memcpy(out_buf, in_buf, len);
-					goto havoc_stage_puppet;
+        locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
 
-				}
+        if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
+
+          ck_free(new_buf);
+          goto retry_splicing_puppet;
+
+        }
+
+        /* Split somewhere between the first and last differing byte. */
+
+        split_at = f_diff + UR(l_diff - f_diff);
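+
+        /* split_at is uniform in [f_diff, l_diff - 1], so the crossover
+           point always falls inside the differing region found above. */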
+
+        /* Do the thing. */
+
+        len = target->len;
+        memcpy(new_buf, in_buf, split_at);
+        in_buf = new_buf;
+        ck_free(out_buf);
+        out_buf = ck_alloc_nozero(len);
+        memcpy(out_buf, in_buf, len);
+        goto havoc_stage_puppet;
+
+      }
 
 #endif /* !IGNORE_FINDS */
 
-				ret_val = 0;
+      ret_val = 0;
+
+    abandon_entry:
+    abandon_entry_puppet:
+
+      if (splice_cycle >= SPLICE_CYCLES_puppet)
+        SPLICE_CYCLES_puppet =
+            (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+             SPLICE_CYCLES_puppet_low);
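+
+      /* i.e. once the old budget is used up, the next splice budget is drawn
+         uniformly from [SPLICE_CYCLES_puppet_low, SPLICE_CYCLES_puppet_up]. */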
+
+      splicing_with = -1;
+
+      /* Update pending_not_fuzzed count if we made it through the calibration
+         cycle and have not seen this entry before. */
 
-			abandon_entry:
-			abandon_entry_puppet:
+      // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) {
 
-				if (splice_cycle >= SPLICE_CYCLES_puppet)
-					SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low);
+      //   queue_cur->was_fuzzed = 1;
+      //   --pending_not_fuzzed;
+      //   if (queue_cur->favored) --pending_favored;
+      // }
 
+      munmap(orig_in, queue_cur->len);
 
-				splicing_with = -1;
+      if (in_buf != orig_in) ck_free(in_buf);
+      ck_free(out_buf);
+      ck_free(eff_map);
 
-				/* Update pending_not_fuzzed count if we made it through the calibration
-				   cycle and have not seen this entry before. */
+      if (key_puppet == 1) {
 
-				   // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) {
-				   //   queue_cur->was_fuzzed = 1;
-				   //   --pending_not_fuzzed;
-				   //   if (queue_cur->favored) --pending_favored;
-				   // }
+        if (unlikely(queued_paths + unique_crashes >
+                     ((queued_paths + unique_crashes) * limit_time_bound +
+                      orig_hit_cnt_puppet))) {
 
-				munmap(orig_in, queue_cur->len);
+          key_puppet = 0;
+          cur_ms_lv = get_cur_time();
+          new_hit_cnt = queued_paths + unique_crashes;
+          orig_hit_cnt_puppet = 0;
+          last_limit_time_start = 0;
 
-				if (in_buf != orig_in) ck_free(in_buf);
-				ck_free(out_buf);
-				ck_free(eff_map);
+        }
+
+      }
 
+      if (unlikely(tmp_pilot_time > period_pilot)) {
 
-				if (key_puppet == 1) {
-					if (unlikely(queued_paths + unique_crashes > ((queued_paths + unique_crashes)*limit_time_bound + orig_hit_cnt_puppet)))	{
-						key_puppet = 0;
-						cur_ms_lv = get_cur_time();
-						new_hit_cnt = queued_paths + unique_crashes;
-						orig_hit_cnt_puppet = 0;
-						last_limit_time_start = 0;
-					}
-				}
+        total_pacemaker_time += tmp_pilot_time;
+        new_hit_cnt = queued_paths + unique_crashes;
+        swarm_fitness[swarm_now] =
+            (double)(total_puppet_find - temp_puppet_find) /
+            ((double)(tmp_pilot_time) / period_pilot_tmp);
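+
+        /* Fitness is roughly "new finds per period_pilot_tmp havoc
+           executions": e.g. 5 new paths/crashes over 2 * period_pilot_tmp
+           executions gives this swarm a fitness of 2.5. */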
+        tmp_pilot_time = 0;
+        temp_puppet_find = total_puppet_find;
 
+        u64 temp_stage_finds_puppet = 0;
+        for (i = 0; i < operator_num; ++i) {
 
-				if (unlikely(tmp_pilot_time > period_pilot)) {
-					total_pacemaker_time += tmp_pilot_time;
-					new_hit_cnt = queued_paths + unique_crashes;
-					swarm_fitness[swarm_now] = (double)(total_puppet_find - temp_puppet_find) / ((double)(tmp_pilot_time)/ period_pilot_tmp);
-					tmp_pilot_time = 0;
-					temp_puppet_find = total_puppet_find;
+          double temp_eff = 0.0;
 
-					u64 temp_stage_finds_puppet = 0;
-					for (i = 0; i < operator_num; ++i) {
-						double temp_eff = 0.0;
+          if (stage_cycles_puppet_v2[swarm_now][i] >
+              stage_cycles_puppet[swarm_now][i])
+            temp_eff = (double)(stage_finds_puppet_v2[swarm_now][i] -
+                                stage_finds_puppet[swarm_now][i]) /
+                       (double)(stage_cycles_puppet_v2[swarm_now][i] -
+                                stage_cycles_puppet[swarm_now][i]);
 
-						if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet[swarm_now][i])
-							temp_eff = (double)(stage_finds_puppet_v2[swarm_now][i] - stage_finds_puppet[swarm_now][i]) /
-							(double)(stage_cycles_puppet_v2[swarm_now][i] - stage_cycles_puppet[swarm_now][i]);
+          if (eff_best[swarm_now][i] < temp_eff) {
 
-						if (eff_best[swarm_now][i] < temp_eff) {
-							eff_best[swarm_now][i] = temp_eff;
-							L_best[swarm_now][i] = x_now[swarm_now][i];
-						}
+            eff_best[swarm_now][i] = temp_eff;
+            L_best[swarm_now][i] = x_now[swarm_now][i];
 
-						stage_finds_puppet[swarm_now][i] = stage_finds_puppet_v2[swarm_now][i];
-						stage_cycles_puppet[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i];
-						temp_stage_finds_puppet += stage_finds_puppet[swarm_now][i];
-					}
+          }
+
+          stage_finds_puppet[swarm_now][i] =
+              stage_finds_puppet_v2[swarm_now][i];
+          stage_cycles_puppet[swarm_now][i] =
+              stage_cycles_puppet_v2[swarm_now][i];
+          temp_stage_finds_puppet += stage_finds_puppet[swarm_now][i];
+
+        }
 
-					swarm_now = swarm_now + 1;
-						if (swarm_now == swarm_num) {
-							key_module = 1;
-							for (i = 0; i < operator_num; ++i) {
-								core_operator_cycles_puppet_v2[i] = core_operator_cycles_puppet[i];
-								core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet[i];
-								core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i];
-							}
+        swarm_now = swarm_now + 1;
+        if (swarm_now == swarm_num) {
 
-							double swarm_eff = 0.0;
-							swarm_now = 0;
-							for (i = 0; i < swarm_num; ++i)	{
-								if (swarm_fitness[i] > swarm_eff) {
-									swarm_eff = swarm_fitness[i];
-									swarm_now = i;
-								}
-							}
-							if (swarm_now <0 || swarm_now > swarm_num - 1)
-								PFATAL("swarm_now error number  %d", swarm_now);
+          key_module = 1;
+          for (i = 0; i < operator_num; ++i) {
 
-						}
-				}
-				return ret_val;
-			}
-		}
+            core_operator_cycles_puppet_v2[i] = core_operator_cycles_puppet[i];
+            core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet[i];
+            core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i];
 
+          }
+
+          double swarm_eff = 0.0;
+          swarm_now = 0;
+          for (i = 0; i < swarm_num; ++i) {
+
+            if (swarm_fitness[i] > swarm_eff) {
+
+              swarm_eff = swarm_fitness[i];
+              swarm_now = i;
+
+            }
+
+          }
+
+          if (swarm_now < 0 || swarm_now > swarm_num - 1)
+            PFATAL("swarm_now error number %d", swarm_now);
+
+        }
+
+      }
+
+      return ret_val;
+
+    }
+
+  }
 
 #undef FLIP_BIT
 
 }
 
-
 u8 core_fuzzing(char** argv) {
-	int i;
 
-	if (swarm_num == 1) {
-		key_module = 2;
-		return 0;
-	}
+  int i;
 
+  if (swarm_num == 1) {
 
-		s32 len, fd, temp_len, j;
-		u8  *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
-		u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv;
-		u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
+    key_module = 2;
+    return 0;
 
-		u8  ret_val = 1, doing_det = 0;
+  }
+
+  s32 len, fd, temp_len, j;
+  u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
+  u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv;
+  u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
 
-		u8  a_collect[MAX_AUTO_EXTRA];
-		u32 a_len = 0;
+  u8 ret_val = 1, doing_det = 0;
+
+  u8  a_collect[MAX_AUTO_EXTRA];
+  u32 a_len = 0;
 
 #ifdef IGNORE_FINDS
 
-		/* In IGNORE_FINDS mode, skip any entries that weren't in the
-		   initial data set. */
+  /* In IGNORE_FINDS mode, skip any entries that weren't in the
+     initial data set. */
 
-		if (queue_cur->depth > 1) return 1;
+  if (queue_cur->depth > 1) return 1;
 
 #else
 
-		if (pending_favored) {
+  if (pending_favored) {
 
-			/* If we have any favored, non-fuzzed new arrivals in the queue,
-			   possibly skip to them at the expense of already-fuzzed or non-favored
-			   cases. */
+    /* If we have any favored, non-fuzzed new arrivals in the queue,
+       possibly skip to them at the expense of already-fuzzed or non-favored
+       cases. */
 
-			if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
-				UR(100) < SKIP_TO_NEW_PROB) return 1;
+    if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
+        UR(100) < SKIP_TO_NEW_PROB)
+      return 1;
 
-		} else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
+  } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
 
-			/* Otherwise, still possibly skip non-favored cases, albeit less often.
-			   The odds of skipping stuff are higher for already-fuzzed inputs and
-			   lower for never-fuzzed entries. */
+    /* Otherwise, still possibly skip non-favored cases, albeit less often.
+       The odds of skipping stuff are higher for already-fuzzed inputs and
+       lower for never-fuzzed entries. */
 
-			if (queue_cycle > 1 && !queue_cur->was_fuzzed) {
+    if (queue_cycle > 1 && !queue_cur->was_fuzzed) {
 
-				if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
+      if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
 
-			} else {
+    } else {
 
-				if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
+      if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
 
-			}
+    }
 
-		}
+  }
 
 #endif /* ^IGNORE_FINDS */
 
-		if (not_on_tty) {
-			ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
-				current_entry, queued_paths, unique_crashes);
-			fflush(stdout);
-		}
+  if (not_on_tty) {
+
+    ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
+         current_entry, queued_paths, unique_crashes);
+    fflush(stdout);
+
+  }
+
+  /* Map the test case into memory. */
+
+  fd = open(queue_cur->fname, O_RDONLY);
+
+  if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
+
+  len = queue_cur->len;
+
+  orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 
-		/* Map the test case into memory. */
+  if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname);
 
-		fd = open(queue_cur->fname, O_RDONLY);
+  close(fd);
 
-		if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
+  /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
+     single byte anyway, so it wouldn't give us any performance or memory usage
+     benefits. */
 
-		len = queue_cur->len;
+  out_buf = ck_alloc_nozero(len);
 
-		orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+  subseq_tmouts = 0;
 
-		if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname);
+  cur_depth = queue_cur->depth;
 
-		close(fd);
+  /*******************************************
+   * CALIBRATION (only if failed earlier on) *
+   *******************************************/
 
-		/* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
-		   single byte anyway, so it wouldn't give us any performance or memory usage
-		   benefits. */
+  if (queue_cur->cal_failed) {
 
-		out_buf = ck_alloc_nozero(len);
+    u8 res = FAULT_TMOUT;
 
-		subseq_tmouts = 0;
+    if (queue_cur->cal_failed < CAL_CHANCES) {
 
-		cur_depth = queue_cur->depth;
+      res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
 
-		/*******************************************
-		 * CALIBRATION (only if failed earlier on) *
-		 *******************************************/
+      if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
-		if (queue_cur->cal_failed) {
+    }
 
-			u8 res = FAULT_TMOUT;
+    if (stop_soon || res != crash_mode) {
 
-			if (queue_cur->cal_failed < CAL_CHANCES) {
+      ++cur_skipped_paths;
+      goto abandon_entry;
 
-				res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
+    }
 
-				if (res == FAULT_ERROR)
-					FATAL("Unable to execute target application");
+  }
 
-			}
+  /************
+   * TRIMMING *
+   ************/
 
-			if (stop_soon || res != crash_mode) {
-				++cur_skipped_paths;
-				goto abandon_entry;
-			}
+  if (!dumb_mode && !queue_cur->trim_done) {
 
-		}
+    u8 res = trim_case(argv, queue_cur, in_buf);
 
-		/************
-		 * TRIMMING *
-		 ************/
+    if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
-		if (!dumb_mode && !queue_cur->trim_done) {
+    if (stop_soon) {
 
-			u8 res = trim_case(argv, queue_cur, in_buf);
+      ++cur_skipped_paths;
+      goto abandon_entry;
 
-			if (res == FAULT_ERROR)
-				FATAL("Unable to execute target application");
+    }
 
-			if (stop_soon) {
-				++cur_skipped_paths;
-				goto abandon_entry;
-			}
+    /* Don't retry trimming, even if it failed. */
 
-			/* Don't retry trimming, even if it failed. */
+    queue_cur->trim_done = 1;
 
-			queue_cur->trim_done = 1;
+    len = queue_cur->len;
 
-			len = queue_cur->len;
+  }
 
-		}
+  memcpy(out_buf, in_buf, len);
 
-		memcpy(out_buf, in_buf, len);
+  /*********************
+   * PERFORMANCE SCORE *
+   *********************/
 
-		/*********************
-		 * PERFORMANCE SCORE *
-		 *********************/
+  orig_perf = perf_score = calculate_score(queue_cur);
 
-		orig_perf = perf_score = calculate_score(queue_cur);
+  /* Skip right away if -d is given, if we have done deterministic fuzzing on
+     this entry ourselves (was_fuzzed), or if it has gone through deterministic
+     testing in earlier, resumed runs (passed_det). */
 
-		/* Skip right away if -d is given, if we have done deterministic fuzzing on
-		   this entry ourselves (was_fuzzed), or if it has gone through deterministic
-		   testing in earlier, resumed runs (passed_det). */
+  if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
+    goto havoc_stage;
 
-		if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
-			goto havoc_stage;
+  /* Skip deterministic fuzzing if exec path checksum puts this out of scope
+     for this master instance. */
 
-		/* Skip deterministic fuzzing if exec path checksum puts this out of scope
-		   for this master instance. */
+  if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
+    goto havoc_stage;
 
-		if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
-			goto havoc_stage;
+  cur_ms_lv = get_cur_time();
+  if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) ||
+                            (last_crash_time != 0 &&
+                             cur_ms_lv - last_crash_time < limit_time_puppet) ||
+                            last_path_time == 0))) {
 
+    key_puppet = 1;
+    goto pacemaker_fuzzing;
 
-		cur_ms_lv = get_cur_time();
-		if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) ||
-			(last_crash_time != 0 && cur_ms_lv - last_crash_time < limit_time_puppet) || last_path_time == 0)))
-		{
-			key_puppet = 1;
-			goto pacemaker_fuzzing;
-		}
+  }
 
-		doing_det = 1;
+  doing_det = 1;
 
-		/*********************************************
-		 * SIMPLE BITFLIP (+dictionary construction) *
-		 *********************************************/
+  /*********************************************
+   * SIMPLE BITFLIP (+dictionary construction) *
+   *********************************************/
 
-#define FLIP_BIT(_ar, _b) do { \
-    u8* _arf = (u8*)(_ar); \
-    u32 _bf = (_b); \
-    _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
+#define FLIP_BIT(_ar, _b)                   \
+  do {                                      \
+                                            \
+    u8* _arf = (u8*)(_ar);                  \
+    u32 _bf = (_b);                         \
+    _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \
+                                            \
   } while (0)
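+
+  /* FLIP_BIT numbers bits MSB-first within each byte: e.g. FLIP_BIT(buf, 10)
+     toggles the third-highest bit of buf[1], i.e. buf[1] ^= 0x20. */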
 
-		 /* Single walking bit. */
+  /* Single walking bit. */
 
-		stage_short = "flip1";
-		stage_max = len << 3;
-		stage_name = "bitflip 1/1";
+  stage_short = "flip1";
+  stage_max = len << 3;
+  stage_name = "bitflip 1/1";
 
-		stage_val_type = STAGE_VAL_NONE;
+  stage_val_type = STAGE_VAL_NONE;
 
-		orig_hit_cnt = queued_paths + unique_crashes;
+  orig_hit_cnt = queued_paths + unique_crashes;
 
-		prev_cksum = queue_cur->exec_cksum;
+  prev_cksum = queue_cur->exec_cksum;
 
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
-			stage_cur_byte = stage_cur >> 3;
+    stage_cur_byte = stage_cur >> 3;
 
-			FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur);
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-			FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur);
 
-			/* While flipping the least significant bit in every byte, pull of an extra
-			   trick to detect possible syntax tokens. In essence, the idea is that if
-			   you have a binary blob like this:
+    /* While flipping the least significant bit in every byte, pull off an extra
+       trick to detect possible syntax tokens. In essence, the idea is that if
+       you have a binary blob like this:
 
-			   xxxxxxxxIHDRxxxxxxxx
+       xxxxxxxxIHDRxxxxxxxx
 
-			   ...and changing the leading and trailing bytes causes variable or no
-			   changes in program flow, but touching any character in the "IHDR" string
-			   always produces the same, distinctive path, it's highly likely that
-			   "IHDR" is an atomically-checked magic value of special significance to
-			   the fuzzed format.
+       ...and changing the leading and trailing bytes causes variable or no
+       changes in program flow, but touching any character in the "IHDR" string
+       always produces the same, distinctive path, it's highly likely that
+       "IHDR" is an atomically-checked magic value of special significance to
+       the fuzzed format.
 
-			   We do this here, rather than as a separate stage, because it's a nice
-			   way to keep the operation approximately "free" (i.e., no extra execs).
+       We do this here, rather than as a separate stage, because it's a nice
+       way to keep the operation approximately "free" (i.e., no extra execs).
 
-			   Empirically, performing the check when flipping the least significant bit
-			   is advantageous, compared to doing it at the time of more disruptive
-			   changes, where the program flow may be affected in more violent ways.
+       Empirically, performing the check when flipping the least significant bit
+       is advantageous, compared to doing it at the time of more disruptive
+       changes, where the program flow may be affected in more violent ways.
 
-			   The caveat is that we won't generate dictionaries in the -d mode or -S
-			   mode - but that's probably a fair trade-off.
+       The caveat is that we won't generate dictionaries in the -d mode or -S
+       mode - but that's probably a fair trade-off.
 
-			   This won't work particularly well with paths that exhibit variable
-			   behavior, but fails gracefully, so we'll carry out the checks anyway.
+       This won't work particularly well with paths that exhibit variable
+       behavior, but fails gracefully, so we'll carry out the checks anyway.
 
-			  */
+      */
 
-			if (!dumb_mode && (stage_cur & 7) == 7) {
+    if (!dumb_mode && (stage_cur & 7) == 7) {
 
-				u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
 
-				if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
+      if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
 
-					/* If at end of file and we are still collecting a string, grab the
-					   final character and force output. */
+        /* If at end of file and we are still collecting a string, grab the
+           final character and force output. */
 
-					if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-					++a_len;
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        ++a_len;
 
-					if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-						maybe_add_auto(a_collect, a_len);
+        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
+          maybe_add_auto(a_collect, a_len);
 
-				}
-				else if (cksum != prev_cksum) {
+      } else if (cksum != prev_cksum) {
 
-					/* Otherwise, if the checksum has changed, see if we have something
-					   worthwhile queued up, and collect that if the answer is yes. */
+        /* Otherwise, if the checksum has changed, see if we have something
+           worthwhile queued up, and collect that if the answer is yes. */
 
-					if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-						maybe_add_auto(a_collect, a_len);
+        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
+          maybe_add_auto(a_collect, a_len);
 
-					a_len = 0;
-					prev_cksum = cksum;
+        a_len = 0;
+        prev_cksum = cksum;
 
-				}
+      }
 
-				/* Continue collecting string, but only if the bit flip actually made
-				   any difference - we don't want no-op tokens. */
+      /* Continue collecting string, but only if the bit flip actually made
+         any difference - we don't want no-op tokens. */
 
-				if (cksum != queue_cur->exec_cksum) {
+      if (cksum != queue_cur->exec_cksum) {
 
-					if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-					++a_len;
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        ++a_len;
 
-				}
+      }
 
-			}
+    }
 
-		}
+  }
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-		stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP1] += stage_max;
+  stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP1] += stage_max;
 
+  /* Two walking bits. */
 
+  stage_name = "bitflip 2/1";
+  stage_short = "flip2";
+  stage_max = (len << 3) - 1;
 
-		/* Two walking bits. */
+  orig_hit_cnt = new_hit_cnt;
 
-		stage_name = "bitflip 2/1";
-		stage_short = "flip2";
-		stage_max = (len << 3) - 1;
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
-		orig_hit_cnt = new_hit_cnt;
+    stage_cur_byte = stage_cur >> 3;
 
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
 
-			stage_cur_byte = stage_cur >> 3;
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+  }
 
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
+  new_hit_cnt = queued_paths + unique_crashes;
 
-		}
+  stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP2] += stage_max;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  /* Four walking bits. */
 
-		stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP2] += stage_max;
+  stage_name = "bitflip 4/1";
+  stage_short = "flip4";
+  stage_max = (len << 3) - 3;
 
+  orig_hit_cnt = new_hit_cnt;
 
-		/* Four walking bits. */
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
-		stage_name = "bitflip 4/1";
-		stage_short = "flip4";
-		stage_max = (len << 3) - 3;
+    stage_cur_byte = stage_cur >> 3;
 
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, stage_cur + 2);
+    FLIP_BIT(out_buf, stage_cur + 3);
 
-		orig_hit_cnt = new_hit_cnt;
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, stage_cur + 2);
+    FLIP_BIT(out_buf, stage_cur + 3);
 
-			stage_cur_byte = stage_cur >> 3;
+  }
 
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
-			FLIP_BIT(out_buf, stage_cur + 2);
-			FLIP_BIT(out_buf, stage_cur + 3);
+  new_hit_cnt = queued_paths + unique_crashes;
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+  stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP4] += stage_max;
 
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
-			FLIP_BIT(out_buf, stage_cur + 2);
-			FLIP_BIT(out_buf, stage_cur + 3);
+  /* Effector map setup. These macros calculate:
 
-		}
+     EFF_APOS      - position of a particular file offset in the map.
+     EFF_ALEN      - length of a map with a particular number of bytes.
+     EFF_SPAN_ALEN - map span for a sequence of bytes.
 
-		new_hit_cnt = queued_paths + unique_crashes;
+   */
 
-		stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP4] += stage_max;
+#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
+#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
+#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
+#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1)
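+
+/* For example, assuming the usual EFF_MAP_SCALE2 of 3 (one effector byte per
+   8 input bytes), a 20-byte input gives EFF_ALEN(20) == 3: offsets 0-7 map to
+   slot 0, 8-15 to slot 1, and 16-19 to slot 2. */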
 
+  /* Initialize effector map for the next step (see comments below). Always
+     flag first and last byte as doing something. */
 
-		/* Effector map setup. These macros calculate:
+  eff_map = ck_alloc(EFF_ALEN(len));
+  eff_map[0] = 1;
 
-		   EFF_APOS      - position of a particular file offset in the map.
-		   EFF_ALEN      - length of a map with a particular number of bytes.
-		   EFF_SPAN_ALEN - map span for a sequence of bytes.
+  if (EFF_APOS(len - 1) != 0) {
 
-		 */
+    eff_map[EFF_APOS(len - 1)] = 1;
+    ++eff_cnt;
 
-#define EFF_APOS(_p)          ((_p) >> EFF_MAP_SCALE2)
-#define EFF_REM(_x)           ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
-#define EFF_ALEN(_l)          (EFF_APOS(_l) + !!EFF_REM(_l))
-#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)
+  }
 
-		 /* Initialize effector map for the next step (see comments below). Always
-			flag first and last byte as doing something. */
+  /* Walking byte. */
 
-		eff_map = ck_alloc(EFF_ALEN(len));
-		eff_map[0] = 1;
+  stage_name = "bitflip 8/8";
+  stage_short = "flip8";
+  stage_max = len;
 
-		if (EFF_APOS(len - 1) != 0) {
-			eff_map[EFF_APOS(len - 1)] = 1;
-			++eff_cnt;
-		}
+  orig_hit_cnt = new_hit_cnt;
 
-		/* Walking byte. */
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
-		stage_name = "bitflip 8/8";
-		stage_short = "flip8";
-		stage_max = len;
+    stage_cur_byte = stage_cur;
 
+    out_buf[stage_cur] ^= 0xFF;
 
-		orig_hit_cnt = new_hit_cnt;
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+    /* We also use this stage to pull off a simple trick: we identify
+       bytes that seem to have no effect on the current execution path
+       even when fully flipped - and we skip them during more expensive
+       deterministic stages, such as arithmetics or known ints. */
 
-			stage_cur_byte = stage_cur;
+    if (!eff_map[EFF_APOS(stage_cur)]) {
 
-			out_buf[stage_cur] ^= 0xFF;
+      u32 cksum;
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      /* If in dumb mode or if the file is very short, just flag everything
+         without wasting time on checksums. */
 
-			/* We also use this stage to pull off a simple trick: we identify
-			   bytes that seem to have no effect on the current execution path
-			   even when fully flipped - and we skip them during more expensive
-			   deterministic stages, such as arithmetics or known ints. */
+      if (!dumb_mode && len >= EFF_MIN_LEN)
+        cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      else
+        cksum = ~queue_cur->exec_cksum;
 
-			if (!eff_map[EFF_APOS(stage_cur)]) {
+      if (cksum != queue_cur->exec_cksum) {
 
-				u32 cksum;
+        eff_map[EFF_APOS(stage_cur)] = 1;
+        ++eff_cnt;
 
-				/* If in dumb mode or if the file is very short, just flag everything
-				   without wasting time on checksums. */
+      }
 
-				if (!dumb_mode && len >= EFF_MIN_LEN)
-					cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
-				else
-					cksum = ~queue_cur->exec_cksum;
+    }
 
-				if (cksum != queue_cur->exec_cksum) {
-					eff_map[EFF_APOS(stage_cur)] = 1;
-					++eff_cnt;
-				}
+    out_buf[stage_cur] ^= 0xFF;
 
-			}
+  }
 
-			out_buf[stage_cur] ^= 0xFF;
+  /* If the effector map is more than EFF_MAX_PERC dense, just flag the
+     whole thing as worth fuzzing, since we wouldn't be saving much time
+     anyway. */
 
-		}
+  if (eff_cnt != EFF_ALEN(len) &&
+      eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
 
-		/* If the effector map is more than EFF_MAX_PERC dense, just flag the
-		   whole thing as worth fuzzing, since we wouldn't be saving much time
-		   anyway. */
+    memset(eff_map, 1, EFF_ALEN(len));
 
-		if (eff_cnt != EFF_ALEN(len) &&
-			eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
+    blocks_eff_select += EFF_ALEN(len);
 
-			memset(eff_map, 1, EFF_ALEN(len));
+  } else {
 
-			blocks_eff_select += EFF_ALEN(len);
+    blocks_eff_select += eff_cnt;
 
-		}
-		else {
+  }
 
-			blocks_eff_select += eff_cnt;
+  blocks_eff_total += EFF_ALEN(len);
 
-		}
+  new_hit_cnt = queued_paths + unique_crashes;
 
-		blocks_eff_total += EFF_ALEN(len);
+  stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP8] += stage_max;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  /* Two walking bytes. */
 
-		stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP8] += stage_max;
+  if (len < 2) goto skip_bitflip;
 
+  stage_name = "bitflip 16/8";
+  stage_short = "flip16";
+  stage_cur = 0;
+  stage_max = len - 1;
 
+  orig_hit_cnt = new_hit_cnt;
 
-		/* Two walking bytes. */
+  for (i = 0; i < len - 1; ++i) {
 
-		if (len < 2) goto skip_bitflip;
+    /* Let's consult the effector map... */
 
-		stage_name = "bitflip 16/8";
-		stage_short = "flip16";
-		stage_cur = 0;
-		stage_max = len - 1;
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
+      --stage_max;
+      continue;
 
-		orig_hit_cnt = new_hit_cnt;
+    }
 
-		for (i = 0; i < len - 1; ++i) {
+    stage_cur_byte = i;
 
-			/* Let's consult the effector map... */
+    *(u16*)(out_buf + i) ^= 0xFFFF;
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				--stage_max;
-				continue;
-			}
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    ++stage_cur;
 
-			stage_cur_byte = i;
+    *(u16*)(out_buf + i) ^= 0xFFFF;
 
-			*(u16*)(out_buf + i) ^= 0xFFFF;
+  }
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-			++stage_cur;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-			*(u16*)(out_buf + i) ^= 0xFFFF;
+  stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP16] += stage_max;
 
+  if (len < 4) goto skip_bitflip;
 
-		}
+  /* Four walking bytes. */
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  stage_name = "bitflip 32/8";
+  stage_short = "flip32";
+  stage_cur = 0;
+  stage_max = len - 3;
 
-		stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP16] += stage_max;
+  orig_hit_cnt = new_hit_cnt;
 
+  for (i = 0; i < len - 3; ++i) {
 
+    /* Let's consult the effector map... */
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-		if (len < 4) goto skip_bitflip;
+      --stage_max;
+      continue;
 
-		/* Four walking bytes. */
+    }
 
-		stage_name = "bitflip 32/8";
-		stage_short = "flip32";
-		stage_cur = 0;
-		stage_max = len - 3;
+    stage_cur_byte = i;
 
+    *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    ++stage_cur;
+
+    *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
 
-		orig_hit_cnt = new_hit_cnt;
+  stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP32] += stage_max;
 
-		for (i = 0; i < len - 3; ++i) {
+skip_bitflip:
 
-			/* Let's consult the effector map... */
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				--stage_max;
-				continue;
-			}
+  if (no_arith) goto skip_arith;
 
-			stage_cur_byte = i;
+  /**********************
+   * ARITHMETIC INC/DEC *
+   **********************/
 
-			*(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+  /* 8-bit arithmetics. */
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-			++stage_cur;
+  stage_name = "arith 8/8";
+  stage_short = "arith8";
+  stage_cur = 0;
+  stage_max = 2 * len * ARITH_MAX;
 
-			*(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+  stage_val_type = STAGE_VAL_LE;
 
-		}
+  orig_hit_cnt = new_hit_cnt;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  for (i = 0; i < len; ++i) {
 
-		stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP32] += stage_max;
+    u8 orig = out_buf[i];
 
+    /* Let's consult the effector map... */
 
+    if (!eff_map[EFF_APOS(i)]) {
 
+      stage_max -= 2 * ARITH_MAX;
+      continue;
 
-	skip_bitflip:
+    }
 
-		if (no_arith) goto skip_arith;
+    stage_cur_byte = i;
 
-		/**********************
-		 * ARITHMETIC INC/DEC *
-		 **********************/
+    for (j = 1; j <= ARITH_MAX; ++j) {
 
-		 /* 8-bit arithmetics. */
+      u8 r = orig ^ (orig + j);
 
-		stage_name = "arith 8/8";
-		stage_short = "arith8";
-		stage_cur = 0;
-		stage_max = 2 * len * ARITH_MAX;
+      /* Do arithmetic operations only if the result couldn't be a product
+         of a bitflip. */
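+      /* e.g. with orig == 0x01 and j == 1, orig ^ (orig + j) == 0x03, a
+         two-bit pattern already covered by the bitflip 2/1 stage, so that
+         combination is skipped. */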
 
+      if (!could_be_bitflip(r)) {
 
-		stage_val_type = STAGE_VAL_LE;
+        stage_cur_val = j;
+        out_buf[i] = orig + j;
 
-		orig_hit_cnt = new_hit_cnt;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-		for (i = 0; i < len; ++i) {
+      } else
 
-			u8 orig = out_buf[i];
+        --stage_max;
 
-			/* Let's consult the effector map... */
+      r = orig ^ (orig - j);
 
-			if (!eff_map[EFF_APOS(i)]) {
-				stage_max -= 2 * ARITH_MAX;
-				continue;
-			}
+      if (!could_be_bitflip(r)) {
 
-			stage_cur_byte = i;
+        stage_cur_val = -j;
+        out_buf[i] = orig - j;
 
-			for (j = 1; j <= ARITH_MAX; ++j) {
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				u8 r = orig ^ (orig + j);
+      } else
 
-				/* Do arithmetic operations only if the result couldn't be a product
-				   of a bitflip. */
+        --stage_max;
 
-				if (!could_be_bitflip(r)) {
+      out_buf[i] = orig;
 
-					stage_cur_val = j;
-					out_buf[i] = orig + j;
+    }
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+  }
 
-				} else --stage_max;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-				r = orig ^ (orig - j);
+  stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH8] += stage_max;
 
-				if (!could_be_bitflip(r)) {
+  /* 16-bit arithmetics, both endians. */
 
-					stage_cur_val = -j;
-					out_buf[i] = orig - j;
+  if (len < 2) goto skip_arith;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+  stage_name = "arith 16/8";
+  stage_short = "arith16";
+  stage_cur = 0;
+  stage_max = 4 * (len - 1) * ARITH_MAX;
 
-				} else --stage_max;
+  orig_hit_cnt = new_hit_cnt;
 
-				out_buf[i] = orig;
+  for (i = 0; i < len - 1; ++i) {
 
-			}
+    u16 orig = *(u16*)(out_buf + i);
 
-		}
+    /* Let's consult the effector map... */
 
-		new_hit_cnt = queued_paths + unique_crashes;
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-		stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH8] += stage_max;
+      stage_max -= 4 * ARITH_MAX;
+      continue;
 
+    }
 
+    stage_cur_byte = i;
 
+    for (j = 1; j <= ARITH_MAX; ++j) {
 
-		/* 16-bit arithmetics, both endians. */
+      u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
+          r3 = orig ^ SWAP16(SWAP16(orig) + j),
+          r4 = orig ^ SWAP16(SWAP16(orig) - j);
 
-		if (len < 2) goto skip_arith;
+      /* Try little endian addition and subtraction first. Do it only
+         if the operation would affect more than one byte (hence the
+         & 0xff overflow checks) and if it couldn't be a product of
+         a bitflip. */
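+      /* e.g. if the low byte is 0xFF, adding 1 carries into the high byte
+         and changes two bytes, which the 8-bit stage could not have
+         produced; if the low byte is 0x10, the +1 only changes that one
+         byte, duplicates the 8-bit stage, and is skipped here. */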
 
-		stage_name = "arith 16/8";
-		stage_short = "arith16";
-		stage_cur = 0;
-		stage_max = 4 * (len - 1) * ARITH_MAX;
+      stage_val_type = STAGE_VAL_LE;
 
+      if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
 
-		orig_hit_cnt = new_hit_cnt;
+        stage_cur_val = j;
+        *(u16*)(out_buf + i) = orig + j;
 
-		for (i = 0; i < len - 1; ++i) {
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-			u16 orig = *(u16*)(out_buf + i);
+      } else
 
-			/* Let's consult the effector map... */
+        --stage_max;
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				stage_max -= 4 * ARITH_MAX;
-				continue;
-			}
+      if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
 
-			stage_cur_byte = i;
+        stage_cur_val = -j;
+        *(u16*)(out_buf + i) = orig - j;
 
-			for (j = 1; j <= ARITH_MAX; ++j) {
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				u16 r1 = orig ^ (orig + j),
-					r2 = orig ^ (orig - j),
-					r3 = orig ^ SWAP16(SWAP16(orig) + j),
-					r4 = orig ^ SWAP16(SWAP16(orig) - j);
+      } else
 
-				/* Try little endian addition and subtraction first. Do it only
-				   if the operation would affect more than one byte (hence the
-				   & 0xff overflow checks) and if it couldn't be a product of
-				   a bitflip. */
+        --stage_max;
 
-				stage_val_type = STAGE_VAL_LE;
+      /* Big endian comes next. Same deal. */
 
-				if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
+      stage_val_type = STAGE_VAL_BE;
 
-					stage_cur_val = j;
-					*(u16*)(out_buf + i) = orig + j;
+      if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+        stage_cur_val = j;
+        *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
 
-				} else --stage_max;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
+      } else
 
-					stage_cur_val = -j;
-					*(u16*)(out_buf + i) = orig - j;
+        --stage_max;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+      if ((orig >> 8) < j && !could_be_bitflip(r4)) {
 
-				} else --stage_max;
+        stage_cur_val = -j;
+        *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
 
-				/* Big endian comes next. Same deal. */
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				stage_val_type = STAGE_VAL_BE;
+      } else
 
+        --stage_max;
 
-				if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
+      *(u16*)(out_buf + i) = orig;
 
-					stage_cur_val = j;
-					*(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
+    }
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+  }
 
-				} else --stage_max;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-				if ((orig >> 8) < j && !could_be_bitflip(r4)) {
+  stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH16] += stage_max;
 
-					stage_cur_val = -j;
-					*(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
+  /* 32-bit arithmetics, both endians. */
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+  if (len < 4) goto skip_arith;
 
-				} else --stage_max;
+  stage_name = "arith 32/8";
+  stage_short = "arith32";
+  stage_cur = 0;
+  stage_max = 4 * (len - 3) * ARITH_MAX;
 
-				*(u16*)(out_buf + i) = orig;
+  orig_hit_cnt = new_hit_cnt;
 
-			}
+  for (i = 0; i < len - 3; ++i) {
 
-		}
+    u32 orig = *(u32*)(out_buf + i);
 
-		new_hit_cnt = queued_paths + unique_crashes;
+    /* Let's consult the effector map... */
 
-		stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH16] += stage_max;
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
+      stage_max -= 4 * ARITH_MAX;
+      continue;
 
+    }
 
-		/* 32-bit arithmetics, both endians. */
+    stage_cur_byte = i;
 
-		if (len < 4) goto skip_arith;
+    for (j = 1; j <= ARITH_MAX; ++j) {
 
-		stage_name = "arith 32/8";
-		stage_short = "arith32";
-		stage_cur = 0;
-		stage_max = 4 * (len - 3) * ARITH_MAX;
+      u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
+          r3 = orig ^ SWAP32(SWAP32(orig) + j),
+          r4 = orig ^ SWAP32(SWAP32(orig) - j);
 
-		orig_hit_cnt = new_hit_cnt;
+      /* Little endian first. Same deal as with 16-bit: we only want to
+         try if the operation would have effect on more than two bytes. */
 
-		for (i = 0; i < len - 3; ++i) {
+      stage_val_type = STAGE_VAL_LE;
 
-			u32 orig = *(u32*)(out_buf + i);
+      if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
 
-			/* Let's consult the effector map... */
+        stage_cur_val = j;
+        *(u32*)(out_buf + i) = orig + j;
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				stage_max -= 4 * ARITH_MAX;
-				continue;
-			}
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-			stage_cur_byte = i;
+      } else
 
-			for (j = 1; j <= ARITH_MAX; ++j) {
+        --stage_max;
 
-				u32 r1 = orig ^ (orig + j),
-					r2 = orig ^ (orig - j),
-					r3 = orig ^ SWAP32(SWAP32(orig) + j),
-					r4 = orig ^ SWAP32(SWAP32(orig) - j);
+      if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
 
-				/* Little endian first. Same deal as with 16-bit: we only want to
-				   try if the operation would have effect on more than two bytes. */
+        stage_cur_val = -j;
+        *(u32*)(out_buf + i) = orig - j;
 
-				stage_val_type = STAGE_VAL_LE;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
+      } else
 
-					stage_cur_val = j;
-					*(u32*)(out_buf + i) = orig + j;
+        --stage_max;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+      /* Big endian next. */
 
-				} else --stage_max;
+      stage_val_type = STAGE_VAL_BE;
 
-				if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
+      if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
 
-					stage_cur_val = -j;
-					*(u32*)(out_buf + i) = orig - j;
+        stage_cur_val = j;
+        *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				} else --stage_max;
+      } else
 
-				/* Big endian next. */
+        --stage_max;
 
-				stage_val_type = STAGE_VAL_BE;
+      if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
 
-				if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
+        stage_cur_val = -j;
+        *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
 
-					stage_cur_val = j;
-					*(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+      } else
 
-				} else --stage_max;
+        --stage_max;
 
-				if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
+      *(u32*)(out_buf + i) = orig;
 
-					stage_cur_val = -j;
-					*(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
+    }
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+  }
 
-				} else --stage_max;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-				*(u32*)(out_buf + i) = orig;
+  stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH32] += stage_max;
 
-			}
+skip_arith:
 
-		}
+  /**********************
+   * INTERESTING VALUES *
+   **********************/
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  stage_name = "interest 8/8";
+  stage_short = "int8";
+  stage_cur = 0;
+  stage_max = len * sizeof(interesting_8);
 
-		stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH32] += stage_max;
+  stage_val_type = STAGE_VAL_LE;
 
+  orig_hit_cnt = new_hit_cnt;
 
+  /* Setting 8-bit integers. */
 
-	skip_arith:
+  for (i = 0; i < len; ++i) {
 
-		/**********************
-		 * INTERESTING VALUES *
-		 **********************/
+    u8 orig = out_buf[i];
 
-		stage_name = "interest 8/8";
-		stage_short = "int8";
-		stage_cur = 0;
-		stage_max = len * sizeof(interesting_8);
+    /* Let's consult the effector map... */
 
+    if (!eff_map[EFF_APOS(i)]) {
 
+      stage_max -= sizeof(interesting_8);
+      continue;
 
-		stage_val_type = STAGE_VAL_LE;
+    }
 
-		orig_hit_cnt = new_hit_cnt;
+    stage_cur_byte = i;
 
-		/* Setting 8-bit integers. */
+    for (j = 0; j < sizeof(interesting_8); ++j) {
 
-		for (i = 0; i < len; ++i) {
+      /* Skip if the value could be a product of bitflips or arithmetics. */
 
-			u8 orig = out_buf[i];
+      if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
+          could_be_arith(orig, (u8)interesting_8[j], 1)) {
 
-			/* Let's consult the effector map... */
+        --stage_max;
+        continue;
 
-			if (!eff_map[EFF_APOS(i)]) {
-				stage_max -= sizeof(interesting_8);
-				continue;
-			}
+      }
 
-			stage_cur_byte = i;
+      stage_cur_val = interesting_8[j];
+      out_buf[i] = interesting_8[j];
 
-			for (j = 0; j < sizeof(interesting_8); ++j) {
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-				/* Skip if the value could be a product of bitflips or arithmetics. */
+      out_buf[i] = orig;
+      ++stage_cur;
 
-				if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
-					could_be_arith(orig, (u8)interesting_8[j], 1)) {
-					--stage_max;
-					continue;
-				}
+    }
 
-				stage_cur_val = interesting_8[j];
-				out_buf[i] = interesting_8[j];
+  }
 
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-				out_buf[i] = orig;
-				++stage_cur;
+  stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST8] += stage_max;
 
-			}
+  /* Setting 16-bit integers, both endians. */
 
-		}
+  if (no_arith || len < 2) goto skip_interest;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  stage_name = "interest 16/8";
+  stage_short = "int16";
+  stage_cur = 0;
+  stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
 
-		stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST8] += stage_max;
+  orig_hit_cnt = new_hit_cnt;
 
+  for (i = 0; i < len - 1; ++i) {
 
+    u16 orig = *(u16*)(out_buf + i);
 
-		/* Setting 16-bit integers, both endians. */
+    /* Let's consult the effector map... */
 
-		if (no_arith || len < 2) goto skip_interest;
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-		stage_name = "interest 16/8";
-		stage_short = "int16";
-		stage_cur = 0;
-		stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
+      stage_max -= sizeof(interesting_16);
+      continue;
 
+    }
 
-		orig_hit_cnt = new_hit_cnt;
+    stage_cur_byte = i;
 
-		for (i = 0; i < len - 1; ++i) {
+    for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
 
-			u16 orig = *(u16*)(out_buf + i);
+      stage_cur_val = interesting_16[j];
 
-			/* Let's consult the effector map... */
+      /* Skip if this could be a product of a bitflip, arithmetics,
+         or single-byte interesting value insertion. */
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				stage_max -= sizeof(interesting_16);
-				continue;
-			}
+      if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) &&
+          !could_be_arith(orig, (u16)interesting_16[j], 2) &&
+          !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
 
-			stage_cur_byte = i;
+        stage_val_type = STAGE_VAL_LE;
 
-			for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
+        *(u16*)(out_buf + i) = interesting_16[j];
 
-				stage_cur_val = interesting_16[j];
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				/* Skip if this could be a product of a bitflip, arithmetics,
-				   or single-byte interesting value insertion. */
+      } else
 
-				if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) &&
-					!could_be_arith(orig, (u16)interesting_16[j], 2) &&
-					!could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
+        --stage_max;
 
-					stage_val_type = STAGE_VAL_LE;
+      if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
+          !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
+          !could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
+          !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
 
-					*(u16*)(out_buf + i) = interesting_16[j];
+        stage_val_type = STAGE_VAL_BE;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+        *(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				} else --stage_max;
+      } else
 
-				if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
-					!could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
-					!could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
-					!could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
+        --stage_max;
 
-					stage_val_type = STAGE_VAL_BE;
+    }
 
-					*(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+    *(u16*)(out_buf + i) = orig;
 
-				} else --stage_max;
+  }
 
-			}
+  new_hit_cnt = queued_paths + unique_crashes;
 
-			*(u16*)(out_buf + i) = orig;
+  stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST16] += stage_max;
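/* -------------------------------------------------------------------------
   Editorial aside (not part of this diff): the 16-bit interesting-value
   stage above only tries the big-endian variant when byte-swapping actually
   changes the value. Sketch with made-up sample values (not copied from the
   interesting_16[] table); SWAP16 here is a local stand-in macro.
   ------------------------------------------------------------------------- */
#include <stdint.h>
#include <stdio.h>

#define SWAP16(x) ((uint16_t)((((uint16_t)(x)) << 8) | (((uint16_t)(x)) >> 8)))

int main(void) {
  int16_t vals[] = { -1, 256, -129 };
  for (int j = 0; j < 3; ++j) {
    uint16_t v = (uint16_t)vals[j];
    printf("%6d: LE tried, BE %s\n", (int)vals[j],
           v != SWAP16(v) ? "tried as well" : "skipped (palindromic bytes)");
  }
  return 0;
}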
 
-		}
+  if (len < 4) goto skip_interest;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  /* Setting 32-bit integers, both endians. */
 
-		stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST16] += stage_max;
+  stage_name = "interest 32/8";
+  stage_short = "int32";
+  stage_cur = 0;
+  stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
 
+  orig_hit_cnt = new_hit_cnt;
 
+  for (i = 0; i < len - 3; ++i) {
 
+    u32 orig = *(u32*)(out_buf + i);
 
-		if (len < 4) goto skip_interest;
+    /* Let's consult the effector map... */
 
-		/* Setting 32-bit integers, both endians. */
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-		stage_name = "interest 32/8";
-		stage_short = "int32";
-		stage_cur = 0;
-		stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
+      stage_max -= sizeof(interesting_32) >> 1;
+      continue;
 
+    }
 
-		orig_hit_cnt = new_hit_cnt;
+    stage_cur_byte = i;
 
-		for (i = 0; i < len - 3; ++i) {
+    for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
 
-			u32 orig = *(u32*)(out_buf + i);
+      stage_cur_val = interesting_32[j];
 
-			/* Let's consult the effector map... */
+      /* Skip if this could be a product of a bitflip, arithmetics,
+         or word interesting value insertion. */
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				stage_max -= sizeof(interesting_32) >> 1;
-				continue;
-			}
+      if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) &&
+          !could_be_arith(orig, interesting_32[j], 4) &&
+          !could_be_interest(orig, interesting_32[j], 4, 0)) {
 
-			stage_cur_byte = i;
+        stage_val_type = STAGE_VAL_LE;
 
-			for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
+        *(u32*)(out_buf + i) = interesting_32[j];
 
-				stage_cur_val = interesting_32[j];
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				/* Skip if this could be a product of a bitflip, arithmetics,
-				   or word interesting value insertion. */
+      } else
 
-				if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) &&
-					!could_be_arith(orig, interesting_32[j], 4) &&
-					!could_be_interest(orig, interesting_32[j], 4, 0)) {
+        --stage_max;
 
-					stage_val_type = STAGE_VAL_LE;
+      if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
+          !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
+          !could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
+          !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
 
-					*(u32*)(out_buf + i) = interesting_32[j];
+        stage_val_type = STAGE_VAL_BE;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+        *(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				} else --stage_max;
+      } else
 
-				if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
-					!could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
-					!could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
-					!could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
+        --stage_max;
 
-					stage_val_type = STAGE_VAL_BE;
+    }
 
-					*(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+    *(u32*)(out_buf + i) = orig;
 
-				} else --stage_max;
+  }
 
-			}
+  new_hit_cnt = queued_paths + unique_crashes;
 
-			*(u32*)(out_buf + i) = orig;
+  stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST32] += stage_max;
 
-		}
+skip_interest:
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  /********************
+   * DICTIONARY STUFF *
+   ********************/
 
-		stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST32] += stage_max;
+  if (!extras_cnt) goto skip_user_extras;
 
+  /* Overwrite with user-supplied extras. */
 
+  stage_name = "user extras (over)";
+  stage_short = "ext_UO";
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
 
-	skip_interest:
+  stage_val_type = STAGE_VAL_NONE;
 
-		/********************
-		 * DICTIONARY STUFF *
-		 ********************/
+  orig_hit_cnt = new_hit_cnt;
 
-		if (!extras_cnt) goto skip_user_extras;
+  for (i = 0; i < len; ++i) {
 
-		/* Overwrite with user-supplied extras. */
+    u32 last_len = 0;
 
-		stage_name = "user extras (over)";
-		stage_short = "ext_UO";
-		stage_cur = 0;
-		stage_max = extras_cnt * len;
+    stage_cur_byte = i;
 
+    /* Extras are sorted by size, from smallest to largest. This means
+       that we don't have to worry about restoring the buffer in
+       between writes at a particular offset determined by the outer
+       loop. */
 
-		stage_val_type = STAGE_VAL_NONE;
+    for (j = 0; j < extras_cnt; ++j) {
 
-		orig_hit_cnt = new_hit_cnt;
+      /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
+         skip them if there's no room to insert the payload, if the token
+         is redundant, or if its entire span has no bytes set in the effector
+         map. */
 
-		for (i = 0; i < len; ++i) {
+      if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
+          extras[j].len > len - i ||
+          !memcmp(extras[j].data, out_buf + i, extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
 
-			u32 last_len = 0;
+        --stage_max;
+        continue;
 
-			stage_cur_byte = i;
+      }
 
-			/* Extras are sorted by size, from smallest to largest. This means
-			   that we don't have to worry about restoring the buffer in
-			   between writes at a particular offset determined by the outer
-			   loop. */
+      last_len = extras[j].len;
+      memcpy(out_buf + i, extras[j].data, last_len);
 
-			for (j = 0; j < extras_cnt; ++j) {
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-				/* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
-				   skip them if there's no room to insert the payload, if the token
-				   is redundant, or if its entire span has no bytes set in the effector
-				   map. */
+      ++stage_cur;
 
-				if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
-					extras[j].len > len - i ||
-					!memcmp(extras[j].data, out_buf + i, extras[j].len) ||
-					!memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
+    }
 
-					--stage_max;
-					continue;
+    /* Restore all the clobbered memory. */
+    memcpy(out_buf + i, in_buf + i, last_len);
 
-				}
+  }
 
-				last_len = extras[j].len;
-				memcpy(out_buf + i, extras[j].data, last_len);
+  new_hit_cnt = queued_paths + unique_crashes;
 
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+  stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_UO] += stage_max;
 
-				++stage_cur;
+  /* Insertion of user-supplied extras. */
 
-			}
+  stage_name = "user extras (insert)";
+  stage_short = "ext_UI";
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
 
-			/* Restore all the clobbered memory. */
-			memcpy(out_buf + i, in_buf + i, last_len);
+  orig_hit_cnt = new_hit_cnt;
 
-		}
+  ex_tmp = ck_alloc(len + MAX_DICT_FILE);
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  for (i = 0; i <= len; ++i) {
 
-		stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_UO] += stage_max;
+    stage_cur_byte = i;
 
-		/* Insertion of user-supplied extras. */
+    for (j = 0; j < extras_cnt; ++j) {
 
-		stage_name = "user extras (insert)";
-		stage_short = "ext_UI";
-		stage_cur = 0;
-		stage_max = extras_cnt * len;
+      if (len + extras[j].len > MAX_FILE) {
 
+        --stage_max;
+        continue;
 
+      }
 
+      /* Insert token */
+      memcpy(ex_tmp + i, extras[j].data, extras[j].len);
 
-		orig_hit_cnt = new_hit_cnt;
+      /* Copy tail */
+      memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
 
-		ex_tmp = ck_alloc(len + MAX_DICT_FILE);
+      if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
 
-		for (i = 0; i <= len; ++i) {
+        ck_free(ex_tmp);
+        goto abandon_entry;
 
-			stage_cur_byte = i;
+      }
 
-			for (j = 0; j < extras_cnt; ++j) {
+      ++stage_cur;
 
-				if (len + extras[j].len > MAX_FILE) {
-					--stage_max;
-					continue;
-				}
+    }
 
-				/* Insert token */
-				memcpy(ex_tmp + i, extras[j].data, extras[j].len);
+    /* Copy head */
+    ex_tmp[i] = out_buf[i];
 
-				/* Copy tail */
-				memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
+  }
 
-				if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
-					ck_free(ex_tmp);
-					goto abandon_entry;
-				}
+  ck_free(ex_tmp);
 
-				++stage_cur;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-			}
+  stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_UI] += stage_max;
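/* -------------------------------------------------------------------------
   Editorial aside (not part of this diff): the two user-extras stages above
   differ only in whether the token overwrites bytes in place or is inserted,
   shifting the tail. Minimal sketch with an invented buffer and token.
   ------------------------------------------------------------------------- */
#include <stdio.h>
#include <string.h>

int main(void) {
  char buf[] = "HEADERbodybody";        /* 14 bytes + NUL              */
  const char tok[] = "%x%x";            /* 4-byte token (NUL not used) */
  size_t i = 6;                         /* target offset               */

  char over[sizeof(buf)];
  memcpy(over, buf, sizeof(buf));
  memcpy(over + i, tok, 4);             /* overwrite: length unchanged */

  char ins[sizeof(buf) + 4];
  memcpy(ins, buf, i);                  /* head                        */
  memcpy(ins + i, tok, 4);              /* token                       */
  memcpy(ins + i + 4, buf + i, sizeof(buf) - i);  /* tail incl. NUL    */

  printf("overwrite: %s\ninsert:    %s\n", over, ins);
  return 0;
}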
 
-			/* Copy head */
-			ex_tmp[i] = out_buf[i];
+skip_user_extras:
 
-		}
+  if (!a_extras_cnt) goto skip_extras;
 
-		ck_free(ex_tmp);
+  stage_name = "auto extras (over)";
+  stage_short = "ext_AO";
+  stage_cur = 0;
+  stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  stage_val_type = STAGE_VAL_NONE;
 
-		stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_UI] += stage_max;
+  orig_hit_cnt = new_hit_cnt;
 
-	skip_user_extras:
+  for (i = 0; i < len; ++i) {
 
-		if (!a_extras_cnt) goto skip_extras;
+    u32 last_len = 0;
 
-		stage_name = "auto extras (over)";
-		stage_short = "ext_AO";
-		stage_cur = 0;
-		stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
+    stage_cur_byte = i;
 
+    for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
 
-		stage_val_type = STAGE_VAL_NONE;
+      /* See the comment in the earlier code; extras are sorted by size. */
 
-		orig_hit_cnt = new_hit_cnt;
+      if (a_extras[j].len > len - i ||
+          !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1,
+                  EFF_SPAN_ALEN(i, a_extras[j].len))) {
 
-		for (i = 0; i < len; ++i) {
+        --stage_max;
+        continue;
 
-			u32 last_len = 0;
+      }
 
-			stage_cur_byte = i;
+      last_len = a_extras[j].len;
+      memcpy(out_buf + i, a_extras[j].data, last_len);
 
-			for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-				/* See the comment in the earlier code; extras are sorted by size. */
+      ++stage_cur;
 
-				if (a_extras[j].len > len - i ||
-					!memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
-					!memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) {
+    }
 
-					--stage_max;
-					continue;
+    /* Restore all the clobbered memory. */
+    memcpy(out_buf + i, in_buf + i, last_len);
 
-				}
+  }
 
-				last_len = a_extras[j].len;
-				memcpy(out_buf + i, a_extras[j].data, last_len);
+  new_hit_cnt = queued_paths + unique_crashes;
 
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+  stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_AO] += stage_max;
 
-				++stage_cur;
+skip_extras:
 
-			}
+  /* If we made it to here without jumping to havoc_stage or abandon_entry,
+     we're properly done with deterministic steps and can mark it as such
+     in the .state/ directory. */
 
-			/* Restore all the clobbered memory. */
-			memcpy(out_buf + i, in_buf + i, last_len);
+  if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
 
-		}
+  /****************
+   * RANDOM HAVOC *
+   ****************/
 
-		new_hit_cnt = queued_paths + unique_crashes;
+havoc_stage:
+pacemaker_fuzzing:
 
-		stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_AO] += stage_max;
+  stage_cur_byte = -1;
 
-	skip_extras:
+  /* The havoc stage mutation code is also invoked when splicing files; if the
+     splice_cycle variable is set, generate different descriptions and such. */
 
-		/* If we made this to here without jumping to havoc_stage or abandon_entry,
-		   we're properly done with deterministic steps and can mark it as such
-		   in the .state/ directory. */
+  if (!splice_cycle) {
 
-		if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
+    stage_name = "MOpt-havoc";
+    stage_short = "MOpt_havoc";
+    stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
+                havoc_div / 100;
 
-		/****************
-		 * RANDOM HAVOC *
-		 ****************/
+  } else {
 
-	havoc_stage:
-	pacemaker_fuzzing:
+    static u8 tmp[32];
 
+    perf_score = orig_perf;
 
-		stage_cur_byte = -1;
+    sprintf(tmp, "MOpt-core-splice %u", splice_cycle);
+    stage_name = tmp;
+    stage_short = "MOpt_core_splice";
+    stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
 
-		/* The havoc stage mutation code is also invoked when splicing files; if the
-		   splice_cycle variable is set, generate different descriptions and such. */
+  }
 
-		if (!splice_cycle) {
+  s32 temp_len_puppet;
+  cur_ms_lv = get_cur_time();
 
-			stage_name = "MOpt-havoc";
-			stage_short = "MOpt_havoc";
-			stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-				perf_score / havoc_div / 100;
+  // for (; swarm_now < swarm_num; ++swarm_now)
+  {
 
-		} else {
+    if (key_puppet == 1) {
 
-			static u8 tmp[32];
+      if (unlikely(orig_hit_cnt_puppet == 0)) {
 
-			perf_score = orig_perf;
+        orig_hit_cnt_puppet = queued_paths + unique_crashes;
+        last_limit_time_start = get_cur_time();
+        SPLICE_CYCLES_puppet =
+            (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+             SPLICE_CYCLES_puppet_low);
 
-			sprintf(tmp, "MOpt-core-splice %u", splice_cycle);
-			stage_name = tmp;
-			stage_short = "MOpt_core_splice";
-			stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+      }
 
-		}
+    }
 
-		s32 temp_len_puppet;
-		cur_ms_lv = get_cur_time();
+    {
 
-		//for (; swarm_now < swarm_num; ++swarm_now)
-		{
-			if (key_puppet == 1) {
-				if (unlikely(orig_hit_cnt_puppet == 0)) {
-					orig_hit_cnt_puppet = queued_paths + unique_crashes;
-					last_limit_time_start = get_cur_time();
-					SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low);
-				}
-			}
-			{
 #ifndef IGNORE_FINDS
-			havoc_stage_puppet:
+    havoc_stage_puppet:
 #endif
 
-				stage_cur_byte = -1;
-
-				/* The havoc stage mutation code is also invoked when splicing files; if the
-				   splice_cycle variable is set, generate different descriptions and such. */
-
-				if (!splice_cycle) {
-					stage_name = "MOpt core avoc";
-					stage_short = "MOpt_core_havoc";
-					stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-						perf_score / havoc_div / 100;
-				} else {
-					static u8 tmp[32];
-					perf_score = orig_perf;
-					sprintf(tmp, "MOpt core splice %u", splice_cycle);
-					stage_name = tmp;
-					stage_short = "MOpt_core_splice";
-					stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
-				}
-
-				if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
-				temp_len = len;
-				orig_hit_cnt = queued_paths + unique_crashes;
-				havoc_queued = queued_paths;
-
-				for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-					u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
-					stage_cur_val = use_stacking;
-
-					for (i = 0; i < operator_num; ++i) {
-						core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet_v2[i];
-					}
-
-					for (i = 0; i < use_stacking; ++i) {
-
-						switch (select_algorithm()) {
-
-						case 0:
-							/* Flip a single bit somewhere. Spooky! */
-							FLIP_BIT(out_buf, UR(temp_len << 3));
-							core_operator_cycles_puppet_v2[STAGE_FLIP1] += 1;
-							break;
-
-
-						case 1:
-							if (temp_len < 2) break;
-							temp_len_puppet = UR(temp_len << 3);
-							FLIP_BIT(out_buf, temp_len_puppet);
-							FLIP_BIT(out_buf, temp_len_puppet + 1);
-							core_operator_cycles_puppet_v2[STAGE_FLIP2] += 1;
-							break;
-
-						case 2:
-							if (temp_len < 2) break;
-							temp_len_puppet = UR(temp_len << 3);
-							FLIP_BIT(out_buf, temp_len_puppet);
-							FLIP_BIT(out_buf, temp_len_puppet + 1);
-							FLIP_BIT(out_buf, temp_len_puppet + 2);
-							FLIP_BIT(out_buf, temp_len_puppet + 3);
-							core_operator_cycles_puppet_v2[STAGE_FLIP4] += 1;
-							break;
-
-						case 3:
-							if (temp_len < 4) break;
-							out_buf[UR(temp_len)] ^= 0xFF;
-							core_operator_cycles_puppet_v2[STAGE_FLIP8] += 1;
-							break;
-
-						case 4:
-							if (temp_len < 8) break;
-							*(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF;
-							core_operator_cycles_puppet_v2[STAGE_FLIP16] += 1;
-							break;
-
-						case 5:
-							if (temp_len < 8) break;
-							*(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF;
-							core_operator_cycles_puppet_v2[STAGE_FLIP32] += 1;
-							break;
-
-						case 6:
-							out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
-							out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
-							core_operator_cycles_puppet_v2[STAGE_ARITH8] += 1;
-							break;
-
-						case 7:
-							/* Randomly subtract from word, random endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 1);
-								*(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
-							} else {
-								u32 pos = UR(temp_len - 1);
-								u16 num = 1 + UR(ARITH_MAX);
-								*(u16*)(out_buf + pos) =
-									SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
-							}
-							/* Randomly add to word, random endian. */
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 1);
-								*(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
-							} else {
-								u32 pos = UR(temp_len - 1);
-								u16 num = 1 + UR(ARITH_MAX);
-								*(u16*)(out_buf + pos) =
-									SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
-							}
-							core_operator_cycles_puppet_v2[STAGE_ARITH16] += 1;
-							break;
-
-
-						case 8:
-							/* Randomly subtract from dword, random endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 3);
-								*(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
-							} else {
-								u32 pos = UR(temp_len - 3);
-								u32 num = 1 + UR(ARITH_MAX);
-								*(u32*)(out_buf + pos) =
-									SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
-							}
-							/* Randomly add to dword, random endian. */
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 3);
-								*(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
-							} else {
-								u32 pos = UR(temp_len - 3);
-								u32 num = 1 + UR(ARITH_MAX);
-								*(u32*)(out_buf + pos) =
-									SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
-							}
-							core_operator_cycles_puppet_v2[STAGE_ARITH32] += 1;
-							break;
-
-
-						case 9:
-							/* Set byte to interesting value. */
-							if (temp_len < 4) break;
-							out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
-							core_operator_cycles_puppet_v2[STAGE_INTEREST8] += 1;
-							break;
-
-						case 10:
-							/* Set word to interesting value, randomly choosing endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								*(u16*)(out_buf + UR(temp_len - 1)) =
-									interesting_16[UR(sizeof(interesting_16) >> 1)];
-							} else {
-								*(u16*)(out_buf + UR(temp_len - 1)) = SWAP16(
-									interesting_16[UR(sizeof(interesting_16) >> 1)]);
-							}
-							core_operator_cycles_puppet_v2[STAGE_INTEREST16] += 1;
-							break;
-
-
-						case 11:
-							/* Set dword to interesting value, randomly choosing endian. */
-
-							if (temp_len < 8) break;
-
-							if (UR(2)) {
-								*(u32*)(out_buf + UR(temp_len - 3)) =
-									interesting_32[UR(sizeof(interesting_32) >> 2)];
-							} else {
-								*(u32*)(out_buf + UR(temp_len - 3)) = SWAP32(
-									interesting_32[UR(sizeof(interesting_32) >> 2)]);
-							}
-							core_operator_cycles_puppet_v2[STAGE_INTEREST32] += 1;
-							break;
+      stage_cur_byte = -1;
+
+      /* The havoc stage mutation code is also invoked when splicing files; if
+         the splice_cycle variable is set, generate different descriptions and
+         such. */
+
+      if (!splice_cycle) {
+
+        stage_name = "MOpt core avoc";
+        stage_short = "MOpt_core_havoc";
+        stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+                    perf_score / havoc_div / 100;
+
+      } else {
+
+        static u8 tmp[32];
+        perf_score = orig_perf;
+        sprintf(tmp, "MOpt core splice %u", splice_cycle);
+        stage_name = tmp;
+        stage_short = "MOpt_core_splice";
+        stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+
+      }
+
+      if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
+      temp_len = len;
+      orig_hit_cnt = queued_paths + unique_crashes;
+      havoc_queued = queued_paths;
+
+      for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+        u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
+        stage_cur_val = use_stacking;
+
+        for (i = 0; i < operator_num; ++i) {
+
+          core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet_v2[i];
+
+        }
+
+        for (i = 0; i < use_stacking; ++i) {
+
+          switch (select_algorithm()) {
+
+            case 0:
+              /* Flip a single bit somewhere. Spooky! */
+              FLIP_BIT(out_buf, UR(temp_len << 3));
+              core_operator_cycles_puppet_v2[STAGE_FLIP1] += 1;
+              break;
+
+            case 1:
+              if (temp_len < 2) break;
+              temp_len_puppet = UR(temp_len << 3);
+              FLIP_BIT(out_buf, temp_len_puppet);
+              FLIP_BIT(out_buf, temp_len_puppet + 1);
+              core_operator_cycles_puppet_v2[STAGE_FLIP2] += 1;
+              break;
+
+            case 2:
+              if (temp_len < 2) break;
+              temp_len_puppet = UR(temp_len << 3);
+              FLIP_BIT(out_buf, temp_len_puppet);
+              FLIP_BIT(out_buf, temp_len_puppet + 1);
+              FLIP_BIT(out_buf, temp_len_puppet + 2);
+              FLIP_BIT(out_buf, temp_len_puppet + 3);
+              core_operator_cycles_puppet_v2[STAGE_FLIP4] += 1;
+              break;
+
+            case 3:
+              if (temp_len < 4) break;
+              out_buf[UR(temp_len)] ^= 0xFF;
+              core_operator_cycles_puppet_v2[STAGE_FLIP8] += 1;
+              break;
+
+            case 4:
+              if (temp_len < 8) break;
+              *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF;
+              core_operator_cycles_puppet_v2[STAGE_FLIP16] += 1;
+              break;
+
+            case 5:
+              if (temp_len < 8) break;
+              *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF;
+              core_operator_cycles_puppet_v2[STAGE_FLIP32] += 1;
+              break;
+
+            case 6:
+              out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
+              out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
+              core_operator_cycles_puppet_v2[STAGE_ARITH8] += 1;
+              break;
+
+            case 7:
+              /* Randomly subtract from word, random endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
+
+                u32 pos = UR(temp_len - 1);
+                *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+
+              } else {
+
+                u32 pos = UR(temp_len - 1);
+                u16 num = 1 + UR(ARITH_MAX);
+                *(u16*)(out_buf + pos) =
+                    SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
 
+              }
 
-						case 12:
+              /* Randomly add to word, random endian. */
+              if (UR(2)) {
 
-							/* Just set a random byte to a random value. Because,
-							   why not. We use XOR with 1-255 to eliminate the
-							   possibility of a no-op. */
+                u32 pos = UR(temp_len - 1);
+                *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
 
-							out_buf[UR(temp_len)] ^= 1 + UR(255);
-							core_operator_cycles_puppet_v2[STAGE_RANDOMBYTE] += 1;
-							break;
+              } else {
 
+                u32 pos = UR(temp_len - 1);
+                u16 num = 1 + UR(ARITH_MAX);
+                *(u16*)(out_buf + pos) =
+                    SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
 
-						case 13: {
+              }
 
-							/* Delete bytes. We're making this a bit more likely
-							   than insertion (the next option) in hopes of keeping
-							   files reasonably small. */
+              core_operator_cycles_puppet_v2[STAGE_ARITH16] += 1;
+              break;
 
-							u32 del_from, del_len;
+            case 8:
+              /* Randomly subtract from dword, random endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
 
-							if (temp_len < 2) break;
+                u32 pos = UR(temp_len - 3);
+                *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
 
-							/* Don't delete too much. */
+              } else {
 
-							del_len = choose_block_len(temp_len - 1);
+                u32 pos = UR(temp_len - 3);
+                u32 num = 1 + UR(ARITH_MAX);
+                *(u32*)(out_buf + pos) =
+                    SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
 
-							del_from = UR(temp_len - del_len + 1);
+              }
 
-							memmove(out_buf + del_from, out_buf + del_from + del_len,
-								temp_len - del_from - del_len);
+              /* Randomly add to dword, random endian. */
+              if (UR(2)) {
 
-							temp_len -= del_len;
-							core_operator_cycles_puppet_v2[STAGE_DELETEBYTE] += 1;
-							break;
+                u32 pos = UR(temp_len - 3);
+                *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
 
-						}
+              } else {
 
-						case 14:
+                u32 pos = UR(temp_len - 3);
+                u32 num = 1 + UR(ARITH_MAX);
+                *(u32*)(out_buf + pos) =
+                    SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
 
-							if (temp_len + HAVOC_BLK_XL < MAX_FILE) {
+              }
 
-								/* Clone bytes (75%) or insert a block of constant bytes (25%). */
+              core_operator_cycles_puppet_v2[STAGE_ARITH32] += 1;
+              break;
 
-								u8  actually_clone = UR(4);
-								u32 clone_from, clone_to, clone_len;
-								u8* new_buf;
+            case 9:
+              /* Set byte to interesting value. */
+              if (temp_len < 4) break;
+              out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
+              core_operator_cycles_puppet_v2[STAGE_INTEREST8] += 1;
+              break;
 
-								if (actually_clone) {
+            case 10:
+              /* Set word to interesting value, randomly choosing endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
 
-									clone_len = choose_block_len(temp_len);
-									clone_from = UR(temp_len - clone_len + 1);
+                *(u16*)(out_buf + UR(temp_len - 1)) =
+                    interesting_16[UR(sizeof(interesting_16) >> 1)];
 
-								} else {
+              } else {
 
-									clone_len = choose_block_len(HAVOC_BLK_XL);
-									clone_from = 0;
+                *(u16*)(out_buf + UR(temp_len - 1)) =
+                    SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]);
 
-								}
+              }
 
-								clone_to = UR(temp_len);
+              core_operator_cycles_puppet_v2[STAGE_INTEREST16] += 1;
+              break;
 
-								new_buf = ck_alloc_nozero(temp_len + clone_len);
+            case 11:
+              /* Set dword to interesting value, randomly choosing endian. */
 
-								/* Head */
+              if (temp_len < 8) break;
 
-								memcpy(new_buf, out_buf, clone_to);
+              if (UR(2)) {
 
-								/* Inserted part */
+                *(u32*)(out_buf + UR(temp_len - 3)) =
+                    interesting_32[UR(sizeof(interesting_32) >> 2)];
 
-								if (actually_clone)
-									memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
-								else
-									memset(new_buf + clone_to,
-										UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
+              } else {
 
-								/* Tail */
-								memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
-									temp_len - clone_to);
+                *(u32*)(out_buf + UR(temp_len - 3)) =
+                    SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]);
 
-								ck_free(out_buf);
-								out_buf = new_buf;
-								temp_len += clone_len;
-								core_operator_cycles_puppet_v2[STAGE_Clone75] += 1;
-							}
+              }
 
-							break;
+              core_operator_cycles_puppet_v2[STAGE_INTEREST32] += 1;
+              break;
 
-						case 15: {
+            case 12:
 
-							/* Overwrite bytes with a randomly selected chunk (75%) or fixed
-							   bytes (25%). */
+              /* Just set a random byte to a random value. Because,
+                 why not. We use XOR with 1-255 to eliminate the
+                 possibility of a no-op. */
 
-							u32 copy_from, copy_to, copy_len;
+              out_buf[UR(temp_len)] ^= 1 + UR(255);
+              core_operator_cycles_puppet_v2[STAGE_RANDOMBYTE] += 1;
+              break;
 
-							if (temp_len < 2) break;
+            case 13: {
 
-							copy_len = choose_block_len(temp_len - 1);
+              /* Delete bytes. We're making this a bit more likely
+                 than insertion (the next option) in hopes of keeping
+                 files reasonably small. */
 
-							copy_from = UR(temp_len - copy_len + 1);
-							copy_to = UR(temp_len - copy_len + 1);
+              u32 del_from, del_len;
 
-							if (UR(4)) {
+              if (temp_len < 2) break;
 
-								if (copy_from != copy_to)
-									memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
+              /* Don't delete too much. */
 
-							}
-							else memset(out_buf + copy_to,
-								UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
-							core_operator_cycles_puppet_v2[STAGE_OverWrite75] += 1;
-							break;
+              del_len = choose_block_len(temp_len - 1);
 
-						}
+              del_from = UR(temp_len - del_len + 1);
 
+              memmove(out_buf + del_from, out_buf + del_from + del_len,
+                      temp_len - del_from - del_len);
 
-						}
+              temp_len -= del_len;
+              core_operator_cycles_puppet_v2[STAGE_DELETEBYTE] += 1;
+              break;
 
-					}
+            }
+
+            case 14:
+
+              if (temp_len + HAVOC_BLK_XL < MAX_FILE) {
 
-					tmp_core_time += 1;
+                /* Clone bytes (75%) or insert a block of constant bytes (25%).
+                 */
 
-					u64 temp_total_found = queued_paths + unique_crashes;
+                u8  actually_clone = UR(4);
+                u32 clone_from, clone_to, clone_len;
+                u8* new_buf;
 
-					if (common_fuzz_stuff(argv, out_buf, temp_len))
-						goto abandon_entry_puppet;
+                if (actually_clone) {
 
-					/* out_buf might have been mangled a bit, so let's restore it to its
-					   original size and shape. */
+                  clone_len = choose_block_len(temp_len);
+                  clone_from = UR(temp_len - clone_len + 1);
 
-					if (temp_len < len) out_buf = ck_realloc(out_buf, len);
-					temp_len = len;
-					memcpy(out_buf, in_buf, len);
+                } else {
 
-					/* If we're finding new stuff, let's run for a bit longer, limits
-					   permitting. */
+                  clone_len = choose_block_len(HAVOC_BLK_XL);
+                  clone_from = 0;
 
-					if (queued_paths != havoc_queued) {
+                }
 
-						if (perf_score <= havoc_max_mult * 100) {
-							stage_max *= 2;
-							perf_score *= 2;
-						}
+                clone_to = UR(temp_len);
 
-						havoc_queued = queued_paths;
+                new_buf = ck_alloc_nozero(temp_len + clone_len);
 
-					}
+                /* Head */
 
-					if (unlikely(queued_paths + unique_crashes > temp_total_found))
-					{
-						u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found;
-						total_puppet_find = total_puppet_find + temp_temp_puppet;
-						for (i = 0; i < 16; ++i)
-						{
-							if (core_operator_cycles_puppet_v2[i] > core_operator_cycles_puppet_v3[i])
-								core_operator_finds_puppet_v2[i] += temp_temp_puppet;
-						}
-					}
+                memcpy(new_buf, out_buf, clone_to);
 
-				}
+                /* Inserted part */
 
-				new_hit_cnt = queued_paths + unique_crashes;
+                if (actually_clone)
+                  memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
+                else
+                  memset(new_buf + clone_to,
+                         UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
 
+                /* Tail */
+                memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
+                       temp_len - clone_to);
+
+                ck_free(out_buf);
+                out_buf = new_buf;
+                temp_len += clone_len;
+                core_operator_cycles_puppet_v2[STAGE_Clone75] += 1;
+
+              }
+
+              break;
+
+            case 15: {
+
+              /* Overwrite bytes with a randomly selected chunk (75%) or fixed
+                 bytes (25%). */
+
+              u32 copy_from, copy_to, copy_len;
+
+              if (temp_len < 2) break;
+
+              copy_len = choose_block_len(temp_len - 1);
+
+              copy_from = UR(temp_len - copy_len + 1);
+              copy_to = UR(temp_len - copy_len + 1);
+
+              if (UR(4)) {
+
+                if (copy_from != copy_to)
+                  memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
+
+              } else
+
+                memset(out_buf + copy_to,
+                       UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
+              core_operator_cycles_puppet_v2[STAGE_OverWrite75] += 1;
+              break;
+
+            }
+
+          }
+
+        }
+
+        tmp_core_time += 1;
+
+        u64 temp_total_found = queued_paths + unique_crashes;
+
+        if (common_fuzz_stuff(argv, out_buf, temp_len))
+          goto abandon_entry_puppet;
+
+        /* out_buf might have been mangled a bit, so let's restore it to its
+           original size and shape. */
+
+        if (temp_len < len) out_buf = ck_realloc(out_buf, len);
+        temp_len = len;
+        memcpy(out_buf, in_buf, len);
+
+        /* If we're finding new stuff, let's run for a bit longer, limits
+           permitting. */
+
+        if (queued_paths != havoc_queued) {
+
+          if (perf_score <= havoc_max_mult * 100) {
+
+            stage_max *= 2;
+            perf_score *= 2;
+
+          }
+
+          havoc_queued = queued_paths;
+
+        }
+
+        if (unlikely(queued_paths + unique_crashes > temp_total_found)) {
+
+          u64 temp_temp_puppet =
+              queued_paths + unique_crashes - temp_total_found;
+          total_puppet_find = total_puppet_find + temp_temp_puppet;
+          for (i = 0; i < 16; ++i) {
+
+            if (core_operator_cycles_puppet_v2[i] >
+                core_operator_cycles_puppet_v3[i])
+              core_operator_finds_puppet_v2[i] += temp_temp_puppet;
+
+          }
+
+        }
+
+      }
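/* -------------------------------------------------------------------------
   Editorial aside (not part of this diff): each iteration of the havoc loop
   above stacks a power-of-two number of mutations. Assuming HAVOC_STACK_POW2
   is 7 (the stock afl default), use_stacking is drawn from {2, 4, ..., 128};
   rand() below is only a stand-in for the fuzzer's UR() helper.
   ------------------------------------------------------------------------- */
#include <stdio.h>
#include <stdlib.h>

#define HAVOC_STACK_POW2 7   /* assumption for illustration */

int main(void) {
  srand(1);
  for (int n = 0; n < 5; ++n) {
    unsigned use_stacking = 1u << (1 + rand() % HAVOC_STACK_POW2);
    printf("iteration %d stacks %u mutations\n", n, use_stacking);
  }
  return 0;
}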
+
+      new_hit_cnt = queued_paths + unique_crashes;
 
 #ifndef IGNORE_FINDS
 
-				/************
-				 * SPLICING *
-				 ************/
+      /************
+       * SPLICING *
+       ************/
+
+    retry_splicing_puppet:
 
+      if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet &&
+          queued_paths > 1 && queue_cur->len > 1) {
 
-			retry_splicing_puppet:
+        struct queue_entry* target;
+        u32                 tid, split_at;
+        u8*                 new_buf;
+        s32                 f_diff, l_diff;
 
+        /* First of all, if we've modified in_buf for havoc, let's clean that
+           up... */
 
+        if (in_buf != orig_in) {
+
+          ck_free(in_buf);
+          in_buf = orig_in;
+          len = queue_cur->len;
+
+        }
 
-				if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet &&
-					queued_paths > 1 && queue_cur->len > 1) {
+        /* Pick a random queue entry and seek to it. Don't splice with yourself.
+         */
 
-					struct queue_entry* target;
-					u32 tid, split_at;
-					u8* new_buf;
-					s32 f_diff, l_diff;
+        do {
 
-					/* First of all, if we've modified in_buf for havoc, let's clean that
-					   up... */
+          tid = UR(queued_paths);
+
+        } while (tid == current_entry);
+
+        splicing_with = tid;
+        target = queue;
+
+        while (tid >= 100) {
+
+          target = target->next_100;
+          tid -= 100;
+
+        }
 
-					if (in_buf != orig_in) {
-						ck_free(in_buf);
-						in_buf = orig_in;
-						len = queue_cur->len;
-					}
+        while (tid--)
+          target = target->next;
 
-					/* Pick a random queue entry and seek to it. Don't splice with yourself. */
+        /* Make sure that the target has a reasonable length. */
 
-					do { tid = UR(queued_paths); } while (tid == current_entry);
+        while (target && (target->len < 2 || target == queue_cur)) {
 
-					splicing_with = tid;
-					target = queue;
+          target = target->next;
+          ++splicing_with;
 
-					while (tid >= 100) { target = target->next_100; tid -= 100; }
-					while (tid--) target = target->next;
+        }
 
-					/* Make sure that the target has a reasonable length. */
+        if (!target) goto retry_splicing_puppet;
 
-					while (target && (target->len < 2 || target == queue_cur)) {
-						target = target->next;
-						++splicing_with;
-					}
+        /* Read the testcase into a new buffer. */
 
-					if (!target) goto retry_splicing_puppet;
+        fd = open(target->fname, O_RDONLY);
 
-					/* Read the testcase into a new buffer. */
+        if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
 
-					fd = open(target->fname, O_RDONLY);
+        new_buf = ck_alloc_nozero(target->len);
 
-					if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
+        ck_read(fd, new_buf, target->len, target->fname);
 
-					new_buf = ck_alloc_nozero(target->len);
+        close(fd);
 
-					ck_read(fd, new_buf, target->len, target->fname);
+        /* Find a suitable splicing location, somewhere between the first and
+           the last differing byte. Bail out if the difference is just a single
+           byte or so. */
 
-					close(fd);
+        locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
 
-					/* Find a suitable splicing location, somewhere between the first and
-					   the last differing byte. Bail out if the difference is just a single
-					   byte or so. */
+        if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
 
-					locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
+          ck_free(new_buf);
+          goto retry_splicing_puppet;
 
-					if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
-						ck_free(new_buf);
-						goto retry_splicing_puppet;
-					}
+        }
 
-					/* Split somewhere between the first and last differing byte. */
+        /* Split somewhere between the first and last differing byte. */
 
-					split_at = f_diff + UR(l_diff - f_diff);
+        split_at = f_diff + UR(l_diff - f_diff);
 
-					/* Do the thing. */
+        /* Do the thing. */
 
-					len = target->len;
-					memcpy(new_buf, in_buf, split_at);
-					in_buf = new_buf;
-					ck_free(out_buf);
-					out_buf = ck_alloc_nozero(len);
-					memcpy(out_buf, in_buf, len);
+        len = target->len;
+        memcpy(new_buf, in_buf, split_at);
+        in_buf = new_buf;
+        ck_free(out_buf);
+        out_buf = ck_alloc_nozero(len);
+        memcpy(out_buf, in_buf, len);
 
-					goto havoc_stage_puppet;
+        goto havoc_stage_puppet;
 
-				}
+      }
 
 #endif /* !IGNORE_FINDS */
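/* -------------------------------------------------------------------------
   Editorial aside (not part of this diff): splicing combines two parents at
   a point chosen between their first and last differing bytes, as the block
   above does via locate_diffs(). The strings, the midpoint split (instead of
   UR()) and the mismatch scan below are simplified stand-ins.
   ------------------------------------------------------------------------- */
#include <stdio.h>
#include <string.h>

int main(void) {
  char a[] = "GIF89a....AAAA";
  char b[] = "GIF89a....BBBB";
  size_t n = strlen(a), f_diff = 0, l_diff = n;

  while (f_diff < n && a[f_diff] == b[f_diff]) ++f_diff;              /* first mismatch   */
  while (l_diff > f_diff && a[l_diff - 1] == b[l_diff - 1]) --l_diff; /* one past last    */

  size_t split_at = f_diff + (l_diff - f_diff) / 2;
  char child[sizeof(b)];
  memcpy(child, a, split_at);                               /* head from parent a */
  memcpy(child + split_at, b + split_at, n - split_at + 1); /* tail + NUL         */
  printf("split at %zu -> %s\n", split_at, child);
  return 0;
}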
 
-				ret_val = 0;
-			abandon_entry:
-			abandon_entry_puppet:
+      ret_val = 0;
+    abandon_entry:
+    abandon_entry_puppet:
 
-				if (splice_cycle >= SPLICE_CYCLES_puppet)
-					SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low);
+      if (splice_cycle >= SPLICE_CYCLES_puppet)
+        SPLICE_CYCLES_puppet =
+            (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+             SPLICE_CYCLES_puppet_low);
 
+      splicing_with = -1;
 
-				splicing_with = -1;
+      munmap(orig_in, queue_cur->len);
 
+      if (in_buf != orig_in) ck_free(in_buf);
+      ck_free(out_buf);
+      ck_free(eff_map);
 
-				munmap(orig_in, queue_cur->len);
+      if (key_puppet == 1) {
 
-				if (in_buf != orig_in) ck_free(in_buf);
-				ck_free(out_buf);
-				ck_free(eff_map);
+        if (unlikely(queued_paths + unique_crashes >
+                     ((queued_paths + unique_crashes) * limit_time_bound +
+                      orig_hit_cnt_puppet))) {
 
+          key_puppet = 0;
+          cur_ms_lv = get_cur_time();
+          new_hit_cnt = queued_paths + unique_crashes;
+          orig_hit_cnt_puppet = 0;
+          last_limit_time_start = 0;
 
-				if (key_puppet == 1)
-				{
-					if (unlikely(queued_paths + unique_crashes > ((queued_paths + unique_crashes)*limit_time_bound + orig_hit_cnt_puppet)))
-					{
-						key_puppet = 0;
-						cur_ms_lv = get_cur_time();
-						new_hit_cnt = queued_paths + unique_crashes;
-						orig_hit_cnt_puppet = 0;
-						last_limit_time_start = 0;
-					}
-				}
+        }
 
+      }
+
+      if (unlikely(tmp_core_time > period_core)) {
 
-				if (unlikely(tmp_core_time > period_core))
-				{
-					total_pacemaker_time += tmp_core_time;
-					tmp_core_time = 0;
-					temp_puppet_find = total_puppet_find;
-					new_hit_cnt = queued_paths + unique_crashes;
+        total_pacemaker_time += tmp_core_time;
+        tmp_core_time = 0;
+        temp_puppet_find = total_puppet_find;
+        new_hit_cnt = queued_paths + unique_crashes;
 
-					u64 temp_stage_finds_puppet = 0;
-					for (i = 0; i < operator_num; ++i)
-					{
+        u64 temp_stage_finds_puppet = 0;
+        for (i = 0; i < operator_num; ++i) {
 
-						core_operator_finds_puppet[i] = core_operator_finds_puppet_v2[i];
-						core_operator_cycles_puppet[i] = core_operator_cycles_puppet_v2[i];
-						temp_stage_finds_puppet += core_operator_finds_puppet[i];
-					}
+          core_operator_finds_puppet[i] = core_operator_finds_puppet_v2[i];
+          core_operator_cycles_puppet[i] = core_operator_cycles_puppet_v2[i];
+          temp_stage_finds_puppet += core_operator_finds_puppet[i];
 
-					key_module = 2;
+        }
 
-					old_hit_count = new_hit_cnt;
-				}
-				return ret_val;
-			}
-		}
+        key_module = 2;
+
+        old_hit_count = new_hit_cnt;
+
+      }
+
+      return ret_val;
+
+    }
 
+  }
 
 #undef FLIP_BIT
 
 }
 
-
 void pso_updating(void) {
 
-	g_now += 1;
-	if (g_now > g_max) g_now = 0;
-	w_now = (w_init - w_end)*(g_max - g_now) / (g_max)+w_end;
-	int tmp_swarm, i, j;
-	u64 temp_operator_finds_puppet = 0;
-	for (i = 0; i < operator_num; ++i)
-	{
-		operator_finds_puppet[i] = core_operator_finds_puppet[i];
-
-		for (j = 0; j < swarm_num; ++j)
-		{
-			operator_finds_puppet[i] = operator_finds_puppet[i] + stage_finds_puppet[j][i];
-		}
-		temp_operator_finds_puppet = temp_operator_finds_puppet + operator_finds_puppet[i];
-	}
-
-	for (i = 0; i < operator_num; ++i)
-	{
-		if (operator_finds_puppet[i])
-			G_best[i] = (double)((double)(operator_finds_puppet[i]) / (double)(temp_operator_finds_puppet));
-	}
-
-	for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm)
-	{
-		double x_temp = 0.0;
-		for (i = 0; i < operator_num; ++i)
-		{
-			probability_now[tmp_swarm][i] = 0.0;
-			v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
-			x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
-			if (x_now[tmp_swarm][i] > v_max)
-				x_now[tmp_swarm][i] = v_max;
-			else if (x_now[tmp_swarm][i] < v_min)
-				x_now[tmp_swarm][i] = v_min;
-			x_temp += x_now[tmp_swarm][i];
-		}
-
-		for (i = 0; i < operator_num; ++i)
-		{
-			x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
-			if (likely(i != 0))
-				probability_now[tmp_swarm][i] = probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
-			else
-				probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
-		}
-		if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || probability_now[tmp_swarm][operator_num - 1] > 1.01) FATAL("ERROR probability");
-	}
-	swarm_now = 0;
-	key_module = 0;
-}
+  g_now += 1;
+  if (g_now > g_max) g_now = 0;
+  w_now = (w_init - w_end) * (g_max - g_now) / (g_max) + w_end;
+  int tmp_swarm, i, j;
+  u64 temp_operator_finds_puppet = 0;
+  for (i = 0; i < operator_num; ++i) {
+
+    operator_finds_puppet[i] = core_operator_finds_puppet[i];
+
+    for (j = 0; j < swarm_num; ++j) {
+
+      operator_finds_puppet[i] =
+          operator_finds_puppet[i] + stage_finds_puppet[j][i];
+
+    }
+
+    temp_operator_finds_puppet =
+        temp_operator_finds_puppet + operator_finds_puppet[i];
+
+  }
+
+  for (i = 0; i < operator_num; ++i) {
+
+    if (operator_finds_puppet[i])
+      G_best[i] = (double)((double)(operator_finds_puppet[i]) /
+                           (double)(temp_operator_finds_puppet));
 
+  }
+
+  for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) {
+
+    double x_temp = 0.0;
+    for (i = 0; i < operator_num; ++i) {
+
+      probability_now[tmp_swarm][i] = 0.0;
+      v_now[tmp_swarm][i] =
+          w_now * v_now[tmp_swarm][i] +
+          RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) +
+          RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
+      x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
+      if (x_now[tmp_swarm][i] > v_max)
+        x_now[tmp_swarm][i] = v_max;
+      else if (x_now[tmp_swarm][i] < v_min)
+        x_now[tmp_swarm][i] = v_min;
+      x_temp += x_now[tmp_swarm][i];
+
+    }
+
+    for (i = 0; i < operator_num; ++i) {
+
+      x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
+      if (likely(i != 0))
+        probability_now[tmp_swarm][i] =
+            probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
+      else
+        probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
+
+    }
+
+    if (probability_now[tmp_swarm][operator_num - 1] < 0.99 ||
+        probability_now[tmp_swarm][operator_num - 1] > 1.01)
+      FATAL("ERROR probability");
+
+  }
+
+  swarm_now = 0;
+  key_module = 0;
+
+}
 
 /* A larger change for the MOpt implementation: the original fuzz_one was
    renamed to fuzz_one_original, so all documentation references to fuzz_one
    mean fuzz_one_original. */
 
 u8 fuzz_one(char** argv) {
-	int key_val_lv = 0;
-	if (limit_time_sig == 0) {
-		key_val_lv = fuzz_one_original(argv);
-	} else {
-		if (key_module == 0)
-			key_val_lv = pilot_fuzzing(argv);
-		else if (key_module == 1)
-			key_val_lv = core_fuzzing(argv);
-		else if (key_module == 2)
-			pso_updating();
-	}
-
-	return key_val_lv;
+
+  int key_val_lv = 0;
+  if (limit_time_sig == 0) {
+
+    key_val_lv = fuzz_one_original(argv);
+
+  } else {
+
+    if (key_module == 0)
+      key_val_lv = pilot_fuzzing(argv);
+    else if (key_module == 1)
+      key_val_lv = core_fuzzing(argv);
+    else if (key_module == 2)
+      pso_updating();
+
+  }
+
+  return key_val_lv;
+
 }
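
For readers skimming the reformatted hunks above: this commit only reflows the
code, the MOpt logic is unchanged. fuzz_one() dispatches on limit_time_sig and
key_module (0 = pilot stage, 1 = core stage, 2 = PSO probability update), and
pso_updating() rebuilds the cumulative operator-probability vector
(probability_now[][]) that mutation-operator selection later samples from. The
sketch below is illustrative only and not part of the diff: it restates those
two ideas in a minimal, self-contained form. N_OPS, W, C, X_MIN, X_MAX and the
frand()/rand() helper are assumptions standing in for operator_num, w_now,
RAND_C, v_min, v_max and UR(); no per-swarm or per-operator bookkeeping from
the real code is modelled.

/* Illustrative sketch only -- simplified, single-swarm analogue of the
   PSO update and operator selection shown in the diff above. */

#include <stdio.h>
#include <stdlib.h>

#define N_OPS 6    /* assumed operator count (stands in for operator_num) */
#define W 0.5      /* assumed inertia weight (stands in for w_now)        */
#define C 2.0      /* assumed acceleration coefficient (RAND_C analogue)  */
#define X_MIN 0.05 /* assumed position clamp, low (v_min analogue)        */
#define X_MAX 1.0  /* assumed position clamp, high (v_max analogue)       */

static double frand(void) {

  return (double)rand() / RAND_MAX;   /* uniform [0,1], UR() stand-in */

}

/* One PSO step: move each position toward its local and global best,
   clamp it, then normalise the positions into a cumulative probability
   vector whose last entry is ~1.0 (what probability_now[][] holds). */
static void pso_update_sketch(double x[N_OPS], double v[N_OPS],
                              const double l_best[N_OPS],
                              const double g_best[N_OPS],
                              double cum_prob[N_OPS]) {

  double sum = 0.0;

  for (int i = 0; i < N_OPS; ++i) {

    v[i] = W * v[i] + C * frand() * (l_best[i] - x[i]) +
           C * frand() * (g_best[i] - x[i]);
    x[i] += v[i];
    if (x[i] > X_MAX) x[i] = X_MAX;
    if (x[i] < X_MIN) x[i] = X_MIN;
    sum += x[i];

  }

  for (int i = 0; i < N_OPS; ++i) {

    x[i] /= sum;                                      /* normalise     */
    cum_prob[i] = (i ? cum_prob[i - 1] : 0.0) + x[i]; /* running total */

  }

}

/* Draw an operator index: first cumulative bucket the sample falls under. */
static int pick_operator_sketch(const double cum_prob[N_OPS]) {

  double sele = frand();
  for (int i = 0; i < N_OPS; ++i)
    if (sele < cum_prob[i]) return i;
  return N_OPS - 1;

}

int main(void) {

  double x[N_OPS], v[N_OPS], l_best[N_OPS], g_best[N_OPS], cum[N_OPS];

  for (int i = 0; i < N_OPS; ++i) {

    x[i] = frand();
    v[i] = 0.0;
    l_best[i] = frand();
    g_best[i] = frand();

  }

  pso_update_sketch(x, v, l_best, g_best, cum);
  printf("picked operator %d (last bucket = %.3f)\n",
         pick_operator_sketch(cum), cum[N_OPS - 1]);
  return 0;

}

In the real code the same cumulative-bucket walk happens once per mutation
round, and the per-swarm/per-operator hit counters updated in the hunks above
feed back into L_best/G_best before the next pso_updating() call.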