about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/afl-analyze.c364
-rw-r--r--src/afl-as.c164
-rw-r--r--src/afl-common.c30
-rw-r--r--src/afl-forkserver.c133
-rw-r--r--src/afl-fuzz-bitmap.c153
-rw-r--r--src/afl-fuzz-extras.c129
-rw-r--r--src/afl-fuzz-globals.c376
-rw-r--r--src/afl-fuzz-init.c643
-rw-r--r--src/afl-fuzz-misc.c27
-rw-r--r--src/afl-fuzz-one.c5533
-rw-r--r--src/afl-fuzz-python.c117
-rw-r--r--src/afl-fuzz-queue.c198
-rw-r--r--src/afl-fuzz-run.c178
-rw-r--r--src/afl-fuzz-stats.c448
-rw-r--r--src/afl-fuzz.c609
-rw-r--r--src/afl-gcc.c128
-rw-r--r--src/afl-gotcpu.c23
-rw-r--r--src/afl-sharedmem.c42
-rw-r--r--src/afl-showmap.c263
-rw-r--r--src/afl-tmin.c360
20 files changed, 5378 insertions, 4540 deletions
diff --git a/src/afl-analyze.c b/src/afl-analyze.c
index 5bb96154..e3014256 100644
--- a/src/afl-analyze.c
+++ b/src/afl-analyze.c
@@ -22,7 +22,7 @@
 #define AFL_MAIN
 
 #ifdef __ANDROID__
-  #include "android-ashmem.h"
+#  include "android-ashmem.h"
 #endif
 #include "config.h"
 #include "types.h"
@@ -50,61 +50,59 @@
 #include <sys/types.h>
 #include <sys/resource.h>
 
-static s32 child_pid;                 /* PID of the tested program         */
+static s32 child_pid;                  /* PID of the tested program         */
 
-       u8* trace_bits;                /* SHM with instrumentation bitmap   */
+u8* trace_bits;                        /* SHM with instrumentation bitmap   */
 
-static u8 *in_file,                   /* Analyzer input test case          */
-          *prog_in,                   /* Targeted program input file       */
-          *target_path,               /* Path to target binary             */
-          *doc_path;                  /* Path to docs                      */
+static u8 *in_file,                    /* Analyzer input test case          */
+    *prog_in,                          /* Targeted program input file       */
+    *target_path,                      /* Path to target binary             */
+    *doc_path;                         /* Path to docs                      */
 
-static u8 *in_data;                   /* Input data for analysis           */
+static u8* in_data;                    /* Input data for analysis           */
 
-static u32 in_len,                    /* Input data length                 */
-           orig_cksum,                /* Original checksum                 */
-           total_execs,               /* Total number of execs             */
-           exec_hangs,                /* Total number of hangs             */
-           exec_tmout = EXEC_TIMEOUT; /* Exec timeout (ms)                 */
+static u32 in_len,                     /* Input data length                 */
+    orig_cksum,                        /* Original checksum                 */
+    total_execs,                       /* Total number of execs             */
+    exec_hangs,                        /* Total number of hangs             */
+    exec_tmout = EXEC_TIMEOUT;         /* Exec timeout (ms)                 */
 
-static u64 mem_limit = MEM_LIMIT;     /* Memory limit (MB)                 */
+static u64 mem_limit = MEM_LIMIT;      /* Memory limit (MB)                 */
 
-static s32 dev_null_fd = -1;          /* FD to /dev/null                   */
+static s32 dev_null_fd = -1;           /* FD to /dev/null                   */
 
-static u8  edges_only,                /* Ignore hit counts?                */
-           use_hex_offsets,           /* Show hex offsets?                 */
-           use_stdin = 1;             /* Use stdin for program input?      */
-
-static volatile u8
-           stop_soon,                 /* Ctrl-C pressed?                   */
-           child_timed_out;           /* Child timed out?                  */
+static u8 edges_only,                  /* Ignore hit counts?                */
+    use_hex_offsets,                   /* Show hex offsets?                 */
+    use_stdin = 1;                     /* Use stdin for program input?      */
 
+static volatile u8 stop_soon,          /* Ctrl-C pressed?                   */
+    child_timed_out;                   /* Child timed out?                  */
 
 /* Constants used for describing byte behavior. */
 
-#define RESP_NONE       0x00          /* Changing byte is a no-op.         */
-#define RESP_MINOR      0x01          /* Some changes have no effect.      */
-#define RESP_VARIABLE   0x02          /* Changes produce variable paths.   */
-#define RESP_FIXED      0x03          /* Changes produce fixed patterns.   */
-
-#define RESP_LEN        0x04          /* Potential length field            */
-#define RESP_CKSUM      0x05          /* Potential checksum                */
-#define RESP_SUSPECT    0x06          /* Potential "suspect" blob          */
+#define RESP_NONE 0x00     /* Changing byte is a no-op.         */
+#define RESP_MINOR 0x01    /* Some changes have no effect.      */
+#define RESP_VARIABLE 0x02 /* Changes produce variable paths.   */
+#define RESP_FIXED 0x03    /* Changes produce fixed patterns.   */
 
+#define RESP_LEN 0x04     /* Potential length field            */
+#define RESP_CKSUM 0x05   /* Potential checksum                */
+#define RESP_SUSPECT 0x06 /* Potential "suspect" blob          */
 
-/* Classify tuple counts. This is a slow & naive version, but good enough here. */
+/* Classify tuple counts. This is a slow & naive version, but good enough here.
+ */
 
 static u8 count_class_lookup[256] = {
 
-  [0]           = 0,
-  [1]           = 1,
-  [2]           = 2,
-  [3]           = 4,
-  [4 ... 7]     = 8,
-  [8 ... 15]    = 16,
-  [16 ... 31]   = 32,
-  [32 ... 127]  = 64,
-  [128 ... 255] = 128
+    [0] = 0,
+    [1] = 1,
+    [2] = 2,
+    [3] = 4,
+    [4 ... 7] = 8,
+    [8 ... 15] = 16,
+    [16 ... 31] = 32,
+    [32 ... 127] = 64,
+    [128 ... 255] = 128
 
 };
 
@@ -115,61 +113,62 @@ static void classify_counts(u8* mem) {
   if (edges_only) {
 
     while (i--) {
+
       if (*mem) *mem = 1;
       mem++;
+
     }
 
   } else {
 
     while (i--) {
+
       *mem = count_class_lookup[*mem];
       mem++;
+
     }
 
   }
 
 }
 
-
 /* See if any bytes are set in the bitmap. */
 
 static inline u8 anything_set(void) {
 
   u32* ptr = (u32*)trace_bits;
-  u32  i   = (MAP_SIZE >> 2);
+  u32  i = (MAP_SIZE >> 2);
 
-  while (i--) if (*(ptr++)) return 1;
+  while (i--)
+    if (*(ptr++)) return 1;
 
   return 0;
 
 }
 
-
 /* Get rid of temp files (atexit handler). */
 
 static void at_exit_handler(void) {
 
-  unlink(prog_in); /* Ignore errors */
+  unlink(prog_in);                                         /* Ignore errors */
 
 }
 
-
 /* Read initial file. */
 
 static void read_initial_file(void) {
 
   struct stat st;
-  s32 fd = open(in_file, O_RDONLY);
+  s32         fd = open(in_file, O_RDONLY);
 
   if (fd < 0) PFATAL("Unable to open '%s'", in_file);
 
-  if (fstat(fd, &st) || !st.st_size)
-    FATAL("Zero-sized input file.");
+  if (fstat(fd, &st) || !st.st_size) FATAL("Zero-sized input file.");
 
   if (st.st_size >= TMIN_MAX_FILE)
     FATAL("Input file is too large (%u MB max)", TMIN_MAX_FILE / 1024 / 1024);
 
-  in_len  = st.st_size;
+  in_len = st.st_size;
   in_data = ck_alloc_nozero(in_len);
 
   ck_read(fd, in_data, in_len, in_file);
@@ -180,14 +179,13 @@ static void read_initial_file(void) {
 
 }
 
-
 /* Write output file. */
 
 static s32 write_to_file(u8* path, u8* mem, u32 len) {
 
   s32 ret;
 
-  unlink(path); /* Ignore errors */
+  unlink(path);                                            /* Ignore errors */
 
   ret = open(path, O_RDWR | O_CREAT | O_EXCL, 0600);
 
@@ -201,7 +199,6 @@ static s32 write_to_file(u8* path, u8* mem, u32 len) {
 
 }
 
-
 /* Handle timeout signal. */
 
 static void handle_timeout(int sig) {
@@ -211,14 +208,13 @@ static void handle_timeout(int sig) {
 
 }
 
-
 /* Execute target application. Returns exec checksum, or 0 if program
    times out. */
 
 static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
 
   static struct itimerval it;
-  int status = 0;
+  int                     status = 0;
 
   s32 prog_in_fd;
   u32 cksum;
@@ -237,8 +233,7 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
     struct rlimit r;
 
     if (dup2(use_stdin ? prog_in_fd : dev_null_fd, 0) < 0 ||
-        dup2(dev_null_fd, 1) < 0 ||
-        dup2(dev_null_fd, 2) < 0) {
+        dup2(dev_null_fd, 1) < 0 || dup2(dev_null_fd, 2) < 0) {
 
       *(u32*)trace_bits = EXEC_FAIL_SIG;
       PFATAL("dup2() failed");
@@ -254,18 +249,18 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
 
 #ifdef RLIMIT_AS
 
-      setrlimit(RLIMIT_AS, &r); /* Ignore errors */
+      setrlimit(RLIMIT_AS, &r);                            /* Ignore errors */
 
 #else
 
-      setrlimit(RLIMIT_DATA, &r); /* Ignore errors */
+      setrlimit(RLIMIT_DATA, &r);                          /* Ignore errors */
 
 #endif /* ^RLIMIT_AS */
 
     }
 
     r.rlim_max = r.rlim_cur = 0;
-    setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
+    setrlimit(RLIMIT_CORE, &r);                            /* Ignore errors */
 
     execv(target_path, argv);
 
@@ -303,8 +298,10 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
   total_execs++;
 
   if (stop_soon) {
+
     SAYF(cRST cLRD "\n+++ Analysis aborted by user +++\n" cRST);
     exit(1);
+
   }
 
   /* Always discard inputs that time out. */
@@ -335,7 +332,6 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
 
 }
 
-
 #ifdef USE_COLOR
 
 /* Helper function to display a human-readable character. */
@@ -353,24 +349,25 @@ static void show_char(u8 val) {
 
 }
 
-
 /* Show the legend */
 
 static void show_legend(void) {
 
-  SAYF("    " cLGR bgGRA " 01 " cRST " - no-op block              "
-              cBLK bgLGN " 01 " cRST " - suspected length field\n"
-       "    " cBRI bgGRA " 01 " cRST " - superficial content      "
-              cBLK bgYEL " 01 " cRST " - suspected cksum or magic int\n"
-       "    " cBLK bgCYA " 01 " cRST " - critical stream          "
-              cBLK bgLRD " 01 " cRST " - suspected checksummed block\n"
+  SAYF("    " cLGR bgGRA " 01 " cRST " - no-op block              " cBLK bgLGN
+       " 01 " cRST
+       " - suspected length field\n"
+       "    " cBRI bgGRA " 01 " cRST " - superficial content      " cBLK bgYEL
+       " 01 " cRST
+       " - suspected cksum or magic int\n"
+       "    " cBLK bgCYA " 01 " cRST " - critical stream          " cBLK bgLRD
+       " 01 " cRST
+       " - suspected checksummed block\n"
        "    " cBLK bgMGN " 01 " cRST " - \"magic value\" section\n\n");
 
 }
 
 #endif /* USE_COLOR */
 
-
 /* Interpret and report a pattern in the input file. */
 
 static void dump_hex(u8* buf, u32 len, u8* b_data) {
@@ -385,7 +382,7 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) {
     u32 rlen = 1;
 #endif /* ^USE_COLOR */
 
-    u8  rtype = b_data[i] & 0x0f;
+    u8 rtype = b_data[i] & 0x0f;
 
     /* Look ahead to determine the length of run. */
 
@@ -404,51 +401,61 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) {
 
         case 2: {
 
-            u16 val = *(u16*)(in_data + i);
+          u16 val = *(u16*)(in_data + i);
+
+          /* Small integers may be length fields. */
 
-            /* Small integers may be length fields. */
+          if (val && (val <= in_len || SWAP16(val) <= in_len)) {
 
-            if (val && (val <= in_len || SWAP16(val) <= in_len)) {
-              rtype = RESP_LEN;
-              break;
-            }
+            rtype = RESP_LEN;
+            break;
+
+          }
 
-            /* Uniform integers may be checksums. */
+          /* Uniform integers may be checksums. */
 
-            if (val && abs(in_data[i] - in_data[i + 1]) > 32) {
-              rtype = RESP_CKSUM;
-              break;
-            }
+          if (val && abs(in_data[i] - in_data[i + 1]) > 32) {
 
+            rtype = RESP_CKSUM;
             break;
 
           }
 
+          break;
+
+        }
+
         case 4: {
 
-            u32 val = *(u32*)(in_data + i);
+          u32 val = *(u32*)(in_data + i);
 
-            /* Small integers may be length fields. */
+          /* Small integers may be length fields. */
 
-            if (val && (val <= in_len || SWAP32(val) <= in_len)) {
-              rtype = RESP_LEN;
-              break;
-            }
+          if (val && (val <= in_len || SWAP32(val) <= in_len)) {
 
-            /* Uniform integers may be checksums. */
+            rtype = RESP_LEN;
+            break;
 
-            if (val && (in_data[i] >> 7 != in_data[i + 1] >> 7 ||
-                in_data[i] >> 7 != in_data[i + 2] >> 7 ||
-                in_data[i] >> 7 != in_data[i + 3] >> 7)) {
-              rtype = RESP_CKSUM;
-              break;
-            }
+          }
 
+          /* Uniform integers may be checksums. */
+
+          if (val && (in_data[i] >> 7 != in_data[i + 1] >> 7 ||
+                      in_data[i] >> 7 != in_data[i + 2] >> 7 ||
+                      in_data[i] >> 7 != in_data[i + 3] >> 7)) {
+
+            rtype = RESP_CKSUM;
             break;
 
           }
 
-        case 1: case 3: case 5 ... MAX_AUTO_EXTRA - 1: break;
+          break;
+
+        }
+
+        case 1:
+        case 3:
+        case 5 ... MAX_AUTO_EXTRA - 1: break;
 
         default: rtype = RESP_SUSPECT;
 
@@ -477,19 +484,22 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) {
 
       switch (rtype) {
 
-        case RESP_NONE:     SAYF(cLGR bgGRA); break;
-        case RESP_MINOR:    SAYF(cBRI bgGRA); break;
+        case RESP_NONE: SAYF(cLGR bgGRA); break;
+        case RESP_MINOR: SAYF(cBRI bgGRA); break;
         case RESP_VARIABLE: SAYF(cBLK bgCYA); break;
-        case RESP_FIXED:    SAYF(cBLK bgMGN); break;
-        case RESP_LEN:      SAYF(cBLK bgLGN); break;
-        case RESP_CKSUM:    SAYF(cBLK bgYEL); break;
-        case RESP_SUSPECT:  SAYF(cBLK bgLRD); break;
+        case RESP_FIXED: SAYF(cBLK bgMGN); break;
+        case RESP_LEN: SAYF(cBLK bgLGN); break;
+        case RESP_CKSUM: SAYF(cBLK bgYEL); break;
+        case RESP_SUSPECT: SAYF(cBLK bgLRD); break;
 
       }
 
       show_char(in_data[i + off]);
 
-      if (off != rlen - 1 && (i + off + 1) % 16) SAYF(" "); else SAYF(cRST " ");
+      if (off != rlen - 1 && (i + off + 1) % 16)
+        SAYF(" ");
+      else
+        SAYF(cRST " ");
 
     }
 
@@ -502,13 +512,13 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) {
 
     switch (rtype) {
 
-      case RESP_NONE:     SAYF("no-op block\n"); break;
-      case RESP_MINOR:    SAYF("superficial content\n"); break;
+      case RESP_NONE: SAYF("no-op block\n"); break;
+      case RESP_MINOR: SAYF("superficial content\n"); break;
       case RESP_VARIABLE: SAYF("critical stream\n"); break;
-      case RESP_FIXED:    SAYF("\"magic value\" section\n"); break;
-      case RESP_LEN:      SAYF("suspected length field\n"); break;
-      case RESP_CKSUM:    SAYF("suspected cksum or magic int\n"); break;
-      case RESP_SUSPECT:  SAYF("suspected checksummed block\n"); break;
+      case RESP_FIXED: SAYF("\"magic value\" section\n"); break;
+      case RESP_LEN: SAYF("suspected length field\n"); break;
+      case RESP_CKSUM: SAYF("suspected cksum or magic int\n"); break;
+      case RESP_SUSPECT: SAYF("suspected checksummed block\n"); break;
 
     }
 
@@ -524,8 +534,6 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) {
 
 }
 
-
-
 /* Actually analyze! */
 
 static void analyze(char** argv) {
@@ -536,7 +544,7 @@ static void analyze(char** argv) {
   u8* b_data = ck_alloc(in_len + 1);
   u8  seq_byte = 0;
 
-  b_data[in_len] = 0xff; /* Intentional terminator. */
+  b_data[in_len] = 0xff;                         /* Intentional terminator. */
 
   ACTF("Analyzing input file (this may take a while)...\n");
 
@@ -587,12 +595,15 @@ static void analyze(char** argv) {
 
       b_data[i] = RESP_FIXED;
 
-    } else b_data[i] = RESP_VARIABLE;
+    } else
+
+      b_data[i] = RESP_VARIABLE;
 
     /* When all checksums change, flip most significant bit of b_data. */
 
-    if (prev_xff != xor_ff && prev_x01 != xor_01 &&
-        prev_s10 != sub_10 && prev_a10 != add_10) seq_byte ^= 0x80;
+    if (prev_xff != xor_ff && prev_x01 != xor_01 && prev_s10 != sub_10 &&
+        prev_a10 != add_10)
+      seq_byte ^= 0x80;
 
     b_data[i] |= seq_byte;
 
@@ -601,7 +612,7 @@ static void analyze(char** argv) {
     prev_s10 = sub_10;
     prev_a10 = add_10;
 
-  } 
+  }
 
   dump_hex(in_data, in_len, b_data);
 
@@ -618,8 +629,6 @@ static void analyze(char** argv) {
 
 }
 
-
-
 /* Handle Ctrl-C and the like. */
 
 static void handle_stop_sig(int sig) {
@@ -630,7 +639,6 @@ static void handle_stop_sig(int sig) {
 
 }
 
-
 /* Do basic preparations - persistent fds, filenames, etc. */
 
 static void set_up_environment(void) {
@@ -674,18 +682,20 @@ static void set_up_environment(void) {
   if (x) {
 
     if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR)))
-      FATAL("Custom MSAN_OPTIONS set without exit_code="
-            STRINGIFY(MSAN_ERROR) " - please fix!");
+      FATAL("Custom MSAN_OPTIONS set without exit_code=" STRINGIFY(
+          MSAN_ERROR) " - please fix!");
 
     if (!strstr(x, "symbolize=0"))
       FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!");
 
   }
 
-  setenv("ASAN_OPTIONS", "abort_on_error=1:"
-                         "detect_leaks=0:"
-                         "symbolize=0:"
-                         "allocator_may_return_null=1", 0);
+  setenv("ASAN_OPTIONS",
+         "abort_on_error=1:"
+         "detect_leaks=0:"
+         "symbolize=0:"
+         "allocator_may_return_null=1",
+         0);
 
   setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
                          "symbolize=0:"
@@ -694,21 +704,22 @@ static void set_up_environment(void) {
                          "msan_track_origins=0", 0);
 
   if (getenv("AFL_PRELOAD")) {
+
     setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);
     setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);
+
   }
 
 }
 
-
 /* Setup signal handlers, duh. */
 
 static void setup_signal_handlers(void) {
 
   struct sigaction sa;
 
-  sa.sa_handler   = NULL;
-  sa.sa_flags     = SA_RESTART;
+  sa.sa_handler = NULL;
+  sa.sa_flags = SA_RESTART;
   sa.sa_sigaction = NULL;
 
   sigemptyset(&sa.sa_mask);
@@ -727,43 +738,42 @@ static void setup_signal_handlers(void) {
 
 }
 
-
 /* Display usage hints. */
 
 static void usage(u8* argv0) {
 
-  SAYF("\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
+  SAYF(
+      "\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
 
-       "Required parameters:\n\n"
+      "Required parameters:\n\n"
 
-       "  -i file       - input test case to be analyzed by the tool\n"
+      "  -i file       - input test case to be analyzed by the tool\n"
 
-       "Execution control settings:\n\n"
+      "Execution control settings:\n\n"
 
-       "  -f file       - input file read by the tested program (stdin)\n"
-       "  -t msec       - timeout for each run (%d ms)\n"
-       "  -m megs       - memory limit for child process (%d MB)\n"
-       "  -Q            - use binary-only instrumentation (QEMU mode)\n"
-       "  -U            - use unicorn-based instrumentation (Unicorn mode)\n\n"
+      "  -f file       - input file read by the tested program (stdin)\n"
+      "  -t msec       - timeout for each run (%d ms)\n"
+      "  -m megs       - memory limit for child process (%d MB)\n"
+      "  -Q            - use binary-only instrumentation (QEMU mode)\n"
+      "  -U            - use unicorn-based instrumentation (Unicorn mode)\n\n"
 
-       "Analysis settings:\n\n"
+      "Analysis settings:\n\n"
 
-       "  -e            - look for edge coverage only, ignore hit counts\n\n"
+      "  -e            - look for edge coverage only, ignore hit counts\n\n"
 
-       "For additional tips, please consult %s/README.\n\n",
+      "For additional tips, please consult %s/README.\n\n",
 
-       argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);
+      argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);
 
   exit(1);
 
 }
 
-
 /* Find binary. */
 
 static void find_binary(u8* fname) {
 
-  u8* env_path = 0;
+  u8*         env_path = 0;
   struct stat st;
 
   if (strchr(fname, '/') || !(env_path = getenv("PATH"))) {
@@ -786,7 +796,9 @@ static void find_binary(u8* fname) {
         memcpy(cur_elem, env_path, delim - env_path);
         delim++;
 
-      } else cur_elem = ck_strdup(env_path);
+      } else
+
+        cur_elem = ck_strdup(env_path);
 
       env_path = delim;
 
@@ -798,7 +810,8 @@ static void find_binary(u8* fname) {
       ck_free(cur_elem);
 
       if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
-          (st.st_mode & 0111) && st.st_size >= 4) break;
+          (st.st_mode & 0111) && st.st_size >= 4)
+        break;
 
       ck_free(target_path);
       target_path = 0;
@@ -811,13 +824,12 @@ static void find_binary(u8* fname) {
 
 }
 
-
 /* Fix up argv for QEMU. */
 
 static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
   char** new_argv = ck_alloc(sizeof(char*) * (argc + 4));
-  u8 *tmp, *cp, *rsl, *own_copy;
+  u8 *   tmp, *cp, *rsl, *own_copy;
 
   memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc);
 
@@ -832,8 +844,7 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
     cp = alloc_printf("%s/afl-qemu-trace", tmp);
 
-    if (access(cp, X_OK))
-      FATAL("Unable to find '%s'", tmp);
+    if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp);
 
     target_path = new_argv[0] = cp;
     return new_argv;
@@ -857,7 +868,9 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
     }
 
-  } else ck_free(own_copy);
+  } else
+
+    ck_free(own_copy);
 
   if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) {
 
@@ -882,7 +895,7 @@ int main(int argc, char** argv) {
 
   SAYF(cCYA "afl-analyze" VERSION cRST " by <lcamtuf@google.com>\n");
 
-  while ((opt = getopt(argc,argv,"+i:f:m:t:eQU")) > 0)
+  while ((opt = getopt(argc, argv, "+i:f:m:t:eQU")) > 0)
 
     switch (opt) {
 
@@ -896,7 +909,7 @@ int main(int argc, char** argv) {
 
         if (prog_in) FATAL("Multiple -f options not supported");
         use_stdin = 0;
-        prog_in   = optarg;
+        prog_in = optarg;
         break;
 
       case 'e':
@@ -907,40 +920,41 @@ int main(int argc, char** argv) {
 
       case 'm': {
 
-          u8 suffix = 'M';
+        u8 suffix = 'M';
 
-          if (mem_limit_given) FATAL("Multiple -m options not supported");
-          mem_limit_given = 1;
+        if (mem_limit_given) FATAL("Multiple -m options not supported");
+        mem_limit_given = 1;
 
-          if (!strcmp(optarg, "none")) {
+        if (!strcmp(optarg, "none")) {
 
-            mem_limit = 0;
-            break;
+          mem_limit = 0;
+          break;
 
-          }
+        }
 
-          if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
-              optarg[0] == '-') FATAL("Bad syntax used for -m");
+        if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
+            optarg[0] == '-')
+          FATAL("Bad syntax used for -m");
 
-          switch (suffix) {
+        switch (suffix) {
 
-            case 'T': mem_limit *= 1024 * 1024; break;
-            case 'G': mem_limit *= 1024; break;
-            case 'k': mem_limit /= 1024; break;
-            case 'M': break;
+          case 'T': mem_limit *= 1024 * 1024; break;
+          case 'G': mem_limit *= 1024; break;
+          case 'k': mem_limit /= 1024; break;
+          case 'M': break;
 
-            default:  FATAL("Unsupported suffix or bad syntax for -m");
+          default: FATAL("Unsupported suffix or bad syntax for -m");
 
-          }
+        }
 
-          if (mem_limit < 5) FATAL("Dangerously low value of -m");
+        if (mem_limit < 5) FATAL("Dangerously low value of -m");
 
-          if (sizeof(rlim_t) == 4 && mem_limit > 2000)
-            FATAL("Value of -m out of range on 32-bit systems");
+        if (sizeof(rlim_t) == 4 && mem_limit > 2000)
+          FATAL("Value of -m out of range on 32-bit systems");
 
-        }
+      }
 
-        break;
+      break;
 
       case 't':
 
@@ -970,9 +984,7 @@ int main(int argc, char** argv) {
         unicorn_mode = 1;
         break;
 
-      default:
-
-        usage(argv[0]);
+      default: usage(argv[0]);
 
     }
 
diff --git a/src/afl-as.c b/src/afl-as.c
index 94595f24..57f4c4a3 100644
--- a/src/afl-as.c
+++ b/src/afl-as.c
@@ -48,39 +48,38 @@
 #include <sys/wait.h>
 #include <sys/time.h>
 
-static u8** as_params;          /* Parameters passed to the real 'as'   */
+static u8** as_params;              /* Parameters passed to the real 'as'   */
 
-static u8*  input_file;         /* Originally specified input file      */
-static u8*  modified_file;      /* Instrumented file for the real 'as'  */
+static u8* input_file;              /* Originally specified input file      */
+static u8* modified_file;           /* Instrumented file for the real 'as'  */
 
-static u8   be_quiet,           /* Quiet mode (no stderr output)        */
-            clang_mode,         /* Running in clang mode?               */
-            pass_thru,          /* Just pass data through?              */
-            just_version,       /* Just show version?                   */
-            sanitizer;          /* Using ASAN / MSAN                    */
+static u8 be_quiet,                 /* Quiet mode (no stderr output)        */
+    clang_mode,                     /* Running in clang mode?               */
+    pass_thru,                      /* Just pass data through?              */
+    just_version,                   /* Just show version?                   */
+    sanitizer;                      /* Using ASAN / MSAN                    */
 
-static u32  inst_ratio = 100,   /* Instrumentation probability (%)      */
-            as_par_cnt = 1;     /* Number of params to 'as'             */
+static u32 inst_ratio = 100,        /* Instrumentation probability (%)      */
+    as_par_cnt = 1;                 /* Number of params to 'as'             */
 
-/* If we don't find --32 or --64 in the command line, default to 
+/* If we don't find --32 or --64 in the command line, default to
    instrumentation for whichever mode we were compiled with. This is not
    perfect, but should do the trick for almost all use cases. */
 
 #ifdef __x86_64__
 
-static u8   use_64bit = 1;
+static u8 use_64bit = 1;
 
 #else
 
-static u8   use_64bit = 0;
+static u8 use_64bit = 0;
 
-#ifdef __APPLE__
-#  error "Sorry, 32-bit Apple platforms are not supported."
-#endif /* __APPLE__ */
+#  ifdef __APPLE__
+#    error "Sorry, 32-bit Apple platforms are not supported."
+#  endif /* __APPLE__ */
 
 #endif /* ^__x86_64__ */
 
-
 /* Examine and modify parameters to pass to 'as'. Note that the file name
    is always the last parameter passed by GCC, so we exploit this property
    to keep the code simple. */
@@ -134,8 +133,10 @@ static void edit_params(int argc, char** argv) {
 
   for (i = 1; i < argc - 1; i++) {
 
-    if (!strcmp(argv[i], "--64")) use_64bit = 1;
-    else if (!strcmp(argv[i], "--32")) use_64bit = 0;
+    if (!strcmp(argv[i], "--64"))
+      use_64bit = 1;
+    else if (!strcmp(argv[i], "--32"))
+      use_64bit = 0;
 
 #ifdef __APPLE__
 
@@ -143,7 +144,8 @@ static void edit_params(int argc, char** argv) {
 
     if (!strcmp(argv[i], "-arch") && i + 1 < argc) {
 
-      if (!strcmp(argv[i + 1], "x86_64")) use_64bit = 1;
+      if (!strcmp(argv[i + 1], "x86_64"))
+        use_64bit = 1;
       else if (!strcmp(argv[i + 1], "i386"))
         FATAL("Sorry, 32-bit Apple platforms are not supported.");
 
@@ -181,13 +183,17 @@ static void edit_params(int argc, char** argv) {
   if (input_file[0] == '-') {
 
     if (!strcmp(input_file + 1, "-version")) {
+
       just_version = 1;
       modified_file = input_file;
       goto wrap_things_up;
+
     }
 
-    if (input_file[1]) FATAL("Incorrect use (not called through afl-gcc?)");
-      else input_file = NULL;
+    if (input_file[1])
+      FATAL("Incorrect use (not called through afl-gcc?)");
+    else
+      input_file = NULL;
 
   } else {
 
@@ -197,22 +203,21 @@ static void edit_params(int argc, char** argv) {
        NSS. */
 
     if (strncmp(input_file, tmp_dir, strlen(tmp_dir)) &&
-        strncmp(input_file, "/var/tmp/", 9) &&
-        strncmp(input_file, "/tmp/", 5)) pass_thru = 1;
+        strncmp(input_file, "/var/tmp/", 9) && strncmp(input_file, "/tmp/", 5))
+      pass_thru = 1;
 
   }
 
-  modified_file = alloc_printf("%s/.afl-%u-%u.s", tmp_dir, getpid(),
-                               (u32)time(NULL));
+  modified_file =
+      alloc_printf("%s/.afl-%u-%u.s", tmp_dir, getpid(), (u32)time(NULL));
 
 wrap_things_up:
 
   as_params[as_par_cnt++] = modified_file;
-  as_params[as_par_cnt]   = NULL;
+  as_params[as_par_cnt] = NULL;
 
 }
 
-
 /* Process input file, generate modified_file. Insert instrumentation in all
    the appropriate places. */
 
@@ -222,11 +227,11 @@ static void add_instrumentation(void) {
 
   FILE* inf;
   FILE* outf;
-  s32 outfd;
-  u32 ins_lines = 0;
+  s32   outfd;
+  u32   ins_lines = 0;
 
-  u8  instr_ok = 0, skip_csect = 0, skip_next_label = 0,
-      skip_intel = 0, skip_app = 0, instrument_next = 0;
+  u8 instr_ok = 0, skip_csect = 0, skip_next_label = 0, skip_intel = 0,
+     skip_app = 0, instrument_next = 0;
 
 #ifdef __APPLE__
 
@@ -239,7 +244,9 @@ static void add_instrumentation(void) {
     inf = fopen(input_file, "r");
     if (!inf) PFATAL("Unable to read '%s'", input_file);
 
-  } else inf = stdin;
+  } else
+
+    inf = stdin;
 
   outfd = open(modified_file, O_WRONLY | O_EXCL | O_CREAT, 0600);
 
@@ -247,7 +254,7 @@ static void add_instrumentation(void) {
 
   outf = fdopen(outfd, "w");
 
-  if (!outf) PFATAL("fdopen() failed");  
+  if (!outf) PFATAL("fdopen() failed");
 
   while (fgets(line, MAX_LINE, inf)) {
 
@@ -284,22 +291,26 @@ static void add_instrumentation(void) {
          around them, so we use that as a signal. */
 
       if (!clang_mode && instr_ok && !strncmp(line + 2, "p2align ", 8) &&
-          isdigit(line[10]) && line[11] == '\n') skip_next_label = 1;
+          isdigit(line[10]) && line[11] == '\n')
+        skip_next_label = 1;
 
       if (!strncmp(line + 2, "text\n", 5) ||
           !strncmp(line + 2, "section\t.text", 13) ||
           !strncmp(line + 2, "section\t__TEXT,__text", 21) ||
           !strncmp(line + 2, "section __TEXT,__text", 21)) {
+
         instr_ok = 1;
-        continue; 
+        continue;
+
       }
 
       if (!strncmp(line + 2, "section\t", 8) ||
-          !strncmp(line + 2, "section ", 8) ||
-          !strncmp(line + 2, "bss\n", 4) ||
+          !strncmp(line + 2, "section ", 8) || !strncmp(line + 2, "bss\n", 4) ||
           !strncmp(line + 2, "data\n", 5)) {
+
         instr_ok = 0;
         continue;
+
       }
 
     }
@@ -354,8 +365,9 @@ static void add_instrumentation(void) {
 
      */
 
-    if (skip_intel || skip_app || skip_csect || !instr_ok ||
-        line[0] == '#' || line[0] == ' ') continue;
+    if (skip_intel || skip_app || skip_csect || !instr_ok || line[0] == '#' ||
+        line[0] == ' ')
+      continue;
 
     /* Conditional branch instruction (jnz, etc). We append the instrumentation
        right after the branch (to instrument the not-taken path) and at the
@@ -404,15 +416,16 @@ static void add_instrumentation(void) {
 
         /* Apple: L<num> / LBB<num> */
 
-        if ((isdigit(line[1]) || (clang_mode && !strncmp(line, "LBB", 3)))
-            && R(100) < inst_ratio) {
+        if ((isdigit(line[1]) || (clang_mode && !strncmp(line, "LBB", 3))) &&
+            R(100) < inst_ratio) {
 
 #else
 
         /* Apple: .L<num> / .LBB<num> */
 
-        if ((isdigit(line[2]) || (clang_mode && !strncmp(line + 1, "LBB", 3)))
-            && R(100) < inst_ratio) {
+        if ((isdigit(line[2]) ||
+             (clang_mode && !strncmp(line + 1, "LBB", 3))) &&
+            R(100) < inst_ratio) {
 
 #endif /* __APPLE__ */
 
@@ -427,7 +440,10 @@ static void add_instrumentation(void) {
              .Lfunc_begin0-style exception handling calculations (a problem on
              MacOS X). */
 
-          if (!skip_next_label) instrument_next = 1; else skip_next_label = 0;
+          if (!skip_next_label)
+            instrument_next = 1;
+          else
+            skip_next_label = 0;
 
         }
 
@@ -436,34 +452,34 @@ static void add_instrumentation(void) {
         /* Function label (always instrumented, deferred mode). */
 
         instrument_next = 1;
-    
+
       }
 
     }
 
   }
 
-  if (ins_lines)
-    fputs(use_64bit ? main_payload_64 : main_payload_32, outf);
+  if (ins_lines) fputs(use_64bit ? main_payload_64 : main_payload_32, outf);
 
   if (input_file) fclose(inf);
   fclose(outf);
 
   if (!be_quiet) {
 
-    if (!ins_lines) WARNF("No instrumentation targets found%s.",
-                          pass_thru ? " (pass-thru mode)" : "");
-    else OKF("Instrumented %u locations (%s-bit, %s mode, ratio %u%%).",
-             ins_lines, use_64bit ? "64" : "32",
-             getenv("AFL_HARDEN") ? "hardened" : 
-             (sanitizer ? "ASAN/MSAN" : "non-hardened"),
-             inst_ratio);
- 
+    if (!ins_lines)
+      WARNF("No instrumentation targets found%s.",
+            pass_thru ? " (pass-thru mode)" : "");
+    else
+      OKF("Instrumented %u locations (%s-bit, %s mode, ratio %u%%).", ins_lines,
+          use_64bit ? "64" : "32",
+          getenv("AFL_HARDEN") ? "hardened"
+                               : (sanitizer ? "ASAN/MSAN" : "non-hardened"),
+          inst_ratio);
+
   }
 
 }
 
-
 /* Main entry point */
 
 int main(int argc, char** argv) {
@@ -473,7 +489,7 @@ int main(int argc, char** argv) {
   int status;
   u8* inst_ratio_str = getenv("AFL_INST_RATIO");
 
-  struct timeval tv;
+  struct timeval  tv;
   struct timezone tz;
 
   clang_mode = !!getenv(CLANG_ENV_VAR);
@@ -481,19 +497,26 @@ int main(int argc, char** argv) {
   if (isatty(2) && !getenv("AFL_QUIET")) {
 
     SAYF(cCYA "afl-as" VERSION cRST " by <lcamtuf@google.com>\n");
- 
-  } else be_quiet = 1;
+
+  } else
+
+    be_quiet = 1;
 
   if (argc < 2) {
 
-    SAYF("\n"
-         "This is a helper application for afl-fuzz. It is a wrapper around GNU 'as',\n"
-         "executed by the toolchain whenever using afl-gcc or afl-clang. You probably\n"
-         "don't want to run this program directly.\n\n"
+    SAYF(
+        "\n"
+        "This is a helper application for afl-fuzz. It is a wrapper around GNU "
+        "'as',\n"
+        "executed by the toolchain whenever using afl-gcc or afl-clang. You "
+        "probably\n"
+        "don't want to run this program directly.\n\n"
 
-         "Rarely, when dealing with extremely complex projects, it may be advisable to\n"
-         "set AFL_INST_RATIO to a value less than 100 in order to reduce the odds of\n"
-         "instrumenting every discovered branch.\n\n");
+        "Rarely, when dealing with extremely complex projects, it may be "
+        "advisable to\n"
+        "set AFL_INST_RATIO to a value less than 100 in order to reduce the "
+        "odds of\n"
+        "instrumenting every discovered branch.\n\n");
 
     exit(1);
 
@@ -509,7 +532,7 @@ int main(int argc, char** argv) {
 
   if (inst_ratio_str) {
 
-    if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || inst_ratio > 100) 
+    if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || inst_ratio > 100)
       FATAL("Bad value of AFL_INST_RATIO (must be between 0 and 100)");
 
   }
@@ -524,9 +547,10 @@ int main(int argc, char** argv) {
      that... */
 
   if (getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) {
+
     sanitizer = 1;
-    if (!getenv("AFL_INST_RATIO"))
-      inst_ratio /= 3;
+    if (!getenv("AFL_INST_RATIO")) inst_ratio /= 3;
+
   }
 
   if (!just_version) add_instrumentation();
diff --git a/src/afl-common.c b/src/afl-common.c
index f3bbdfb4..9f1f45eb 100644
--- a/src/afl-common.c
+++ b/src/afl-common.c
@@ -13,25 +13,29 @@
 
 /* Detect @@ in args. */
 #ifndef __glibc__
-#include <unistd.h>
+#  include <unistd.h>
 #endif
 
-
 void detect_file_args(char** argv, u8* prog_in) {
 
   u32 i = 0;
 #ifdef __GLIBC__
-  u8* cwd = getcwd(NULL, 0); /* non portable glibc extension */
+  u8* cwd = getcwd(NULL, 0);                /* non portable glibc extension */
 #else
-  u8* cwd;
-  char *buf;
-  long size = pathconf(".", _PC_PATH_MAX);
-  if ((buf = (char *)malloc((size_t)size)) != NULL) {
-    cwd = getcwd(buf, (size_t)size); /* portable version */
+  u8*   cwd;
+  char* buf;
+  long  size = pathconf(".", _PC_PATH_MAX);
+  if ((buf = (char*)malloc((size_t)size)) != NULL) {
+
+    cwd = getcwd(buf, (size_t)size);                    /* portable version */
+
   } else {
+
     PFATAL("getcwd() failed");
-    cwd = 0; /* for dumb compilers */
+    cwd = 0;                                          /* for dumb compilers */
+
   }
+
 #endif
 
   if (!cwd) PFATAL("getcwd() failed");
@@ -48,8 +52,10 @@ void detect_file_args(char** argv, u8* prog_in) {
 
       /* Be sure that we're always using fully-qualified paths. */
 
-      if (prog_in[0] == '/') aa_subst = prog_in;
-      else aa_subst = alloc_printf("%s/%s", cwd, prog_in);
+      if (prog_in[0] == '/')
+        aa_subst = prog_in;
+      else
+        aa_subst = alloc_printf("%s/%s", cwd, prog_in);
 
       /* Construct a replacement argv value. */
 
@@ -66,7 +72,7 @@ void detect_file_args(char** argv, u8* prog_in) {
 
   }
 
-  free(cwd); /* not tracked */
+  free(cwd);                                                 /* not tracked */
 
 }
 
diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c
index 0051f6b0..152ae802 100644
--- a/src/afl-forkserver.c
+++ b/src/afl-forkserver.c
@@ -15,34 +15,39 @@
 #include <sys/resource.h>
 
 /* a program that includes afl-forkserver needs to define these */
-extern u8 uses_asan;
+extern u8  uses_asan;
 extern u8 *trace_bits;
 extern s32 forksrv_pid, child_pid, fsrv_ctl_fd, fsrv_st_fd;
-extern s32 out_fd, out_dir_fd, dev_urandom_fd, dev_null_fd; /* initialize these with -1 */
-extern u32 exec_tmout;
-extern u64 mem_limit;
-extern u8 *out_file, *target_path, *doc_path;
+extern s32 out_fd, out_dir_fd, dev_urandom_fd,
+    dev_null_fd;                                /* initialize these with -1 */
+extern u32   exec_tmout;
+extern u64   mem_limit;
+extern u8 *  out_file, *target_path, *doc_path;
 extern FILE *plot_file;
 
-/* we need this internally but can be defined and read extern in the main source */
+/* we need this internally but can be defined and read extern in the main source
+ */
 u8 child_timed_out;
 
-
 /* Describe integer as memory size. */
 
-u8* forkserver_DMS(u64 val) {
+u8 *forkserver_DMS(u64 val) {
 
   static u8 tmp[12][16];
   static u8 cur;
 
-#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \
-    if (val < (_divisor) * (_limit_mult)) { \
+#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast)    \
+  do {                                                    \
+                                                          \
+    if (val < (_divisor) * (_limit_mult)) {               \
+                                                          \
       sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \
-      return tmp[cur]; \
-    } \
+      return tmp[cur];                                    \
+                                                          \
+    }                                                     \
+                                                          \
   } while (0)
 
-
   cur = (cur + 1) % 12;
 
   /* 0-9999 */
@@ -86,20 +91,23 @@ u8* forkserver_DMS(u64 val) {
 
 }
 
-
-
 /* the timeout handler */
 
 void handle_timeout(int sig) {
+
   if (child_pid > 0) {
-    child_timed_out = 1; 
+
+    child_timed_out = 1;
     kill(child_pid, SIGKILL);
+
   } else if (child_pid == -1 && forksrv_pid > 0) {
-    child_timed_out = 1; 
+
+    child_timed_out = 1;
     kill(forksrv_pid, SIGKILL);
+
   }
-}
 
+}
 
 /* Spin up fork server (instrumented mode only). The idea is explained here:
 
@@ -112,20 +120,18 @@ void handle_timeout(int sig) {
 void init_forkserver(char **argv) {
 
   static struct itimerval it;
-  int st_pipe[2], ctl_pipe[2];
-  int status;
-  s32 rlen;
+  int                     st_pipe[2], ctl_pipe[2];
+  int                     status;
+  s32                     rlen;
 
   ACTF("Spinning up the fork server...");
 
-  if (pipe(st_pipe) || pipe(ctl_pipe))
-    PFATAL("pipe() failed");
+  if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed");
 
   child_timed_out = 0;
   forksrv_pid = fork();
 
-  if (forksrv_pid < 0)
-    PFATAL("fork() failed");
+  if (forksrv_pid < 0) PFATAL("fork() failed");
 
   if (!forksrv_pid) {
 
@@ -137,29 +143,33 @@ void init_forkserver(char **argv) {
        soft 128. Let's try to fix that... */
 
     if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) {
+
       r.rlim_cur = FORKSRV_FD + 2;
-      setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */
+      setrlimit(RLIMIT_NOFILE, &r);                        /* Ignore errors */
+
     }
 
     if (mem_limit) {
+
       r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
 
 #ifdef RLIMIT_AS
-      setrlimit(RLIMIT_AS, &r); /* Ignore errors */
+      setrlimit(RLIMIT_AS, &r);                            /* Ignore errors */
 #else
       /* This takes care of OpenBSD, which doesn't have RLIMIT_AS, but
          according to reliable sources, RLIMIT_DATA covers anonymous
          maps - so we should be getting good protection against OOM bugs. */
 
-      setrlimit(RLIMIT_DATA, &r); /* Ignore errors */
+      setrlimit(RLIMIT_DATA, &r);                          /* Ignore errors */
 #endif /* ^RLIMIT_AS */
+
     }
 
     /* Dumping cores is slow and can lead to anomalies if SIGKILL is delivered
        before the dump is complete. */
 
-//    r.rlim_max = r.rlim_cur = 0;
-//    setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
+    //    r.rlim_max = r.rlim_cur = 0;
+    //    setrlimit(RLIMIT_CORE, &r);                      /* Ignore errors */
 
     /* Isolate the process and configure standard descriptors. If out_file is
        specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
@@ -167,23 +177,27 @@ void init_forkserver(char **argv) {
     setsid();
 
     if (!getenv("AFL_DEBUG_CHILD_OUTPUT")) {
+
       dup2(dev_null_fd, 1);
       dup2(dev_null_fd, 2);
+
     }
 
     if (out_file) {
+
       dup2(dev_null_fd, 0);
+
     } else {
+
       dup2(out_fd, 0);
       close(out_fd);
+
     }
 
     /* Set up control and status pipes, close the unneeded original fds. */
 
-    if (dup2(ctl_pipe[0], FORKSRV_FD) < 0)
-      PFATAL("dup2() failed");
-    if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0)
-      PFATAL("dup2() failed");
+    if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) PFATAL("dup2() failed");
+    if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) PFATAL("dup2() failed");
 
     close(ctl_pipe[0]);
     close(ctl_pipe[1]);
@@ -198,8 +212,7 @@ void init_forkserver(char **argv) {
     /* This should improve performance a bit, since it stops the linker from
        doing extra work post-fork(). */
 
-    if (!getenv("LD_BIND_LAZY"))
-      setenv("LD_BIND_NOW", "1", 0);
+    if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0);
 
     /* Set sane defaults for ASAN if nothing else specified. */
 
@@ -228,6 +241,7 @@ void init_forkserver(char **argv) {
 
     *(u32 *)trace_bits = EXEC_FAIL_SIG;
     exit(0);
+
   }
 
   /* PARENT PROCESS */
@@ -243,8 +257,10 @@ void init_forkserver(char **argv) {
   /* Wait for the fork server to come up, but don't wait too long. */
 
   if (exec_tmout) {
+
     it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000);
     it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;
+
   }
 
   setitimer(ITIMER_REAL, &it, NULL);
@@ -260,22 +276,24 @@ void init_forkserver(char **argv) {
      Otherwise, try to figure out what went wrong. */
 
   if (rlen == 4) {
+
     OKF("All right - fork server is up.");
     return;
+
   }
 
   if (child_timed_out)
     FATAL("Timeout while initializing fork server (adjusting -t may help)");
 
-  if (waitpid(forksrv_pid, &status, 0) <= 0)
-    PFATAL("waitpid() failed");
+  if (waitpid(forksrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
 
   if (WIFSIGNALED(status)) {
 
     if (mem_limit && mem_limit < 500 && uses_asan) {
 
-      SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, "
-                                 "before receiving any input\n"
+      SAYF("\n" cLRD "[-] " cRST
+           "Whoops, the target binary crashed suddenly, "
+           "before receiving any input\n"
            "    from the fuzzer! Since it seems to be built with ASAN and you "
            "have a\n"
            "    restrictive memory limit configured, this is expected; please "
@@ -285,8 +303,9 @@ void init_forkserver(char **argv) {
 
     } else if (!mem_limit) {
 
-      SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, "
-                                 "before receiving any input\n"
+      SAYF("\n" cLRD "[-] " cRST
+           "Whoops, the target binary crashed suddenly, "
+           "before receiving any input\n"
            "    from the fuzzer! There are several probable explanations:\n\n"
 
            "    - The binary is just buggy and explodes entirely on its own. "
@@ -303,8 +322,9 @@ void init_forkserver(char **argv) {
 
     } else {
 
-      SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, "
-                                 "before receiving any input\n"
+      SAYF("\n" cLRD "[-] " cRST
+           "Whoops, the target binary crashed suddenly, "
+           "before receiving any input\n"
            "    from the fuzzer! There are several probable explanations:\n\n"
 
            "    - The current memory limit (%s) is too restrictive, causing "
@@ -315,7 +335,8 @@ void init_forkserver(char **argv) {
            "way confirm\n"
            "      this diagnosis would be:\n\n"
 
-           MSG_ULIMIT_USAGE " /path/to/fuzzed_app )\n\n"
+           MSG_ULIMIT_USAGE
+           " /path/to/fuzzed_app )\n\n"
 
            "      Tip: you can use http://jwilk.net/software/recidivm to "
            "quickly\n"
@@ -334,9 +355,11 @@ void init_forkserver(char **argv) {
            "      fail, poke <afl-users@googlegroups.com> for troubleshooting "
            "tips.\n",
            forkserver_DMS(mem_limit << 20), mem_limit - 1);
+
     }
 
     FATAL("Fork server crashed with signal %d", WTERMSIG(status));
+
   }
 
   if (*(u32 *)trace_bits == EXEC_FAIL_SIG)
@@ -344,8 +367,9 @@ void init_forkserver(char **argv) {
 
   if (mem_limit && mem_limit < 500 && uses_asan) {
 
-    SAYF("\n" cLRD "[-] " cRST "Hmm, looks like the target binary terminated "
-                               "before we could complete a\n"
+    SAYF("\n" cLRD "[-] " cRST
+         "Hmm, looks like the target binary terminated "
+         "before we could complete a\n"
          "    handshake with the injected code. Since it seems to be built "
          "with ASAN and\n"
          "    you have a restrictive memory limit configured, this is "
@@ -355,8 +379,9 @@ void init_forkserver(char **argv) {
 
   } else if (!mem_limit) {
 
-    SAYF("\n" cLRD "[-] " cRST "Hmm, looks like the target binary terminated "
-                               "before we could complete a\n"
+    SAYF("\n" cLRD "[-] " cRST
+         "Hmm, looks like the target binary terminated "
+         "before we could complete a\n"
          "    handshake with the injected code. Perhaps there is a horrible "
          "bug in the\n"
          "    fuzzer. Poke <afl-users@googlegroups.com> for troubleshooting "
@@ -365,8 +390,9 @@ void init_forkserver(char **argv) {
   } else {
 
     SAYF(
-        "\n" cLRD "[-] " cRST "Hmm, looks like the target binary terminated "
-                              "before we could complete a\n"
+        "\n" cLRD "[-] " cRST
+        "Hmm, looks like the target binary terminated "
+        "before we could complete a\n"
         "    handshake with the injected code. There are %s probable "
         "explanations:\n\n"
 
@@ -377,7 +403,8 @@ void init_forkserver(char **argv) {
         "option. A\n"
         "      simple way to confirm the diagnosis may be:\n\n"
 
-        MSG_ULIMIT_USAGE " /path/to/fuzzed_app )\n\n"
+        MSG_ULIMIT_USAGE
+        " /path/to/fuzzed_app )\n\n"
 
         "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
         "      estimate the required amount of virtual memory for the "
@@ -394,8 +421,10 @@ void init_forkserver(char **argv) {
               "      reached before the program terminates.\n\n"
             : "",
         forkserver_DMS(mem_limit << 20), mem_limit - 1);
+
   }
 
   FATAL("Fork server handshake failed");
+
 }
 
diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c
index 1a77dc13..be187fff 100644
--- a/src/afl-fuzz-bitmap.c
+++ b/src/afl-fuzz-bitmap.c
@@ -46,7 +46,6 @@ void write_bitmap(void) {
 
 }
 
-
 /* Read bitmap from file. This is for the -B option again. */
 
 void read_bitmap(u8* fname) {
@@ -61,10 +60,9 @@ void read_bitmap(u8* fname) {
 
 }
 
-
 /* Check if the current execution path brings anything new to the table.
    Update virgin bits to reflect the finds. Returns 1 if the only change is
-   the hit-count for a particular tuple; 2 if there are new tuples seen. 
+   the hit-count for a particular tuple; 2 if there are new tuples seen.
    Updates the map, so subsequent calls will always return 0.
 
    This function is called after every exec() on a fairly large buffer, so
@@ -75,20 +73,20 @@ u8 has_new_bits(u8* virgin_map) {
 #ifdef __x86_64__
 
   u64* current = (u64*)trace_bits;
-  u64* virgin  = (u64*)virgin_map;
+  u64* virgin = (u64*)virgin_map;
 
-  u32  i = (MAP_SIZE >> 3);
+  u32 i = (MAP_SIZE >> 3);
 
 #else
 
   u32* current = (u32*)trace_bits;
-  u32* virgin  = (u32*)virgin_map;
+  u32* virgin = (u32*)virgin_map;
 
-  u32  i = (MAP_SIZE >> 2);
+  u32 i = (MAP_SIZE >> 2);
 
 #endif /* ^__x86_64__ */
 
-  u8   ret = 0;
+  u8 ret = 0;
 
   while (i--) {
 
@@ -111,14 +109,18 @@ u8 has_new_bits(u8* virgin_map) {
         if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
             (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) ||
             (cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) ||
-            (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) ret = 2;
-        else ret = 1;
+            (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff))
+          ret = 2;
+        else
+          ret = 1;
 
 #else
 
         if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
-            (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) ret = 2;
-        else ret = 1;
+            (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff))
+          ret = 2;
+        else
+          ret = 1;
 
 #endif /* ^__x86_64__ */
 
@@ -139,14 +141,13 @@ u8 has_new_bits(u8* virgin_map) {
 
 }
 
-
 /* Count the number of bits set in the provided bitmap. Used for the status
    screen several times every second, does not have to be fast. */
 
 u32 count_bits(u8* mem) {
 
   u32* ptr = (u32*)mem;
-  u32  i   = (MAP_SIZE >> 2);
+  u32  i = (MAP_SIZE >> 2);
   u32  ret = 0;
 
   while (i--) {
@@ -157,8 +158,10 @@ u32 count_bits(u8* mem) {
        data. */
 
     if (v == 0xffffffff) {
+
       ret += 32;
       continue;
+
     }
 
     v -= ((v >> 1) & 0x55555555);
@@ -171,8 +174,7 @@ u32 count_bits(u8* mem) {
 
 }
 
-
-#define FF(_b)  (0xff << ((_b) << 3))
+#define FF(_b) (0xff << ((_b) << 3))
 
 /* Count the number of bytes set in the bitmap. Called fairly sporadically,
    mostly to update the status screen or calibrate and examine confirmed
@@ -181,7 +183,7 @@ u32 count_bits(u8* mem) {
 u32 count_bytes(u8* mem) {
 
   u32* ptr = (u32*)mem;
-  u32  i   = (MAP_SIZE >> 2);
+  u32  i = (MAP_SIZE >> 2);
   u32  ret = 0;
 
   while (i--) {
@@ -200,14 +202,13 @@ u32 count_bytes(u8* mem) {
 
 }
 
-
 /* Count the number of non-255 bytes set in the bitmap. Used strictly for the
    status screen, several calls per second or so. */
 
 u32 count_non_255_bytes(u8* mem) {
 
   u32* ptr = (u32*)mem;
-  u32  i   = (MAP_SIZE >> 2);
+  u32  i = (MAP_SIZE >> 2);
   u32  ret = 0;
 
   while (i--) {
@@ -229,16 +230,14 @@ u32 count_non_255_bytes(u8* mem) {
 
 }
 
-
 /* Destructively simplify trace by eliminating hit count information
    and replacing it with 0x80 or 0x01 depending on whether the tuple
    is hit or not. Called on every new crash or timeout, should be
    reasonably fast. */
 
-const u8 simplify_lookup[256] = { 
+const u8 simplify_lookup[256] = {
 
-  [0]         = 1,
-  [1 ... 255] = 128
+    [0] = 1, [1 ... 255] = 128
 
 };
 
@@ -265,7 +264,9 @@ void simplify_trace(u64* mem) {
       mem8[6] = simplify_lookup[mem8[6]];
       mem8[7] = simplify_lookup[mem8[7]];
 
-    } else *mem = 0x0101010101010101ULL;
+    } else
+
+      *mem = 0x0101010101010101ULL;
 
     ++mem;
 
@@ -292,50 +293,49 @@ void simplify_trace(u32* mem) {
       mem8[2] = simplify_lookup[mem8[2]];
       mem8[3] = simplify_lookup[mem8[3]];
 
-    } else *mem = 0x01010101;
+    } else
+
+      *mem = 0x01010101;
 
     ++mem;
+
   }
 
 }
 
 #endif /* ^__x86_64__ */
 
-
 /* Destructively classify execution counts in a trace. This is used as a
    preprocessing step for any newly acquired traces. Called on every exec,
    must be fast. */
 
 static const u8 count_class_lookup8[256] = {
 
-  [0]           = 0,
-  [1]           = 1,
-  [2]           = 2,
-  [3]           = 4,
-  [4 ... 7]     = 8,
-  [8 ... 15]    = 16,
-  [16 ... 31]   = 32,
-  [32 ... 127]  = 64,
-  [128 ... 255] = 128
+    [0] = 0,
+    [1] = 1,
+    [2] = 2,
+    [3] = 4,
+    [4 ... 7] = 8,
+    [8 ... 15] = 16,
+    [16 ... 31] = 32,
+    [32 ... 127] = 64,
+    [128 ... 255] = 128
 
 };
 
 static u16 count_class_lookup16[65536];
 
-
 void init_count_class16(void) {
 
   u32 b1, b2;
 
-  for (b1 = 0; b1 < 256; b1++) 
+  for (b1 = 0; b1 < 256; b1++)
     for (b2 = 0; b2 < 256; b2++)
-      count_class_lookup16[(b1 << 8) + b2] = 
-        (count_class_lookup8[b1] << 8) |
-        count_class_lookup8[b2];
+      count_class_lookup16[(b1 << 8) + b2] =
+          (count_class_lookup8[b1] << 8) | count_class_lookup8[b2];
 
 }
 
-
 #ifdef __x86_64__
 
 void classify_counts(u64* mem) {
@@ -390,7 +390,6 @@ void classify_counts(u32* mem) {
 
 #endif /* ^__x86_64__ */
 
-
 /* Compact trace bytes into a smaller bitmap. We effectively just drop the
    count information here. This is called only sporadically, for some
    new paths. */
@@ -408,7 +407,6 @@ void minimize_bits(u8* dst, u8* src) {
 
 }
 
-
 #ifndef SIMPLE_FILES
 
 /* Construct a file name for a new test case, capturing the operation
@@ -428,8 +426,7 @@ u8* describe_op(u8 hnb) {
 
     sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - start_time);
 
-    if (splicing_with >= 0)
-      sprintf(ret + strlen(ret), "+%06d", splicing_with);
+    if (splicing_with >= 0) sprintf(ret + strlen(ret), "+%06d", splicing_with);
 
     sprintf(ret + strlen(ret), ",op:%s", stage_short);
 
@@ -438,11 +435,12 @@ u8* describe_op(u8 hnb) {
       sprintf(ret + strlen(ret), ",pos:%d", stage_cur_byte);
 
       if (stage_val_type != STAGE_VAL_NONE)
-        sprintf(ret + strlen(ret), ",val:%s%+d", 
-                (stage_val_type == STAGE_VAL_BE) ? "be:" : "",
-                stage_cur_val);
+        sprintf(ret + strlen(ret), ",val:%s%+d",
+                (stage_val_type == STAGE_VAL_BE) ? "be:" : "", stage_cur_val);
 
-    } else sprintf(ret + strlen(ret), ",rep:%d", stage_cur_val);
+    } else
+
+      sprintf(ret + strlen(ret), ",rep:%d", stage_cur_val);
 
   }
 
@@ -454,13 +452,12 @@ u8* describe_op(u8 hnb) {
 
 #endif /* !SIMPLE_FILES */
 
-
 /* Write a message accompanying the crash directory :-) */
 
 static void write_crash_readme(void) {
 
-  u8* fn = alloc_printf("%s/crashes/README.txt", out_dir);
-  s32 fd;
+  u8*   fn = alloc_printf("%s/crashes/README.txt", out_dir);
+  s32   fd;
   FILE* f;
 
   fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
@@ -473,32 +470,38 @@ static void write_crash_readme(void) {
   f = fdopen(fd, "w");
 
   if (!f) {
+
     close(fd);
     return;
+
   }
 
-  fprintf(f, "Command line used to find this crash:\n\n"
+  fprintf(
+      f,
+      "Command line used to find this crash:\n\n"
 
-             "%s\n\n"
+      "%s\n\n"
 
-             "If you can't reproduce a bug outside of afl-fuzz, be sure to set the same\n"
-             "memory limit. The limit used for this fuzzing session was %s.\n\n"
+      "If you can't reproduce a bug outside of afl-fuzz, be sure to set the "
+      "same\n"
+      "memory limit. The limit used for this fuzzing session was %s.\n\n"
 
-             "Need a tool to minimize test cases before investigating the crashes or sending\n"
-             "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n"
+      "Need a tool to minimize test cases before investigating the crashes or "
+      "sending\n"
+      "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n"
 
-             "Found any cool bugs in open-source tools using afl-fuzz? If yes, please drop\n"
-             "an mail at <afl-users@googlegroups.com> once the issues are fixed\n\n"
+      "Found any cool bugs in open-source tools using afl-fuzz? If yes, please "
+      "drop\n"
+      "an mail at <afl-users@googlegroups.com> once the issues are fixed\n\n"
 
-             "  https://github.com/vanhauser-thc/AFLplusplus\n\n",
+      "  https://github.com/vanhauser-thc/AFLplusplus\n\n",
 
-             orig_cmdline, DMS(mem_limit << 20)); /* ignore errors */
+      orig_cmdline, DMS(mem_limit << 20));                 /* ignore errors */
 
   fclose(f);
 
 }
 
-
 /* Check if the result of an execve() during routine fuzzing is interesting,
    save or queue the input test case for further analysis if so. Returns 1 if
    entry is saved, 0 otherwise. */
@@ -507,7 +510,7 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
 
   if (len == 0) return 0;
 
-  u8  *fn = "";
+  u8* fn = "";
   u8  hnb;
   s32 fd;
   u8  keeping = 0, res;
@@ -517,8 +520,8 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
 
   struct queue_entry* q = queue;
   while (q) {
-    if (q->exec_cksum == cksum)
-      q->n_fuzz = q->n_fuzz + 1;
+
+    if (q->exec_cksum == cksum) q->n_fuzz = q->n_fuzz + 1;
 
     q = q->next;
 
@@ -530,9 +533,11 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
        future fuzzing, etc. */
 
     if (!(hnb = has_new_bits(virgin_bits))) {
+
       if (crash_mode) ++total_crashes;
       return 0;
-    }    
+
+    }
 
 #ifndef SIMPLE_FILES
 
@@ -548,8 +553,10 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
     add_to_queue(fn, len, 0);
 
     if (hnb == 2) {
+
       queue_top->has_new_cov = 1;
       ++queued_with_cov;
+
     }
 
     queue_top->exec_cksum = cksum;
@@ -559,8 +566,7 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
 
     res = calibrate_case(argv, queue_top, mem, queue_cycle - 1, 0);
 
-    if (res == FAULT_ERROR)
-      FATAL("Unable to execute target application");
+    if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
     fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
     if (fd < 0) PFATAL("Unable to create '%s'", fn);
@@ -620,13 +626,12 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
 
 #ifndef SIMPLE_FILES
 
-      fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir,
-                        unique_hangs, describe_op(0));
+      fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir, unique_hangs,
+                        describe_op(0));
 
 #else
 
-      fn = alloc_printf("%s/hangs/id_%06llu", out_dir,
-                        unique_hangs);
+      fn = alloc_printf("%s/hangs/id_%06llu", out_dir, unique_hangs);
 
 #endif /* ^!SIMPLE_FILES */
 
@@ -638,7 +643,7 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
 
     case FAULT_CRASH:
 
-keep_as_crash:
+    keep_as_crash:
 
       /* This is handled in a manner roughly similar to timeouts,
          except for slightly different limits and no need to re-run test
diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c
index 1f52181d..f43c86f4 100644
--- a/src/afl-fuzz-extras.c
+++ b/src/afl-fuzz-extras.c
@@ -22,32 +22,32 @@
 
 #include "afl-fuzz.h"
 
-
 /* Helper function for load_extras. */
 
 static int compare_extras_len(const void* p1, const void* p2) {
-  struct extra_data *e1 = (struct extra_data*)p1,
-                    *e2 = (struct extra_data*)p2;
+
+  struct extra_data *e1 = (struct extra_data*)p1, *e2 = (struct extra_data*)p2;
 
   return e1->len - e2->len;
+
 }
 
 static int compare_extras_use_d(const void* p1, const void* p2) {
-  struct extra_data *e1 = (struct extra_data*)p1,
-                    *e2 = (struct extra_data*)p2;
+
+  struct extra_data *e1 = (struct extra_data*)p1, *e2 = (struct extra_data*)p2;
 
   return e2->hit_cnt - e1->hit_cnt;
-}
 
+}
 
 /* Read extras from a file, sort by size. */
 
 void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
 
   FILE* f;
-  u8  buf[MAX_LINE];
-  u8  *lptr;
-  u32 cur_line = 0;
+  u8    buf[MAX_LINE];
+  u8*   lptr;
+  u32   cur_line = 0;
 
   f = fopen(fname, "r");
 
@@ -62,10 +62,12 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
 
     /* Trim on left and right. */
 
-    while (isspace(*lptr)) ++lptr;
+    while (isspace(*lptr))
+      ++lptr;
 
     rptr = lptr + strlen(lptr) - 1;
-    while (rptr >= lptr && isspace(*rptr)) --rptr;
+    while (rptr >= lptr && isspace(*rptr))
+      --rptr;
     ++rptr;
     *rptr = 0;
 
@@ -84,7 +86,8 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
 
     /* Skip alphanumerics and dashes (label). */
 
-    while (isalnum(*lptr) || *lptr == '_') ++lptr;
+    while (isalnum(*lptr) || *lptr == '_')
+      ++lptr;
 
     /* If @number follows, parse that. */
 
@@ -92,13 +95,15 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
 
       ++lptr;
       if (atoi(lptr) > dict_level) continue;
-      while (isdigit(*lptr)) ++lptr;
+      while (isdigit(*lptr))
+        ++lptr;
 
     }
 
     /* Skip whitespace and = signs. */
 
-    while (isspace(*lptr) || *lptr == '=') ++lptr;
+    while (isspace(*lptr) || *lptr == '=')
+      ++lptr;
 
     /* Consume opening '"'. */
 
@@ -112,8 +117,8 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
     /* Okay, let's allocate memory and copy data between "...", handling
        \xNN escaping, \\, and \". */
 
-    extras = ck_realloc_block(extras, (extras_cnt + 1) *
-               sizeof(struct extra_data));
+    extras =
+        ck_realloc_block(extras, (extras_cnt + 1) * sizeof(struct extra_data));
 
     wptr = extras[extras_cnt].data = ck_alloc(rptr - lptr);
 
@@ -132,27 +137,25 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
           ++lptr;
 
           if (*lptr == '\\' || *lptr == '"') {
+
             *(wptr++) = *(lptr++);
             klen++;
             break;
+
           }
 
           if (*lptr != 'x' || !isxdigit(lptr[1]) || !isxdigit(lptr[2]))
             FATAL("Invalid escaping (not \\xNN) in line %u.", cur_line);
 
-          *(wptr++) =
-            ((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) |
-            (strchr(hexdigits, tolower(lptr[2])) - hexdigits);
+          *(wptr++) = ((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) |
+                      (strchr(hexdigits, tolower(lptr[2])) - hexdigits);
 
           lptr += 3;
           ++klen;
 
           break;
 
-        default:
-
-          *(wptr++) = *(lptr++);
-          ++klen;
+        default: *(wptr++) = *(lptr++); ++klen;
 
       }
 
@@ -161,8 +164,8 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
     extras[extras_cnt].len = klen;
 
     if (extras[extras_cnt].len > MAX_DICT_FILE)
-      FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line,
-            DMS(klen), DMS(MAX_DICT_FILE));
+      FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line, DMS(klen),
+            DMS(MAX_DICT_FILE));
 
     if (*min_len > klen) *min_len = klen;
     if (*max_len < klen) *max_len = klen;
@@ -175,15 +178,14 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
 
 }
 
-
 /* Read extras from the extras directory and sort them by size. */
 
 void load_extras(u8* dir) {
 
-  DIR* d;
+  DIR*           d;
   struct dirent* de;
-  u32 min_len = MAX_DICT_FILE, max_len = 0, dict_level = 0;
-  u8* x;
+  u32            min_len = MAX_DICT_FILE, max_len = 0, dict_level = 0;
+  u8*            x;
 
   /* If the name ends with @, extract level and continue. */
 
@@ -201,8 +203,10 @@ void load_extras(u8* dir) {
   if (!d) {
 
     if (errno == ENOTDIR) {
+
       load_extras_file(dir, &min_len, &max_len, dict_level);
       goto check_and_sort;
+
     }
 
     PFATAL("Unable to open '%s'", dir);
@@ -214,11 +218,10 @@ void load_extras(u8* dir) {
   while ((de = readdir(d))) {
 
     struct stat st;
-    u8* fn = alloc_printf("%s/%s", dir, de->d_name);
-    s32 fd;
+    u8*         fn = alloc_printf("%s/%s", dir, de->d_name);
+    s32         fd;
 
-    if (lstat(fn, &st) || access(fn, R_OK))
-      PFATAL("Unable to access '%s'", fn);
+    if (lstat(fn, &st) || access(fn, R_OK)) PFATAL("Unable to access '%s'", fn);
 
     /* This also takes care of . and .. */
     if (!S_ISREG(st.st_mode) || !st.st_size) {
@@ -229,17 +232,17 @@ void load_extras(u8* dir) {
     }
 
     if (st.st_size > MAX_DICT_FILE)
-      FATAL("Extra '%s' is too big (%s, limit is %s)", fn,
-            DMS(st.st_size), DMS(MAX_DICT_FILE));
+      FATAL("Extra '%s' is too big (%s, limit is %s)", fn, DMS(st.st_size),
+            DMS(MAX_DICT_FILE));
 
     if (min_len > st.st_size) min_len = st.st_size;
     if (max_len < st.st_size) max_len = st.st_size;
 
-    extras = ck_realloc_block(extras, (extras_cnt + 1) *
-               sizeof(struct extra_data));
+    extras =
+        ck_realloc_block(extras, (extras_cnt + 1) * sizeof(struct extra_data));
 
     extras[extras_cnt].data = ck_alloc(st.st_size);
-    extras[extras_cnt].len  = st.st_size;
+    extras[extras_cnt].len = st.st_size;
 
     fd = open(fn, O_RDONLY);
 
@@ -262,8 +265,8 @@ check_and_sort:
 
   qsort(extras, extras_cnt, sizeof(struct extra_data), compare_extras_len);
 
-  OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt,
-      DMS(min_len), DMS(max_len));
+  OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt, DMS(min_len),
+      DMS(max_len));
 
   if (max_len > 32)
     WARNF("Some tokens are relatively large (%s) - consider trimming.",
@@ -275,18 +278,16 @@ check_and_sort:
 
 }
 
-
-
 /* Helper function for maybe_add_auto() */
 
 static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) {
 
-  while (len--) if (tolower(*(m1++)) ^ tolower(*(m2++))) return 1;
+  while (len--)
+    if (tolower(*(m1++)) ^ tolower(*(m2++))) return 1;
   return 0;
 
 }
 
-
 /* Maybe add automatic extra. */
 
 void maybe_add_auto(u8* mem, u32 len) {
@@ -310,9 +311,10 @@ void maybe_add_auto(u8* mem, u32 len) {
 
     i = sizeof(interesting_16) >> 1;
 
-    while (i--) 
+    while (i--)
       if (*((u16*)mem) == interesting_16[i] ||
-          *((u16*)mem) == SWAP16(interesting_16[i])) return;
+          *((u16*)mem) == SWAP16(interesting_16[i]))
+        return;
 
   }
 
@@ -320,9 +322,10 @@ void maybe_add_auto(u8* mem, u32 len) {
 
     i = sizeof(interesting_32) >> 2;
 
-    while (i--) 
+    while (i--)
       if (*((u32*)mem) == interesting_32[i] ||
-          *((u32*)mem) == SWAP32(interesting_32[i])) return;
+          *((u32*)mem) == SWAP32(interesting_32[i]))
+        return;
 
   }
 
@@ -358,22 +361,21 @@ void maybe_add_auto(u8* mem, u32 len) {
 
   if (a_extras_cnt < MAX_AUTO_EXTRAS) {
 
-    a_extras = ck_realloc_block(a_extras, (a_extras_cnt + 1) *
-                                sizeof(struct extra_data));
+    a_extras = ck_realloc_block(a_extras,
+                                (a_extras_cnt + 1) * sizeof(struct extra_data));
 
     a_extras[a_extras_cnt].data = ck_memdup(mem, len);
-    a_extras[a_extras_cnt].len  = len;
+    a_extras[a_extras_cnt].len = len;
     ++a_extras_cnt;
 
   } else {
 
-    i = MAX_AUTO_EXTRAS / 2 +
-        UR((MAX_AUTO_EXTRAS + 1) / 2);
+    i = MAX_AUTO_EXTRAS / 2 + UR((MAX_AUTO_EXTRAS + 1) / 2);
 
     ck_free(a_extras[i].data);
 
-    a_extras[i].data    = ck_memdup(mem, len);
-    a_extras[i].len     = len;
+    a_extras[i].data = ck_memdup(mem, len);
+    a_extras[i].len = len;
     a_extras[i].hit_cnt = 0;
 
   }
@@ -387,12 +389,11 @@ sort_a_extras:
 
   /* Then, sort the top USE_AUTO_EXTRAS entries by size. */
 
-  qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt),
-        sizeof(struct extra_data), compare_extras_len);
+  qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt), sizeof(struct extra_data),
+        compare_extras_len);
 
 }
 
-
 /* Save automatically generated extras. */
 
 void save_auto(void) {
@@ -420,7 +421,6 @@ void save_auto(void) {
 
 }
 
-
 /* Load automatically generated extras. */
 
 void load_auto(void) {
@@ -458,24 +458,25 @@ void load_auto(void) {
 
   }
 
-  if (i) OKF("Loaded %u auto-discovered dictionary tokens.", i);
-  else OKF("No auto-generated dictionary tokens to reuse.");
+  if (i)
+    OKF("Loaded %u auto-discovered dictionary tokens.", i);
+  else
+    OKF("No auto-generated dictionary tokens to reuse.");
 
 }
 
-
 /* Destroy extras. */
 
 void destroy_extras(void) {
 
   u32 i;
 
-  for (i = 0; i < extras_cnt; ++i) 
+  for (i = 0; i < extras_cnt; ++i)
     ck_free(extras[i].data);
 
   ck_free(extras);
 
-  for (i = 0; i < a_extras_cnt; ++i) 
+  for (i = 0; i < a_extras_cnt; ++i)
     ck_free(a_extras[i].data);
 
   ck_free(a_extras);
diff --git a/src/afl-fuzz-globals.c b/src/afl-fuzz-globals.c
index e28c3099..8fded173 100644
--- a/src/afl-fuzz-globals.c
+++ b/src/afl-fuzz-globals.c
@@ -25,27 +25,13 @@
 /* MOpt:
    Lots of globals, but mostly for the status UI and other things where it
    really makes no sense to haul them around as function parameters. */
-u64 limit_time_puppet,
-    orig_hit_cnt_puppet,
-    last_limit_time_start,
-    tmp_pilot_time,
-    total_pacemaker_time,
-    total_puppet_find,
-    temp_puppet_find,
-    most_time_key,
-    most_time,
-    most_execs_key,
-    most_execs,
-    old_hit_count;
-
-s32 SPLICE_CYCLES_puppet,
-    limit_time_sig,
-    key_puppet,
-    key_module;
-
-double w_init = 0.9,
-      w_end = 0.3,
-      w_now;
+u64 limit_time_puppet, orig_hit_cnt_puppet, last_limit_time_start,
+    tmp_pilot_time, total_pacemaker_time, total_puppet_find, temp_puppet_find,
+    most_time_key, most_time, most_execs_key, most_execs, old_hit_count;
+
+s32 SPLICE_CYCLES_puppet, limit_time_sig, key_puppet, key_module;
+
+double w_init = 0.9, w_end = 0.3, w_now;
 
 s32 g_now;
 s32 g_max = 5000;
@@ -53,15 +39,13 @@ s32 g_max = 5000;
 u64 tmp_core_time;
 s32 swarm_now;
 
-double x_now[swarm_num][operator_num],
-       L_best[swarm_num][operator_num],
-       eff_best[swarm_num][operator_num],
-       G_best[operator_num],
-       v_now[swarm_num][operator_num],
-       probability_now[swarm_num][operator_num],
-       swarm_fitness[swarm_num];
+double x_now[swarm_num][operator_num], L_best[swarm_num][operator_num],
+    eff_best[swarm_num][operator_num], G_best[operator_num],
+    v_now[swarm_num][operator_num], probability_now[swarm_num][operator_num],
+    swarm_fitness[swarm_num];
 
-u64 stage_finds_puppet[swarm_num][operator_num],           /* Patterns found per fuzz stage    */
+u64 stage_finds_puppet[swarm_num]
+                      [operator_num],   /* Patterns found per fuzz stage    */
     stage_finds_puppet_v2[swarm_num][operator_num],
     stage_cycles_puppet_v2[swarm_num][operator_num],
     stage_cycles_puppet_v3[swarm_num][operator_num],
@@ -71,207 +55,197 @@ u64 stage_finds_puppet[swarm_num][operator_num],           /* Patterns found per
     core_operator_finds_puppet_v2[operator_num],
     core_operator_cycles_puppet[operator_num],
     core_operator_cycles_puppet_v2[operator_num],
-    core_operator_cycles_puppet_v3[operator_num];          /* Execs per fuzz stage             */
+    core_operator_cycles_puppet_v3[operator_num];   /* Execs per fuzz stage */
 
 double period_pilot_tmp = 5000.0;
-s32 key_lv;
-
-u8 *in_dir,                    /* Input directory with test cases  */
-    *out_dir,                   /* Working & output directory       */
-    *tmp_dir       ,            /* Temporary directory for input    */
-    *sync_dir,                  /* Synchronization directory        */
-    *sync_id,                   /* Fuzzer ID                        */
-    *power_name,                /* Power schedule name              */
-    *use_banner,                /* Display banner                   */
-    *in_bitmap,                 /* Input bitmap                     */
-    *file_extension,            /* File extension                   */
-    *orig_cmdline;              /* Original command line            */
-u8  *doc_path,                  /* Path to documentation dir        */
-    *target_path,               /* Path to target binary            */
-    *out_file;                  /* File to fuzz, if any             */
-
-u32 exec_tmout = EXEC_TIMEOUT; /* Configurable exec timeout (ms)   */
-u32 hang_tmout = EXEC_TIMEOUT; /* Timeout used for hang det (ms)   */
-
-u64 mem_limit  = MEM_LIMIT;    /* Memory cap for child (MB)        */
-
-u8  cal_cycles = CAL_CYCLES,   /* Calibration cycles defaults      */
-    cal_cycles_long = CAL_CYCLES_LONG,
-    debug,                     /* Debug mode                       */
-    python_only;               /* Python-only mode                 */
-
-u32 stats_update_freq = 1;     /* Stats update frequency (execs)   */
-
-char *power_names[POWER_SCHEDULES_NUM] = {
-  "explore",
-  "fast",
-  "coe",
-  "lin",
-  "quad",
-  "exploit"
-};
-
-u8 schedule = EXPLORE;         /* Power schedule (default: EXPLORE)*/
+s32    key_lv;
+
+u8 *in_dir,                             /* Input directory with test cases  */
+    *out_dir,                           /* Working & output directory       */
+    *tmp_dir,                           /* Temporary directory for input    */
+    *sync_dir,                          /* Synchronization directory        */
+    *sync_id,                           /* Fuzzer ID                        */
+    *power_name,                        /* Power schedule name              */
+    *use_banner,                        /* Display banner                   */
+    *in_bitmap,                         /* Input bitmap                     */
+    *file_extension,                    /* File extension                   */
+    *orig_cmdline;                      /* Original command line            */
+u8 *doc_path,                           /* Path to documentation dir        */
+    *target_path,                       /* Path to target binary            */
+    *out_file;                          /* File to fuzz, if any             */
+
+u32 exec_tmout = EXEC_TIMEOUT;          /* Configurable exec timeout (ms)   */
+u32 hang_tmout = EXEC_TIMEOUT;          /* Timeout used for hang det (ms)   */
+
+u64 mem_limit = MEM_LIMIT;              /* Memory cap for child (MB)        */
+
+u8 cal_cycles = CAL_CYCLES,             /* Calibration cycles defaults      */
+    cal_cycles_long = CAL_CYCLES_LONG, debug,                 /* Debug mode */
+    python_only;                        /* Python-only mode                 */
+
+u32 stats_update_freq = 1;              /* Stats update frequency (execs)   */
+
+char *power_names[POWER_SCHEDULES_NUM] = {"explore", "fast", "coe",
+                                          "lin",     "quad", "exploit"};
+
+u8 schedule = EXPLORE;                  /* Power schedule (default: EXPLORE)*/
 u8 havoc_max_mult = HAVOC_MAX_MULT;
 
-u8  skip_deterministic,        /* Skip deterministic stages?       */
-    force_deterministic,       /* Force deterministic stages?      */
-    use_splicing,              /* Recombine input files?           */
-    dumb_mode,                 /* Run in non-instrumented mode?    */
-    score_changed,             /* Scoring for favorites changed?   */
-    kill_signal,               /* Signal that killed the child     */
-    resuming_fuzz,             /* Resuming an older fuzzing job?   */
-    timeout_given,             /* Specific timeout given?          */
-    not_on_tty,                /* stdout is not a tty              */
-    term_too_small,            /* terminal dimensions too small    */
-    no_forkserver,             /* Disable forkserver?              */
-    crash_mode,                /* Crash mode! Yeah!                */
-    in_place_resume,           /* Attempt in-place resume?         */
-    auto_changed,              /* Auto-generated tokens changed?   */
-    no_cpu_meter_red,          /* Feng shui on the status screen   */
-    no_arith,                  /* Skip most arithmetic ops         */
-    shuffle_queue,             /* Shuffle input queue?             */
-    bitmap_changed = 1,        /* Time to update bitmap?           */
-    qemu_mode,                 /* Running in QEMU mode?            */
-    unicorn_mode,              /* Running in Unicorn mode?         */
-    skip_requested,            /* Skip request, via SIGUSR1        */
-    run_over10m,               /* Run time over 10 minutes?        */
-    persistent_mode,           /* Running in persistent mode?      */
-    deferred_mode,             /* Deferred forkserver mode?        */
-    fixed_seed,                /* do not reseed                    */
-    fast_cal,                  /* Try to calibrate faster?         */
-    uses_asan;                 /* Target uses ASAN?                */
-
-s32 out_fd,                    /* Persistent fd for out_file       */
+u8 skip_deterministic,                  /* Skip deterministic stages?       */
+    force_deterministic,                /* Force deterministic stages?      */
+    use_splicing,                       /* Recombine input files?           */
+    dumb_mode,                          /* Run in non-instrumented mode?    */
+    score_changed,                      /* Scoring for favorites changed?   */
+    kill_signal,                        /* Signal that killed the child     */
+    resuming_fuzz,                      /* Resuming an older fuzzing job?   */
+    timeout_given,                      /* Specific timeout given?          */
+    not_on_tty,                         /* stdout is not a tty              */
+    term_too_small,                     /* terminal dimensions too small    */
+    no_forkserver,                      /* Disable forkserver?              */
+    crash_mode,                         /* Crash mode! Yeah!                */
+    in_place_resume,                    /* Attempt in-place resume?         */
+    auto_changed,                       /* Auto-generated tokens changed?   */
+    no_cpu_meter_red,                   /* Feng shui on the status screen   */
+    no_arith,                           /* Skip most arithmetic ops         */
+    shuffle_queue,                      /* Shuffle input queue?             */
+    bitmap_changed = 1,                 /* Time to update bitmap?           */
+    qemu_mode,                          /* Running in QEMU mode?            */
+    unicorn_mode,                       /* Running in Unicorn mode?         */
+    skip_requested,                     /* Skip request, via SIGUSR1        */
+    run_over10m,                        /* Run time over 10 minutes?        */
+    persistent_mode,                    /* Running in persistent mode?      */
+    deferred_mode,                      /* Deferred forkserver mode?        */
+    fixed_seed,                         /* do not reseed                    */
+    fast_cal,                           /* Try to calibrate faster?         */
+    uses_asan;                          /* Target uses ASAN?                */
+
+s32 out_fd,                             /* Persistent fd for out_file       */
 #ifndef HAVE_ARC4RANDOM
-           dev_urandom_fd = -1,       /* Persistent fd for /dev/urandom   */
+    dev_urandom_fd = -1,                /* Persistent fd for /dev/urandom   */
 #endif
-           dev_null_fd = -1,          /* Persistent fd for /dev/null      */
-           fsrv_ctl_fd,               /* Fork server control pipe (write) */
-           fsrv_st_fd;                /* Fork server status pipe (read)   */
-
-       s32 forksrv_pid,               /* PID of the fork server           */
-           child_pid = -1,            /* PID of the fuzzed program        */
-           out_dir_fd = -1;           /* FD of the lock file              */
-
-       u8* trace_bits;                /* SHM with instrumentation bitmap  */
-
-u8  virgin_bits[MAP_SIZE],     /* Regions yet untouched by fuzzing */
-           virgin_tmout[MAP_SIZE],    /* Bits we haven't seen in tmouts   */
-           virgin_crash[MAP_SIZE];    /* Bits we haven't seen in crashes  */
-
-u8  var_bytes[MAP_SIZE];       /* Bytes that appear to be variable */
-
-volatile u8 stop_soon,         /* Ctrl-C pressed?                  */
-            clear_screen = 1,  /* Window resized?                  */
-            child_timed_out;   /* Traced process timed out?        */
-
-u32 queued_paths,              /* Total number of queued testcases */
-    queued_variable,           /* Testcases with variable behavior */
-    queued_at_start,           /* Total number of initial inputs   */
-    queued_discovered,         /* Items discovered during this run */
-    queued_imported,           /* Items imported via -S            */
-    queued_favored,            /* Paths deemed favorable           */
-    queued_with_cov,           /* Paths with new coverage bytes    */
-    pending_not_fuzzed,        /* Queued but not done yet          */
-    pending_favored,           /* Pending favored paths            */
-    cur_skipped_paths,         /* Abandoned inputs in cur cycle    */
-    cur_depth,                 /* Current path depth               */
-    max_depth,                 /* Max path depth                   */
-    useless_at_start,          /* Number of useless starting paths */
-    var_byte_count,            /* Bitmap bytes with var behavior   */
-    current_entry,             /* Current queue entry ID           */
-    havoc_div = 1;             /* Cycle count divisor for havoc    */
-
-u64 total_crashes,             /* Total number of crashes          */
-    unique_crashes,            /* Crashes with unique signatures   */
-    total_tmouts,              /* Total number of timeouts         */
-    unique_tmouts,             /* Timeouts with unique signatures  */
-    unique_hangs,              /* Hangs with unique signatures     */
-    total_execs,               /* Total execve() calls             */
-    slowest_exec_ms,           /* Slowest testcase non hang in ms  */
-    start_time,                /* Unix start time (ms)             */
-    last_path_time,            /* Time for most recent path (ms)   */
-    last_crash_time,           /* Time for most recent crash (ms)  */
-    last_hang_time,            /* Time for most recent hang (ms)   */
-    last_crash_execs,          /* Exec counter at last crash       */
-    queue_cycle,               /* Queue round counter              */
-    cycles_wo_finds,           /* Cycles without any new paths     */
-    trim_execs,                /* Execs done to trim input files   */
-    bytes_trim_in,             /* Bytes coming into the trimmer    */
-    bytes_trim_out,            /* Bytes coming outa the trimmer    */
-    blocks_eff_total,          /* Blocks subject to effector maps  */
-    blocks_eff_select;         /* Blocks selected as fuzzable      */
-
-u32 subseq_tmouts;             /* Number of timeouts in a row      */
-
-u8 *stage_name = "init",       /* Name of the current fuzz stage   */
-          *stage_short,               /* Short stage name                 */
-          *syncing_party;             /* Currently syncing with...        */
-
-s32 stage_cur, stage_max;      /* Stage progression                */
-s32 splicing_with = -1;        /* Splicing with which test case?   */
-
-u32 master_id, master_max;     /* Master instance job splitting    */
-
-u32 syncing_case;              /* Syncing with case #...           */
-
-s32 stage_cur_byte,            /* Byte offset of current stage op  */
-           stage_cur_val;             /* Value used for stage op          */
-
-u8  stage_val_type;            /* Value type (STAGE_VAL_*)         */
-
-u64 stage_finds[32],           /* Patterns found per fuzz stage    */
-           stage_cycles[32];          /* Execs per fuzz stage             */
+    dev_null_fd = -1,                   /* Persistent fd for /dev/null      */
+    fsrv_ctl_fd,                        /* Fork server control pipe (write) */
+    fsrv_st_fd;                         /* Fork server status pipe (read)   */
+
+s32 forksrv_pid,                        /* PID of the fork server           */
+    child_pid = -1,                     /* PID of the fuzzed program        */
+    out_dir_fd = -1;                    /* FD of the lock file              */
+
+u8 *trace_bits;                         /* SHM with instrumentation bitmap  */
+
+u8 virgin_bits[MAP_SIZE],               /* Regions yet untouched by fuzzing */
+    virgin_tmout[MAP_SIZE],             /* Bits we haven't seen in tmouts   */
+    virgin_crash[MAP_SIZE];             /* Bits we haven't seen in crashes  */
+
+u8 var_bytes[MAP_SIZE];                 /* Bytes that appear to be variable */
+
+volatile u8 stop_soon,                  /* Ctrl-C pressed?                  */
+    clear_screen = 1,                   /* Window resized?                  */
+    child_timed_out;                    /* Traced process timed out?        */
+
+u32 queued_paths,                       /* Total number of queued testcases */
+    queued_variable,                    /* Testcases with variable behavior */
+    queued_at_start,                    /* Total number of initial inputs   */
+    queued_discovered,                  /* Items discovered during this run */
+    queued_imported,                    /* Items imported via -S            */
+    queued_favored,                     /* Paths deemed favorable           */
+    queued_with_cov,                    /* Paths with new coverage bytes    */
+    pending_not_fuzzed,                 /* Queued but not done yet          */
+    pending_favored,                    /* Pending favored paths            */
+    cur_skipped_paths,                  /* Abandoned inputs in cur cycle    */
+    cur_depth,                          /* Current path depth               */
+    max_depth,                          /* Max path depth                   */
+    useless_at_start,                   /* Number of useless starting paths */
+    var_byte_count,                     /* Bitmap bytes with var behavior   */
+    current_entry,                      /* Current queue entry ID           */
+    havoc_div = 1;                      /* Cycle count divisor for havoc    */
+
+u64 total_crashes,                      /* Total number of crashes          */
+    unique_crashes,                     /* Crashes with unique signatures   */
+    total_tmouts,                       /* Total number of timeouts         */
+    unique_tmouts,                      /* Timeouts with unique signatures  */
+    unique_hangs,                       /* Hangs with unique signatures     */
+    total_execs,                        /* Total execve() calls             */
+    slowest_exec_ms,                    /* Slowest testcase non hang in ms  */
+    start_time,                         /* Unix start time (ms)             */
+    last_path_time,                     /* Time for most recent path (ms)   */
+    last_crash_time,                    /* Time for most recent crash (ms)  */
+    last_hang_time,                     /* Time for most recent hang (ms)   */
+    last_crash_execs,                   /* Exec counter at last crash       */
+    queue_cycle,                        /* Queue round counter              */
+    cycles_wo_finds,                    /* Cycles without any new paths     */
+    trim_execs,                         /* Execs done to trim input files   */
+    bytes_trim_in,                      /* Bytes coming into the trimmer    */
+    bytes_trim_out,                     /* Bytes coming outa the trimmer    */
+    blocks_eff_total,                   /* Blocks subject to effector maps  */
+    blocks_eff_select;                  /* Blocks selected as fuzzable      */
+
+u32 subseq_tmouts;                      /* Number of timeouts in a row      */
+
+u8 *stage_name = "init",                /* Name of the current fuzz stage   */
+    *stage_short,                       /* Short stage name                 */
+    *syncing_party;                     /* Currently syncing with...        */
+
+s32 stage_cur, stage_max;               /* Stage progression                */
+s32 splicing_with = -1;                 /* Splicing with which test case?   */
+
+u32 master_id, master_max;              /* Master instance job splitting    */
+
+u32 syncing_case;                       /* Syncing with case #...           */
+
+s32 stage_cur_byte,                     /* Byte offset of current stage op  */
+    stage_cur_val;                      /* Value used for stage op          */
+
+u8 stage_val_type;                      /* Value type (STAGE_VAL_*)         */
+
+u64 stage_finds[32],                    /* Patterns found per fuzz stage    */
+    stage_cycles[32];                   /* Execs per fuzz stage             */
 
 #ifndef HAVE_ARC4RANDOM
-u32 rand_cnt;                  /* Random number counter            */
+u32 rand_cnt;                           /* Random number counter            */
 #endif
 
-u64 total_cal_us,              /* Total calibration time (us)      */
-           total_cal_cycles;          /* Total calibration cycles         */
+u64 total_cal_us,                       /* Total calibration time (us)      */
+    total_cal_cycles;                   /* Total calibration cycles         */
 
-u64 total_bitmap_size,         /* Total bit count for all bitmaps  */
-           total_bitmap_entries;      /* Number of bitmaps counted        */
+u64 total_bitmap_size,                  /* Total bit count for all bitmaps  */
+    total_bitmap_entries;               /* Number of bitmaps counted        */
 
-s32 cpu_core_count;            /* CPU core count                   */
+s32 cpu_core_count;                     /* CPU core count                   */
 
 #ifdef HAVE_AFFINITY
 
-s32 cpu_aff = -1;       	      /* Selected CPU core                */
+s32 cpu_aff = -1;                       /* Selected CPU core                */
 
 #endif /* HAVE_AFFINITY */
 
-FILE* plot_file;               /* Gnuplot output file              */
+FILE *plot_file;                        /* Gnuplot output file              */
 
+struct queue_entry *queue,              /* Fuzzing queue (linked list)      */
+    *queue_cur,                         /* Current offset within the queue  */
+    *queue_top,                         /* Top of the list                  */
+    *q_prev100;                         /* Previous 100 marker              */
 
+struct queue_entry *top_rated[MAP_SIZE]; /* Top entries for bitmap bytes     */
 
-struct queue_entry *queue,     /* Fuzzing queue (linked list)      */
-                          *queue_cur, /* Current offset within the queue  */
-                          *queue_top, /* Top of the list                  */
-                          *q_prev100; /* Previous 100 marker              */
+struct extra_data *extras;              /* Extra tokens to fuzz with        */
+u32                extras_cnt;          /* Total number of tokens read      */
 
-struct queue_entry*
-  top_rated[MAP_SIZE];                /* Top entries for bitmap bytes     */
+struct extra_data *a_extras;            /* Automatically selected extras    */
+u32                a_extras_cnt;        /* Total number of tokens available */
 
-struct extra_data* extras;     /* Extra tokens to fuzz with        */
-u32 extras_cnt;                /* Total number of tokens read      */
-
-struct extra_data* a_extras;   /* Automatically selected extras    */
-u32 a_extras_cnt;              /* Total number of tokens available */
-
-u8* (*post_handler)(u8* buf, u32* len);
+u8 *(*post_handler)(u8 *buf, u32 *len);
 
 /* hooks for the custom mutator function */
-size_t (*custom_mutator)(u8 *data, size_t size, u8* mutated_out, size_t max_size, unsigned int seed);
+size_t (*custom_mutator)(u8 *data, size_t size, u8 *mutated_out,
+                         size_t max_size, unsigned int seed);
 size_t (*pre_save_handler)(u8 *data, size_t size, u8 **new_data);
 
-
 /* Interesting values, as per config.h */
 
-s8  interesting_8[]  = { INTERESTING_8 };
-s16 interesting_16[] = { INTERESTING_8, INTERESTING_16 };
-s32 interesting_32[] = { INTERESTING_8, INTERESTING_16, INTERESTING_32 };
+s8  interesting_8[] = {INTERESTING_8};
+s16 interesting_16[] = {INTERESTING_8, INTERESTING_16};
+s32 interesting_32[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32};
 
 /* Python stuff */
 #ifdef USE_PYTHON
diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c
index f66db74c..8a3ee6fa 100644
--- a/src/afl-fuzz-init.c
+++ b/src/afl-fuzz-init.c
@@ -22,7 +22,6 @@
 
 #include "afl-fuzz.h"
 
-
 #ifdef HAVE_AFFINITY
 
 /* Build a list of processes bound to specific cores. Returns -1 if nothing
@@ -30,11 +29,11 @@
 
 void bind_to_free_cpu(void) {
 
-  DIR* d;
+  DIR*           d;
   struct dirent* de;
-  cpu_set_t c;
+  cpu_set_t      c;
 
-  u8 cpu_used[4096] = { 0 };
+  u8  cpu_used[4096] = {0};
   u32 i;
 
   if (cpu_core_count < 2) return;
@@ -69,18 +68,20 @@ void bind_to_free_cpu(void) {
 
   while ((de = readdir(d))) {
 
-    u8* fn;
+    u8*   fn;
     FILE* f;
-    u8 tmp[MAX_LINE];
-    u8 has_vmsize = 0;
+    u8    tmp[MAX_LINE];
+    u8    has_vmsize = 0;
 
     if (!isdigit(de->d_name[0])) continue;
 
     fn = alloc_printf("/proc/%s/status", de->d_name);
 
     if (!(f = fopen(fn, "r"))) {
+
       ck_free(fn);
       continue;
+
     }
 
     while (fgets(tmp, MAX_LINE, f)) {
@@ -91,10 +92,9 @@ void bind_to_free_cpu(void) {
 
       if (!strncmp(tmp, "VmSize:\t", 8)) has_vmsize = 1;
 
-      if (!strncmp(tmp, "Cpus_allowed_list:\t", 19) &&
-          !strchr(tmp, '-') && !strchr(tmp, ',') &&
-          sscanf(tmp + 19, "%u", &hval) == 1 && hval < sizeof(cpu_used) &&
-          has_vmsize) {
+      if (!strncmp(tmp, "Cpus_allowed_list:\t", 19) && !strchr(tmp, '-') &&
+          !strchr(tmp, ',') && sscanf(tmp + 19, "%u", &hval) == 1 &&
+          hval < sizeof(cpu_used) && has_vmsize) {
 
         cpu_used[hval] = 1;
         break;
@@ -110,14 +110,17 @@ void bind_to_free_cpu(void) {
 
   closedir(d);
 
-  for (i = 0; i < cpu_core_count; ++i) if (!cpu_used[i]) break;
+  for (i = 0; i < cpu_core_count; ++i)
+    if (!cpu_used[i]) break;
 
   if (i == cpu_core_count) {
 
     SAYF("\n" cLRD "[-] " cRST
          "Uh-oh, looks like all %d CPU cores on your system are allocated to\n"
-         "    other instances of afl-fuzz (or similar CPU-locked tasks). Starting\n"
-         "    another fuzzer on this machine is probably a bad plan, but if you are\n"
+         "    other instances of afl-fuzz (or similar CPU-locked tasks). "
+         "Starting\n"
+         "    another fuzzer on this machine is probably a bad plan, but if "
+         "you are\n"
          "    absolutely sure, you can set AFL_NO_AFFINITY and try again.\n",
          cpu_core_count);
 
@@ -132,8 +135,7 @@ void bind_to_free_cpu(void) {
   CPU_ZERO(&c);
   CPU_SET(i, &c);
 
-  if (sched_setaffinity(0, sizeof(c), &c))
-    PFATAL("sched_setaffinity failed");
+  if (sched_setaffinity(0, sizeof(c), &c)) PFATAL("sched_setaffinity failed");
 
 }
 
@@ -144,8 +146,8 @@ void bind_to_free_cpu(void) {
 void setup_post(void) {
 
   void* dh;
-  u8* fn = getenv("AFL_POST_LIBRARY");
-  u32 tlen = 6;
+  u8*   fn = getenv("AFL_POST_LIBRARY");
+  u32   tlen = 6;
 
   if (!fn) return;
 
@@ -166,8 +168,9 @@ void setup_post(void) {
 }
 
 void setup_custom_mutator(void) {
+
   void* dh;
-  u8* fn = getenv("AFL_CUSTOM_MUTATOR_LIBRARY");
+  u8*   fn = getenv("AFL_CUSTOM_MUTATOR_LIBRARY");
 
   if (!fn) return;
 
@@ -180,11 +183,11 @@ void setup_custom_mutator(void) {
   if (!custom_mutator) FATAL("Symbol 'afl_custom_mutator' not found.");
 
   pre_save_handler = dlsym(dh, "afl_pre_save_handler");
-//  if (!pre_save_handler) WARNF("Symbol 'afl_pre_save_handler' not found.");
+  //  if (!pre_save_handler) WARNF("Symbol 'afl_pre_save_handler' not found.");
 
   OKF("Custom mutator installed successfully.");
-}
 
+}
 
 /* Shuffle an array of pointers. Might be slightly biased. */
 
@@ -194,8 +197,8 @@ static void shuffle_ptrs(void** ptrs, u32 cnt) {
 
   for (i = 0; i < cnt - 2; ++i) {
 
-    u32 j = i + UR(cnt - i);
-    void *s = ptrs[i];
+    u32   j = i + UR(cnt - i);
+    void* s = ptrs[i];
     ptrs[i] = ptrs[j];
     ptrs[j] = s;
 
@@ -208,15 +211,18 @@ static void shuffle_ptrs(void** ptrs, u32 cnt) {
 
 void read_testcases(void) {
 
-  struct dirent **nl;
-  s32 nl_cnt;
-  u32 i;
-  u8* fn1;
+  struct dirent** nl;
+  s32             nl_cnt;
+  u32             i;
+  u8*             fn1;
 
   /* Auto-detect non-in-place resumption attempts. */
 
   fn1 = alloc_printf("%s/queue", in_dir);
-  if (!access(fn1, F_OK)) in_dir = fn1; else ck_free(fn1);
+  if (!access(fn1, F_OK))
+    in_dir = fn1;
+  else
+    ck_free(fn1);
 
   ACTF("Scanning '%s'...", in_dir);
 
@@ -231,9 +237,12 @@ void read_testcases(void) {
     if (errno == ENOENT || errno == ENOTDIR)
 
       SAYF("\n" cLRD "[-] " cRST
-           "The input directory does not seem to be valid - try again. The fuzzer needs\n"
-           "    one or more test case to start with - ideally, a small file under 1 kB\n"
-           "    or so. The cases must be stored as regular files directly in the input\n"
+           "The input directory does not seem to be valid - try again. The "
+           "fuzzer needs\n"
+           "    one or more test case to start with - ideally, a small file "
+           "under 1 kB\n"
+           "    or so. The cases must be stored as regular files directly in "
+           "the input\n"
            "    directory.\n");
 
     PFATAL("Unable to open '%s'", in_dir);
@@ -252,12 +261,13 @@ void read_testcases(void) {
     struct stat st;
 
     u8* fn2 = alloc_printf("%s/%s", in_dir, nl[i]->d_name);
-    u8* dfn = alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name);
+    u8* dfn =
+        alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name);
+
+    u8 passed_det = 0;
 
-    u8  passed_det = 0;
+    free(nl[i]);                                             /* not tracked */
 
-    free(nl[i]); /* not tracked */
- 
     if (lstat(fn2, &st) || access(fn2, R_OK))
       PFATAL("Unable to access '%s'", fn2);
 
@@ -271,9 +281,9 @@ void read_testcases(void) {
 
     }
 
-    if (st.st_size > MAX_FILE) 
-      FATAL("Test case '%s' is too big (%s, limit is %s)", fn2,
-            DMS(st.st_size), DMS(MAX_FILE));
+    if (st.st_size > MAX_FILE)
+      FATAL("Test case '%s' is too big (%s, limit is %s)", fn2, DMS(st.st_size),
+            DMS(MAX_FILE));
 
     /* Check for metadata that indicates that deterministic fuzzing
        is complete for this entry. We don't want to repeat deterministic
@@ -287,14 +297,17 @@ void read_testcases(void) {
 
   }
 
-  free(nl); /* not tracked */
+  free(nl);                                                  /* not tracked */
 
   if (!queued_paths) {
 
     SAYF("\n" cLRD "[-] " cRST
-         "Looks like there are no valid test cases in the input directory! The fuzzer\n"
-         "    needs one or more test case to start with - ideally, a small file under\n"
-         "    1 kB or so. The cases must be stored as regular files directly in the\n"
+         "Looks like there are no valid test cases in the input directory! The "
+         "fuzzer\n"
+         "    needs one or more test case to start with - ideally, a small "
+         "file under\n"
+         "    1 kB or so. The cases must be stored as regular files directly "
+         "in the\n"
          "    input directory.\n");
 
     FATAL("No usable test cases in '%s'", in_dir);
@@ -306,7 +319,6 @@ void read_testcases(void) {
 
 }
 
-
 /* Examine map coverage. Called once, for first test case. */
 
 static void check_map_coverage(void) {
@@ -322,15 +334,14 @@ static void check_map_coverage(void) {
 
 }
 
-
 /* Perform dry run of all test cases to confirm that the app is working as
    expected. This is done only for the initial inputs, and only once. */
 
 void perform_dry_run(char** argv) {
 
   struct queue_entry* q = queue;
-  u32 cal_failures = 0;
-  u8* skip_crashes = getenv("AFL_SKIP_CRASHES");
+  u32                 cal_failures = 0;
+  u8*                 skip_crashes = getenv("AFL_SKIP_CRASHES");
 
   while (q) {
 
@@ -358,7 +369,7 @@ void perform_dry_run(char** argv) {
     if (stop_soon) return;
 
     if (res == crash_mode || res == FAULT_NOBITS)
-      SAYF(cGRA "    len = %u, map size = %u, exec speed = %llu us\n" cRST, 
+      SAYF(cGRA "    len = %u, map size = %u, exec speed = %llu us\n" cRST,
            q->len, q->bitmap_size, q->exec_us);
 
     switch (res) {
@@ -380,90 +391,119 @@ void perform_dry_run(char** argv) {
              out. */
 
           if (timeout_given > 1) {
+
             WARNF("Test case results in a timeout (skipping)");
             q->cal_failed = CAL_CHANCES;
             ++cal_failures;
             break;
+
           }
 
           SAYF("\n" cLRD "[-] " cRST
-               "The program took more than %u ms to process one of the initial test cases.\n"
-               "    Usually, the right thing to do is to relax the -t option - or to delete it\n"
-               "    altogether and allow the fuzzer to auto-calibrate. That said, if you know\n"
-               "    what you are doing and want to simply skip the unruly test cases, append\n"
-               "    '+' at the end of the value passed to -t ('-t %u+').\n", exec_tmout,
-               exec_tmout);
+               "The program took more than %u ms to process one of the initial "
+               "test cases.\n"
+               "    Usually, the right thing to do is to relax the -t option - "
+               "or to delete it\n"
+               "    altogether and allow the fuzzer to auto-calibrate. That "
+               "said, if you know\n"
+               "    what you are doing and want to simply skip the unruly test "
+               "cases, append\n"
+               "    '+' at the end of the value passed to -t ('-t %u+').\n",
+               exec_tmout, exec_tmout);
 
           FATAL("Test case '%s' results in a timeout", fn);
 
         } else {
 
           SAYF("\n" cLRD "[-] " cRST
-               "The program took more than %u ms to process one of the initial test cases.\n"
-               "    This is bad news; raising the limit with the -t option is possible, but\n"
+               "The program took more than %u ms to process one of the initial "
+               "test cases.\n"
+               "    This is bad news; raising the limit with the -t option is "
+               "possible, but\n"
                "    will probably make the fuzzing process extremely slow.\n\n"
 
-               "    If this test case is just a fluke, the other option is to just avoid it\n"
-               "    altogether, and find one that is less of a CPU hog.\n", exec_tmout);
+               "    If this test case is just a fluke, the other option is to "
+               "just avoid it\n"
+               "    altogether, and find one that is less of a CPU hog.\n",
+               exec_tmout);
 
           FATAL("Test case '%s' results in a timeout", fn);
 
         }
 
-      case FAULT_CRASH:  
+      case FAULT_CRASH:
 
         if (crash_mode) break;
 
         if (skip_crashes) {
+
           WARNF("Test case results in a crash (skipping)");
           q->cal_failed = CAL_CHANCES;
           ++cal_failures;
           break;
+
         }
 
         if (mem_limit) {
 
           SAYF("\n" cLRD "[-] " cRST
-               "Oops, the program crashed with one of the test cases provided. There are\n"
+               "Oops, the program crashed with one of the test cases provided. "
+               "There are\n"
                "    several possible explanations:\n\n"
 
-               "    - The test case causes known crashes under normal working conditions. If\n"
-               "      so, please remove it. The fuzzer should be seeded with interesting\n"
+               "    - The test case causes known crashes under normal working "
+               "conditions. If\n"
+               "      so, please remove it. The fuzzer should be seeded with "
+               "interesting\n"
                "      inputs - but not ones that cause an outright crash.\n\n"
 
-               "    - The current memory limit (%s) is too low for this program, causing\n"
-               "      it to die due to OOM when parsing valid files. To fix this, try\n"
-               "      bumping it up with the -m setting in the command line. If in doubt,\n"
+               "    - The current memory limit (%s) is too low for this "
+               "program, causing\n"
+               "      it to die due to OOM when parsing valid files. To fix "
+               "this, try\n"
+               "      bumping it up with the -m setting in the command line. "
+               "If in doubt,\n"
                "      try something along the lines of:\n\n"
 
-               MSG_ULIMIT_USAGE " /path/to/binary [...] <testcase )\n\n"
+               MSG_ULIMIT_USAGE
+               " /path/to/binary [...] <testcase )\n\n"
 
-               "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
-               "      estimate the required amount of virtual memory for the binary. Also,\n"
+               "      Tip: you can use http://jwilk.net/software/recidivm to "
+               "quickly\n"
+               "      estimate the required amount of virtual memory for the "
+               "binary. Also,\n"
                "      if you are using ASAN, see %s/notes_for_asan.txt.\n\n"
 
                MSG_FORK_ON_APPLE
 
-               "    - Least likely, there is a horrible bug in the fuzzer. If other options\n"
-               "      fail, poke <afl-users@googlegroups.com> for troubleshooting tips.\n",
+               "    - Least likely, there is a horrible bug in the fuzzer. If "
+               "other options\n"
+               "      fail, poke <afl-users@googlegroups.com> for "
+               "troubleshooting tips.\n",
                DMS(mem_limit << 20), mem_limit - 1, doc_path);
 
         } else {
 
           SAYF("\n" cLRD "[-] " cRST
-               "Oops, the program crashed with one of the test cases provided. There are\n"
+               "Oops, the program crashed with one of the test cases provided. "
+               "There are\n"
                "    several possible explanations:\n\n"
 
-               "    - The test case causes known crashes under normal working conditions. If\n"
-               "      so, please remove it. The fuzzer should be seeded with interesting\n"
+               "    - The test case causes known crashes under normal working "
+               "conditions. If\n"
+               "      so, please remove it. The fuzzer should be seeded with "
+               "interesting\n"
                "      inputs - but not ones that cause an outright crash.\n\n"
 
                MSG_FORK_ON_APPLE
 
-               "    - Least likely, there is a horrible bug in the fuzzer. If other options\n"
-               "      fail, poke <afl-users@googlegroups.com> for troubleshooting tips.\n");
+               "    - Least likely, there is a horrible bug in the fuzzer. If "
+               "other options\n"
+               "      fail, poke <afl-users@googlegroups.com> for "
+               "troubleshooting tips.\n");
 
         }
+
 #undef MSG_ULIMIT_USAGE
 #undef MSG_FORK_ON_APPLE
 
@@ -473,11 +513,9 @@ void perform_dry_run(char** argv) {
 
         FATAL("Unable to execute target application ('%s')", argv[0]);
 
-      case FAULT_NOINST:
+      case FAULT_NOINST: FATAL("No instrumentation detected");
 
-        FATAL("No instrumentation detected");
-
-      case FAULT_NOBITS: 
+      case FAULT_NOBITS:
 
         ++useless_at_start;
 
@@ -513,7 +551,6 @@ void perform_dry_run(char** argv) {
 
 }
 
-
 /* Helper function: link() if possible, copy otherwise. */
 
 static void link_or_copy(u8* old_path, u8* new_path) {
@@ -532,7 +569,7 @@ static void link_or_copy(u8* old_path, u8* new_path) {
 
   tmp = ck_alloc(64 * 1024);
 
-  while ((i = read(sfd, tmp, 64 * 1024)) > 0) 
+  while ((i = read(sfd, tmp, 64 * 1024)) > 0)
     ck_write(dfd, tmp, i, new_path);
 
   if (i < 0) PFATAL("read() failed");
@@ -543,23 +580,25 @@ static void link_or_copy(u8* old_path, u8* new_path) {
 
 }
 
-
 /* Create hard links for input test cases in the output directory, choosing
    good names and pivoting accordingly. */
 
 void pivot_inputs(void) {
 
   struct queue_entry* q = queue;
-  u32 id = 0;
+  u32                 id = 0;
 
   ACTF("Creating hard links for all input files...");
 
   while (q) {
 
-    u8  *nfn, *rsl = strrchr(q->fname, '/');
+    u8 *nfn, *rsl = strrchr(q->fname, '/');
     u32 orig_id;
 
-    if (!rsl) rsl = q->fname; else ++rsl;
+    if (!rsl)
+      rsl = q->fname;
+    else
+      ++rsl;
 
     /* If the original file name conforms to the syntax and the recorded
        ID matches the one we'd assign, just use the original file name.
@@ -582,7 +621,8 @@ void pivot_inputs(void) {
       if (src_str && sscanf(src_str + 1, "%06u", &src_id) == 1) {
 
         struct queue_entry* s = queue;
-        while (src_id-- && s) s = s->next;
+        while (src_id-- && s)
+          s = s->next;
         if (s) q->depth = s->depth + 1;
 
         if (max_depth < q->depth) max_depth = q->depth;
@@ -598,7 +638,10 @@ void pivot_inputs(void) {
 
       u8* use_name = strstr(rsl, ",orig:");
 
-      if (use_name) use_name += 6; else use_name = rsl;
+      if (use_name)
+        use_name += 6;
+      else
+        use_name = rsl;
       nfn = alloc_printf("%s/queue/id:%06u,orig:%s", out_dir, id, use_name);
 
 #else
@@ -628,29 +671,31 @@ void pivot_inputs(void) {
 
 }
 
-
 /* When resuming, try to find the queue position to start from. This makes sense
    only when resuming, and when we can find the original fuzzer_stats. */
 
 u32 find_start_position(void) {
 
-  static u8 tmp[4096]; /* Ought to be enough for anybody. */
+  static u8 tmp[4096];                   /* Ought to be enough for anybody. */
 
-  u8  *fn, *off;
+  u8 *fn, *off;
   s32 fd, i;
   u32 ret;
 
   if (!resuming_fuzz) return 0;
 
-  if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir);
-  else fn = alloc_printf("%s/../fuzzer_stats", in_dir);
+  if (in_place_resume)
+    fn = alloc_printf("%s/fuzzer_stats", out_dir);
+  else
+    fn = alloc_printf("%s/../fuzzer_stats", in_dir);
 
   fd = open(fn, O_RDONLY);
   ck_free(fn);
 
   if (fd < 0) return 0;
 
-  i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */
+  i = read(fd, tmp, sizeof(tmp) - 1);
+  (void)i;                                                 /* Ignore errors */
   close(fd);
 
   off = strstr(tmp, "cur_path          : ");
@@ -662,30 +707,32 @@ u32 find_start_position(void) {
 
 }
 
-
 /* The same, but for timeouts. The idea is that when resuming sessions without
    -t given, we don't want to keep auto-scaling the timeout over and over
    again to prevent it from growing due to random flukes. */
 
 void find_timeout(void) {
 
-  static u8 tmp[4096]; /* Ought to be enough for anybody. */
+  static u8 tmp[4096];                   /* Ought to be enough for anybody. */
 
-  u8  *fn, *off;
+  u8 *fn, *off;
   s32 fd, i;
   u32 ret;
 
   if (!resuming_fuzz) return;
 
-  if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir);
-  else fn = alloc_printf("%s/../fuzzer_stats", in_dir);
+  if (in_place_resume)
+    fn = alloc_printf("%s/fuzzer_stats", out_dir);
+  else
+    fn = alloc_printf("%s/../fuzzer_stats", in_dir);
 
   fd = open(fn, O_RDONLY);
   ck_free(fn);
 
   if (fd < 0) return;
 
-  i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */
+  i = read(fd, tmp, sizeof(tmp) - 1);
+  (void)i;                                                 /* Ignore errors */
   close(fd);
 
   off = strstr(tmp, "exec_timeout   : ");
@@ -699,14 +746,12 @@ void find_timeout(void) {
 
 }
 
-
-
 /* A helper function for maybe_delete_out_dir(), deleting all prefixed
    files in a directory. */
 
 static u8 delete_files(u8* path, u8* prefix) {
 
-  DIR* d;
+  DIR*           d;
   struct dirent* d_ent;
 
   d = opendir(path);
@@ -715,8 +760,8 @@ static u8 delete_files(u8* path, u8* prefix) {
 
   while ((d_ent = readdir(d))) {
 
-    if (d_ent->d_name[0] != '.' && (!prefix ||
-        !strncmp(d_ent->d_name, prefix, strlen(prefix)))) {
+    if (d_ent->d_name[0] != '.' &&
+        (!prefix || !strncmp(d_ent->d_name, prefix, strlen(prefix)))) {
 
       u8* fname = alloc_printf("%s/%s", path, d_ent->d_name);
       if (unlink(fname)) PFATAL("Unable to delete '%s'", fname);
@@ -732,14 +777,13 @@ static u8 delete_files(u8* path, u8* prefix) {
 
 }
 
-
 /* Get the number of runnable processes, with some simple smoothing. */
 
 double get_runnable_processes(void) {
 
   static double res;
 
-#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)
+#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__)
 
   /* I don't see any portable sysctl or so that would quickly give us the
      number of runnable processes; the 1-minute load average can be a
@@ -762,10 +806,11 @@ double get_runnable_processes(void) {
   while (fgets(tmp, sizeof(tmp), f)) {
 
     if (!strncmp(tmp, "procs_running ", 14) ||
-        !strncmp(tmp, "procs_blocked ", 14)) val += atoi(tmp + 14);
+        !strncmp(tmp, "procs_blocked ", 14))
+      val += atoi(tmp + 14);
 
   }
- 
+
   fclose(f);
 
   if (!res) {
@@ -785,7 +830,6 @@ double get_runnable_processes(void) {
 
 }
 
-
 /* Delete the temporary directory used for in-place session resume. */
 
 void nuke_resume_dir(void) {
@@ -824,14 +868,13 @@ dir_cleanup_failed:
 
 }
 
-
 /* Delete fuzzer output directory if we recognize it as ours, if the fuzzer
    is not currently running, and if the last run time isn't too great. */
 
 void maybe_delete_out_dir(void) {
 
   FILE* f;
-  u8 *fn = alloc_printf("%s/fuzzer_stats", out_dir);
+  u8*   fn = alloc_printf("%s/fuzzer_stats", out_dir);
 
   /* See if the output directory is locked. If yes, bail out. If not,
      create a lock that will persist for the lifetime of the process
@@ -845,7 +888,8 @@ void maybe_delete_out_dir(void) {
   if (flock(out_dir_fd, LOCK_EX | LOCK_NB) && errno == EWOULDBLOCK) {
 
     SAYF("\n" cLRD "[-] " cRST
-         "Looks like the job output directory is being actively used by another\n"
+         "Looks like the job output directory is being actively used by "
+         "another\n"
          "    instance of afl-fuzz. You will need to choose a different %s\n"
          "    or stop the other process first.\n",
          sync_id ? "fuzzer ID" : "output location");
@@ -862,8 +906,10 @@ void maybe_delete_out_dir(void) {
 
     u64 start_time2, last_update;
 
-    if (fscanf(f, "start_time     : %llu\n"
-                  "last_update    : %llu\n", &start_time2, &last_update) != 2)
+    if (fscanf(f,
+               "start_time     : %llu\n"
+               "last_update    : %llu\n",
+               &start_time2, &last_update) != 2)
       FATAL("Malformed data in '%s'", fn);
 
     fclose(f);
@@ -873,16 +919,22 @@ void maybe_delete_out_dir(void) {
     if (!in_place_resume && last_update - start_time2 > OUTPUT_GRACE * 60) {
 
       SAYF("\n" cLRD "[-] " cRST
-           "The job output directory already exists and contains the results of more\n"
-           "    than %d minutes worth of fuzzing. To avoid data loss, afl-fuzz will *NOT*\n"
+           "The job output directory already exists and contains the results "
+           "of more\n"
+           "    than %d minutes worth of fuzzing. To avoid data loss, afl-fuzz "
+           "will *NOT*\n"
            "    automatically delete this data for you.\n\n"
 
-           "    If you wish to start a new session, remove or rename the directory manually,\n"
-           "    or specify a different output location for this job. To resume the old\n"
-           "    session, put '-' as the input directory in the command line ('-i -') and\n"
-           "    try again.\n", OUTPUT_GRACE);
+           "    If you wish to start a new session, remove or rename the "
+           "directory manually,\n"
+           "    or specify a different output location for this job. To resume "
+           "the old\n"
+           "    session, put '-' as the input directory in the command line "
+           "('-i -') and\n"
+           "    try again.\n",
+           OUTPUT_GRACE);
 
-       FATAL("At-risk data found in '%s'", out_dir);
+      FATAL("At-risk data found in '%s'", out_dir);
 
     }
 
@@ -902,7 +954,7 @@ void maybe_delete_out_dir(void) {
 
     in_dir = alloc_printf("%s/_resume", out_dir);
 
-    rename(orig_q, in_dir); /* Ignore errors */
+    rename(orig_q, in_dir);                                /* Ignore errors */
 
     OKF("Output directory exists, will attempt session resume.");
 
@@ -961,7 +1013,7 @@ void maybe_delete_out_dir(void) {
   if (!in_place_resume) {
 
     fn = alloc_printf("%s/crashes/README.txt", out_dir);
-    unlink(fn); /* Ignore errors */
+    unlink(fn);                                            /* Ignore errors */
     ck_free(fn);
 
   }
@@ -973,7 +1025,7 @@ void maybe_delete_out_dir(void) {
 
   if (in_place_resume && rmdir(fn)) {
 
-    time_t cur_t = time(0);
+    time_t     cur_t = time(0);
     struct tm* t = localtime(&cur_t);
 
 #ifndef SIMPLE_FILES
@@ -984,13 +1036,13 @@ void maybe_delete_out_dir(void) {
 
 #else
 
-    u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn,
-                           t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
-                           t->tm_hour, t->tm_min, t->tm_sec);
+    u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, t->tm_year + 1900,
+                           t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min,
+                           t->tm_sec);
 
 #endif /* ^!SIMPLE_FILES */
 
-    rename(fn, nfn); /* Ignore errors. */
+    rename(fn, nfn);                                      /* Ignore errors. */
     ck_free(nfn);
 
   }
@@ -1004,7 +1056,7 @@ void maybe_delete_out_dir(void) {
 
   if (in_place_resume && rmdir(fn)) {
 
-    time_t cur_t = time(0);
+    time_t     cur_t = time(0);
     struct tm* t = localtime(&cur_t);
 
 #ifndef SIMPLE_FILES
@@ -1015,13 +1067,13 @@ void maybe_delete_out_dir(void) {
 
 #else
 
-    u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn,
-                           t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
-                           t->tm_hour, t->tm_min, t->tm_sec);
+    u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, t->tm_year + 1900,
+                           t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min,
+                           t->tm_sec);
 
 #endif /* ^!SIMPLE_FILES */
 
-    rename(fn, nfn); /* Ignore errors. */
+    rename(fn, nfn);                                      /* Ignore errors. */
     ck_free(nfn);
 
   }
@@ -1032,9 +1084,13 @@ void maybe_delete_out_dir(void) {
   /* And now, for some finishing touches. */
 
   if (file_extension) {
+
     fn = alloc_printf("%s/.cur_input.%s", out_dir, file_extension);
+
   } else {
+
     fn = alloc_printf("%s/.cur_input", out_dir);
+
   }
 
   if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
@@ -1045,9 +1101,11 @@ void maybe_delete_out_dir(void) {
   ck_free(fn);
 
   if (!in_place_resume) {
-    fn  = alloc_printf("%s/fuzzer_stats", out_dir);
+
+    fn = alloc_printf("%s/fuzzer_stats", out_dir);
     if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
     ck_free(fn);
+
   }
 
   fn = alloc_printf("%s/plot_data", out_dir);
@@ -1067,19 +1125,22 @@ void maybe_delete_out_dir(void) {
 dir_cleanup_failed:
 
   SAYF("\n" cLRD "[-] " cRST
-       "Whoops, the fuzzer tried to reuse your output directory, but bumped into\n"
-       "    some files that shouldn't be there or that couldn't be removed - so it\n"
+       "Whoops, the fuzzer tried to reuse your output directory, but bumped "
+       "into\n"
+       "    some files that shouldn't be there or that couldn't be removed - "
+       "so it\n"
        "    decided to abort! This happened while processing this path:\n\n"
 
        "    %s\n\n"
-       "    Please examine and manually delete the files, or specify a different\n"
-       "    output location for the tool.\n", fn);
+       "    Please examine and manually delete the files, or specify a "
+       "different\n"
+       "    output location for the tool.\n",
+       fn);
 
   FATAL("Output directory cleanup failed");
 
 }
 
-
 /* Prepare output directories and fds. */
 
 void setup_dirs_fds(void) {
@@ -1090,7 +1151,7 @@ void setup_dirs_fds(void) {
   ACTF("Setting up output directories...");
 
   if (sync_id && mkdir(sync_dir, 0700) && errno != EEXIST)
-      PFATAL("Unable to create '%s'", sync_dir);
+    PFATAL("Unable to create '%s'", sync_dir);
 
   if (mkdir(out_dir, 0700)) {
 
@@ -1197,14 +1258,16 @@ void setup_dirs_fds(void) {
   plot_file = fdopen(fd, "w");
   if (!plot_file) PFATAL("fdopen() failed");
 
-  fprintf(plot_file, "# unix_time, cycles_done, cur_path, paths_total, "
-                     "pending_total, pending_favs, map_size, unique_crashes, "
-                     "unique_hangs, max_depth, execs_per_sec\n");
-                     /* ignore errors */
+  fprintf(plot_file,
+          "# unix_time, cycles_done, cur_path, paths_total, "
+          "pending_total, pending_favs, map_size, unique_crashes, "
+          "unique_hangs, max_depth, execs_per_sec\n");
+  /* ignore errors */
 
 }
 
 void setup_cmdline_file(char** argv) {
+
   u8* tmp;
   s32 fd;
   u32 i = 0;
@@ -1221,13 +1284,15 @@ void setup_cmdline_file(char** argv) {
   if (!cmdline_file) PFATAL("fdopen() failed");
 
   while (argv[i]) {
+
     fprintf(cmdline_file, "%s\n", argv[i]);
     ++i;
+
   }
 
   fclose(cmdline_file);
-}
 
+}
 
 /* Setup the output file for fuzzed data, if not using -f. */
 
@@ -1235,12 +1300,16 @@ void setup_stdio_file(void) {
 
   u8* fn;
   if (file_extension) {
+
     fn = alloc_printf("%s/.cur_input.%s", out_dir, file_extension);
+
   } else {
+
     fn = alloc_printf("%s/.cur_input", out_dir);
+
   }
 
-  unlink(fn); /* Ignore errors */
+  unlink(fn);                                              /* Ignore errors */
 
   out_fd = open(fn, O_RDWR | O_CREAT | O_EXCL, 0600);
 
@@ -1250,32 +1319,34 @@ void setup_stdio_file(void) {
 
 }
 
-
 /* Make sure that core dumps don't go to a program. */
 
 void check_crash_handling(void) {
 
 #ifdef __APPLE__
 
-  /* Yuck! There appears to be no simple C API to query for the state of 
+  /* Yuck! There appears to be no simple C API to query for the state of
      loaded daemons on MacOS X, and I'm a bit hesitant to do something
      more sophisticated, such as disabling crash reporting via Mach ports,
      until I get a box to test the code. So, for now, we check for crash
      reporting the awful way. */
-  
-  if (system("launchctl list 2>/dev/null | grep -q '\\.ReportCrash$'")) return;
 
-  SAYF("\n" cLRD "[-] " cRST
-       "Whoops, your system is configured to forward crash notifications to an\n"
-       "    external crash reporting utility. This will cause issues due to the\n"
-       "    extended delay between the fuzzed binary malfunctioning and this fact\n"
-       "    being relayed to the fuzzer via the standard waitpid() API.\n\n"
-       "    To avoid having crashes misinterpreted as timeouts, please run the\n" 
-       "    following commands:\n\n"
+  if (system("launchctl list 2>/dev/null | grep -q '\\.ReportCrash$'")) return;
 
-       "    SL=/System/Library; PL=com.apple.ReportCrash\n"
-       "    launchctl unload -w ${SL}/LaunchAgents/${PL}.plist\n"
-       "    sudo launchctl unload -w ${SL}/LaunchDaemons/${PL}.Root.plist\n");
+  SAYF(
+      "\n" cLRD "[-] " cRST
+      "Whoops, your system is configured to forward crash notifications to an\n"
+      "    external crash reporting utility. This will cause issues due to "
+      "the\n"
+      "    extended delay between the fuzzed binary malfunctioning and this "
+      "fact\n"
+      "    being relayed to the fuzzer via the standard waitpid() API.\n\n"
+      "    To avoid having crashes misinterpreted as timeouts, please run the\n"
+      "    following commands:\n\n"
+
+      "    SL=/System/Library; PL=com.apple.ReportCrash\n"
+      "    launchctl unload -w ${SL}/LaunchAgents/${PL}.plist\n"
+      "    sudo launchctl unload -w ${SL}/LaunchDaemons/${PL}.Root.plist\n");
 
   if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES"))
     FATAL("Crash reporter detected");
@@ -1283,10 +1354,10 @@ void check_crash_handling(void) {
 #else
 
   /* This is Linux specific, but I don't think there's anything equivalent on
-     *BSD, so we can just let it slide for now. */
+   *BSD, so we can just let it slide for now. */
 
   s32 fd = open("/proc/sys/kernel/core_pattern", O_RDONLY);
-  u8  fchar;
+  u8 fchar;
 
   if (fd < 0) return;
 
@@ -1294,54 +1365,68 @@ void check_crash_handling(void) {
 
   if (read(fd, &fchar, 1) == 1 && fchar == '|') {
 
-    SAYF("\n" cLRD "[-] " cRST
-         "Hmm, your system is configured to send core dump notifications to an\n"
-         "    external utility. This will cause issues: there will be an extended delay\n"
-         "    between stumbling upon a crash and having this information relayed to the\n"
-         "    fuzzer via the standard waitpid() API.\n\n"
+    SAYF(
+        "\n" cLRD "[-] " cRST
+        "Hmm, your system is configured to send core dump notifications to an\n"
+        "    external utility. This will cause issues: there will be an "
+        "extended delay\n"
+        "    between stumbling upon a crash and having this information "
+        "relayed to the\n"
+        "    fuzzer via the standard waitpid() API.\n\n"
 
-         "    To avoid having crashes misinterpreted as timeouts, please log in as root\n" 
-         "    and temporarily modify /proc/sys/kernel/core_pattern, like so:\n\n"
+        "    To avoid having crashes misinterpreted as timeouts, please log in "
+        "as root\n"
+        "    and temporarily modify /proc/sys/kernel/core_pattern, like so:\n\n"
 
-         "    echo core >/proc/sys/kernel/core_pattern\n");
+        "    echo core >/proc/sys/kernel/core_pattern\n");
 
     if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES"))
       FATAL("Pipe at the beginning of 'core_pattern'");
 
   }
- 
+
   close(fd);
 
 #endif /* ^__APPLE__ */
 
 }
 
-
 /* Check CPU governor. */
 
 void check_cpu_governor(void) {
+
 #ifdef __linux__
   FILE* f;
-  u8 tmp[128];
-  u64 min = 0, max = 0;
+  u8    tmp[128];
+  u64   min = 0, max = 0;
 
   if (getenv("AFL_SKIP_CPUFREQ")) return;
 
   if (cpu_aff > 0)
-    snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpu", cpu_aff, "/cpufreq/scaling_governor");
+    snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpu", cpu_aff,
+             "/cpufreq/scaling_governor");
   else
-    snprintf(tmp, sizeof(tmp), "%s", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor");
+    snprintf(tmp, sizeof(tmp), "%s",
+             "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor");
   f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "r");
   if (!f) {
+
     if (cpu_aff > 0)
-      snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpufreq/policy", cpu_aff, "/scaling_governor");
+      snprintf(tmp, sizeof(tmp), "%s%d%s",
+               "/sys/devices/system/cpu/cpufreq/policy", cpu_aff,
+               "/scaling_governor");
     else
-      snprintf(tmp, sizeof(tmp), "%s", "/sys/devices/system/cpu/cpufreq/policy0/scaling_governor");
+      snprintf(tmp, sizeof(tmp), "%s",
+               "/sys/devices/system/cpu/cpufreq/policy0/scaling_governor");
     f = fopen(tmp, "r");
+
   }
+
   if (!f) {
+
     WARNF("Could not check CPU scaling governor");
     return;
+
   }
 
   ACTF("Checking CPU scaling governor...");
@@ -1355,71 +1440,79 @@ void check_cpu_governor(void) {
   f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq", "r");
 
   if (f) {
+
     if (fscanf(f, "%llu", &min) != 1) min = 0;
     fclose(f);
+
   }
 
   f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq", "r");
 
   if (f) {
+
     if (fscanf(f, "%llu", &max) != 1) max = 0;
     fclose(f);
+
   }
 
   if (min == max) return;
 
   SAYF("\n" cLRD "[-] " cRST
        "Whoops, your system uses on-demand CPU frequency scaling, adjusted\n"
-       "    between %llu and %llu MHz. Unfortunately, the scaling algorithm in the\n"
-       "    kernel is imperfect and can miss the short-lived processes spawned by\n"
+       "    between %llu and %llu MHz. Unfortunately, the scaling algorithm in "
+       "the\n"
+       "    kernel is imperfect and can miss the short-lived processes spawned "
+       "by\n"
        "    afl-fuzz. To keep things moving, run these commands as root:\n\n"
 
        "    cd /sys/devices/system/cpu\n"
        "    echo performance | tee cpu*/cpufreq/scaling_governor\n\n"
 
-       "    You can later go back to the original state by replacing 'performance' with\n"
-       "    'ondemand'. If you don't want to change the settings, set AFL_SKIP_CPUFREQ\n"
-       "    to make afl-fuzz skip this check - but expect some performance drop.\n",
+       "    You can later go back to the original state by replacing "
+       "'performance' with\n"
+       "    'ondemand'. If you don't want to change the settings, set "
+       "AFL_SKIP_CPUFREQ\n"
+       "    to make afl-fuzz skip this check - but expect some performance "
+       "drop.\n",
        min / 1024, max / 1024);
 
   FATAL("Suboptimal CPU scaling governor");
 #endif
-}
 
+}
 
 /* Count the number of logical CPU cores. */
 
 void get_core_count(void) {
 
-#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)
+#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__)
 
   size_t s = sizeof(cpu_core_count);
 
   /* On *BSD systems, we can just use a sysctl to get the number of CPUs. */
 
-#ifdef __APPLE__
+#  ifdef __APPLE__
 
-  if (sysctlbyname("hw.logicalcpu", &cpu_core_count, &s, NULL, 0) < 0)
-    return;
+  if (sysctlbyname("hw.logicalcpu", &cpu_core_count, &s, NULL, 0) < 0) return;
 
-#else
+#  else
 
-  int s_name[2] = { CTL_HW, HW_NCPU };
+  int s_name[2] = {CTL_HW, HW_NCPU};
 
   if (sysctl(s_name, 2, &cpu_core_count, &s, NULL, 0) < 0) return;
 
-#endif /* ^__APPLE__ */
+#  endif /* ^__APPLE__ */
 
 #else
 
-#ifdef HAVE_AFFINITY
+#  ifdef HAVE_AFFINITY
 
   cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN);
 
-#else
+#  else
 
   FILE* f = fopen("/proc/stat", "r");
-  u8 tmp[1024];
+  u8    tmp[1024];
 
   if (!f) return;
 
@@ -1428,7 +1521,7 @@ void get_core_count(void) {
 
   fclose(f);
 
-#endif /* ^HAVE_AFFINITY */
+#  endif /* ^HAVE_AFFINITY */
 
 #endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */
 
@@ -1438,7 +1531,7 @@ void get_core_count(void) {
 
     cur_runnable = (u32)get_runnable_processes();
 
-#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)
+#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__)
 
     /* Add ourselves, since the 1-minute average doesn't include that yet. */
 
@@ -1447,8 +1540,8 @@ void get_core_count(void) {
 #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */
 
     OKF("You have %d CPU core%s and %u runnable tasks (utilization: %0.0f%%).",
-        cpu_core_count, cpu_core_count > 1 ? "s" : "",
-        cur_runnable, cur_runnable * 100.0 / cpu_core_count);
+        cpu_core_count, cpu_core_count > 1 ? "s" : "", cur_runnable,
+        cur_runnable * 100.0 / cpu_core_count);
 
     if (cpu_core_count > 1) {
 
@@ -1459,7 +1552,7 @@ void get_core_count(void) {
       } else if (cur_runnable + 1 <= cpu_core_count) {
 
         OKF("Try parallel jobs - see %s/parallel_fuzzing.txt.", doc_path);
-  
+
       }
 
     }
@@ -1473,21 +1566,18 @@ void get_core_count(void) {
 
 }
 
-
 /* Validate and fix up out_dir and sync_dir when using -S. */
 
 void fix_up_sync(void) {
 
   u8* x = sync_id;
 
-  if (dumb_mode)
-    FATAL("-S / -M and -n are mutually exclusive");
+  if (dumb_mode) FATAL("-S / -M and -n are mutually exclusive");
 
   if (skip_deterministic) {
 
-    if (force_deterministic)
-      FATAL("use -S instead of -M -d");
-    //else
+    if (force_deterministic) FATAL("use -S instead of -M -d");
+    // else
     //  FATAL("-S already implies -d");
 
   }
@@ -1506,26 +1596,29 @@ void fix_up_sync(void) {
   x = alloc_printf("%s/%s", out_dir, sync_id);
 
   sync_dir = out_dir;
-  out_dir  = x;
+  out_dir = x;
 
   if (!force_deterministic) {
+
     skip_deterministic = 1;
     use_splicing = 1;
+
   }
 
 }
 
-
 /* Handle screen resize (SIGWINCH). */
 
 static void handle_resize(int sig) {
+
   clear_screen = 1;
-}
 
+}
 
 /* Check ASAN options. */
 
 void check_asan_opts(void) {
+
   u8* x = getenv("ASAN_OPTIONS");
 
   if (x) {
@@ -1543,29 +1636,27 @@ void check_asan_opts(void) {
   if (x) {
 
     if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR)))
-      FATAL("Custom MSAN_OPTIONS set without exit_code="
-            STRINGIFY(MSAN_ERROR) " - please fix!");
+      FATAL("Custom MSAN_OPTIONS set without exit_code=" STRINGIFY(
+          MSAN_ERROR) " - please fix!");
 
     if (!strstr(x, "symbolize=0"))
       FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!");
 
   }
 
-} 
-
+}
 
 /* Handle stop signal (Ctrl-C, etc). */
 
 static void handle_stop_sig(int sig) {
 
-  stop_soon = 1; 
+  stop_soon = 1;
 
   if (child_pid > 0) kill(child_pid, SIGKILL);
   if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL);
 
 }
 
-
 /* Handle skip request (SIGUSR1). */
 
 static void handle_skipreq(int sig) {
@@ -1574,14 +1665,13 @@ static void handle_skipreq(int sig) {
 
 }
 
-
 /* Do a PATH search and find target binary to see that it exists and
    isn't a shell script - a common and painful mistake. We also check for
    a valid ELF header and for evidence of AFL instrumentation. */
 
 void check_binary(u8* fname) {
 
-  u8* env_path = 0;
+  u8*         env_path = 0;
   struct stat st;
 
   s32 fd;
@@ -1609,7 +1699,9 @@ void check_binary(u8* fname) {
         memcpy(cur_elem, env_path, delim - env_path);
         ++delim;
 
-      } else cur_elem = ck_strdup(env_path);
+      } else
+
+        cur_elem = ck_strdup(env_path);
 
       env_path = delim;
 
@@ -1621,7 +1713,8 @@ void check_binary(u8* fname) {
       ck_free(cur_elem);
 
       if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
-          (st.st_mode & 0111) && (f_len = st.st_size) >= 4) break;
+          (st.st_mode & 0111) && (f_len = st.st_size) >= 4)
+        break;
 
       ck_free(target_path);
       target_path = 0;
@@ -1638,7 +1731,7 @@ void check_binary(u8* fname) {
 
   if ((!strncmp(target_path, "/tmp/", 5) && !strchr(target_path + 5, '/')) ||
       (!strncmp(target_path, "/var/tmp/", 9) && !strchr(target_path + 9, '/')))
-     FATAL("Please don't keep binaries in /tmp or /var/tmp");
+    FATAL("Please don't keep binaries in /tmp or /var/tmp");
 
   fd = open(target_path, O_RDONLY);
 
@@ -1653,13 +1746,19 @@ void check_binary(u8* fname) {
   if (f_data[0] == '#' && f_data[1] == '!') {
 
     SAYF("\n" cLRD "[-] " cRST
-         "Oops, the target binary looks like a shell script. Some build systems will\n"
-         "    sometimes generate shell stubs for dynamically linked programs; try static\n"
-         "    library mode (./configure --disable-shared) if that's the case.\n\n"
-
-         "    Another possible cause is that you are actually trying to use a shell\n" 
-         "    wrapper around the fuzzed component. Invoking shell can slow down the\n" 
-         "    fuzzing process by a factor of 20x or more; it's best to write the wrapper\n"
+         "Oops, the target binary looks like a shell script. Some build "
+         "systems will\n"
+         "    sometimes generate shell stubs for dynamically linked programs; "
+         "try static\n"
+         "    library mode (./configure --disable-shared) if that's the "
+         "case.\n\n"
+
+         "    Another possible cause is that you are actually trying to use a "
+         "shell\n"
+         "    wrapper around the fuzzed component. Invoking shell can slow "
+         "down the\n"
+         "    fuzzing process by a factor of 20x or more; it's best to write "
+         "the wrapper\n"
          "    in a compiled language instead.\n");
 
     FATAL("Program '%s' is a shell script", target_path);
@@ -1673,28 +1772,35 @@ void check_binary(u8* fname) {
 
 #else
 
-#if !defined(__arm__) && !defined(__arm64__)
+#  if !defined(__arm__) && !defined(__arm64__)
   if (f_data[0] != 0xCF || f_data[1] != 0xFA || f_data[2] != 0xED)
     FATAL("Program '%s' is not a 64-bit Mach-O binary", target_path);
-#endif
+#  endif
 
 #endif /* ^!__APPLE__ */
 
   if (!qemu_mode && !unicorn_mode && !dumb_mode &&
       !memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) {
 
-    SAYF("\n" cLRD "[-] " cRST
-         "Looks like the target binary is not instrumented! The fuzzer depends on\n"
-         "    compile-time instrumentation to isolate interesting test cases while\n"
-         "    mutating the input data. For more information, and for tips on how to\n"
-         "    instrument binaries, please see %s/README.\n\n"
-
-         "    When source code is not available, you may be able to leverage QEMU\n"
-         "    mode support. Consult the README for tips on how to enable this.\n"
-
-         "    (It is also possible to use afl-fuzz as a traditional, \"dumb\" fuzzer.\n"
-         "    For that, you can use the -n option - but expect much worse results.)\n",
-         doc_path);
+    SAYF(
+        "\n" cLRD "[-] " cRST
+        "Looks like the target binary is not instrumented! The fuzzer depends "
+        "on\n"
+        "    compile-time instrumentation to isolate interesting test cases "
+        "while\n"
+        "    mutating the input data. For more information, and for tips on "
+        "how to\n"
+        "    instrument binaries, please see %s/README.\n\n"
+
+        "    When source code is not available, you may be able to leverage "
+        "QEMU\n"
+        "    mode support. Consult the README for tips on how to enable this.\n"
+
+        "    (It is also possible to use afl-fuzz as a traditional, \"dumb\" "
+        "fuzzer.\n"
+        "    For that, you can use the -n option - but expect much worse "
+        "results.)\n",
+        doc_path);
 
     FATAL("No instrumentation detected");
 
@@ -1704,8 +1810,10 @@ void check_binary(u8* fname) {
       memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) {
 
     SAYF("\n" cLRD "[-] " cRST
-         "This program appears to be instrumented with afl-gcc, but is being run in\n"
-         "    QEMU or Unicorn mode (-Q or -U). This is probably not what you want -\n"
+         "This program appears to be instrumented with afl-gcc, but is being "
+         "run in\n"
+         "    QEMU or Unicorn mode (-Q or -U). This is probably not what you "
+         "want -\n"
          "    this setup will be slow and offer no practical benefits.\n");
 
     FATAL("Instrumentation found in -Q or -U mode");
@@ -1713,7 +1821,8 @@ void check_binary(u8* fname) {
   }
 
   if (memmem(f_data, f_len, "libasan.so", 10) ||
-      memmem(f_data, f_len, "__msan_init", 11)) uses_asan = 1;
+      memmem(f_data, f_len, "__msan_init", 11))
+    uses_asan = 1;
 
   /* Detect persistent & deferred init signatures in the binary. */
 
@@ -1745,7 +1854,6 @@ void check_binary(u8* fname) {
 
 }
 
-
 /* Trim and possibly create a banner for the run. */
 
 void fix_up_banner(u8* name) {
@@ -1759,7 +1867,10 @@ void fix_up_banner(u8* name) {
     } else {
 
       u8* trim = strrchr(name, '/');
-      if (!trim) use_banner = name; else use_banner = trim + 1;
+      if (!trim)
+        use_banner = name;
+      else
+        use_banner = trim + 1;
 
     }
 
@@ -1775,7 +1886,6 @@ void fix_up_banner(u8* name) {
 
 }
 
-
 /* Check if we're on TTY. */
 
 void check_if_tty(void) {
@@ -1783,24 +1893,29 @@ void check_if_tty(void) {
   struct winsize ws;
 
   if (getenv("AFL_NO_UI")) {
+
     OKF("Disabling the UI because AFL_NO_UI is set.");
     not_on_tty = 1;
     return;
+
   }
 
   if (ioctl(1, TIOCGWINSZ, &ws)) {
 
     if (errno == ENOTTY) {
-      OKF("Looks like we're not running on a tty, so I'll be a bit less verbose.");
+
+      OKF("Looks like we're not running on a tty, so I'll be a bit less "
+          "verbose.");
       not_on_tty = 1;
+
     }
 
     return;
+
   }
 
 }
 
-
 /* Set up signal handlers. More complicated that needs to be, because libc on
    Solaris doesn't resume interrupted reads(), sets SA_RESETHAND when you call
    siginterrupt(), and does other stupid things. */
@@ -1809,8 +1924,8 @@ void setup_signal_handlers(void) {
 
   struct sigaction sa;
 
-  sa.sa_handler   = NULL;
-  sa.sa_flags     = SA_RESTART;
+  sa.sa_handler = NULL;
+  sa.sa_flags = SA_RESTART;
   sa.sa_sigaction = NULL;
 
   sigemptyset(&sa.sa_mask);
@@ -1845,13 +1960,12 @@ void setup_signal_handlers(void) {
 
 }
 
-
 /* Rewrite argv for QEMU. */
 
 char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
   char** new_argv = ck_alloc(sizeof(char*) * (argc + 4));
-  u8 *tmp, *cp, *rsl, *own_copy;
+  u8 *   tmp, *cp, *rsl, *own_copy;
 
   memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc);
 
@@ -1866,8 +1980,7 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
     cp = alloc_printf("%s/afl-qemu-trace", tmp);
 
-    if (access(cp, X_OK))
-      FATAL("Unable to find '%s'", tmp);
+    if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp);
 
     target_path = new_argv[0] = cp;
     return new_argv;
@@ -1891,7 +2004,9 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
     }
 
-  } else ck_free(own_copy);
+  } else
+
+    ck_free(own_copy);
 
   if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) {
 
@@ -1901,14 +2016,20 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
   }
 
   SAYF("\n" cLRD "[-] " cRST
-       "Oops, unable to find the 'afl-qemu-trace' binary. The binary must be built\n"
-       "    separately by following the instructions in qemu_mode/README.qemu. If you\n"
-       "    already have the binary installed, you may need to specify AFL_PATH in the\n"
+       "Oops, unable to find the 'afl-qemu-trace' binary. The binary must be "
+       "built\n"
+       "    separately by following the instructions in qemu_mode/README.qemu. "
+       "If you\n"
+       "    already have the binary installed, you may need to specify "
+       "AFL_PATH in the\n"
        "    environment.\n\n"
 
-       "    Of course, even without QEMU, afl-fuzz can still work with binaries that are\n"
-       "    instrumented at compile time with afl-gcc. It is also possible to use it as a\n"
-       "    traditional \"dumb\" fuzzer by specifying '-n' in the command line.\n");
+       "    Of course, even without QEMU, afl-fuzz can still work with "
+       "binaries that are\n"
+       "    instrumented at compile time with afl-gcc. It is also possible to "
+       "use it as a\n"
+       "    traditional \"dumb\" fuzzer by specifying '-n' in the command "
+       "line.\n");
 
   FATAL("Failed to locate 'afl-qemu-trace'.");
 
@@ -1923,7 +2044,7 @@ void save_cmdline(u32 argc, char** argv) {
 
   for (i = 0; i < argc; ++i)
     len += strlen(argv[i]) + 1;
-  
+
   buf = orig_cmdline = ck_alloc(len);
 
   for (i = 0; i < argc; ++i) {
diff --git a/src/afl-fuzz-misc.c b/src/afl-fuzz-misc.c
index 69ff2f6b..eb0cc187 100644
--- a/src/afl-fuzz-misc.c
+++ b/src/afl-fuzz-misc.c
@@ -33,11 +33,16 @@ u8* DI(u64 val) {
 
   cur = (cur + 1) % 12;
 
-#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \
-    if (val < (_divisor) * (_limit_mult)) { \
+#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast)    \
+  do {                                                    \
+                                                          \
+    if (val < (_divisor) * (_limit_mult)) {               \
+                                                          \
       sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \
-      return tmp[cur]; \
-    } \
+      return tmp[cur];                                    \
+                                                          \
+    }                                                     \
+                                                          \
   } while (0)
 
   /* 0-9999 */
@@ -79,8 +84,7 @@ u8* DI(u64 val) {
 
 }
 
-
-/* Describe float. Similar to the above, except with a single 
+/* Describe float. Similar to the above, except with a single
    static buffer. */
 
 u8* DF(double val) {
@@ -88,20 +92,23 @@ u8* DF(double val) {
   static u8 tmp[16];
 
   if (val < 99.995) {
+
     sprintf(tmp, "%0.02f", val);
     return tmp;
+
   }
 
   if (val < 999.95) {
+
     sprintf(tmp, "%0.01f", val);
     return tmp;
+
   }
 
   return DI((u64)val);
 
 }
 
-
 /* Describe integer as memory size. */
 
 u8* DMS(u64 val) {
@@ -152,14 +159,13 @@ u8* DMS(u64 val) {
 
 }
 
-
 /* Describe time delta. Returns one static buffer, 34 chars of less. */
 
 u8* DTD(u64 cur_ms, u64 event_ms) {
 
   static u8 tmp[64];
-  u64 delta;
-  s32 t_d, t_h, t_m, t_s;
+  u64       delta;
+  s32       t_d, t_h, t_m, t_s;
 
   if (!event_ms) return "none seen yet";
 
@@ -174,3 +180,4 @@ u8* DTD(u64 cur_ms, u64 event_ms) {
   return tmp;
 
 }
+
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index 59370c3d..1b7abedd 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -28,22 +28,31 @@ int select_algorithm(void) {
 
   int i_puppet, j_puppet;
 
-  double sele = ((double)(UR(10000))*0.0001);
+  double sele = ((double)(UR(10000)) * 0.0001);
   j_puppet = 0;
   for (i_puppet = 0; i_puppet < operator_num; ++i_puppet) {
-      if (unlikely(i_puppet == 0)) {
-          if (sele < probability_now[swarm_now][i_puppet])
-            break;
-      } else {
-          if (sele < probability_now[swarm_now][i_puppet]) {
-              j_puppet =1;
-              break;
-          }
+
+    if (unlikely(i_puppet == 0)) {
+
+      if (sele < probability_now[swarm_now][i_puppet]) break;
+
+    } else {
+
+      if (sele < probability_now[swarm_now][i_puppet]) {
+
+        j_puppet = 1;
+        break;
+
       }
+
+    }
+
   }
-  if (j_puppet ==1 && sele < probability_now[swarm_now][i_puppet-1])
+
+  if (j_puppet == 1 && sele < probability_now[swarm_now][i_puppet - 1])
     FATAL("error select_algorithm");
   return i_puppet;
+
 }
 
 /* Helper to choose random block len for block operations in fuzz_one().
@@ -58,27 +67,29 @@ static u32 choose_block_len(u32 limit) {
 
   switch (UR(rlim)) {
 
-    case 0:  min_value = 1;
-             max_value = HAVOC_BLK_SMALL;
-             break;
+    case 0:
+      min_value = 1;
+      max_value = HAVOC_BLK_SMALL;
+      break;
 
-    case 1:  min_value = HAVOC_BLK_SMALL;
-             max_value = HAVOC_BLK_MEDIUM;
-             break;
+    case 1:
+      min_value = HAVOC_BLK_SMALL;
+      max_value = HAVOC_BLK_MEDIUM;
+      break;
 
-    default: 
+    default:
 
-             if (UR(10)) {
+      if (UR(10)) {
 
-               min_value = HAVOC_BLK_MEDIUM;
-               max_value = HAVOC_BLK_LARGE;
+        min_value = HAVOC_BLK_MEDIUM;
+        max_value = HAVOC_BLK_LARGE;
 
-             } else {
+      } else {
 
-               min_value = HAVOC_BLK_LARGE;
-               max_value = HAVOC_BLK_XL;
+        min_value = HAVOC_BLK_LARGE;
+        max_value = HAVOC_BLK_XL;
 
-             }
+      }
 
   }
 
@@ -88,7 +99,6 @@ static u32 choose_block_len(u32 limit) {
 
 }
 
-
 /* Helper function to see if a particular change (xor_val = old ^ new) could
    be a product of deterministic bit flips with the lengths and stepovers
    attempted by afl-fuzz. This is used to avoid dupes in some of the
@@ -104,7 +114,12 @@ static u8 could_be_bitflip(u32 xor_val) {
 
   /* Shift left until first bit set. */
 
-  while (!(xor_val & 1)) { ++sh; xor_val >>= 1; }
+  while (!(xor_val & 1)) {
+
+    ++sh;
+    xor_val >>= 1;
+
+  }
 
   /* 1-, 2-, and 4-bit patterns are OK anywhere. */
 
@@ -115,14 +130,12 @@ static u8 could_be_bitflip(u32 xor_val) {
 
   if (sh & 7) return 0;
 
-  if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff)
-    return 1;
+  if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff) return 1;
 
   return 0;
 
 }
 
-
 /* Helper function to see if a particular value is reachable through
    arithmetic operations. Used for similar purposes. */
 
@@ -136,10 +149,15 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
 
   for (i = 0; i < blen; ++i) {
 
-    u8 a = old_val >> (8 * i),
-       b = new_val >> (8 * i);
+    u8 a = old_val >> (8 * i), b = new_val >> (8 * i);
+
+    if (a != b) {
+
+      ++diffs;
+      ov = a;
+      nv = b;
 
-    if (a != b) { ++diffs; ov = a; nv = b; }
+    }
 
   }
 
@@ -147,8 +165,7 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
 
   if (diffs == 1) {
 
-    if ((u8)(ov - nv) <= ARITH_MAX ||
-        (u8)(nv - ov) <= ARITH_MAX) return 1;
+    if ((u8)(ov - nv) <= ARITH_MAX || (u8)(nv - ov) <= ARITH_MAX) return 1;
 
   }
 
@@ -160,10 +177,15 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
 
   for (i = 0; i < blen / 2; ++i) {
 
-    u16 a = old_val >> (16 * i),
-        b = new_val >> (16 * i);
+    u16 a = old_val >> (16 * i), b = new_val >> (16 * i);
+
+    if (a != b) {
 
-    if (a != b) { ++diffs; ov = a; nv = b; }
+      ++diffs;
+      ov = a;
+      nv = b;
+
+    }
 
   }
 
@@ -171,13 +193,12 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
 
   if (diffs == 1) {
 
-    if ((u16)(ov - nv) <= ARITH_MAX ||
-        (u16)(nv - ov) <= ARITH_MAX) return 1;
+    if ((u16)(ov - nv) <= ARITH_MAX || (u16)(nv - ov) <= ARITH_MAX) return 1;
 
-    ov = SWAP16(ov); nv = SWAP16(nv);
+    ov = SWAP16(ov);
+    nv = SWAP16(nv);
 
-    if ((u16)(ov - nv) <= ARITH_MAX ||
-        (u16)(nv - ov) <= ARITH_MAX) return 1;
+    if ((u16)(ov - nv) <= ARITH_MAX || (u16)(nv - ov) <= ARITH_MAX) return 1;
 
   }
 
@@ -186,13 +207,15 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
   if (blen == 4) {
 
     if ((u32)(old_val - new_val) <= ARITH_MAX ||
-        (u32)(new_val - old_val) <= ARITH_MAX) return 1;
+        (u32)(new_val - old_val) <= ARITH_MAX)
+      return 1;
 
     new_val = SWAP32(new_val);
     old_val = SWAP32(old_val);
 
     if ((u32)(old_val - new_val) <= ARITH_MAX ||
-        (u32)(new_val - old_val) <= ARITH_MAX) return 1;
+        (u32)(new_val - old_val) <= ARITH_MAX)
+      return 1;
 
   }
 
@@ -200,8 +223,7 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
 
 }
 
-
-/* Last but not least, a similar helper to see if insertion of an 
+/* Last but not least, a similar helper to see if insertion of an
    interesting integer is redundant given the insertions done for
    shorter blen. The last param (check_le) is set if the caller
    already executed LE insertion for current blen and wants to see
@@ -220,8 +242,8 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) {
 
     for (j = 0; j < sizeof(interesting_8); ++j) {
 
-      u32 tval = (old_val & ~(0xff << (i * 8))) |
-                 (((u8)interesting_8[j]) << (i * 8));
+      u32 tval =
+          (old_val & ~(0xff << (i * 8))) | (((u8)interesting_8[j]) << (i * 8));
 
       if (new_val == tval) return 1;
 
@@ -274,11 +296,10 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) {
 
 }
 
-
 #ifndef IGNORE_FINDS
 
-/* Helper function to compare buffers; returns first and last differing offset. We
-   use this to find reasonable locations for splicing two files. */
+/* Helper function to compare buffers; returns first and last differing offset.
+   We use this to find reasonable locations for splicing two files. */
 
 static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {
 
@@ -313,11 +334,11 @@ static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {
 u8 fuzz_one_original(char** argv) {
 
   s32 len, fd, temp_len, i, j;
-  u8  *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
-  u64 havoc_queued = 0,  orig_hit_cnt, new_hit_cnt;
+  u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
+  u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt;
   u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
 
-  u8  ret_val = 1, doing_det = 0;
+  u8 ret_val = 1, doing_det = 0;
 
   u8  a_collect[MAX_AUTO_EXTRA];
   u32 a_len = 0;
@@ -337,8 +358,10 @@ u8 fuzz_one_original(char** argv) {
        possibly skip to them at the expense of already-fuzzed or non-favored
        cases. */
 
-    if (((queue_cur->was_fuzzed > 0 || queue_cur->fuzz_level > 0) || !queue_cur->favored) &&
-        UR(100) < SKIP_TO_NEW_PROB) return 1;
+    if (((queue_cur->was_fuzzed > 0 || queue_cur->fuzz_level > 0) ||
+         !queue_cur->favored) &&
+        UR(100) < SKIP_TO_NEW_PROB)
+      return 1;
 
   } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
 
@@ -346,7 +369,8 @@ u8 fuzz_one_original(char** argv) {
        The odds of skipping stuff are higher for already-fuzzed inputs and
        lower for never-fuzzed entries. */
 
-    if (queue_cycle > 1 && (queue_cur->fuzz_level == 0 || queue_cur->was_fuzzed)) {
+    if (queue_cycle > 1 &&
+        (queue_cur->fuzz_level == 0 || queue_cur->was_fuzzed)) {
 
       if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
 
@@ -361,9 +385,11 @@ u8 fuzz_one_original(char** argv) {
 #endif /* ^IGNORE_FINDS */
 
   if (not_on_tty) {
+
     ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
          current_entry, queued_paths, unique_crashes);
     fflush(stdout);
+
   }
 
   /* Map the test case into memory. */
@@ -376,7 +402,8 @@ u8 fuzz_one_original(char** argv) {
 
   orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 
-  if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s' with len %d", queue_cur->fname, len);
+  if (orig_in == MAP_FAILED)
+    PFATAL("Unable to mmap '%s' with len %d", queue_cur->fname, len);
 
   close(fd);
 
@@ -402,14 +429,15 @@ u8 fuzz_one_original(char** argv) {
 
       res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
 
-      if (res == FAULT_ERROR)
-        FATAL("Unable to execute target application");
+      if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
     }
 
     if (stop_soon || res != crash_mode) {
+
       ++cur_skipped_paths;
       goto abandon_entry;
+
     }
 
   }
@@ -422,12 +450,13 @@ u8 fuzz_one_original(char** argv) {
 
     u8 res = trim_case(argv, queue_cur, in_buf);
 
-    if (res == FAULT_ERROR)
-      FATAL("Unable to execute target application");
+    if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
     if (stop_soon) {
+
       ++cur_skipped_paths;
       goto abandon_entry;
+
     }
 
     /* Don't retry trimming, even if it failed. */
@@ -449,49 +478,56 @@ u8 fuzz_one_original(char** argv) {
   if (perf_score == 0) goto abandon_entry;
 
   if (custom_mutator) {
+
     stage_short = "custom";
     stage_name = "custom mutator";
     stage_max = len << 3;
     stage_val_type = STAGE_VAL_NONE;
 
-    const u32 max_seed_size = 4096*4096;
-    u8* mutated_buf = ck_alloc(max_seed_size);
+    const u32 max_seed_size = 4096 * 4096;
+    u8*       mutated_buf = ck_alloc(max_seed_size);
 
     orig_hit_cnt = queued_paths + unique_crashes;
 
-    for (stage_cur = 0 ; stage_cur < stage_max ; ++stage_cur) {
-      size_t orig_size = (size_t) len;
-      size_t mutated_size = custom_mutator(out_buf, orig_size, mutated_buf, max_seed_size, UR(UINT32_MAX));
+    for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+      size_t orig_size = (size_t)len;
+      size_t mutated_size = custom_mutator(out_buf, orig_size, mutated_buf,
+                                           max_seed_size, UR(UINT32_MAX));
       if (mutated_size > 0) {
+
         out_buf = ck_realloc(out_buf, mutated_size);
         memcpy(out_buf, mutated_buf, mutated_size);
-        if (common_fuzz_stuff(argv, out_buf, (u32) mutated_size)) {
+        if (common_fuzz_stuff(argv, out_buf, (u32)mutated_size)) {
+
           goto abandon_entry;
+
         }
+
       }
+
     }
 
     ck_free(mutated_buf);
     new_hit_cnt = queued_paths + unique_crashes;
 
-    stage_finds[STAGE_CUSTOM_MUTATOR]  += new_hit_cnt - orig_hit_cnt;
+    stage_finds[STAGE_CUSTOM_MUTATOR] += new_hit_cnt - orig_hit_cnt;
     stage_cycles[STAGE_CUSTOM_MUTATOR] += stage_max;
     goto abandon_entry;
-  }
 
+  }
 
   /* Skip right away if -d is given, if it has not been chosen sufficiently
      often to warrant the expensive deterministic stage (fuzz_level), or
      if it has gone through deterministic testing in earlier, resumed runs
      (passed_det). */
 
-  if (skip_deterministic
-     || ((!queue_cur->passed_det)
-        && perf_score < (
-              queue_cur->depth * 30 <= havoc_max_mult * 100
-              ? queue_cur->depth * 30
-              : havoc_max_mult * 100))
-     || queue_cur->passed_det)
+  if (skip_deterministic ||
+      ((!queue_cur->passed_det) &&
+       perf_score < (queue_cur->depth * 30 <= havoc_max_mult * 100
+                         ? queue_cur->depth * 30
+                         : havoc_max_mult * 100)) ||
+      queue_cur->passed_det)
 #ifdef USE_PYTHON
     goto python_stage;
 #else
@@ -514,17 +550,20 @@ u8 fuzz_one_original(char** argv) {
    * SIMPLE BITFLIP (+dictionary construction) *
    *********************************************/
 
-#define FLIP_BIT(_ar, _b) do { \
-    u8* _arf = (u8*)(_ar); \
-    u32 _bf = (_b); \
-    _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
+#define FLIP_BIT(_ar, _b)                   \
+  do {                                      \
+                                            \
+    u8* _arf = (u8*)(_ar);                  \
+    u32 _bf = (_b);                         \
+    _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \
+                                            \
   } while (0)
 
   /* Single walking bit. */
 
   stage_short = "flip1";
-  stage_max   = len << 3;
-  stage_name  = "bitflip 1/1";
+  stage_max = len << 3;
+  stage_name = "bitflip 1/1";
 
   stage_val_type = STAGE_VAL_NONE;
 
@@ -556,7 +595,7 @@ u8 fuzz_one_original(char** argv) {
 
        We do this here, rather than as a separate stage, because it's a nice
        way to keep the operation approximately "free" (i.e., no extra execs).
-       
+
        Empirically, performing the check when flipping the least significant bit
        is advantageous, compared to doing it at the time of more disruptive
        changes, where the program flow may be affected in more violent ways.
@@ -602,7 +641,7 @@ u8 fuzz_one_original(char** argv) {
 
       if (cksum != queue_cur->exec_cksum) {
 
-        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];        
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
         ++a_len;
 
       }
@@ -613,14 +652,14 @@ u8 fuzz_one_original(char** argv) {
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_FLIP1]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_FLIP1] += stage_max;
 
   /* Two walking bits. */
 
-  stage_name  = "bitflip 2/1";
+  stage_name = "bitflip 2/1";
   stage_short = "flip2";
-  stage_max   = (len << 3) - 1;
+  stage_max = (len << 3) - 1;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -640,14 +679,14 @@ u8 fuzz_one_original(char** argv) {
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_FLIP2]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_FLIP2] += stage_max;
 
   /* Four walking bits. */
 
-  stage_name  = "bitflip 4/1";
+  stage_name = "bitflip 4/1";
   stage_short = "flip4";
-  stage_max   = (len << 3) - 3;
+  stage_max = (len << 3) - 3;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -671,7 +710,7 @@ u8 fuzz_one_original(char** argv) {
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_FLIP4]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_FLIP4] += stage_max;
 
   /* Effector map setup. These macros calculate:
@@ -682,27 +721,29 @@ u8 fuzz_one_original(char** argv) {
 
    */
 
-#define EFF_APOS(_p)          ((_p) >> EFF_MAP_SCALE2)
-#define EFF_REM(_x)           ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
-#define EFF_ALEN(_l)          (EFF_APOS(_l) + !!EFF_REM(_l))
-#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)
+#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
+#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
+#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
+#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1)
 
   /* Initialize effector map for the next step (see comments below). Always
      flag first and last byte as doing something. */
 
-  eff_map    = ck_alloc(EFF_ALEN(len));
+  eff_map = ck_alloc(EFF_ALEN(len));
   eff_map[0] = 1;
 
   if (EFF_APOS(len - 1) != 0) {
+
     eff_map[EFF_APOS(len - 1)] = 1;
     ++eff_cnt;
+
   }
 
   /* Walking byte. */
 
-  stage_name  = "bitflip 8/8";
+  stage_name = "bitflip 8/8";
   stage_short = "flip8";
-  stage_max   = len;
+  stage_max = len;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -732,8 +773,10 @@ u8 fuzz_one_original(char** argv) {
         cksum = ~queue_cur->exec_cksum;
 
       if (cksum != queue_cur->exec_cksum) {
+
         eff_map[EFF_APOS(stage_cur)] = 1;
         ++eff_cnt;
+
       }
 
     }
@@ -763,17 +806,17 @@ u8 fuzz_one_original(char** argv) {
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_FLIP8]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_FLIP8] += stage_max;
 
   /* Two walking bytes. */
 
   if (len < 2) goto skip_bitflip;
 
-  stage_name  = "bitflip 16/8";
+  stage_name = "bitflip 16/8";
   stage_short = "flip16";
-  stage_cur   = 0;
-  stage_max   = len - 1;
+  stage_cur = 0;
+  stage_max = len - 1;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -782,8 +825,10 @@ u8 fuzz_one_original(char** argv) {
     /* Let's consult the effector map... */
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
+
       --stage_max;
       continue;
+
     }
 
     stage_cur_byte = i;
@@ -795,22 +840,21 @@ u8 fuzz_one_original(char** argv) {
 
     *(u16*)(out_buf + i) ^= 0xFFFF;
 
-
   }
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_FLIP16]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_FLIP16] += stage_max;
 
   if (len < 4) goto skip_bitflip;
 
   /* Four walking bytes. */
 
-  stage_name  = "bitflip 32/8";
+  stage_name = "bitflip 32/8";
   stage_short = "flip32";
-  stage_cur   = 0;
-  stage_max   = len - 3;
+  stage_cur = 0;
+  stage_max = len - 3;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -819,8 +863,10 @@ u8 fuzz_one_original(char** argv) {
     /* Let's consult the effector map... */
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
+
       --stage_max;
       continue;
+
     }
 
     stage_cur_byte = i;
@@ -836,7 +882,7 @@ u8 fuzz_one_original(char** argv) {
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_FLIP32]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_FLIP32] += stage_max;
 
 skip_bitflip:
@@ -849,10 +895,10 @@ skip_bitflip:
 
   /* 8-bit arithmetics. */
 
-  stage_name  = "arith 8/8";
+  stage_name = "arith 8/8";
   stage_short = "arith8";
-  stage_cur   = 0;
-  stage_max   = 2 * len * ARITH_MAX;
+  stage_cur = 0;
+  stage_max = 2 * len * ARITH_MAX;
 
   stage_val_type = STAGE_VAL_LE;
 
@@ -865,8 +911,10 @@ skip_bitflip:
     /* Let's consult the effector map... */
 
     if (!eff_map[EFF_APOS(i)]) {
+
       stage_max -= 2 * ARITH_MAX;
       continue;
+
     }
 
     stage_cur_byte = i;
@@ -886,9 +934,11 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
-      r =  orig ^ (orig - j);
+      r = orig ^ (orig - j);
 
       if (!could_be_bitflip(r)) {
 
@@ -898,7 +948,9 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       out_buf[i] = orig;
 
@@ -908,17 +960,17 @@ skip_bitflip:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_ARITH8]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_ARITH8] += stage_max;
 
   /* 16-bit arithmetics, both endians. */
 
   if (len < 2) goto skip_arith;
 
-  stage_name  = "arith 16/8";
+  stage_name = "arith 16/8";
   stage_short = "arith16";
-  stage_cur   = 0;
-  stage_max   = 4 * (len - 1) * ARITH_MAX;
+  stage_cur = 0;
+  stage_max = 4 * (len - 1) * ARITH_MAX;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -929,25 +981,26 @@ skip_bitflip:
     /* Let's consult the effector map... */
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
+
       stage_max -= 4 * ARITH_MAX;
       continue;
+
     }
 
     stage_cur_byte = i;
 
     for (j = 1; j <= ARITH_MAX; ++j) {
 
-      u16 r1 = orig ^ (orig + j),
-          r2 = orig ^ (orig - j),
+      u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
           r3 = orig ^ SWAP16(SWAP16(orig) + j),
           r4 = orig ^ SWAP16(SWAP16(orig) - j);
 
       /* Try little endian addition and subtraction first. Do it only
-         if the operation would affect more than one byte (hence the 
+         if the operation would affect more than one byte (hence the
          & 0xff overflow checks) and if it couldn't be a product of
          a bitflip. */
 
-      stage_val_type = STAGE_VAL_LE; 
+      stage_val_type = STAGE_VAL_LE;
 
       if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
 
@@ -956,8 +1009,10 @@ skip_bitflip:
 
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
- 
-      } else --stage_max;
+
+      } else
+
+        --stage_max;
 
       if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
 
@@ -967,13 +1022,14 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       /* Big endian comes next. Same deal. */
 
       stage_val_type = STAGE_VAL_BE;
 
-
       if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
 
         stage_cur_val = j;
@@ -982,7 +1038,9 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       if ((orig >> 8) < j && !could_be_bitflip(r4)) {
 
@@ -992,7 +1050,9 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       *(u16*)(out_buf + i) = orig;
 
@@ -1002,17 +1062,17 @@ skip_bitflip:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_ARITH16]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_ARITH16] += stage_max;
 
   /* 32-bit arithmetics, both endians. */
 
   if (len < 4) goto skip_arith;
 
-  stage_name  = "arith 32/8";
+  stage_name = "arith 32/8";
   stage_short = "arith32";
-  stage_cur   = 0;
-  stage_max   = 4 * (len - 3) * ARITH_MAX;
+  stage_cur = 0;
+  stage_max = 4 * (len - 3) * ARITH_MAX;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1024,16 +1084,17 @@ skip_bitflip:
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
+
       stage_max -= 4 * ARITH_MAX;
       continue;
+
     }
 
     stage_cur_byte = i;
 
     for (j = 1; j <= ARITH_MAX; ++j) {
 
-      u32 r1 = orig ^ (orig + j),
-          r2 = orig ^ (orig - j),
+      u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
           r3 = orig ^ SWAP32(SWAP32(orig) + j),
           r4 = orig ^ SWAP32(SWAP32(orig) - j);
 
@@ -1050,7 +1111,9 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
 
@@ -1060,7 +1123,9 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       /* Big endian next. */
 
@@ -1074,7 +1139,9 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
 
@@ -1084,7 +1151,9 @@ skip_bitflip:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       *(u32*)(out_buf + i) = orig;
 
@@ -1094,7 +1163,7 @@ skip_bitflip:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_ARITH32]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_ARITH32] += stage_max;
 
 skip_arith:
@@ -1103,10 +1172,10 @@ skip_arith:
    * INTERESTING VALUES *
    **********************/
 
-  stage_name  = "interest 8/8";
+  stage_name = "interest 8/8";
   stage_short = "int8";
-  stage_cur   = 0;
-  stage_max   = len * sizeof(interesting_8);
+  stage_cur = 0;
+  stage_max = len * sizeof(interesting_8);
 
   stage_val_type = STAGE_VAL_LE;
 
@@ -1121,8 +1190,10 @@ skip_arith:
     /* Let's consult the effector map... */
 
     if (!eff_map[EFF_APOS(i)]) {
+
       stage_max -= sizeof(interesting_8);
       continue;
+
     }
 
     stage_cur_byte = i;
@@ -1133,8 +1204,10 @@ skip_arith:
 
       if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
           could_be_arith(orig, (u8)interesting_8[j], 1)) {
+
         --stage_max;
         continue;
+
       }
 
       stage_cur_val = interesting_8[j];
@@ -1151,17 +1224,17 @@ skip_arith:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_INTEREST8]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_INTEREST8] += stage_max;
 
   /* Setting 16-bit integers, both endians. */
 
   if (no_arith || len < 2) goto skip_interest;
 
-  stage_name  = "interest 16/8";
+  stage_name = "interest 16/8";
   stage_short = "int16";
-  stage_cur   = 0;
-  stage_max   = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
+  stage_cur = 0;
+  stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1172,8 +1245,10 @@ skip_arith:
     /* Let's consult the effector map... */
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
+
       stage_max -= sizeof(interesting_16);
       continue;
+
     }
 
     stage_cur_byte = i;
@@ -1196,7 +1271,9 @@ skip_arith:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
           !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
@@ -1209,7 +1286,9 @@ skip_arith:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
     }
 
@@ -1219,17 +1298,17 @@ skip_arith:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_INTEREST16]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_INTEREST16] += stage_max;
 
   if (len < 4) goto skip_interest;
 
   /* Setting 32-bit integers, both endians. */
 
-  stage_name  = "interest 32/8";
+  stage_name = "interest 32/8";
   stage_short = "int32";
-  stage_cur   = 0;
-  stage_max   = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
+  stage_cur = 0;
+  stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1241,8 +1320,10 @@ skip_arith:
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
+
       stage_max -= sizeof(interesting_32) >> 1;
       continue;
+
     }
 
     stage_cur_byte = i;
@@ -1265,7 +1346,9 @@ skip_arith:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
       if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
           !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
@@ -1278,7 +1361,9 @@ skip_arith:
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
         ++stage_cur;
 
-      } else --stage_max;
+      } else
+
+        --stage_max;
 
     }
 
@@ -1288,7 +1373,7 @@ skip_arith:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_INTEREST32]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_INTEREST32] += stage_max;
 
 skip_interest:
@@ -1301,10 +1386,10 @@ skip_interest:
 
   /* Overwrite with user-supplied extras. */
 
-  stage_name  = "user extras (over)";
+  stage_name = "user extras (over)";
   stage_short = "ext_UO";
-  stage_cur   = 0;
-  stage_max   = extras_cnt * len;
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
 
   stage_val_type = STAGE_VAL_NONE;
 
@@ -1354,15 +1439,15 @@ skip_interest:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_UO]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_EXTRAS_UO] += stage_max;
 
   /* Insertion of user-supplied extras. */
 
-  stage_name  = "user extras (insert)";
+  stage_name = "user extras (insert)";
   stage_short = "ext_UI";
-  stage_cur   = 0;
-  stage_max   = extras_cnt * len;
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1375,8 +1460,10 @@ skip_interest:
     for (j = 0; j < extras_cnt; ++j) {
 
       if (len + extras[j].len > MAX_FILE) {
-        --stage_max; 
+
+        --stage_max;
         continue;
+
       }
 
       /* Insert token */
@@ -1386,8 +1473,10 @@ skip_interest:
       memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
 
       if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
+
         ck_free(ex_tmp);
         goto abandon_entry;
+
       }
 
       ++stage_cur;
@@ -1403,17 +1492,17 @@ skip_interest:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_UI]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_EXTRAS_UI] += stage_max;
 
 skip_user_extras:
 
   if (!a_extras_cnt) goto skip_extras;
 
-  stage_name  = "auto extras (over)";
+  stage_name = "auto extras (over)";
   stage_short = "ext_AO";
-  stage_cur   = 0;
-  stage_max   = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
+  stage_cur = 0;
+  stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
 
   stage_val_type = STAGE_VAL_NONE;
 
@@ -1431,7 +1520,8 @@ skip_user_extras:
 
       if (a_extras[j].len > len - i ||
           !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
-          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) {
+          !memchr(eff_map + EFF_APOS(i), 1,
+                  EFF_SPAN_ALEN(i, a_extras[j].len))) {
 
         --stage_max;
         continue;
@@ -1454,7 +1544,7 @@ skip_user_extras:
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_AO]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_EXTRAS_AO] += stage_max;
 
 skip_extras:
@@ -1473,36 +1563,51 @@ python_stage:
 
   if (!py_module) goto havoc_stage;
 
-  stage_name  = "python";
+  stage_name = "python";
   stage_short = "python";
-  stage_max   = HAVOC_CYCLES * perf_score / havoc_div / 100;
+  stage_max = HAVOC_CYCLES * perf_score / havoc_div / 100;
 
   if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
 
   orig_hit_cnt = queued_paths + unique_crashes;
 
-  char* retbuf = NULL;
+  char*  retbuf = NULL;
   size_t retlen = 0;
 
   for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
     struct queue_entry* target;
-    u32 tid;
-    u8* new_buf;
+    u32                 tid;
+    u8*                 new_buf;
 
-retry_external_pick:
+  retry_external_pick:
     /* Pick a random other queue entry for passing to external API */
-    do { tid = UR(queued_paths); } while (tid == current_entry && queued_paths > 1);
+    do {
+
+      tid = UR(queued_paths);
+
+    } while (tid == current_entry && queued_paths > 1);
 
     target = queue;
 
-    while (tid >= 100) { target = target->next_100; tid -= 100; }
-    while (tid--) target = target->next;
+    while (tid >= 100) {
+
+      target = target->next_100;
+      tid -= 100;
+
+    }
+
+    while (tid--)
+      target = target->next;
 
     /* Make sure that the target has a reasonable length. */
 
-    while (target && (target->len < 2 || target == queue_cur) && queued_paths > 1) {
+    while (target && (target->len < 2 || target == queue_cur) &&
+           queued_paths > 1) {
+
       target = target->next;
       ++splicing_with;
+
     }
 
     if (!target) goto retry_external_pick;
@@ -1519,12 +1624,14 @@ retry_external_pick:
     ck_free(new_buf);
 
     if (retbuf) {
-      if (!retlen)
-        goto abandon_entry;
+
+      if (!retlen) goto abandon_entry;
 
       if (common_fuzz_stuff(argv, retbuf, retlen)) {
+
         free(retbuf);
         goto abandon_entry;
+
       }
 
       /* Reset retbuf/retlen */
@@ -1536,26 +1643,35 @@ retry_external_pick:
          permitting. */
 
       if (queued_paths != havoc_queued) {
+
         if (perf_score <= havoc_max_mult * 100) {
-          stage_max  *= 2;
+
+          stage_max *= 2;
           perf_score *= 2;
+
         }
 
         havoc_queued = queued_paths;
+
       }
+
     }
+
   }
 
   new_hit_cnt = queued_paths + unique_crashes;
 
-  stage_finds[STAGE_PYTHON]  += new_hit_cnt - orig_hit_cnt;
+  stage_finds[STAGE_PYTHON] += new_hit_cnt - orig_hit_cnt;
   stage_cycles[STAGE_PYTHON] += stage_max;
 
   if (python_only) {
+
     /* Skip other stages */
     ret_val = 0;
     goto abandon_entry;
+
   }
+
 #endif
 
   /****************
@@ -1571,10 +1687,10 @@ havoc_stage:
 
   if (!splice_cycle) {
 
-    stage_name  = "havoc";
+    stage_name = "havoc";
     stage_short = "havoc";
-    stage_max   = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-                  perf_score / havoc_div / 100;
+    stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
+                havoc_div / 100;
 
   } else {
 
@@ -1583,9 +1699,9 @@ havoc_stage:
     perf_score = orig_perf;
 
     sprintf(tmp, "splice %u", splice_cycle);
-    stage_name  = tmp;
+    stage_name = tmp;
     stage_short = "splice";
-    stage_max   = SPLICE_HAVOC * perf_score / havoc_div / 100;
+    stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
 
   }
 
@@ -1605,7 +1721,7 @@ havoc_stage:
     u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
 
     stage_cur_val = use_stacking;
- 
+
     for (i = 0; i < use_stacking; ++i) {
 
       switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) {
@@ -1617,7 +1733,7 @@ havoc_stage:
           FLIP_BIT(out_buf, UR(temp_len << 3));
           break;
 
-        case 1: 
+        case 1:
 
           /* Set byte to interesting value. */
 
@@ -1633,12 +1749,12 @@ havoc_stage:
           if (UR(2)) {
 
             *(u16*)(out_buf + UR(temp_len - 1)) =
-              interesting_16[UR(sizeof(interesting_16) >> 1)];
+                interesting_16[UR(sizeof(interesting_16) >> 1)];
 
           } else {
 
-            *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16(
-              interesting_16[UR(sizeof(interesting_16) >> 1)]);
+            *(u16*)(out_buf + UR(temp_len - 1)) =
+                SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]);
 
           }
 
@@ -1651,14 +1767,14 @@ havoc_stage:
           if (temp_len < 4) break;
 
           if (UR(2)) {
-  
+
             *(u32*)(out_buf + UR(temp_len - 3)) =
-              interesting_32[UR(sizeof(interesting_32) >> 2)];
+                interesting_32[UR(sizeof(interesting_32) >> 2)];
 
           } else {
 
-            *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32(
-              interesting_32[UR(sizeof(interesting_32) >> 2)]);
+            *(u32*)(out_buf + UR(temp_len - 3)) =
+                SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]);
 
           }
 
@@ -1696,7 +1812,7 @@ havoc_stage:
             u16 num = 1 + UR(ARITH_MAX);
 
             *(u16*)(out_buf + pos) =
-              SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
+                SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
 
           }
 
@@ -1720,7 +1836,7 @@ havoc_stage:
             u16 num = 1 + UR(ARITH_MAX);
 
             *(u16*)(out_buf + pos) =
-              SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
+                SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
 
           }
 
@@ -1744,7 +1860,7 @@ havoc_stage:
             u32 num = 1 + UR(ARITH_MAX);
 
             *(u32*)(out_buf + pos) =
-              SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
+                SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
 
           }
 
@@ -1768,7 +1884,7 @@ havoc_stage:
             u32 num = 1 + UR(ARITH_MAX);
 
             *(u32*)(out_buf + pos) =
-              SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
+                SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
 
           }
 
@@ -1785,28 +1901,28 @@ havoc_stage:
 
         case 11 ... 12: {
 
-            /* Delete bytes. We're making this a bit more likely
-               than insertion (the next option) in hopes of keeping
-               files reasonably small. */
+          /* Delete bytes. We're making this a bit more likely
+             than insertion (the next option) in hopes of keeping
+             files reasonably small. */
 
-            u32 del_from, del_len;
+          u32 del_from, del_len;
 
-            if (temp_len < 2) break;
+          if (temp_len < 2) break;
 
-            /* Don't delete too much. */
+          /* Don't delete too much. */
 
-            del_len = choose_block_len(temp_len - 1);
+          del_len = choose_block_len(temp_len - 1);
 
-            del_from = UR(temp_len - del_len + 1);
+          del_from = UR(temp_len - del_len + 1);
 
-            memmove(out_buf + del_from, out_buf + del_from + del_len,
-                    temp_len - del_from - del_len);
+          memmove(out_buf + del_from, out_buf + del_from + del_len,
+                  temp_len - del_from - del_len);
 
-            temp_len -= del_len;
+          temp_len -= del_len;
 
-            break;
+          break;
 
-          }
+        }
 
         case 13:
 
@@ -1820,7 +1936,7 @@ havoc_stage:
 
             if (actually_clone) {
 
-              clone_len  = choose_block_len(temp_len);
+              clone_len = choose_block_len(temp_len);
               clone_from = UR(temp_len - clone_len + 1);
 
             } else {
@@ -1830,7 +1946,7 @@ havoc_stage:
 
             }
 
-            clone_to   = UR(temp_len);
+            clone_to = UR(temp_len);
 
             new_buf = ck_alloc_nozero(temp_len + clone_len);
 
@@ -1860,128 +1976,129 @@ havoc_stage:
 
         case 14: {
 
-            /* Overwrite bytes with a randomly selected chunk (75%) or fixed
-               bytes (25%). */
+          /* Overwrite bytes with a randomly selected chunk (75%) or fixed
+             bytes (25%). */
 
-            u32 copy_from, copy_to, copy_len;
+          u32 copy_from, copy_to, copy_len;
 
-            if (temp_len < 2) break;
+          if (temp_len < 2) break;
 
-            copy_len  = choose_block_len(temp_len - 1);
+          copy_len = choose_block_len(temp_len - 1);
 
-            copy_from = UR(temp_len - copy_len + 1);
-            copy_to   = UR(temp_len - copy_len + 1);
+          copy_from = UR(temp_len - copy_len + 1);
+          copy_to = UR(temp_len - copy_len + 1);
 
-            if (UR(4)) {
+          if (UR(4)) {
 
-              if (copy_from != copy_to)
-                memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
+            if (copy_from != copy_to)
+              memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
 
-            } else memset(out_buf + copy_to,
-                          UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
+          } else
 
-            break;
+            memset(out_buf + copy_to, UR(2) ? UR(256) : out_buf[UR(temp_len)],
+                   copy_len);
 
-          }
+          break;
+
+        }
 
-        /* Values 15 and 16 can be selected only if there are any extras
-           present in the dictionaries. */
+          /* Values 15 and 16 can be selected only if there are any extras
+             present in the dictionaries. */
 
         case 15: {
 
-            /* Overwrite bytes with an extra. */
+          /* Overwrite bytes with an extra. */
 
-            if (!extras_cnt || (a_extras_cnt && UR(2))) {
+          if (!extras_cnt || (a_extras_cnt && UR(2))) {
 
-              /* No user-specified extras or odds in our favor. Let's use an
-                 auto-detected one. */
+            /* No user-specified extras or odds in our favor. Let's use an
+               auto-detected one. */
 
-              u32 use_extra = UR(a_extras_cnt);
-              u32 extra_len = a_extras[use_extra].len;
-              u32 insert_at;
+            u32 use_extra = UR(a_extras_cnt);
+            u32 extra_len = a_extras[use_extra].len;
+            u32 insert_at;
 
-              if (extra_len > temp_len) break;
+            if (extra_len > temp_len) break;
 
-              insert_at = UR(temp_len - extra_len + 1);
-              memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len);
+            insert_at = UR(temp_len - extra_len + 1);
+            memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len);
 
-            } else {
+          } else {
 
-              /* No auto extras or odds in our favor. Use the dictionary. */
+            /* No auto extras or odds in our favor. Use the dictionary. */
 
-              u32 use_extra = UR(extras_cnt);
-              u32 extra_len = extras[use_extra].len;
-              u32 insert_at;
+            u32 use_extra = UR(extras_cnt);
+            u32 extra_len = extras[use_extra].len;
+            u32 insert_at;
 
-              if (extra_len > temp_len) break;
+            if (extra_len > temp_len) break;
 
-              insert_at = UR(temp_len - extra_len + 1);
-              memcpy(out_buf + insert_at, extras[use_extra].data, extra_len);
+            insert_at = UR(temp_len - extra_len + 1);
+            memcpy(out_buf + insert_at, extras[use_extra].data, extra_len);
 
-            }
+          }
 
-            break;
+          break;
 
-          }
+        }
 
         case 16: {
 
-            u32 use_extra, extra_len, insert_at = UR(temp_len + 1);
-            u8* new_buf;
+          u32 use_extra, extra_len, insert_at = UR(temp_len + 1);
+          u8* new_buf;
 
-            /* Insert an extra. Do the same dice-rolling stuff as for the
-               previous case. */
+          /* Insert an extra. Do the same dice-rolling stuff as for the
+             previous case. */
 
-            if (!extras_cnt || (a_extras_cnt && UR(2))) {
+          if (!extras_cnt || (a_extras_cnt && UR(2))) {
 
-              use_extra = UR(a_extras_cnt);
-              extra_len = a_extras[use_extra].len;
+            use_extra = UR(a_extras_cnt);
+            extra_len = a_extras[use_extra].len;
 
-              if (temp_len + extra_len >= MAX_FILE) break;
+            if (temp_len + extra_len >= MAX_FILE) break;
 
-              new_buf = ck_alloc_nozero(temp_len + extra_len);
+            new_buf = ck_alloc_nozero(temp_len + extra_len);
 
-              /* Head */
-              memcpy(new_buf, out_buf, insert_at);
+            /* Head */
+            memcpy(new_buf, out_buf, insert_at);
 
-              /* Inserted part */
-              memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len);
+            /* Inserted part */
+            memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len);
 
-	    } else {
+          } else {
 
-              use_extra = UR(extras_cnt);
-              extra_len = extras[use_extra].len;
+            use_extra = UR(extras_cnt);
+            extra_len = extras[use_extra].len;
 
-              if (temp_len + extra_len >= MAX_FILE) break;
+            if (temp_len + extra_len >= MAX_FILE) break;
 
-              new_buf = ck_alloc_nozero(temp_len + extra_len);
+            new_buf = ck_alloc_nozero(temp_len + extra_len);
 
-              /* Head */
-              memcpy(new_buf, out_buf, insert_at);
+            /* Head */
+            memcpy(new_buf, out_buf, insert_at);
 
-              /* Inserted part */
-              memcpy(new_buf + insert_at, extras[use_extra].data, extra_len);
+            /* Inserted part */
+            memcpy(new_buf + insert_at, extras[use_extra].data, extra_len);
 
-            }
+          }
 
-            /* Tail */
-            memcpy(new_buf + insert_at + extra_len, out_buf + insert_at,
-                   temp_len - insert_at);
+          /* Tail */
+          memcpy(new_buf + insert_at + extra_len, out_buf + insert_at,
+                 temp_len - insert_at);
 
-            ck_free(out_buf);
-            out_buf   = new_buf;
-            temp_len += extra_len;
+          ck_free(out_buf);
+          out_buf = new_buf;
+          temp_len += extra_len;
 
-            break;
+          break;
 
-          }
+        }
 
       }
 
     }
 
-    if (common_fuzz_stuff(argv, out_buf, temp_len))
-      goto abandon_entry;
+    if (common_fuzz_stuff(argv, out_buf, temp_len)) goto abandon_entry;
 
     /* out_buf might have been mangled a bit, so let's restore it to its
        original size and shape. */
@@ -1996,8 +2113,10 @@ havoc_stage:
     if (queued_paths != havoc_queued) {
 
       if (perf_score <= havoc_max_mult * 100) {
-        stage_max  *= 2;
+
+        stage_max *= 2;
         perf_score *= 2;
+
       }
 
       havoc_queued = queued_paths;
@@ -2009,11 +2128,15 @@ havoc_stage:
   new_hit_cnt = queued_paths + unique_crashes;
 
   if (!splice_cycle) {
-    stage_finds[STAGE_HAVOC]  += new_hit_cnt - orig_hit_cnt;
+
+    stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt;
     stage_cycles[STAGE_HAVOC] += stage_max;
+
   } else {
-    stage_finds[STAGE_SPLICE]  += new_hit_cnt - orig_hit_cnt;
+
+    stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt;
     stage_cycles[STAGE_SPLICE] += stage_max;
+
   }
 
 #ifndef IGNORE_FINDS
@@ -2029,38 +2152,53 @@ havoc_stage:
 
 retry_splicing:
 
-  if (use_splicing && splice_cycle++ < SPLICE_CYCLES &&
-      queued_paths > 1 && queue_cur->len > 1) {
+  if (use_splicing && splice_cycle++ < SPLICE_CYCLES && queued_paths > 1 &&
+      queue_cur->len > 1) {
 
     struct queue_entry* target;
-    u32 tid, split_at;
-    u8* new_buf;
-    s32 f_diff, l_diff;
+    u32                 tid, split_at;
+    u8*                 new_buf;
+    s32                 f_diff, l_diff;
 
     /* First of all, if we've modified in_buf for havoc, let's clean that
        up... */
 
     if (in_buf != orig_in) {
+
       ck_free(in_buf);
       in_buf = orig_in;
       len = queue_cur->len;
+
     }
 
     /* Pick a random queue entry and seek to it. Don't splice with yourself. */
 
-    do { tid = UR(queued_paths); } while (tid == current_entry);
+    do {
+
+      tid = UR(queued_paths);
+
+    } while (tid == current_entry);
 
     splicing_with = tid;
     target = queue;
 
-    while (tid >= 100) { target = target->next_100; tid -= 100; }
-    while (tid--) target = target->next;
+    while (tid >= 100) {
+
+      target = target->next_100;
+      tid -= 100;
+
+    }
+
+    while (tid--)
+      target = target->next;
 
     /* Make sure that the target has a reasonable length. */
 
     while (target && (target->len < 2 || target == queue_cur)) {
+
       target = target->next;
       ++splicing_with;
+
     }
 
     if (!target) goto retry_splicing;
@@ -2084,8 +2222,10 @@ retry_splicing:
     locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
 
     if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
+
       ck_free(new_buf);
       goto retry_splicing;
+
     }
 
     /* Split somewhere between the first and last differing byte. */
@@ -2102,11 +2242,11 @@ retry_splicing:
     out_buf = ck_alloc_nozero(len);
     memcpy(out_buf, in_buf, len);
 
-#ifdef USE_PYTHON
+#  ifdef USE_PYTHON
     goto python_stage;
-#else
+#  else
     goto havoc_stage;
-#endif
+#  endif
 
   }
 
@@ -2121,10 +2261,13 @@ abandon_entry:
   /* Update pending_not_fuzzed count if we made it through the calibration
      cycle and have not seen this entry before. */
 
-  if (!stop_soon && !queue_cur->cal_failed && (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) {
+  if (!stop_soon && !queue_cur->cal_failed &&
+      (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) {
+
     --pending_not_fuzzed;
     queue_cur->was_fuzzed = 1;
     if (queue_cur->favored) --pending_favored;
+
   }
 
   ++queue_cur->fuzz_level;
@@ -2144,3576 +2287,3738 @@ abandon_entry:
 /* MOpt mode */
 u8 pilot_fuzzing(char** argv) {
 
-	s32 len, fd, temp_len, i, j;
-	u8  *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
-	u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv;
-	u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
+  s32 len, fd, temp_len, i, j;
+  u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
+  u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv;
+  u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
 
-	u8  ret_val = 1, doing_det = 0;
+  u8 ret_val = 1, doing_det = 0;
 
-	u8  a_collect[MAX_AUTO_EXTRA];
-	u32 a_len = 0;
+  u8  a_collect[MAX_AUTO_EXTRA];
+  u32 a_len = 0;
 
 #ifdef IGNORE_FINDS
 
-	/* In IGNORE_FINDS mode, skip any entries that weren't in the
-	   initial data set. */
+  /* In IGNORE_FINDS mode, skip any entries that weren't in the
+     initial data set. */
 
-	if (queue_cur->depth > 1) return 1;
+  if (queue_cur->depth > 1) return 1;
 
 #else
 
-	if (pending_favored) {
+  if (pending_favored) {
 
-		/* If we have any favored, non-fuzzed new arrivals in the queue,
-		   possibly skip to them at the expense of already-fuzzed or non-favored
-		   cases. */
+    /* If we have any favored, non-fuzzed new arrivals in the queue,
+       possibly skip to them at the expense of already-fuzzed or non-favored
+       cases. */
 
-		if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
-			UR(100) < SKIP_TO_NEW_PROB) return 1;
+    if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
+        UR(100) < SKIP_TO_NEW_PROB)
+      return 1;
 
-	}
-	else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
+  } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
 
-		/* Otherwise, still possibly skip non-favored cases, albeit less often.
-		   The odds of skipping stuff are higher for already-fuzzed inputs and
-		   lower for never-fuzzed entries. */
+    /* Otherwise, still possibly skip non-favored cases, albeit less often.
+       The odds of skipping stuff are higher for already-fuzzed inputs and
+       lower for never-fuzzed entries. */
 
-		if (queue_cycle > 1 && !queue_cur->was_fuzzed) {
+    if (queue_cycle > 1 && !queue_cur->was_fuzzed) {
 
-			if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
+      if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
 
-		}
-		else {
+    } else {
 
-			if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
+      if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
 
-		}
+    }
 
-	}
+  }
 
 #endif /* ^IGNORE_FINDS */
 
-	if (not_on_tty) {
-		ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
-			current_entry, queued_paths, unique_crashes);
-		fflush(stdout);
-	}
+  if (not_on_tty) {
 
-	/* Map the test case into memory. */
+    ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
+         current_entry, queued_paths, unique_crashes);
+    fflush(stdout);
 
-	fd = open(queue_cur->fname, O_RDONLY);
+  }
 
-	if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
+  /* Map the test case into memory. */
 
-	len = queue_cur->len;
+  fd = open(queue_cur->fname, O_RDONLY);
 
-	orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+  if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
 
-	if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname);
+  len = queue_cur->len;
 
-	close(fd);
+  orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 
-	/* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
-	   single byte anyway, so it wouldn't give us any performance or memory usage
-	   benefits. */
+  if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname);
 
-	out_buf = ck_alloc_nozero(len);
+  close(fd);
 
-	subseq_tmouts = 0;
+  /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
+     single byte anyway, so it wouldn't give us any performance or memory usage
+     benefits. */
 
-	cur_depth = queue_cur->depth;
+  out_buf = ck_alloc_nozero(len);
 
-	/*******************************************
-	 * CALIBRATION (only if failed earlier on) *
-	 *******************************************/
+  subseq_tmouts = 0;
 
-	if (queue_cur->cal_failed) {
+  cur_depth = queue_cur->depth;
 
-		u8 res = FAULT_TMOUT;
+  /*******************************************
+   * CALIBRATION (only if failed earlier on) *
+   *******************************************/
 
-		if (queue_cur->cal_failed < CAL_CHANCES) {
+  if (queue_cur->cal_failed) {
 
-			res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
+    u8 res = FAULT_TMOUT;
 
-			if (res == FAULT_ERROR)
-				FATAL("Unable to execute target application");
+    if (queue_cur->cal_failed < CAL_CHANCES) {
 
-		}
+      res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
 
-		if (stop_soon || res != crash_mode) {
-			++cur_skipped_paths;
-			goto abandon_entry;
-		}
+      if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
-	}
+    }
 
-	/************
-	 * TRIMMING *
-	 ************/
+    if (stop_soon || res != crash_mode) {
 
-	if (!dumb_mode && !queue_cur->trim_done) {
+      ++cur_skipped_paths;
+      goto abandon_entry;
 
-		u8 res = trim_case(argv, queue_cur, in_buf);
+    }
 
-		if (res == FAULT_ERROR)
-			FATAL("Unable to execute target application");
+  }
 
-		if (stop_soon) {
-			++cur_skipped_paths;
-			goto abandon_entry;
-		}
+  /************
+   * TRIMMING *
+   ************/
 
-		/* Don't retry trimming, even if it failed. */
+  if (!dumb_mode && !queue_cur->trim_done) {
 
-		queue_cur->trim_done = 1;
+    u8 res = trim_case(argv, queue_cur, in_buf);
+
+    if (res == FAULT_ERROR) FATAL("Unable to execute target application");
+
+    if (stop_soon) {
 
-		len = queue_cur->len;
+      ++cur_skipped_paths;
+      goto abandon_entry;
+
+    }
+
+    /* Don't retry trimming, even if it failed. */
 
-	}
+    queue_cur->trim_done = 1;
 
-	memcpy(out_buf, in_buf, len);
+    len = queue_cur->len;
 
-	/*********************
-	 * PERFORMANCE SCORE *
-	 *********************/
+  }
 
-	orig_perf = perf_score = calculate_score(queue_cur);
+  memcpy(out_buf, in_buf, len);
 
-	/* Skip right away if -d is given, if we have done deterministic fuzzing on
-	   this entry ourselves (was_fuzzed), or if it has gone through deterministic
-	   testing in earlier, resumed runs (passed_det). */
+  /*********************
+   * PERFORMANCE SCORE *
+   *********************/
 
-	if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
-		goto havoc_stage;
+  orig_perf = perf_score = calculate_score(queue_cur);
 
-	/* Skip deterministic fuzzing if exec path checksum puts this out of scope
-	   for this master instance. */
+  /* Skip right away if -d is given, if we have done deterministic fuzzing on
+     this entry ourselves (was_fuzzed), or if it has gone through deterministic
+     testing in earlier, resumed runs (passed_det). */
 
-	if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
-		goto havoc_stage;
+  if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
+    goto havoc_stage;
 
+  /* Skip deterministic fuzzing if exec path checksum puts this out of scope
+     for this master instance. */
 
-	cur_ms_lv = get_cur_time();
-	if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) ||
-		(last_crash_time != 0 && cur_ms_lv - last_crash_time < limit_time_puppet) || last_path_time == 0)))
-	{
-		key_puppet = 1;
-		goto pacemaker_fuzzing;
-	}
+  if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
+    goto havoc_stage;
 
-	doing_det = 1;
+  cur_ms_lv = get_cur_time();
+  if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) ||
+                            (last_crash_time != 0 &&
+                             cur_ms_lv - last_crash_time < limit_time_puppet) ||
+                            last_path_time == 0))) {
 
-		/*********************************************
-		 * SIMPLE BITFLIP (+dictionary construction) *
-		 *********************************************/
+    key_puppet = 1;
+    goto pacemaker_fuzzing;
 
-#define FLIP_BIT(_ar, _b) do { \
-    u8* _arf = (u8*)(_ar); \
-    u32 _bf = (_b); \
-    _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
+  }
+
+  doing_det = 1;
+
+  /*********************************************
+   * SIMPLE BITFLIP (+dictionary construction) *
+   *********************************************/
+
+#define FLIP_BIT(_ar, _b)                   \
+  do {                                      \
+                                            \
+    u8* _arf = (u8*)(_ar);                  \
+    u32 _bf = (_b);                         \
+    _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \
+                                            \
   } while (0)
 
-		 /* Single walking bit. */
+  /* Single walking bit. */
 
-		stage_short = "flip1";
-		stage_max = len << 3;
-		stage_name = "bitflip 1/1";
+  stage_short = "flip1";
+  stage_max = len << 3;
+  stage_name = "bitflip 1/1";
 
+  stage_val_type = STAGE_VAL_NONE;
 
+  orig_hit_cnt = queued_paths + unique_crashes;
 
+  prev_cksum = queue_cur->exec_cksum;
 
-		stage_val_type = STAGE_VAL_NONE;
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
-		orig_hit_cnt = queued_paths + unique_crashes;
+    stage_cur_byte = stage_cur >> 3;
 
-		prev_cksum = queue_cur->exec_cksum;
+    FLIP_BIT(out_buf, stage_cur);
 
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-			stage_cur_byte = stage_cur >> 3;
+    FLIP_BIT(out_buf, stage_cur);
 
-			FLIP_BIT(out_buf, stage_cur);
+    /* While flipping the least significant bit in every byte, pull of an extra
+       trick to detect possible syntax tokens. In essence, the idea is that if
+       you have a binary blob like this:
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+       xxxxxxxxIHDRxxxxxxxx
 
-			FLIP_BIT(out_buf, stage_cur);
+       ...and changing the leading and trailing bytes causes variable or no
+       changes in program flow, but touching any character in the "IHDR" string
+       always produces the same, distinctive path, it's highly likely that
+       "IHDR" is an atomically-checked magic value of special significance to
+       the fuzzed format.
 
-			/* While flipping the least significant bit in every byte, pull of an extra
-			   trick to detect possible syntax tokens. In essence, the idea is that if
-			   you have a binary blob like this:
+       We do this here, rather than as a separate stage, because it's a nice
+       way to keep the operation approximately "free" (i.e., no extra execs).
 
-			   xxxxxxxxIHDRxxxxxxxx
+       Empirically, performing the check when flipping the least significant bit
+       is advantageous, compared to doing it at the time of more disruptive
+       changes, where the program flow may be affected in more violent ways.
 
-			   ...and changing the leading and trailing bytes causes variable or no
-			   changes in program flow, but touching any character in the "IHDR" string
-			   always produces the same, distinctive path, it's highly likely that
-			   "IHDR" is an atomically-checked magic value of special significance to
-			   the fuzzed format.
+       The caveat is that we won't generate dictionaries in the -d mode or -S
+       mode - but that's probably a fair trade-off.
 
-			   We do this here, rather than as a separate stage, because it's a nice
-			   way to keep the operation approximately "free" (i.e., no extra execs).
+       This won't work particularly well with paths that exhibit variable
+       behavior, but fails gracefully, so we'll carry out the checks anyway.
 
-			   Empirically, performing the check when flipping the least significant bit
-			   is advantageous, compared to doing it at the time of more disruptive
-			   changes, where the program flow may be affected in more violent ways.
+      */
 
-			   The caveat is that we won't generate dictionaries in the -d mode or -S
-			   mode - but that's probably a fair trade-off.
+    if (!dumb_mode && (stage_cur & 7) == 7) {
 
-			   This won't work particularly well with paths that exhibit variable
-			   behavior, but fails gracefully, so we'll carry out the checks anyway.
+      u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
 
-			  */
+      if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
 
-			if (!dumb_mode && (stage_cur & 7) == 7) {
+        /* If at end of file and we are still collecting a string, grab the
+           final character and force output. */
 
-				u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        ++a_len;
 
-				if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
+        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
+          maybe_add_auto(a_collect, a_len);
 
-					/* If at end of file and we are still collecting a string, grab the
-					   final character and force output. */
+      } else if (cksum != prev_cksum) {
 
-					if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-					++a_len;
+        /* Otherwise, if the checksum has changed, see if we have something
+           worthwhile queued up, and collect that if the answer is yes. */
 
-					if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-						maybe_add_auto(a_collect, a_len);
+        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
+          maybe_add_auto(a_collect, a_len);
 
-				}
-				else if (cksum != prev_cksum) {
+        a_len = 0;
+        prev_cksum = cksum;
+
+      }
+
+      /* Continue collecting string, but only if the bit flip actually made
+         any difference - we don't want no-op tokens. */
+
+      if (cksum != queue_cur->exec_cksum) {
+
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        ++a_len;
+
+      }
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP1] += stage_max;
+
+  /* Two walking bits. */
+
+  stage_name = "bitflip 2/1";
+  stage_short = "flip2";
+  stage_max = (len << 3) - 1;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    stage_cur_byte = stage_cur >> 3;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
 
-					/* Otherwise, if the checksum has changed, see if we have something
-					   worthwhile queued up, and collect that if the answer is yes. */
+  }
 
-					if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-						maybe_add_auto(a_collect, a_len);
+  new_hit_cnt = queued_paths + unique_crashes;
 
-					a_len = 0;
-					prev_cksum = cksum;
+  stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP2] += stage_max;
 
-				}
+  /* Four walking bits. */
 
-				/* Continue collecting string, but only if the bit flip actually made
-				   any difference - we don't want no-op tokens. */
+  stage_name = "bitflip 4/1";
+  stage_short = "flip4";
+  stage_max = (len << 3) - 3;
 
-				if (cksum != queue_cur->exec_cksum) {
+  orig_hit_cnt = new_hit_cnt;
 
-					if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-					++a_len;
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    stage_cur_byte = stage_cur >> 3;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, stage_cur + 2);
+    FLIP_BIT(out_buf, stage_cur + 3);
 
-				}
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-			}
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, stage_cur + 2);
+    FLIP_BIT(out_buf, stage_cur + 3);
 
-		}
+  }
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-		stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP1] += stage_max;
+  stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP4] += stage_max;
 
-		/* Two walking bits. */
+  /* Effector map setup. These macros calculate:
 
-		stage_name = "bitflip 2/1";
-		stage_short = "flip2";
-		stage_max = (len << 3) - 1;
+     EFF_APOS      - position of a particular file offset in the map.
+     EFF_ALEN      - length of a map with a particular number of bytes.
+     EFF_SPAN_ALEN - map span for a sequence of bytes.
 
-		orig_hit_cnt = new_hit_cnt;
+   */
 
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
+#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
+#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
+#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1)
 
-			stage_cur_byte = stage_cur >> 3;
+  /* Initialize effector map for the next step (see comments below). Always
+         flag first and last byte as doing something. */
 
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
+  eff_map = ck_alloc(EFF_ALEN(len));
+  eff_map[0] = 1;
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+  if (EFF_APOS(len - 1) != 0) {
 
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
+    eff_map[EFF_APOS(len - 1)] = 1;
+    ++eff_cnt;
 
-		}
+  }
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  /* Walking byte. */
 
-		stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP2] += stage_max;
+  stage_name = "bitflip 8/8";
+  stage_short = "flip8";
+  stage_max = len;
 
+  orig_hit_cnt = new_hit_cnt;
 
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
-		/* Four walking bits. */
+    stage_cur_byte = stage_cur;
 
-		stage_name = "bitflip 4/1";
-		stage_short = "flip4";
-		stage_max = (len << 3) - 3;
+    out_buf[stage_cur] ^= 0xFF;
 
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
+    /* We also use this stage to pull off a simple trick: we identify
+       bytes that seem to have no effect on the current execution path
+       even when fully flipped - and we skip them during more expensive
+       deterministic stages, such as arithmetics or known ints. */
 
+    if (!eff_map[EFF_APOS(stage_cur)]) {
 
+      u32 cksum;
 
-		orig_hit_cnt = new_hit_cnt;
+      /* If in dumb mode or if the file is very short, just flag everything
+         without wasting time on checksums. */
 
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+      if (!dumb_mode && len >= EFF_MIN_LEN)
+        cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      else
+        cksum = ~queue_cur->exec_cksum;
 
-			stage_cur_byte = stage_cur >> 3;
+      if (cksum != queue_cur->exec_cksum) {
 
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
-			FLIP_BIT(out_buf, stage_cur + 2);
-			FLIP_BIT(out_buf, stage_cur + 3);
+        eff_map[EFF_APOS(stage_cur)] = 1;
+        ++eff_cnt;
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      }
 
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
-			FLIP_BIT(out_buf, stage_cur + 2);
-			FLIP_BIT(out_buf, stage_cur + 3);
+    }
 
-		}
+    out_buf[stage_cur] ^= 0xFF;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  }
 
-		stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP4] += stage_max;
+  /* If the effector map is more than EFF_MAX_PERC dense, just flag the
+     whole thing as worth fuzzing, since we wouldn't be saving much time
+     anyway. */
 
+  if (eff_cnt != EFF_ALEN(len) &&
+      eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
 
+    memset(eff_map, 1, EFF_ALEN(len));
 
+    blocks_eff_select += EFF_ALEN(len);
 
-		/* Effector map setup. These macros calculate:
+  } else {
 
-		   EFF_APOS      - position of a particular file offset in the map.
-		   EFF_ALEN      - length of a map with a particular number of bytes.
-		   EFF_SPAN_ALEN - map span for a sequence of bytes.
+    blocks_eff_select += eff_cnt;
 
-		 */
+  }
 
-#define EFF_APOS(_p)          ((_p) >> EFF_MAP_SCALE2)
-#define EFF_REM(_x)           ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
-#define EFF_ALEN(_l)          (EFF_APOS(_l) + !!EFF_REM(_l))
-#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)
+  blocks_eff_total += EFF_ALEN(len);
 
-		 /* Initialize effector map for the next step (see comments below). Always
-			flag first and last byte as doing something. */
+  new_hit_cnt = queued_paths + unique_crashes;
 
-		eff_map = ck_alloc(EFF_ALEN(len));
-		eff_map[0] = 1;
+  stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP8] += stage_max;
 
-		if (EFF_APOS(len - 1) != 0) {
-			eff_map[EFF_APOS(len - 1)] = 1;
-			++eff_cnt;
-		}
+  /* Two walking bytes. */
 
-		/* Walking byte. */
+  if (len < 2) goto skip_bitflip;
 
-		stage_name = "bitflip 8/8";
-		stage_short = "flip8";
-		stage_max = len;
+  stage_name = "bitflip 16/8";
+  stage_short = "flip16";
+  stage_cur = 0;
+  stage_max = len - 1;
 
+  orig_hit_cnt = new_hit_cnt;
 
+  for (i = 0; i < len - 1; ++i) {
 
-		orig_hit_cnt = new_hit_cnt;
+    /* Let's consult the effector map... */
 
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-			stage_cur_byte = stage_cur;
+      --stage_max;
+      continue;
 
-			out_buf[stage_cur] ^= 0xFF;
+    }
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    stage_cur_byte = i;
 
-			/* We also use this stage to pull off a simple trick: we identify
-			   bytes that seem to have no effect on the current execution path
-			   even when fully flipped - and we skip them during more expensive
-			   deterministic stages, such as arithmetics or known ints. */
+    *(u16*)(out_buf + i) ^= 0xFFFF;
 
-			if (!eff_map[EFF_APOS(stage_cur)]) {
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    ++stage_cur;
 
-				u32 cksum;
+    *(u16*)(out_buf + i) ^= 0xFFFF;
 
-				/* If in dumb mode or if the file is very short, just flag everything
-				   without wasting time on checksums. */
+  }
 
-				if (!dumb_mode && len >= EFF_MIN_LEN)
-					cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
-				else
-					cksum = ~queue_cur->exec_cksum;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-				if (cksum != queue_cur->exec_cksum) {
-					eff_map[EFF_APOS(stage_cur)] = 1;
-					++eff_cnt;
-				}
+  stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP16] += stage_max;
 
-			}
+  if (len < 4) goto skip_bitflip;
 
-			out_buf[stage_cur] ^= 0xFF;
+  /* Four walking bytes. */
 
-		}
+  stage_name = "bitflip 32/8";
+  stage_short = "flip32";
+  stage_cur = 0;
+  stage_max = len - 3;
 
-		/* If the effector map is more than EFF_MAX_PERC dense, just flag the
-		   whole thing as worth fuzzing, since we wouldn't be saving much time
-		   anyway. */
+  orig_hit_cnt = new_hit_cnt;
 
-		if (eff_cnt != EFF_ALEN(len) &&
-			eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
+  for (i = 0; i < len - 3; ++i) {
 
-			memset(eff_map, 1, EFF_ALEN(len));
+    /* Let's consult the effector map... */
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-			blocks_eff_select += EFF_ALEN(len);
+      --stage_max;
+      continue;
 
-		}
-		else {
+    }
 
-			blocks_eff_select += eff_cnt;
+    stage_cur_byte = i;
 
-		}
+    *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
-		blocks_eff_total += EFF_ALEN(len);
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    ++stage_cur;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+    *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
-		stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP8] += stage_max;
+  }
 
+  new_hit_cnt = queued_paths + unique_crashes;
 
+  stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP32] += stage_max;
 
+skip_bitflip:
 
+  if (no_arith) goto skip_arith;
 
-		/* Two walking bytes. */
+  /**********************
+   * ARITHMETIC INC/DEC *
+   **********************/
 
-		if (len < 2) goto skip_bitflip;
+  /* 8-bit arithmetics. */
 
-		stage_name = "bitflip 16/8";
-		stage_short = "flip16";
-		stage_cur = 0;
-		stage_max = len - 1;
+  stage_name = "arith 8/8";
+  stage_short = "arith8";
+  stage_cur = 0;
+  stage_max = 2 * len * ARITH_MAX;
 
+  stage_val_type = STAGE_VAL_LE;
 
+  orig_hit_cnt = new_hit_cnt;
 
-		orig_hit_cnt = new_hit_cnt;
+  for (i = 0; i < len; ++i) {
 
-		for (i = 0; i < len - 1; ++i) {
+    u8 orig = out_buf[i];
 
-			/* Let's consult the effector map... */
+    /* Let's consult the effector map... */
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				--stage_max;
-				continue;
-			}
+    if (!eff_map[EFF_APOS(i)]) {
 
-			stage_cur_byte = i;
+      stage_max -= 2 * ARITH_MAX;
+      continue;
 
-			*(u16*)(out_buf + i) ^= 0xFFFF;
+    }
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-			++stage_cur;
+    stage_cur_byte = i;
 
-			*(u16*)(out_buf + i) ^= 0xFFFF;
+    for (j = 1; j <= ARITH_MAX; ++j) {
 
+      u8 r = orig ^ (orig + j);
 
-		}
+      /* Do arithmetic operations only if the result couldn't be a product
+         of a bitflip. */
 
-		new_hit_cnt = queued_paths + unique_crashes;
+      if (!could_be_bitflip(r)) {
 
-		stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP16] += stage_max;
+        stage_cur_val = j;
+        out_buf[i] = orig + j;
 
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
+      } else
 
+        --stage_max;
 
-		if (len < 4) goto skip_bitflip;
+      r = orig ^ (orig - j);
 
-		/* Four walking bytes. */
+      if (!could_be_bitflip(r)) {
 
-		stage_name = "bitflip 32/8";
-		stage_short = "flip32";
-		stage_cur = 0;
-		stage_max = len - 3;
+        stage_cur_val = -j;
+        out_buf[i] = orig - j;
 
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
+      } else
 
-		orig_hit_cnt = new_hit_cnt;
+        --stage_max;
 
-		for (i = 0; i < len - 3; ++i) {
+      out_buf[i] = orig;
 
-			/* Let's consult the effector map... */
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				--stage_max;
-				continue;
-			}
+    }
 
-			stage_cur_byte = i;
+  }
 
-			*(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-			++stage_cur;
+  stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH8] += stage_max;
 
-			*(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+  /* 16-bit arithmetics, both endians. */
 
-		}
+  if (len < 2) goto skip_arith;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  stage_name = "arith 16/8";
+  stage_short = "arith16";
+  stage_cur = 0;
+  stage_max = 4 * (len - 1) * ARITH_MAX;
 
-		stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP32] += stage_max;
+  orig_hit_cnt = new_hit_cnt;
 
+  for (i = 0; i < len - 1; ++i) {
 
+    u16 orig = *(u16*)(out_buf + i);
 
+    /* Let's consult the effector map... */
 
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
+      stage_max -= 4 * ARITH_MAX;
+      continue;
 
-	skip_bitflip:
+    }
 
-		if (no_arith) goto skip_arith;
+    stage_cur_byte = i;
 
-		/**********************
-		 * ARITHMETIC INC/DEC *
-		 **********************/
+    for (j = 1; j <= ARITH_MAX; ++j) {
 
-		 /* 8-bit arithmetics. */
+      u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
+          r3 = orig ^ SWAP16(SWAP16(orig) + j),
+          r4 = orig ^ SWAP16(SWAP16(orig) - j);
 
-		stage_name = "arith 8/8";
-		stage_short = "arith8";
-		stage_cur = 0;
-		stage_max = 2 * len * ARITH_MAX;
+      /* Try little endian addition and subtraction first. Do it only
+         if the operation would affect more than one byte (hence the
+         & 0xff overflow checks) and if it couldn't be a product of
+         a bitflip. */
 
+      stage_val_type = STAGE_VAL_LE;
 
+      if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
 
+        stage_cur_val = j;
+        *(u16*)(out_buf + i) = orig + j;
 
-		stage_val_type = STAGE_VAL_LE;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-		orig_hit_cnt = new_hit_cnt;
+      } else
 
-		for (i = 0; i < len; ++i) {
+        --stage_max;
 
-			u8 orig = out_buf[i];
+      if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
 
-			/* Let's consult the effector map... */
+        stage_cur_val = -j;
+        *(u16*)(out_buf + i) = orig - j;
 
-			if (!eff_map[EFF_APOS(i)]) {
-				stage_max -= 2 * ARITH_MAX;
-				continue;
-			}
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-			stage_cur_byte = i;
+      } else
 
-			for (j = 1; j <= ARITH_MAX; ++j) {
+        --stage_max;
 
-				u8 r = orig ^ (orig + j);
+      /* Big endian comes next. Same deal. */
 
-				/* Do arithmetic operations only if the result couldn't be a product
-				   of a bitflip. */
+      stage_val_type = STAGE_VAL_BE;
 
-				if (!could_be_bitflip(r)) {
+      if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
 
-					stage_cur_val = j;
-					out_buf[i] = orig + j;
+        stage_cur_val = j;
+        *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				} else --stage_max;
+      } else
 
-				r = orig ^ (orig - j);
+        --stage_max;
 
-				if (!could_be_bitflip(r)) {
+      if ((orig >> 8) < j && !could_be_bitflip(r4)) {
 
-					stage_cur_val = -j;
-					out_buf[i] = orig - j;
+        stage_cur_val = -j;
+        *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				} else --stage_max;
+      } else
 
-				out_buf[i] = orig;
+        --stage_max;
 
-			}
+      *(u16*)(out_buf + i) = orig;
 
-		}
+    }
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  }
 
-		stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH8] += stage_max;
+  new_hit_cnt = queued_paths + unique_crashes;
 
+  stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH16] += stage_max;
 
+  /* 32-bit arithmetics, both endians. */
 
+  if (len < 4) goto skip_arith;
 
+  stage_name = "arith 32/8";
+  stage_short = "arith32";
+  stage_cur = 0;
+  stage_max = 4 * (len - 3) * ARITH_MAX;
 
-		/* 16-bit arithmetics, both endians. */
+  orig_hit_cnt = new_hit_cnt;
 
-		if (len < 2) goto skip_arith;
+  for (i = 0; i < len - 3; ++i) {
 
-		stage_name = "arith 16/8";
-		stage_short = "arith16";
-		stage_cur = 0;
-		stage_max = 4 * (len - 1) * ARITH_MAX;
+    u32 orig = *(u32*)(out_buf + i);
 
+    /* Let's consult the effector map... */
 
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
+      stage_max -= 4 * ARITH_MAX;
+      continue;
 
-		orig_hit_cnt = new_hit_cnt;
+    }
 
-		for (i = 0; i < len - 1; ++i) {
+    stage_cur_byte = i;
 
-			u16 orig = *(u16*)(out_buf + i);
+    for (j = 1; j <= ARITH_MAX; ++j) {
 
-			/* Let's consult the effector map... */
+      u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
+          r3 = orig ^ SWAP32(SWAP32(orig) + j),
+          r4 = orig ^ SWAP32(SWAP32(orig) - j);
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				stage_max -= 4 * ARITH_MAX;
-				continue;
-			}
+      /* Little endian first. Same deal as with 16-bit: we only want to
+         try if the operation would have effect on more than two bytes. */
 
-			stage_cur_byte = i;
+      stage_val_type = STAGE_VAL_LE;
 
-			for (j = 1; j <= ARITH_MAX; ++j) {
+      if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
 
-				u16 r1 = orig ^ (orig + j),
-					r2 = orig ^ (orig - j),
-					r3 = orig ^ SWAP16(SWAP16(orig) + j),
-					r4 = orig ^ SWAP16(SWAP16(orig) - j);
+        stage_cur_val = j;
+        *(u32*)(out_buf + i) = orig + j;
 
-				/* Try little endian addition and subtraction first. Do it only
-				   if the operation would affect more than one byte (hence the
-				   & 0xff overflow checks) and if it couldn't be a product of
-				   a bitflip. */
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				stage_val_type = STAGE_VAL_LE;
+      } else
 
-				if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
+        --stage_max;
 
-					stage_cur_val = j;
-					*(u16*)(out_buf + i) = orig + j;
+      if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+        stage_cur_val = -j;
+        *(u32*)(out_buf + i) = orig - j;
 
-				} else --stage_max;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        stage_cur++;
 
-				if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
+      } else
 
-					stage_cur_val = -j;
-					*(u16*)(out_buf + i) = orig - j;
+        --stage_max;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+      /* Big endian next. */
 
-				} else --stage_max;
+      stage_val_type = STAGE_VAL_BE;
 
-				/* Big endian comes next. Same deal. */
+      if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
 
-				stage_val_type = STAGE_VAL_BE;
+        stage_cur_val = j;
+        *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
 
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
+      } else
 
-					stage_cur_val = j;
-					*(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
+        --stage_max;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+      if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
 
-				} else --stage_max;
+        stage_cur_val = -j;
+        *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
 
-				if ((orig >> 8) < j && !could_be_bitflip(r4)) {
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-					stage_cur_val = -j;
-					*(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
+      } else
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+        --stage_max;
 
-				} else --stage_max;
+      *(u32*)(out_buf + i) = orig;
 
-				*(u16*)(out_buf + i) = orig;
+    }
 
-			}
+  }
 
-		}
+  new_hit_cnt = queued_paths + unique_crashes;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH32] += stage_max;
 
-		stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH16] += stage_max;
+skip_arith:
 
+  /**********************
+   * INTERESTING VALUES *
+   **********************/
 
+  stage_name = "interest 8/8";
+  stage_short = "int8";
+  stage_cur = 0;
+  stage_max = len * sizeof(interesting_8);
 
+  stage_val_type = STAGE_VAL_LE;
 
-		/* 32-bit arithmetics, both endians. */
+  orig_hit_cnt = new_hit_cnt;
 
-		if (len < 4) goto skip_arith;
+  /* Setting 8-bit integers. */
 
-		stage_name = "arith 32/8";
-		stage_short = "arith32";
-		stage_cur = 0;
-		stage_max = 4 * (len - 3) * ARITH_MAX;
+  for (i = 0; i < len; ++i) {
 
+    u8 orig = out_buf[i];
 
+    /* Let's consult the effector map... */
 
-		orig_hit_cnt = new_hit_cnt;
+    if (!eff_map[EFF_APOS(i)]) {
 
-		for (i = 0; i < len - 3; ++i) {
+      stage_max -= sizeof(interesting_8);
+      continue;
 
-			u32 orig = *(u32*)(out_buf + i);
+    }
 
-			/* Let's consult the effector map... */
+    stage_cur_byte = i;
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				stage_max -= 4 * ARITH_MAX;
-				continue;
-			}
+    for (j = 0; j < sizeof(interesting_8); ++j) {
 
-			stage_cur_byte = i;
+      /* Skip if the value could be a product of bitflips or arithmetics. */
 
-			for (j = 1; j <= ARITH_MAX; ++j) {
+      if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
+          could_be_arith(orig, (u8)interesting_8[j], 1)) {
 
-				u32 r1 = orig ^ (orig + j),
-					r2 = orig ^ (orig - j),
-					r3 = orig ^ SWAP32(SWAP32(orig) + j),
-					r4 = orig ^ SWAP32(SWAP32(orig) - j);
+        --stage_max;
+        continue;
 
-				/* Little endian first. Same deal as with 16-bit: we only want to
-				   try if the operation would have effect on more than two bytes. */
+      }
 
-				stage_val_type = STAGE_VAL_LE;
+      stage_cur_val = interesting_8[j];
+      out_buf[i] = interesting_8[j];
 
-				if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-					stage_cur_val = j;
-					*(u32*)(out_buf + i) = orig + j;
+      out_buf[i] = orig;
+      ++stage_cur;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+    }
 
-				} else --stage_max;
+  }
 
-				if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
+  new_hit_cnt = queued_paths + unique_crashes;
 
-					stage_cur_val = -j;
-					*(u32*)(out_buf + i) = orig - j;
+  stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST8] += stage_max;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+  /* Setting 16-bit integers, both endians. */
 
-				} else --stage_max;
+  if (no_arith || len < 2) goto skip_interest;
 
-				/* Big endian next. */
+  stage_name = "interest 16/8";
+  stage_short = "int16";
+  stage_cur = 0;
+  stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
 
-				stage_val_type = STAGE_VAL_BE;
+  orig_hit_cnt = new_hit_cnt;
 
-				if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
+  for (i = 0; i < len - 1; ++i) {
 
-					stage_cur_val = j;
-					*(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
+    u16 orig = *(u16*)(out_buf + i);
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+    /* Let's consult the effector map... */
 
-				} else --stage_max;
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-				if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
+      stage_max -= sizeof(interesting_16);
+      continue;
 
-					stage_cur_val = -j;
-					*(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
+    }
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+    stage_cur_byte = i;
 
-				} else --stage_max;
+    for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
 
-				*(u32*)(out_buf + i) = orig;
+      stage_cur_val = interesting_16[j];
 
-			}
+      /* Skip if this could be a product of a bitflip, arithmetics,
+         or single-byte interesting value insertion. */
 
-		}
+      if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) &&
+          !could_be_arith(orig, (u16)interesting_16[j], 2) &&
+          !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
 
-		new_hit_cnt = queued_paths + unique_crashes;
+        stage_val_type = STAGE_VAL_LE;
 
-		stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH32] += stage_max;
+        *(u16*)(out_buf + i) = interesting_16[j];
 
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
+      } else
 
+        --stage_max;
 
-	skip_arith:
+      if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
+          !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
+          !could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
+          !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
 
-		/**********************
-		 * INTERESTING VALUES *
-		 **********************/
+        stage_val_type = STAGE_VAL_BE;
 
-		stage_name = "interest 8/8";
-		stage_short = "int8";
-		stage_cur = 0;
-		stage_max = len * sizeof(interesting_8);
+        *(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
+      } else
 
+        --stage_max;
 
-		stage_val_type = STAGE_VAL_LE;
+    }
 
-		orig_hit_cnt = new_hit_cnt;
+    *(u16*)(out_buf + i) = orig;
 
-		/* Setting 8-bit integers. */
+  }
 
-		for (i = 0; i < len; ++i) {
+  new_hit_cnt = queued_paths + unique_crashes;
 
-			u8 orig = out_buf[i];
+  stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST16] += stage_max;
 
-			/* Let's consult the effector map... */
+  if (len < 4) goto skip_interest;
 
-			if (!eff_map[EFF_APOS(i)]) {
-				stage_max -= sizeof(interesting_8);
-				continue;
-			}
+  /* Setting 32-bit integers, both endians. */
 
-			stage_cur_byte = i;
+  stage_name = "interest 32/8";
+  stage_short = "int32";
+  stage_cur = 0;
+  stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
 
-			for (j = 0; j < sizeof(interesting_8); ++j) {
+  orig_hit_cnt = new_hit_cnt;
 
-				/* Skip if the value could be a product of bitflips or arithmetics. */
+  for (i = 0; i < len - 3; ++i) {
 
-				if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
-					could_be_arith(orig, (u8)interesting_8[j], 1)) {
-					--stage_max;
-					continue;
-				}
+    u32 orig = *(u32*)(out_buf + i);
 
-				stage_cur_val = interesting_8[j];
-				out_buf[i] = interesting_8[j];
+    /* Let's consult the effector map... */
 
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-				out_buf[i] = orig;
-				++stage_cur;
+      stage_max -= sizeof(interesting_32) >> 1;
+      continue;
 
-			}
+    }
 
-		}
+    stage_cur_byte = i;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+    for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
 
-		stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST8] += stage_max;
+      stage_cur_val = interesting_32[j];
 
+      /* Skip if this could be a product of a bitflip, arithmetics,
+         or word interesting value insertion. */
 
+      if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) &&
+          !could_be_arith(orig, interesting_32[j], 4) &&
+          !could_be_interest(orig, interesting_32[j], 4, 0)) {
 
+        stage_val_type = STAGE_VAL_LE;
 
-		/* Setting 16-bit integers, both endians. */
+        *(u32*)(out_buf + i) = interesting_32[j];
 
-		if (no_arith || len < 2) goto skip_interest;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-		stage_name = "interest 16/8";
-		stage_short = "int16";
-		stage_cur = 0;
-		stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
+      } else
 
+        --stage_max;
 
+      if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
+          !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
+          !could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
+          !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
 
-		orig_hit_cnt = new_hit_cnt;
+        stage_val_type = STAGE_VAL_BE;
 
-		for (i = 0; i < len - 1; ++i) {
+        *(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-			u16 orig = *(u16*)(out_buf + i);
+      } else
 
-			/* Let's consult the effector map... */
+        --stage_max;
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				stage_max -= sizeof(interesting_16);
-				continue;
-			}
+    }
 
-			stage_cur_byte = i;
+    *(u32*)(out_buf + i) = orig;
 
-			for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
+  }
 
-				stage_cur_val = interesting_16[j];
+  new_hit_cnt = queued_paths + unique_crashes;
 
-				/* Skip if this could be a product of a bitflip, arithmetics,
-				   or single-byte interesting value insertion. */
+  stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST32] += stage_max;
 
-				if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) &&
-					!could_be_arith(orig, (u16)interesting_16[j], 2) &&
-					!could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
+skip_interest:
 
-					stage_val_type = STAGE_VAL_LE;
+  /********************
+   * DICTIONARY STUFF *
+   ********************/
 
-					*(u16*)(out_buf + i) = interesting_16[j];
+  if (!extras_cnt) goto skip_user_extras;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+  /* Overwrite with user-supplied extras. */
 
-				} else --stage_max;
+  stage_name = "user extras (over)";
+  stage_short = "ext_UO";
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
 
-				if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
-					!could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
-					!could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
-					!could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
+  stage_val_type = STAGE_VAL_NONE;
 
-					stage_val_type = STAGE_VAL_BE;
+  orig_hit_cnt = new_hit_cnt;
 
-					*(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+  for (i = 0; i < len; ++i) {
 
-				} else --stage_max;
+    u32 last_len = 0;
 
-			}
+    stage_cur_byte = i;
 
-			*(u16*)(out_buf + i) = orig;
+    /* Extras are sorted by size, from smallest to largest. This means
+       that we don't have to worry about restoring the buffer in
+       between writes at a particular offset determined by the outer
+       loop. */
 
-		}
+    for (j = 0; j < extras_cnt; ++j) {
 
-		new_hit_cnt = queued_paths + unique_crashes;
+      /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
+         skip them if there's no room to insert the payload, if the token
+         is redundant, or if its entire span has no bytes set in the effector
+         map. */
 
-		stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST16] += stage_max;
+      if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
+          extras[j].len > len - i ||
+          !memcmp(extras[j].data, out_buf + i, extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
 
+        --stage_max;
+        continue;
 
+      }
 
+      last_len = extras[j].len;
+      memcpy(out_buf + i, extras[j].data, last_len);
 
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-		if (len < 4) goto skip_interest;
+      ++stage_cur;
 
-		/* Setting 32-bit integers, both endians. */
+    }
 
-		stage_name = "interest 32/8";
-		stage_short = "int32";
-		stage_cur = 0;
-		stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
+    /* Restore all the clobbered memory. */
+    memcpy(out_buf + i, in_buf + i, last_len);
 
+  }
 
-		orig_hit_cnt = new_hit_cnt;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-		for (i = 0; i < len - 3; ++i) {
+  stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_UO] += stage_max;
 
-			u32 orig = *(u32*)(out_buf + i);
+  /* Insertion of user-supplied extras. */
 
-			/* Let's consult the effector map... */
+  stage_name = "user extras (insert)";
+  stage_short = "ext_UI";
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				stage_max -= sizeof(interesting_32) >> 1;
-				continue;
-			}
+  orig_hit_cnt = new_hit_cnt;
 
-			stage_cur_byte = i;
+  ex_tmp = ck_alloc(len + MAX_DICT_FILE);
 
-			for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
+  for (i = 0; i <= len; ++i) {
 
-				stage_cur_val = interesting_32[j];
+    stage_cur_byte = i;
 
-				/* Skip if this could be a product of a bitflip, arithmetics,
-				   or word interesting value insertion. */
+    for (j = 0; j < extras_cnt; ++j) {
 
-				if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) &&
-					!could_be_arith(orig, interesting_32[j], 4) &&
-					!could_be_interest(orig, interesting_32[j], 4, 0)) {
+      if (len + extras[j].len > MAX_FILE) {
 
-					stage_val_type = STAGE_VAL_LE;
+        --stage_max;
+        continue;
 
-					*(u32*)(out_buf + i) = interesting_32[j];
+      }
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+      /* Insert token */
+      memcpy(ex_tmp + i, extras[j].data, extras[j].len);
 
-				} else --stage_max;
+      /* Copy tail */
+      memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
 
-				if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
-					!could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
-					!could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
-					!could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
+      if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
 
-					stage_val_type = STAGE_VAL_BE;
+        ck_free(ex_tmp);
+        goto abandon_entry;
 
-					*(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+      }
 
-				} else --stage_max;
+      ++stage_cur;
 
-			}
+    }
 
-			*(u32*)(out_buf + i) = orig;
+    /* Copy head */
+    ex_tmp[i] = out_buf[i];
 
-		}
+  }
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  ck_free(ex_tmp);
 
-		stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST32] += stage_max;
+  new_hit_cnt = queued_paths + unique_crashes;
 
+  stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_UI] += stage_max;
 
+skip_user_extras:
 
+  if (!a_extras_cnt) goto skip_extras;
 
+  stage_name = "auto extras (over)";
+  stage_short = "ext_AO";
+  stage_cur = 0;
+  stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
 
-	skip_interest:
+  stage_val_type = STAGE_VAL_NONE;
 
-		/********************
-		 * DICTIONARY STUFF *
-		 ********************/
+  orig_hit_cnt = new_hit_cnt;
 
-		if (!extras_cnt) goto skip_user_extras;
+  for (i = 0; i < len; ++i) {
 
-		/* Overwrite with user-supplied extras. */
+    u32 last_len = 0;
 
-		stage_name = "user extras (over)";
-		stage_short = "ext_UO";
-		stage_cur = 0;
-		stage_max = extras_cnt * len;
+    stage_cur_byte = i;
 
+    for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
 
+      /* See the comment in the earlier code; extras are sorted by size. */
 
+      if (a_extras[j].len > len - i ||
+          !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1,
+                  EFF_SPAN_ALEN(i, a_extras[j].len))) {
 
-		stage_val_type = STAGE_VAL_NONE;
+        --stage_max;
+        continue;
 
-		orig_hit_cnt = new_hit_cnt;
+      }
 
-		for (i = 0; i < len; ++i) {
+      last_len = a_extras[j].len;
+      memcpy(out_buf + i, a_extras[j].data, last_len);
 
-			u32 last_len = 0;
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-			stage_cur_byte = i;
+      ++stage_cur;
 
-			/* Extras are sorted by size, from smallest to largest. This means
-			   that we don't have to worry about restoring the buffer in
-			   between writes at a particular offset determined by the outer
-			   loop. */
+    }
 
-			for (j = 0; j < extras_cnt; ++j) {
+    /* Restore all the clobbered memory. */
+    memcpy(out_buf + i, in_buf + i, last_len);
 
-				/* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
-				   skip them if there's no room to insert the payload, if the token
-				   is redundant, or if its entire span has no bytes set in the effector
-				   map. */
+  }
 
-				if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
-					extras[j].len > len - i ||
-					!memcmp(extras[j].data, out_buf + i, extras[j].len) ||
-					!memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
+  new_hit_cnt = queued_paths + unique_crashes;
 
-					--stage_max;
-					continue;
+  stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_AO] += stage_max;
 
-				}
+skip_extras:
 
-				last_len = extras[j].len;
-				memcpy(out_buf + i, extras[j].data, last_len);
+  /* If we made this to here without jumping to havoc_stage or abandon_entry,
+     we're properly done with deterministic steps and can mark it as such
+     in the .state/ directory. */
 
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+  if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
 
-				++stage_cur;
+  /****************
+   * RANDOM HAVOC *
+   ****************/
 
-			}
+havoc_stage:
+pacemaker_fuzzing:
 
-			/* Restore all the clobbered memory. */
-			memcpy(out_buf + i, in_buf + i, last_len);
+  stage_cur_byte = -1;
 
-		}
+  /* The havoc stage mutation code is also invoked when splicing files; if the
+     splice_cycle variable is set, generate different descriptions and such. */
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  if (!splice_cycle) {
 
-		stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_UO] += stage_max;
+    stage_name = "MOpt-havoc";
+    stage_short = "MOpt_havoc";
+    stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
+                havoc_div / 100;
 
-		/* Insertion of user-supplied extras. */
+  } else {
 
-		stage_name = "user extras (insert)";
-		stage_short = "ext_UI";
-		stage_cur = 0;
-		stage_max = extras_cnt * len;
+    static u8 tmp[32];
 
+    perf_score = orig_perf;
 
+    sprintf(tmp, "MOpt-splice %u", splice_cycle);
+    stage_name = tmp;
+    stage_short = "MOpt_splice";
+    stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
 
+  }
 
-		orig_hit_cnt = new_hit_cnt;
+  s32 temp_len_puppet;
+  cur_ms_lv = get_cur_time();
 
-		ex_tmp = ck_alloc(len + MAX_DICT_FILE);
+  {
 
-		for (i = 0; i <= len; ++i) {
+    if (key_puppet == 1) {
 
-			stage_cur_byte = i;
+      if (unlikely(orig_hit_cnt_puppet == 0)) {
 
-			for (j = 0; j < extras_cnt; ++j) {
+        orig_hit_cnt_puppet = queued_paths + unique_crashes;
+        last_limit_time_start = get_cur_time();
+        SPLICE_CYCLES_puppet =
+            (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+             SPLICE_CYCLES_puppet_low);
 
-				if (len + extras[j].len > MAX_FILE) {
-					--stage_max;
-					continue;
-				}
+      }
 
-				/* Insert token */
-				memcpy(ex_tmp + i, extras[j].data, extras[j].len);
+    }
 
-				/* Copy tail */
-				memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
+    {
 
-				if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
-					ck_free(ex_tmp);
-					goto abandon_entry;
-				}
+#ifndef IGNORE_FINDS
+    havoc_stage_puppet:
+#endif
 
-				++stage_cur;
+      stage_cur_byte = -1;
 
-			}
+      /* The havoc stage mutation code is also invoked when splicing files; if
+         the splice_cycle variable is set, generate different descriptions and
+         such. */
 
-			/* Copy head */
-			ex_tmp[i] = out_buf[i];
+      if (!splice_cycle) {
 
-		}
+        stage_name = "MOpt avoc";
+        stage_short = "MOpt_havoc";
+        stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+                    perf_score / havoc_div / 100;
 
-		ck_free(ex_tmp);
+      } else {
 
-		new_hit_cnt = queued_paths + unique_crashes;
+        static u8 tmp[32];
+        perf_score = orig_perf;
+        sprintf(tmp, "MOpt splice %u", splice_cycle);
+        stage_name = tmp;
+        stage_short = "MOpt_splice";
+        stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
 
-		stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_UI] += stage_max;
+      }
 
-	skip_user_extras:
+      if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
 
-		if (!a_extras_cnt) goto skip_extras;
+      temp_len = len;
 
-		stage_name = "auto extras (over)";
-		stage_short = "ext_AO";
-		stage_cur = 0;
-		stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
+      orig_hit_cnt = queued_paths + unique_crashes;
 
+      havoc_queued = queued_paths;
 
-		stage_val_type = STAGE_VAL_NONE;
+      for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
-		orig_hit_cnt = new_hit_cnt;
+        u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
 
-		for (i = 0; i < len; ++i) {
+        stage_cur_val = use_stacking;
 
-			u32 last_len = 0;
+        for (i = 0; i < operator_num; ++i) {
 
-			stage_cur_byte = i;
+          stage_cycles_puppet_v3[swarm_now][i] =
+              stage_cycles_puppet_v2[swarm_now][i];
 
-			for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
+        }
 
-				/* See the comment in the earlier code; extras are sorted by size. */
+        for (i = 0; i < use_stacking; ++i) {
 
-				if (a_extras[j].len > len - i ||
-					!memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
-					!memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) {
+          switch (select_algorithm()) {
 
-					--stage_max;
-					continue;
+            case 0:
+              /* Flip a single bit somewhere. Spooky! */
+              FLIP_BIT(out_buf, UR(temp_len << 3));
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP1] += 1;
+              break;
 
-				}
+            case 1:
+              if (temp_len < 2) break;
+              temp_len_puppet = UR(temp_len << 3);
+              FLIP_BIT(out_buf, temp_len_puppet);
+              FLIP_BIT(out_buf, temp_len_puppet + 1);
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP2] += 1;
+              break;
 
-				last_len = a_extras[j].len;
-				memcpy(out_buf + i, a_extras[j].data, last_len);
+            case 2:
+              if (temp_len < 2) break;
+              temp_len_puppet = UR(temp_len << 3);
+              FLIP_BIT(out_buf, temp_len_puppet);
+              FLIP_BIT(out_buf, temp_len_puppet + 1);
+              FLIP_BIT(out_buf, temp_len_puppet + 2);
+              FLIP_BIT(out_buf, temp_len_puppet + 3);
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP4] += 1;
+              break;
 
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+            case 3:
+              if (temp_len < 4) break;
+              out_buf[UR(temp_len)] ^= 0xFF;
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP8] += 1;
+              break;
 
-				++stage_cur;
+            case 4:
+              if (temp_len < 8) break;
+              *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF;
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP16] += 1;
+              break;
 
-			}
+            case 5:
+              if (temp_len < 8) break;
+              *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF;
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP32] += 1;
+              break;
 
-			/* Restore all the clobbered memory. */
-			memcpy(out_buf + i, in_buf + i, last_len);
+            case 6:
+              out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
+              out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
+              stage_cycles_puppet_v2[swarm_now][STAGE_ARITH8] += 1;
+              break;
 
-		}
+            case 7:
+              /* Randomly subtract from word, random endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
 
-		new_hit_cnt = queued_paths + unique_crashes;
+                u32 pos = UR(temp_len - 1);
+                *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
 
-		stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_AO] += stage_max;
+              } else {
 
-	skip_extras:
+                u32 pos = UR(temp_len - 1);
+                u16 num = 1 + UR(ARITH_MAX);
+                *(u16*)(out_buf + pos) =
+                    SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
 
-		/* If we made this to here without jumping to havoc_stage or abandon_entry,
-		   we're properly done with deterministic steps and can mark it as such
-		   in the .state/ directory. */
+              }
 
-		if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
+              /* Randomly add to word, random endian. */
+              if (UR(2)) {
 
-		/****************
-		 * RANDOM HAVOC *
-		 ****************/
+                u32 pos = UR(temp_len - 1);
+                *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
 
-	havoc_stage:
-	pacemaker_fuzzing:
+              } else {
 
+                u32 pos = UR(temp_len - 1);
+                u16 num = 1 + UR(ARITH_MAX);
+                *(u16*)(out_buf + pos) =
+                    SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
 
-		stage_cur_byte = -1;
+              }
 
-		/* The havoc stage mutation code is also invoked when splicing files; if the
-		   splice_cycle variable is set, generate different descriptions and such. */
+              stage_cycles_puppet_v2[swarm_now][STAGE_ARITH16] += 1;
+              break;
 
-		if (!splice_cycle) {
+            case 8:
+              /* Randomly subtract from dword, random endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
 
-			stage_name = "MOpt-havoc";
-			stage_short = "MOpt_havoc";
-			stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-				perf_score / havoc_div / 100;
+                u32 pos = UR(temp_len - 3);
+                *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
 
-		}
-		else {
+              } else {
 
-			static u8 tmp[32];
+                u32 pos = UR(temp_len - 3);
+                u32 num = 1 + UR(ARITH_MAX);
+                *(u32*)(out_buf + pos) =
+                    SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
 
-			perf_score = orig_perf;
+              }
 
-			sprintf(tmp, "MOpt-splice %u", splice_cycle);
-			stage_name = tmp;
-			stage_short = "MOpt_splice";
-			stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+              /* Randomly add to dword, random endian. */
+              // if (temp_len < 4) break;
+              if (UR(2)) {
 
-		}
+                u32 pos = UR(temp_len - 3);
+                *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
 
-		s32 temp_len_puppet;
-		cur_ms_lv = get_cur_time();
+              } else {
 
-		{
+                u32 pos = UR(temp_len - 3);
+                u32 num = 1 + UR(ARITH_MAX);
+                *(u32*)(out_buf + pos) =
+                    SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
 
+              }
 
-			if (key_puppet == 1)
-			{
-				if (unlikely(orig_hit_cnt_puppet == 0))
-				{
-					orig_hit_cnt_puppet = queued_paths + unique_crashes;
-					last_limit_time_start = get_cur_time();
-					SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low);
-				}
-			}
+              stage_cycles_puppet_v2[swarm_now][STAGE_ARITH32] += 1;
+              break;
 
+            case 9:
+              /* Set byte to interesting value. */
+              if (temp_len < 4) break;
+              out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
+              stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST8] += 1;
+              break;
 
-			{
-#ifndef IGNORE_FINDS
-			havoc_stage_puppet:
-#endif
+            case 10:
+              /* Set word to interesting value, randomly choosing endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
 
-				stage_cur_byte = -1;
+                *(u16*)(out_buf + UR(temp_len - 1)) =
+                    interesting_16[UR(sizeof(interesting_16) >> 1)];
 
-				/* The havoc stage mutation code is also invoked when splicing files; if the
-				   splice_cycle variable is set, generate different descriptions and such. */
+              } else {
 
-				if (!splice_cycle) {
+                *(u16*)(out_buf + UR(temp_len - 1)) =
+                    SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]);
 
-					stage_name = "MOpt avoc";
-					stage_short = "MOpt_havoc";
-					stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-						perf_score / havoc_div / 100;
+              }
 
-				}
-				else {
-					static u8 tmp[32];
-					perf_score = orig_perf;
-					sprintf(tmp, "MOpt splice %u", splice_cycle);
-					stage_name = tmp;
-					stage_short = "MOpt_splice";
-					stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
-				}
+              stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST16] += 1;
+              break;
 
+            case 11:
+              /* Set dword to interesting value, randomly choosing endian. */
 
+              if (temp_len < 8) break;
 
-				if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
-
-				temp_len = len;
-
-				orig_hit_cnt = queued_paths + unique_crashes;
-
-				havoc_queued = queued_paths;
-
-
-
-				for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-					u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
-
-					stage_cur_val = use_stacking;
-
-
-					for (i = 0; i < operator_num; ++i)
-					{
-						stage_cycles_puppet_v3[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i];
-					}
-
-
-					for (i = 0; i < use_stacking; ++i) {
-
-						switch (select_algorithm()) {
-
-						case 0:
-							/* Flip a single bit somewhere. Spooky! */
-							FLIP_BIT(out_buf, UR(temp_len << 3));
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP1] += 1;
-							break;
-
-
-						case 1:
-							if (temp_len < 2) break;
-							temp_len_puppet = UR(temp_len << 3);
-							FLIP_BIT(out_buf, temp_len_puppet);
-							FLIP_BIT(out_buf, temp_len_puppet + 1);
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP2] += 1;
-							break;
-
-						case 2:
-							if (temp_len < 2) break;
-							temp_len_puppet = UR(temp_len << 3);
-							FLIP_BIT(out_buf, temp_len_puppet);
-							FLIP_BIT(out_buf, temp_len_puppet + 1);
-							FLIP_BIT(out_buf, temp_len_puppet + 2);
-							FLIP_BIT(out_buf, temp_len_puppet + 3);
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP4] += 1;
-							break;
-
-						case 3:
-							if (temp_len < 4) break;
-							out_buf[UR(temp_len)] ^= 0xFF;
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP8] += 1;
-							break;
-
-						case 4:
-							if (temp_len < 8) break;
-							*(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF;
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP16] += 1;
-							break;
-
-						case 5:
-							if (temp_len < 8) break;
-							*(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF;
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP32] += 1;
-							break;
-
-						case 6:
-							out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
-							out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
-							stage_cycles_puppet_v2[swarm_now][STAGE_ARITH8] += 1;
-							break;
-
-						case 7:
-							/* Randomly subtract from word, random endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 1);
-								*(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
-							}
-							else {
-								u32 pos = UR(temp_len - 1);
-								u16 num = 1 + UR(ARITH_MAX);
-								*(u16*)(out_buf + pos) =
-									SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
-							}
-							/* Randomly add to word, random endian. */
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 1);
-								*(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
-							}
-							else {
-								u32 pos = UR(temp_len - 1);
-								u16 num = 1 + UR(ARITH_MAX);
-								*(u16*)(out_buf + pos) =
-									SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
-							}
-							stage_cycles_puppet_v2[swarm_now][STAGE_ARITH16] += 1;
-							break;
-
-
-						case 8:
-							/* Randomly subtract from dword, random endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 3);
-								*(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
-							}
-							else {
-								u32 pos = UR(temp_len - 3);
-								u32 num = 1 + UR(ARITH_MAX);
-								*(u32*)(out_buf + pos) =
-									SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
-							}
-							/* Randomly add to dword, random endian. */
-							//if (temp_len < 4) break;
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 3);
-								*(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
-							}
-							else {
-								u32 pos = UR(temp_len - 3);
-								u32 num = 1 + UR(ARITH_MAX);
-								*(u32*)(out_buf + pos) =
-									SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
-							}
-							stage_cycles_puppet_v2[swarm_now][STAGE_ARITH32] += 1;
-							break;
-
-
-						case 9:
-							/* Set byte to interesting value. */
-							if (temp_len < 4) break;
-							out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
-							stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST8] += 1;
-							break;
-
-						case 10:
-							/* Set word to interesting value, randomly choosing endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								*(u16*)(out_buf + UR(temp_len - 1)) =
-									interesting_16[UR(sizeof(interesting_16) >> 1)];
-							}
-							else {
-								*(u16*)(out_buf + UR(temp_len - 1)) = SWAP16(
-									interesting_16[UR(sizeof(interesting_16) >> 1)]);
-							}
-							stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST16] += 1;
-							break;
-
-
-						case 11:
-							/* Set dword to interesting value, randomly choosing endian. */
-
-							if (temp_len < 8) break;
-
-							if (UR(2)) {
-								*(u32*)(out_buf + UR(temp_len - 3)) =
-									interesting_32[UR(sizeof(interesting_32) >> 2)];
-							}
-							else {
-								*(u32*)(out_buf + UR(temp_len - 3)) = SWAP32(
-									interesting_32[UR(sizeof(interesting_32) >> 2)]);
-							}
-							stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST32] += 1;
-							break;
+              if (UR(2)) {
 
+                *(u32*)(out_buf + UR(temp_len - 3)) =
+                    interesting_32[UR(sizeof(interesting_32) >> 2)];
 
-						case 12:
+              } else {
 
-							/* Just set a random byte to a random value. Because,
-							   why not. We use XOR with 1-255 to eliminate the
-							   possibility of a no-op. */
+                *(u32*)(out_buf + UR(temp_len - 3)) =
+                    SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]);
 
-							out_buf[UR(temp_len)] ^= 1 + UR(255);
-							stage_cycles_puppet_v2[swarm_now][STAGE_RANDOMBYTE] += 1;
-							break;
+              }
 
+              stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST32] += 1;
+              break;
 
+            case 12:
 
-						case 13: {
+              /* Just set a random byte to a random value. Because,
+                 why not. We use XOR with 1-255 to eliminate the
+                 possibility of a no-op. */
 
-							/* Delete bytes. We're making this a bit more likely
-							   than insertion (the next option) in hopes of keeping
-							   files reasonably small. */
+              out_buf[UR(temp_len)] ^= 1 + UR(255);
+              stage_cycles_puppet_v2[swarm_now][STAGE_RANDOMBYTE] += 1;
+              break;
 
-							u32 del_from, del_len;
+            case 13: {
 
-							if (temp_len < 2) break;
+              /* Delete bytes. We're making this a bit more likely
+                 than insertion (the next option) in hopes of keeping
+                 files reasonably small. */
 
-							/* Don't delete too much. */
+              u32 del_from, del_len;
 
-							del_len = choose_block_len(temp_len - 1);
+              if (temp_len < 2) break;
 
-							del_from = UR(temp_len - del_len + 1);
+              /* Don't delete too much. */
 
-							memmove(out_buf + del_from, out_buf + del_from + del_len,
-								temp_len - del_from - del_len);
+              del_len = choose_block_len(temp_len - 1);
 
-							temp_len -= del_len;
-							stage_cycles_puppet_v2[swarm_now][STAGE_DELETEBYTE] += 1;
-							break;
+              del_from = UR(temp_len - del_len + 1);
 
-						}
+              memmove(out_buf + del_from, out_buf + del_from + del_len,
+                      temp_len - del_from - del_len);
 
-						case 14:
+              temp_len -= del_len;
+              stage_cycles_puppet_v2[swarm_now][STAGE_DELETEBYTE] += 1;
+              break;
 
-							if (temp_len + HAVOC_BLK_XL < MAX_FILE) {
+            }
 
-								/* Clone bytes (75%) or insert a block of constant bytes (25%). */
+            case 14:
 
-								u8  actually_clone = UR(4);
-								u32 clone_from, clone_to, clone_len;
-								u8* new_buf;
+              if (temp_len + HAVOC_BLK_XL < MAX_FILE) {
 
-								if (actually_clone) {
+                /* Clone bytes (75%) or insert a block of constant bytes (25%).
+                 */
 
-									clone_len = choose_block_len(temp_len);
-									clone_from = UR(temp_len - clone_len + 1);
+                u8  actually_clone = UR(4);
+                u32 clone_from, clone_to, clone_len;
+                u8* new_buf;
 
-								}
-								else {
+                if (actually_clone) {
 
-									clone_len = choose_block_len(HAVOC_BLK_XL);
-									clone_from = 0;
+                  clone_len = choose_block_len(temp_len);
+                  clone_from = UR(temp_len - clone_len + 1);
 
-								}
+                } else {
 
-								clone_to = UR(temp_len);
+                  clone_len = choose_block_len(HAVOC_BLK_XL);
+                  clone_from = 0;
 
-								new_buf = ck_alloc_nozero(temp_len + clone_len);
+                }
 
-								/* Head */
+                clone_to = UR(temp_len);
 
-								memcpy(new_buf, out_buf, clone_to);
+                new_buf = ck_alloc_nozero(temp_len + clone_len);
 
-								/* Inserted part */
+                /* Head */
 
-								if (actually_clone)
-									memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
-								else
-									memset(new_buf + clone_to,
-										UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
+                memcpy(new_buf, out_buf, clone_to);
 
-								/* Tail */
-								memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
-									temp_len - clone_to);
+                /* Inserted part */
 
-								ck_free(out_buf);
-								out_buf = new_buf;
-								temp_len += clone_len;
-								stage_cycles_puppet_v2[swarm_now][STAGE_Clone75] += 1;
-							}
+                if (actually_clone)
+                  memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
+                else
+                  memset(new_buf + clone_to,
+                         UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
 
-							break;
+                /* Tail */
+                memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
+                       temp_len - clone_to);
 
-						case 15: {
+                ck_free(out_buf);
+                out_buf = new_buf;
+                temp_len += clone_len;
+                stage_cycles_puppet_v2[swarm_now][STAGE_Clone75] += 1;
 
-							/* Overwrite bytes with a randomly selected chunk (75%) or fixed
-							   bytes (25%). */
+              }
 
-							u32 copy_from, copy_to, copy_len;
+              break;
 
-							if (temp_len < 2) break;
+            case 15: {
 
-							copy_len = choose_block_len(temp_len - 1);
+              /* Overwrite bytes with a randomly selected chunk (75%) or fixed
+                 bytes (25%). */
 
-							copy_from = UR(temp_len - copy_len + 1);
-							copy_to = UR(temp_len - copy_len + 1);
+              u32 copy_from, copy_to, copy_len;
 
-							if (UR(4)) {
+              if (temp_len < 2) break;
 
-								if (copy_from != copy_to)
-									memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
+              copy_len = choose_block_len(temp_len - 1);
 
-							}
-							else memset(out_buf + copy_to,
-								UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
-							stage_cycles_puppet_v2[swarm_now][STAGE_OverWrite75] += 1;
-							break;
+              copy_from = UR(temp_len - copy_len + 1);
+              copy_to = UR(temp_len - copy_len + 1);
 
-						}
+              if (UR(4)) {
 
+                if (copy_from != copy_to)
+                  memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
 
-						}
+              } else
 
-					}
+                memset(out_buf + copy_to,
+                       UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
+              stage_cycles_puppet_v2[swarm_now][STAGE_OverWrite75] += 1;
+              break;
 
+            }
 
-					tmp_pilot_time += 1;
+          }
 
+        }
 
+        tmp_pilot_time += 1;
 
+        u64 temp_total_found = queued_paths + unique_crashes;
 
-					u64 temp_total_found = queued_paths + unique_crashes;
+        if (common_fuzz_stuff(argv, out_buf, temp_len))
+          goto abandon_entry_puppet;
 
+        /* out_buf might have been mangled a bit, so let's restore it to its
+           original size and shape. */
 
+        if (temp_len < len) out_buf = ck_realloc(out_buf, len);
+        temp_len = len;
+        memcpy(out_buf, in_buf, len);
 
+        /* If we're finding new stuff, let's run for a bit longer, limits
+           permitting. */
 
-					if (common_fuzz_stuff(argv, out_buf, temp_len))
-						goto abandon_entry_puppet;
+        if (queued_paths != havoc_queued) {
 
-					/* out_buf might have been mangled a bit, so let's restore it to its
-					   original size and shape. */
+          if (perf_score <= havoc_max_mult * 100) {
 
-					if (temp_len < len) out_buf = ck_realloc(out_buf, len);
-					temp_len = len;
-					memcpy(out_buf, in_buf, len);
+            stage_max *= 2;
+            perf_score *= 2;
 
-					/* If we're finding new stuff, let's run for a bit longer, limits
-					   permitting. */
+          }
 
-					if (queued_paths != havoc_queued) {
+          havoc_queued = queued_paths;
 
-						if (perf_score <= havoc_max_mult * 100) {
-							stage_max *= 2;
-							perf_score *= 2;
-						}
+        }
 
-						havoc_queued = queued_paths;
+        if (unlikely(queued_paths + unique_crashes > temp_total_found)) {
 
-					}
+          u64 temp_temp_puppet =
+              queued_paths + unique_crashes - temp_total_found;
+          total_puppet_find = total_puppet_find + temp_temp_puppet;
+          for (i = 0; i < 16; ++i) {
 
-					if (unlikely(queued_paths + unique_crashes > temp_total_found))
-					{
-						u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found;
-						total_puppet_find = total_puppet_find + temp_temp_puppet;
-						for (i = 0; i < 16; ++i)
-						{
-							if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet_v3[swarm_now][i])
-								stage_finds_puppet_v2[swarm_now][i] += temp_temp_puppet;
-						}
-					}
+            if (stage_cycles_puppet_v2[swarm_now][i] >
+                stage_cycles_puppet_v3[swarm_now][i])
+              stage_finds_puppet_v2[swarm_now][i] += temp_temp_puppet;
 
-				}
-				new_hit_cnt = queued_paths + unique_crashes;
+          }
 
-				if (!splice_cycle) {
-          stage_finds[STAGE_HAVOC]  += new_hit_cnt - orig_hit_cnt;
-          stage_cycles[STAGE_HAVOC] += stage_max;
-        } else {
-          stage_finds[STAGE_SPLICE]  += new_hit_cnt - orig_hit_cnt;
-          stage_cycles[STAGE_SPLICE] += stage_max;
         }
 
+      }
+
+      new_hit_cnt = queued_paths + unique_crashes;
+
+      if (!splice_cycle) {
+
+        stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt;
+        stage_cycles[STAGE_HAVOC] += stage_max;
+
+      } else {
+
+        stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt;
+        stage_cycles[STAGE_SPLICE] += stage_max;
+
+      }
+
 #ifndef IGNORE_FINDS
 
-				/************
-				 * SPLICING *
-				 ************/
+      /************
+       * SPLICING *
+       ************/
 
+    retry_splicing_puppet:
 
-			retry_splicing_puppet:
+      if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet &&
+          queued_paths > 1 && queue_cur->len > 1) {
 
-				if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet &&
-					queued_paths > 1 && queue_cur->len > 1) {
+        struct queue_entry* target;
+        u32                 tid, split_at;
+        u8*                 new_buf;
+        s32                 f_diff, l_diff;
 
-					struct queue_entry* target;
-					u32 tid, split_at;
-					u8* new_buf;
-					s32 f_diff, l_diff;
+        /* First of all, if we've modified in_buf for havoc, let's clean that
+           up... */
 
-					/* First of all, if we've modified in_buf for havoc, let's clean that
-					   up... */
+        if (in_buf != orig_in) {
 
-					if (in_buf != orig_in) {
-						ck_free(in_buf);
-						in_buf = orig_in;
-						len = queue_cur->len;
-					}
+          ck_free(in_buf);
+          in_buf = orig_in;
+          len = queue_cur->len;
 
-					/* Pick a random queue entry and seek to it. Don't splice with yourself. */
+        }
+
+        /* Pick a random queue entry and seek to it. Don't splice with yourself.
+         */
+
+        do {
 
-					do { tid = UR(queued_paths); } while (tid == current_entry);
+          tid = UR(queued_paths);
 
-					splicing_with = tid;
-					target = queue;
+        } while (tid == current_entry);
 
-					while (tid >= 100) { target = target->next_100; tid -= 100; }
-					while (tid--) target = target->next;
+        splicing_with = tid;
+        target = queue;
 
-					/* Make sure that the target has a reasonable length. */
+        while (tid >= 100) {
 
-					while (target && (target->len < 2 || target == queue_cur)) {
-						target = target->next;
-						++splicing_with;
-					}
+          target = target->next_100;
+          tid -= 100;
 
-					if (!target) goto retry_splicing_puppet;
+        }
+
+        while (tid--)
+          target = target->next;
 
-					/* Read the testcase into a new buffer. */
+        /* Make sure that the target has a reasonable length. */
 
-					fd = open(target->fname, O_RDONLY);
+        while (target && (target->len < 2 || target == queue_cur)) {
 
-					if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
+          target = target->next;
+          ++splicing_with;
 
-					new_buf = ck_alloc_nozero(target->len);
+        }
 
-					ck_read(fd, new_buf, target->len, target->fname);
+        if (!target) goto retry_splicing_puppet;
 
-					close(fd);
+        /* Read the testcase into a new buffer. */
 
-					/* Find a suitable splicin g location, somewhere between the first and
-					   the last differing byte. Bail out if the difference is just a single
-					   byte or so. */
+        fd = open(target->fname, O_RDONLY);
 
-					locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
+        if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
 
-					if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
-						ck_free(new_buf);
-						goto retry_splicing_puppet;
-					}
+        new_buf = ck_alloc_nozero(target->len);
 
-					/* Split somewhere between the first and last differing byte. */
+        ck_read(fd, new_buf, target->len, target->fname);
 
-					split_at = f_diff + UR(l_diff - f_diff);
+        close(fd);
 
-					/* Do the thing. */
+        /* Find a suitable splicin g location, somewhere between the first and
+           the last differing byte. Bail out if the difference is just a single
+           byte or so. */
 
-					len = target->len;
-					memcpy(new_buf, in_buf, split_at);
-					in_buf = new_buf;
-					ck_free(out_buf);
-					out_buf = ck_alloc_nozero(len);
-					memcpy(out_buf, in_buf, len);
-					goto havoc_stage_puppet;
+        locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
 
-				}
+        if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
+
+          ck_free(new_buf);
+          goto retry_splicing_puppet;
+
+        }
+
+        /* Split somewhere between the first and last differing byte. */
+
+        split_at = f_diff + UR(l_diff - f_diff);
+
+        /* Do the thing. */
+
+        len = target->len;
+        memcpy(new_buf, in_buf, split_at);
+        in_buf = new_buf;
+        ck_free(out_buf);
+        out_buf = ck_alloc_nozero(len);
+        memcpy(out_buf, in_buf, len);
+        goto havoc_stage_puppet;
+
+      }
 
 #endif /* !IGNORE_FINDS */
 
-				ret_val = 0;
+      ret_val = 0;
+
+    abandon_entry:
+    abandon_entry_puppet:
+
+      if (splice_cycle >= SPLICE_CYCLES_puppet)
+        SPLICE_CYCLES_puppet =
+            (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+             SPLICE_CYCLES_puppet_low);
+
+      splicing_with = -1;
+
+      /* Update pending_not_fuzzed count if we made it through the calibration
+         cycle and have not seen this entry before. */
 
-			abandon_entry:
-			abandon_entry_puppet:
+      // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) {
 
-				if (splice_cycle >= SPLICE_CYCLES_puppet)
-					SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low);
+      //   queue_cur->was_fuzzed = 1;
+      //   --pending_not_fuzzed;
+      //   if (queue_cur->favored) --pending_favored;
+      // }
 
+      munmap(orig_in, queue_cur->len);
 
-				splicing_with = -1;
+      if (in_buf != orig_in) ck_free(in_buf);
+      ck_free(out_buf);
+      ck_free(eff_map);
 
-				/* Update pending_not_fuzzed count if we made it through the calibration
-				   cycle and have not seen this entry before. */
+      if (key_puppet == 1) {
 
-				   // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) {
-				   //   queue_cur->was_fuzzed = 1;
-				   //   --pending_not_fuzzed;
-				   //   if (queue_cur->favored) --pending_favored;
-				   // }
+        if (unlikely(queued_paths + unique_crashes >
+                     ((queued_paths + unique_crashes) * limit_time_bound +
+                      orig_hit_cnt_puppet))) {
 
-				munmap(orig_in, queue_cur->len);
+          key_puppet = 0;
+          cur_ms_lv = get_cur_time();
+          new_hit_cnt = queued_paths + unique_crashes;
+          orig_hit_cnt_puppet = 0;
+          last_limit_time_start = 0;
 
-				if (in_buf != orig_in) ck_free(in_buf);
-				ck_free(out_buf);
-				ck_free(eff_map);
+        }
+
+      }
 
+      if (unlikely(tmp_pilot_time > period_pilot)) {
 
-				if (key_puppet == 1) {
-					if (unlikely(queued_paths + unique_crashes > ((queued_paths + unique_crashes)*limit_time_bound + orig_hit_cnt_puppet)))	{
-						key_puppet = 0;
-						cur_ms_lv = get_cur_time();
-						new_hit_cnt = queued_paths + unique_crashes;
-						orig_hit_cnt_puppet = 0;
-						last_limit_time_start = 0;
-					}
-				}
+        total_pacemaker_time += tmp_pilot_time;
+        new_hit_cnt = queued_paths + unique_crashes;
+        swarm_fitness[swarm_now] =
+            (double)(total_puppet_find - temp_puppet_find) /
+            ((double)(tmp_pilot_time) / period_pilot_tmp);
+        tmp_pilot_time = 0;
+        temp_puppet_find = total_puppet_find;
 
+        u64 temp_stage_finds_puppet = 0;
+        for (i = 0; i < operator_num; ++i) {
 
-				if (unlikely(tmp_pilot_time > period_pilot)) {
-					total_pacemaker_time += tmp_pilot_time;
-					new_hit_cnt = queued_paths + unique_crashes;
-					swarm_fitness[swarm_now] = (double)(total_puppet_find - temp_puppet_find) / ((double)(tmp_pilot_time)/ period_pilot_tmp);
-					tmp_pilot_time = 0;
-					temp_puppet_find = total_puppet_find;
+          double temp_eff = 0.0;
 
-					u64 temp_stage_finds_puppet = 0;
-					for (i = 0; i < operator_num; ++i) {
-						double temp_eff = 0.0;
+          if (stage_cycles_puppet_v2[swarm_now][i] >
+              stage_cycles_puppet[swarm_now][i])
+            temp_eff = (double)(stage_finds_puppet_v2[swarm_now][i] -
+                                stage_finds_puppet[swarm_now][i]) /
+                       (double)(stage_cycles_puppet_v2[swarm_now][i] -
+                                stage_cycles_puppet[swarm_now][i]);
 
-						if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet[swarm_now][i])
-							temp_eff = (double)(stage_finds_puppet_v2[swarm_now][i] - stage_finds_puppet[swarm_now][i]) /
-							(double)(stage_cycles_puppet_v2[swarm_now][i] - stage_cycles_puppet[swarm_now][i]);
+          if (eff_best[swarm_now][i] < temp_eff) {
 
-						if (eff_best[swarm_now][i] < temp_eff) {
-							eff_best[swarm_now][i] = temp_eff;
-							L_best[swarm_now][i] = x_now[swarm_now][i];
-						}
+            eff_best[swarm_now][i] = temp_eff;
+            L_best[swarm_now][i] = x_now[swarm_now][i];
 
-						stage_finds_puppet[swarm_now][i] = stage_finds_puppet_v2[swarm_now][i];
-						stage_cycles_puppet[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i];
-						temp_stage_finds_puppet += stage_finds_puppet[swarm_now][i];
-					}
+          }
+
+          stage_finds_puppet[swarm_now][i] =
+              stage_finds_puppet_v2[swarm_now][i];
+          stage_cycles_puppet[swarm_now][i] =
+              stage_cycles_puppet_v2[swarm_now][i];
+          temp_stage_finds_puppet += stage_finds_puppet[swarm_now][i];
+
+        }
 
-					swarm_now = swarm_now + 1;
-						if (swarm_now == swarm_num) {
-							key_module = 1;
-							for (i = 0; i < operator_num; ++i) {
-								core_operator_cycles_puppet_v2[i] = core_operator_cycles_puppet[i];
-								core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet[i];
-								core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i];
-							}
+        swarm_now = swarm_now + 1;
+        if (swarm_now == swarm_num) {
 
-							double swarm_eff = 0.0;
-							swarm_now = 0;
-							for (i = 0; i < swarm_num; ++i)	{
-								if (swarm_fitness[i] > swarm_eff) {
-									swarm_eff = swarm_fitness[i];
-									swarm_now = i;
-								}
-							}
-							if (swarm_now <0 || swarm_now > swarm_num - 1)
-								PFATAL("swarm_now error number  %d", swarm_now);
+          key_module = 1;
+          for (i = 0; i < operator_num; ++i) {
 
-						}
-				}
-				return ret_val;
-			}
-		}
+            core_operator_cycles_puppet_v2[i] = core_operator_cycles_puppet[i];
+            core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet[i];
+            core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i];
 
+          }
+
+          double swarm_eff = 0.0;
+          swarm_now = 0;
+          for (i = 0; i < swarm_num; ++i) {
+
+            if (swarm_fitness[i] > swarm_eff) {
+
+              swarm_eff = swarm_fitness[i];
+              swarm_now = i;
+
+            }
+
+          }
+
+          if (swarm_now < 0 || swarm_now > swarm_num - 1)
+            PFATAL("swarm_now error number  %d", swarm_now);
+
+        }
+
+      }
+
+      return ret_val;
+
+    }
+
+  }
 
 #undef FLIP_BIT
 
 }
 
-
 u8 core_fuzzing(char** argv) {
-	int i;
 
-	if (swarm_num == 1) {
-		key_module = 2;
-		return 0;
-	}
+  int i;
 
+  if (swarm_num == 1) {
 
-		s32 len, fd, temp_len, j;
-		u8  *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
-		u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv;
-		u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
+    key_module = 2;
+    return 0;
 
-		u8  ret_val = 1, doing_det = 0;
+  }
+
+  s32 len, fd, temp_len, j;
+  u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
+  u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv;
+  u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
 
-		u8  a_collect[MAX_AUTO_EXTRA];
-		u32 a_len = 0;
+  u8 ret_val = 1, doing_det = 0;
+
+  u8  a_collect[MAX_AUTO_EXTRA];
+  u32 a_len = 0;
 
 #ifdef IGNORE_FINDS
 
-		/* In IGNORE_FINDS mode, skip any entries that weren't in the
-		   initial data set. */
+  /* In IGNORE_FINDS mode, skip any entries that weren't in the
+     initial data set. */
 
-		if (queue_cur->depth > 1) return 1;
+  if (queue_cur->depth > 1) return 1;
 
 #else
 
-		if (pending_favored) {
+  if (pending_favored) {
 
-			/* If we have any favored, non-fuzzed new arrivals in the queue,
-			   possibly skip to them at the expense of already-fuzzed or non-favored
-			   cases. */
+    /* If we have any favored, non-fuzzed new arrivals in the queue,
+       possibly skip to them at the expense of already-fuzzed or non-favored
+       cases. */
 
-			if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
-				UR(100) < SKIP_TO_NEW_PROB) return 1;
+    if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
+        UR(100) < SKIP_TO_NEW_PROB)
+      return 1;
 
-		} else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
+  } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
 
-			/* Otherwise, still possibly skip non-favored cases, albeit less often.
-			   The odds of skipping stuff are higher for already-fuzzed inputs and
-			   lower for never-fuzzed entries. */
+    /* Otherwise, still possibly skip non-favored cases, albeit less often.
+       The odds of skipping stuff are higher for already-fuzzed inputs and
+       lower for never-fuzzed entries. */
 
-			if (queue_cycle > 1 && !queue_cur->was_fuzzed) {
+    if (queue_cycle > 1 && !queue_cur->was_fuzzed) {
 
-				if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
+      if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
 
-			} else {
+    } else {
 
-				if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
+      if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
 
-			}
+    }
 
-		}
+  }
 
 #endif /* ^IGNORE_FINDS */
 
-		if (not_on_tty) {
-			ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
-				current_entry, queued_paths, unique_crashes);
-			fflush(stdout);
-		}
+  if (not_on_tty) {
+
+    ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
+         current_entry, queued_paths, unique_crashes);
+    fflush(stdout);
+
+  }
+
+  /* Map the test case into memory. */
+
+  fd = open(queue_cur->fname, O_RDONLY);
+
+  if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
+
+  len = queue_cur->len;
+
+  orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 
-		/* Map the test case into memory. */
+  if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname);
 
-		fd = open(queue_cur->fname, O_RDONLY);
+  close(fd);
 
-		if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
+  /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
+     single byte anyway, so it wouldn't give us any performance or memory usage
+     benefits. */
 
-		len = queue_cur->len;
+  out_buf = ck_alloc_nozero(len);
 
-		orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+  subseq_tmouts = 0;
 
-		if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname);
+  cur_depth = queue_cur->depth;
 
-		close(fd);
+  /*******************************************
+   * CALIBRATION (only if failed earlier on) *
+   *******************************************/
 
-		/* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
-		   single byte anyway, so it wouldn't give us any performance or memory usage
-		   benefits. */
+  if (queue_cur->cal_failed) {
 
-		out_buf = ck_alloc_nozero(len);
+    u8 res = FAULT_TMOUT;
 
-		subseq_tmouts = 0;
+    if (queue_cur->cal_failed < CAL_CHANCES) {
 
-		cur_depth = queue_cur->depth;
+      res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
 
-		/*******************************************
-		 * CALIBRATION (only if failed earlier on) *
-		 *******************************************/
+      if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
-		if (queue_cur->cal_failed) {
+    }
 
-			u8 res = FAULT_TMOUT;
+    if (stop_soon || res != crash_mode) {
 
-			if (queue_cur->cal_failed < CAL_CHANCES) {
+      ++cur_skipped_paths;
+      goto abandon_entry;
 
-				res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
+    }
 
-				if (res == FAULT_ERROR)
-					FATAL("Unable to execute target application");
+  }
 
-			}
+  /************
+   * TRIMMING *
+   ************/
 
-			if (stop_soon || res != crash_mode) {
-				++cur_skipped_paths;
-				goto abandon_entry;
-			}
+  if (!dumb_mode && !queue_cur->trim_done) {
 
-		}
+    u8 res = trim_case(argv, queue_cur, in_buf);
 
-		/************
-		 * TRIMMING *
-		 ************/
+    if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
-		if (!dumb_mode && !queue_cur->trim_done) {
+    if (stop_soon) {
 
-			u8 res = trim_case(argv, queue_cur, in_buf);
+      ++cur_skipped_paths;
+      goto abandon_entry;
 
-			if (res == FAULT_ERROR)
-				FATAL("Unable to execute target application");
+    }
 
-			if (stop_soon) {
-				++cur_skipped_paths;
-				goto abandon_entry;
-			}
+    /* Don't retry trimming, even if it failed. */
 
-			/* Don't retry trimming, even if it failed. */
+    queue_cur->trim_done = 1;
 
-			queue_cur->trim_done = 1;
+    len = queue_cur->len;
 
-			len = queue_cur->len;
+  }
 
-		}
+  memcpy(out_buf, in_buf, len);
 
-		memcpy(out_buf, in_buf, len);
+  /*********************
+   * PERFORMANCE SCORE *
+   *********************/
 
-		/*********************
-		 * PERFORMANCE SCORE *
-		 *********************/
+  orig_perf = perf_score = calculate_score(queue_cur);
 
-		orig_perf = perf_score = calculate_score(queue_cur);
+  /* Skip right away if -d is given, if we have done deterministic fuzzing on
+     this entry ourselves (was_fuzzed), or if it has gone through deterministic
+     testing in earlier, resumed runs (passed_det). */
 
-		/* Skip right away if -d is given, if we have done deterministic fuzzing on
-		   this entry ourselves (was_fuzzed), or if it has gone through deterministic
-		   testing in earlier, resumed runs (passed_det). */
+  if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
+    goto havoc_stage;
 
-		if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
-			goto havoc_stage;
+  /* Skip deterministic fuzzing if exec path checksum puts this out of scope
+     for this master instance. */
 
-		/* Skip deterministic fuzzing if exec path checksum puts this out of scope
-		   for this master instance. */
+  if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
+    goto havoc_stage;
 
-		if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
-			goto havoc_stage;
+  cur_ms_lv = get_cur_time();
+  if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) ||
+                            (last_crash_time != 0 &&
+                             cur_ms_lv - last_crash_time < limit_time_puppet) ||
+                            last_path_time == 0))) {
 
+    key_puppet = 1;
+    goto pacemaker_fuzzing;
 
-		cur_ms_lv = get_cur_time();
-		if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) ||
-			(last_crash_time != 0 && cur_ms_lv - last_crash_time < limit_time_puppet) || last_path_time == 0)))
-		{
-			key_puppet = 1;
-			goto pacemaker_fuzzing;
-		}
+  }
 
-		doing_det = 1;
+  doing_det = 1;
 
-		/*********************************************
-		 * SIMPLE BITFLIP (+dictionary construction) *
-		 *********************************************/
+  /*********************************************
+   * SIMPLE BITFLIP (+dictionary construction) *
+   *********************************************/
 
-#define FLIP_BIT(_ar, _b) do { \
-    u8* _arf = (u8*)(_ar); \
-    u32 _bf = (_b); \
-    _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
+#define FLIP_BIT(_ar, _b)                   \
+  do {                                      \
+                                            \
+    u8* _arf = (u8*)(_ar);                  \
+    u32 _bf = (_b);                         \
+    _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \
+                                            \
   } while (0)
 
-		 /* Single walking bit. */
+  /* Single walking bit. */
 
-		stage_short = "flip1";
-		stage_max = len << 3;
-		stage_name = "bitflip 1/1";
+  stage_short = "flip1";
+  stage_max = len << 3;
+  stage_name = "bitflip 1/1";
 
-		stage_val_type = STAGE_VAL_NONE;
+  stage_val_type = STAGE_VAL_NONE;
 
-		orig_hit_cnt = queued_paths + unique_crashes;
+  orig_hit_cnt = queued_paths + unique_crashes;
 
-		prev_cksum = queue_cur->exec_cksum;
+  prev_cksum = queue_cur->exec_cksum;
 
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
-			stage_cur_byte = stage_cur >> 3;
+    stage_cur_byte = stage_cur >> 3;
 
-			FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur);
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-			FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur);
 
-			/* While flipping the least significant bit in every byte, pull of an extra
-			   trick to detect possible syntax tokens. In essence, the idea is that if
-			   you have a binary blob like this:
+    /* While flipping the least significant bit in every byte, pull of an extra
+       trick to detect possible syntax tokens. In essence, the idea is that if
+       you have a binary blob like this:
 
-			   xxxxxxxxIHDRxxxxxxxx
+       xxxxxxxxIHDRxxxxxxxx
 
-			   ...and changing the leading and trailing bytes causes variable or no
-			   changes in program flow, but touching any character in the "IHDR" string
-			   always produces the same, distinctive path, it's highly likely that
-			   "IHDR" is an atomically-checked magic value of special significance to
-			   the fuzzed format.
+       ...and changing the leading and trailing bytes causes variable or no
+       changes in program flow, but touching any character in the "IHDR" string
+       always produces the same, distinctive path, it's highly likely that
+       "IHDR" is an atomically-checked magic value of special significance to
+       the fuzzed format.
 
-			   We do this here, rather than as a separate stage, because it's a nice
-			   way to keep the operation approximately "free" (i.e., no extra execs).
+       We do this here, rather than as a separate stage, because it's a nice
+       way to keep the operation approximately "free" (i.e., no extra execs).
 
-			   Empirically, performing the check when flipping the least significant bit
-			   is advantageous, compared to doing it at the time of more disruptive
-			   changes, where the program flow may be affected in more violent ways.
+       Empirically, performing the check when flipping the least significant bit
+       is advantageous, compared to doing it at the time of more disruptive
+       changes, where the program flow may be affected in more violent ways.
 
-			   The caveat is that we won't generate dictionaries in the -d mode or -S
-			   mode - but that's probably a fair trade-off.
+       The caveat is that we won't generate dictionaries in the -d mode or -S
+       mode - but that's probably a fair trade-off.
 
-			   This won't work particularly well with paths that exhibit variable
-			   behavior, but fails gracefully, so we'll carry out the checks anyway.
+       This won't work particularly well with paths that exhibit variable
+       behavior, but fails gracefully, so we'll carry out the checks anyway.
 
-			  */
+      */
 
-			if (!dumb_mode && (stage_cur & 7) == 7) {
+    if (!dumb_mode && (stage_cur & 7) == 7) {
 
-				u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
 
-				if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
+      if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
 
-					/* If at end of file and we are still collecting a string, grab the
-					   final character and force output. */
+        /* If at end of file and we are still collecting a string, grab the
+           final character and force output. */
 
-					if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-					++a_len;
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        ++a_len;
 
-					if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-						maybe_add_auto(a_collect, a_len);
+        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
+          maybe_add_auto(a_collect, a_len);
 
-				}
-				else if (cksum != prev_cksum) {
+      } else if (cksum != prev_cksum) {
 
-					/* Otherwise, if the checksum has changed, see if we have something
-					   worthwhile queued up, and collect that if the answer is yes. */
+        /* Otherwise, if the checksum has changed, see if we have something
+           worthwhile queued up, and collect that if the answer is yes. */
 
-					if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-						maybe_add_auto(a_collect, a_len);
+        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
+          maybe_add_auto(a_collect, a_len);
 
-					a_len = 0;
-					prev_cksum = cksum;
+        a_len = 0;
+        prev_cksum = cksum;
 
-				}
+      }
 
-				/* Continue collecting string, but only if the bit flip actually made
-				   any difference - we don't want no-op tokens. */
+      /* Continue collecting string, but only if the bit flip actually made
+         any difference - we don't want no-op tokens. */
 
-				if (cksum != queue_cur->exec_cksum) {
+      if (cksum != queue_cur->exec_cksum) {
 
-					if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-					++a_len;
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        ++a_len;
 
-				}
+      }
 
-			}
+    }
 
-		}
+  }
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-		stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP1] += stage_max;
+  stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP1] += stage_max;
 
+  /* Two walking bits. */
 
+  stage_name = "bitflip 2/1";
+  stage_short = "flip2";
+  stage_max = (len << 3) - 1;
 
-		/* Two walking bits. */
+  orig_hit_cnt = new_hit_cnt;
 
-		stage_name = "bitflip 2/1";
-		stage_short = "flip2";
-		stage_max = (len << 3) - 1;
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
-		orig_hit_cnt = new_hit_cnt;
+    stage_cur_byte = stage_cur >> 3;
 
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
 
-			stage_cur_byte = stage_cur >> 3;
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+  }
 
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
+  new_hit_cnt = queued_paths + unique_crashes;
 
-		}
+  stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP2] += stage_max;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  /* Four walking bits. */
 
-		stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP2] += stage_max;
+  stage_name = "bitflip 4/1";
+  stage_short = "flip4";
+  stage_max = (len << 3) - 3;
 
+  orig_hit_cnt = new_hit_cnt;
 
-		/* Four walking bits. */
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
-		stage_name = "bitflip 4/1";
-		stage_short = "flip4";
-		stage_max = (len << 3) - 3;
+    stage_cur_byte = stage_cur >> 3;
 
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, stage_cur + 2);
+    FLIP_BIT(out_buf, stage_cur + 3);
 
-		orig_hit_cnt = new_hit_cnt;
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, stage_cur + 2);
+    FLIP_BIT(out_buf, stage_cur + 3);
 
-			stage_cur_byte = stage_cur >> 3;
+  }
 
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
-			FLIP_BIT(out_buf, stage_cur + 2);
-			FLIP_BIT(out_buf, stage_cur + 3);
+  new_hit_cnt = queued_paths + unique_crashes;
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+  stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP4] += stage_max;
 
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
-			FLIP_BIT(out_buf, stage_cur + 2);
-			FLIP_BIT(out_buf, stage_cur + 3);
+  /* Effector map setup. These macros calculate:
 
-		}
+     EFF_APOS      - position of a particular file offset in the map.
+     EFF_ALEN      - length of a map with a particular number of bytes.
+     EFF_SPAN_ALEN - map span for a sequence of bytes.
 
-		new_hit_cnt = queued_paths + unique_crashes;
+   */
 
-		stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP4] += stage_max;
+#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
+#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
+#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
+#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1)
 
+  /* Initialize effector map for the next step (see comments below). Always
+         flag first and last byte as doing something. */
 
-		/* Effector map setup. These macros calculate:
+  eff_map = ck_alloc(EFF_ALEN(len));
+  eff_map[0] = 1;
 
-		   EFF_APOS      - position of a particular file offset in the map.
-		   EFF_ALEN      - length of a map with a particular number of bytes.
-		   EFF_SPAN_ALEN - map span for a sequence of bytes.
+  if (EFF_APOS(len - 1) != 0) {
 
-		 */
+    eff_map[EFF_APOS(len - 1)] = 1;
+    ++eff_cnt;
 
-#define EFF_APOS(_p)          ((_p) >> EFF_MAP_SCALE2)
-#define EFF_REM(_x)           ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
-#define EFF_ALEN(_l)          (EFF_APOS(_l) + !!EFF_REM(_l))
-#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)
+  }
 
-		 /* Initialize effector map for the next step (see comments below). Always
-			flag first and last byte as doing something. */
+  /* Walking byte. */
 
-		eff_map = ck_alloc(EFF_ALEN(len));
-		eff_map[0] = 1;
+  stage_name = "bitflip 8/8";
+  stage_short = "flip8";
+  stage_max = len;
 
-		if (EFF_APOS(len - 1) != 0) {
-			eff_map[EFF_APOS(len - 1)] = 1;
-			++eff_cnt;
-		}
+  orig_hit_cnt = new_hit_cnt;
 
-		/* Walking byte. */
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
-		stage_name = "bitflip 8/8";
-		stage_short = "flip8";
-		stage_max = len;
+    stage_cur_byte = stage_cur;
 
+    out_buf[stage_cur] ^= 0xFF;
 
-		orig_hit_cnt = new_hit_cnt;
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+    /* We also use this stage to pull off a simple trick: we identify
+       bytes that seem to have no effect on the current execution path
+       even when fully flipped - and we skip them during more expensive
+       deterministic stages, such as arithmetics or known ints. */
 
-			stage_cur_byte = stage_cur;
+    if (!eff_map[EFF_APOS(stage_cur)]) {
 
-			out_buf[stage_cur] ^= 0xFF;
+      u32 cksum;
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      /* If in dumb mode or if the file is very short, just flag everything
+         without wasting time on checksums. */
 
-			/* We also use this stage to pull off a simple trick: we identify
-			   bytes that seem to have no effect on the current execution path
-			   even when fully flipped - and we skip them during more expensive
-			   deterministic stages, such as arithmetics or known ints. */
+      if (!dumb_mode && len >= EFF_MIN_LEN)
+        cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      else
+        cksum = ~queue_cur->exec_cksum;
 
-			if (!eff_map[EFF_APOS(stage_cur)]) {
+      if (cksum != queue_cur->exec_cksum) {
 
-				u32 cksum;
+        eff_map[EFF_APOS(stage_cur)] = 1;
+        ++eff_cnt;
 
-				/* If in dumb mode or if the file is very short, just flag everything
-				   without wasting time on checksums. */
+      }
 
-				if (!dumb_mode && len >= EFF_MIN_LEN)
-					cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
-				else
-					cksum = ~queue_cur->exec_cksum;
+    }
 
-				if (cksum != queue_cur->exec_cksum) {
-					eff_map[EFF_APOS(stage_cur)] = 1;
-					++eff_cnt;
-				}
+    out_buf[stage_cur] ^= 0xFF;
 
-			}
+  }
 
-			out_buf[stage_cur] ^= 0xFF;
+  /* If the effector map is more than EFF_MAX_PERC dense, just flag the
+     whole thing as worth fuzzing, since we wouldn't be saving much time
+     anyway. */
 
-		}
+  if (eff_cnt != EFF_ALEN(len) &&
+      eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
 
-		/* If the effector map is more than EFF_MAX_PERC dense, just flag the
-		   whole thing as worth fuzzing, since we wouldn't be saving much time
-		   anyway. */
+    memset(eff_map, 1, EFF_ALEN(len));
 
-		if (eff_cnt != EFF_ALEN(len) &&
-			eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
+    blocks_eff_select += EFF_ALEN(len);
 
-			memset(eff_map, 1, EFF_ALEN(len));
+  } else {
 
-			blocks_eff_select += EFF_ALEN(len);
+    blocks_eff_select += eff_cnt;
 
-		}
-		else {
+  }
 
-			blocks_eff_select += eff_cnt;
+  blocks_eff_total += EFF_ALEN(len);
 
-		}
+  new_hit_cnt = queued_paths + unique_crashes;
 
-		blocks_eff_total += EFF_ALEN(len);
+  stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP8] += stage_max;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  /* Two walking bytes. */
 
-		stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP8] += stage_max;
+  if (len < 2) goto skip_bitflip;
 
+  stage_name = "bitflip 16/8";
+  stage_short = "flip16";
+  stage_cur = 0;
+  stage_max = len - 1;
 
+  orig_hit_cnt = new_hit_cnt;
 
-		/* Two walking bytes. */
+  for (i = 0; i < len - 1; ++i) {
 
-		if (len < 2) goto skip_bitflip;
+    /* Let's consult the effector map... */
 
-		stage_name = "bitflip 16/8";
-		stage_short = "flip16";
-		stage_cur = 0;
-		stage_max = len - 1;
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
+      --stage_max;
+      continue;
 
-		orig_hit_cnt = new_hit_cnt;
+    }
 
-		for (i = 0; i < len - 1; ++i) {
+    stage_cur_byte = i;
 
-			/* Let's consult the effector map... */
+    *(u16*)(out_buf + i) ^= 0xFFFF;
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				--stage_max;
-				continue;
-			}
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    ++stage_cur;
 
-			stage_cur_byte = i;
+    *(u16*)(out_buf + i) ^= 0xFFFF;
 
-			*(u16*)(out_buf + i) ^= 0xFFFF;
+  }
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-			++stage_cur;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-			*(u16*)(out_buf + i) ^= 0xFFFF;
+  stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP16] += stage_max;
 
+  if (len < 4) goto skip_bitflip;
 
-		}
+  /* Four walking bytes. */
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  stage_name = "bitflip 32/8";
+  stage_short = "flip32";
+  stage_cur = 0;
+  stage_max = len - 3;
 
-		stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP16] += stage_max;
+  orig_hit_cnt = new_hit_cnt;
 
+  for (i = 0; i < len - 3; ++i) {
 
+    /* Let's consult the effector map... */
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-		if (len < 4) goto skip_bitflip;
+      --stage_max;
+      continue;
 
-		/* Four walking bytes. */
+    }
 
-		stage_name = "bitflip 32/8";
-		stage_short = "flip32";
-		stage_cur = 0;
-		stage_max = len - 3;
+    stage_cur_byte = i;
 
+    *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    ++stage_cur;
+
+    *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
 
-		orig_hit_cnt = new_hit_cnt;
+  stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP32] += stage_max;
 
-		for (i = 0; i < len - 3; ++i) {
+skip_bitflip:
 
-			/* Let's consult the effector map... */
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				--stage_max;
-				continue;
-			}
+  if (no_arith) goto skip_arith;
 
-			stage_cur_byte = i;
+  /**********************
+   * ARITHMETIC INC/DEC *
+   **********************/
 
-			*(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+  /* 8-bit arithmetics. */
 
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-			++stage_cur;
+  stage_name = "arith 8/8";
+  stage_short = "arith8";
+  stage_cur = 0;
+  stage_max = 2 * len * ARITH_MAX;
 
-			*(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+  stage_val_type = STAGE_VAL_LE;
 
-		}
+  orig_hit_cnt = new_hit_cnt;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  for (i = 0; i < len; ++i) {
 
-		stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP32] += stage_max;
+    u8 orig = out_buf[i];
 
+    /* Let's consult the effector map... */
 
+    if (!eff_map[EFF_APOS(i)]) {
 
+      stage_max -= 2 * ARITH_MAX;
+      continue;
 
-	skip_bitflip:
+    }
 
-		if (no_arith) goto skip_arith;
+    stage_cur_byte = i;
 
-		/**********************
-		 * ARITHMETIC INC/DEC *
-		 **********************/
+    for (j = 1; j <= ARITH_MAX; ++j) {
 
-		 /* 8-bit arithmetics. */
+      u8 r = orig ^ (orig + j);
 
-		stage_name = "arith 8/8";
-		stage_short = "arith8";
-		stage_cur = 0;
-		stage_max = 2 * len * ARITH_MAX;
+      /* Do arithmetic operations only if the result couldn't be a product
+         of a bitflip. */
 
+      if (!could_be_bitflip(r)) {
 
-		stage_val_type = STAGE_VAL_LE;
+        stage_cur_val = j;
+        out_buf[i] = orig + j;
 
-		orig_hit_cnt = new_hit_cnt;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-		for (i = 0; i < len; ++i) {
+      } else
 
-			u8 orig = out_buf[i];
+        --stage_max;
 
-			/* Let's consult the effector map... */
+      r = orig ^ (orig - j);
 
-			if (!eff_map[EFF_APOS(i)]) {
-				stage_max -= 2 * ARITH_MAX;
-				continue;
-			}
+      if (!could_be_bitflip(r)) {
 
-			stage_cur_byte = i;
+        stage_cur_val = -j;
+        out_buf[i] = orig - j;
 
-			for (j = 1; j <= ARITH_MAX; ++j) {
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				u8 r = orig ^ (orig + j);
+      } else
 
-				/* Do arithmetic operations only if the result couldn't be a product
-				   of a bitflip. */
+        --stage_max;
 
-				if (!could_be_bitflip(r)) {
+      out_buf[i] = orig;
 
-					stage_cur_val = j;
-					out_buf[i] = orig + j;
+    }
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+  }
 
-				} else --stage_max;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-				r = orig ^ (orig - j);
+  stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH8] += stage_max;
 
-				if (!could_be_bitflip(r)) {
+  /* 16-bit arithmetics, both endians. */
 
-					stage_cur_val = -j;
-					out_buf[i] = orig - j;
+  if (len < 2) goto skip_arith;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+  stage_name = "arith 16/8";
+  stage_short = "arith16";
+  stage_cur = 0;
+  stage_max = 4 * (len - 1) * ARITH_MAX;
 
-				} else --stage_max;
+  orig_hit_cnt = new_hit_cnt;
 
-				out_buf[i] = orig;
+  for (i = 0; i < len - 1; ++i) {
 
-			}
+    u16 orig = *(u16*)(out_buf + i);
 
-		}
+    /* Let's consult the effector map... */
 
-		new_hit_cnt = queued_paths + unique_crashes;
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-		stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH8] += stage_max;
+      stage_max -= 4 * ARITH_MAX;
+      continue;
 
+    }
 
+    stage_cur_byte = i;
 
+    for (j = 1; j <= ARITH_MAX; ++j) {
 
-		/* 16-bit arithmetics, both endians. */
+      u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
+          r3 = orig ^ SWAP16(SWAP16(orig) + j),
+          r4 = orig ^ SWAP16(SWAP16(orig) - j);
 
-		if (len < 2) goto skip_arith;
+      /* Try little endian addition and subtraction first. Do it only
+         if the operation would affect more than one byte (hence the
+         & 0xff overflow checks) and if it couldn't be a product of
+         a bitflip. */
 
-		stage_name = "arith 16/8";
-		stage_short = "arith16";
-		stage_cur = 0;
-		stage_max = 4 * (len - 1) * ARITH_MAX;
+      stage_val_type = STAGE_VAL_LE;
 
+      if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
 
-		orig_hit_cnt = new_hit_cnt;
+        stage_cur_val = j;
+        *(u16*)(out_buf + i) = orig + j;
 
-		for (i = 0; i < len - 1; ++i) {
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-			u16 orig = *(u16*)(out_buf + i);
+      } else
 
-			/* Let's consult the effector map... */
+        --stage_max;
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				stage_max -= 4 * ARITH_MAX;
-				continue;
-			}
+      if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
 
-			stage_cur_byte = i;
+        stage_cur_val = -j;
+        *(u16*)(out_buf + i) = orig - j;
 
-			for (j = 1; j <= ARITH_MAX; ++j) {
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				u16 r1 = orig ^ (orig + j),
-					r2 = orig ^ (orig - j),
-					r3 = orig ^ SWAP16(SWAP16(orig) + j),
-					r4 = orig ^ SWAP16(SWAP16(orig) - j);
+      } else
 
-				/* Try little endian addition and subtraction first. Do it only
-				   if the operation would affect more than one byte (hence the
-				   & 0xff overflow checks) and if it couldn't be a product of
-				   a bitflip. */
+        --stage_max;
 
-				stage_val_type = STAGE_VAL_LE;
+      /* Big endian comes next. Same deal. */
 
-				if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
+      stage_val_type = STAGE_VAL_BE;
 
-					stage_cur_val = j;
-					*(u16*)(out_buf + i) = orig + j;
+      if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+        stage_cur_val = j;
+        *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
 
-				} else --stage_max;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
+      } else
 
-					stage_cur_val = -j;
-					*(u16*)(out_buf + i) = orig - j;
+        --stage_max;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+      if ((orig >> 8) < j && !could_be_bitflip(r4)) {
 
-				} else --stage_max;
+        stage_cur_val = -j;
+        *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
 
-				/* Big endian comes next. Same deal. */
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				stage_val_type = STAGE_VAL_BE;
+      } else
 
+        --stage_max;
 
-				if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
+      *(u16*)(out_buf + i) = orig;
 
-					stage_cur_val = j;
-					*(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
+    }
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+  }
 
-				} else --stage_max;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-				if ((orig >> 8) < j && !could_be_bitflip(r4)) {
+  stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH16] += stage_max;
 
-					stage_cur_val = -j;
-					*(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
+  /* 32-bit arithmetics, both endians. */
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+  if (len < 4) goto skip_arith;
 
-				} else --stage_max;
+  stage_name = "arith 32/8";
+  stage_short = "arith32";
+  stage_cur = 0;
+  stage_max = 4 * (len - 3) * ARITH_MAX;
 
-				*(u16*)(out_buf + i) = orig;
+  orig_hit_cnt = new_hit_cnt;
 
-			}
+  for (i = 0; i < len - 3; ++i) {
 
-		}
+    u32 orig = *(u32*)(out_buf + i);
 
-		new_hit_cnt = queued_paths + unique_crashes;
+    /* Let's consult the effector map... */
 
-		stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH16] += stage_max;
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
+      stage_max -= 4 * ARITH_MAX;
+      continue;
 
+    }
 
-		/* 32-bit arithmetics, both endians. */
+    stage_cur_byte = i;
 
-		if (len < 4) goto skip_arith;
+    for (j = 1; j <= ARITH_MAX; ++j) {
 
-		stage_name = "arith 32/8";
-		stage_short = "arith32";
-		stage_cur = 0;
-		stage_max = 4 * (len - 3) * ARITH_MAX;
+      u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
+          r3 = orig ^ SWAP32(SWAP32(orig) + j),
+          r4 = orig ^ SWAP32(SWAP32(orig) - j);
 
-		orig_hit_cnt = new_hit_cnt;
+      /* Little endian first. Same deal as with 16-bit: we only want to
+         try if the operation would have effect on more than two bytes. */
 
-		for (i = 0; i < len - 3; ++i) {
+      stage_val_type = STAGE_VAL_LE;
 
-			u32 orig = *(u32*)(out_buf + i);
+      if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
 
-			/* Let's consult the effector map... */
+        stage_cur_val = j;
+        *(u32*)(out_buf + i) = orig + j;
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				stage_max -= 4 * ARITH_MAX;
-				continue;
-			}
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-			stage_cur_byte = i;
+      } else
 
-			for (j = 1; j <= ARITH_MAX; ++j) {
+        --stage_max;
 
-				u32 r1 = orig ^ (orig + j),
-					r2 = orig ^ (orig - j),
-					r3 = orig ^ SWAP32(SWAP32(orig) + j),
-					r4 = orig ^ SWAP32(SWAP32(orig) - j);
+      if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
 
-				/* Little endian first. Same deal as with 16-bit: we only want to
-				   try if the operation would have effect on more than two bytes. */
+        stage_cur_val = -j;
+        *(u32*)(out_buf + i) = orig - j;
 
-				stage_val_type = STAGE_VAL_LE;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
+      } else
 
-					stage_cur_val = j;
-					*(u32*)(out_buf + i) = orig + j;
+        --stage_max;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+      /* Big endian next. */
 
-				} else --stage_max;
+      stage_val_type = STAGE_VAL_BE;
 
-				if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
+      if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
 
-					stage_cur_val = -j;
-					*(u32*)(out_buf + i) = orig - j;
+        stage_cur_val = j;
+        *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				} else --stage_max;
+      } else
 
-				/* Big endian next. */
+        --stage_max;
 
-				stage_val_type = STAGE_VAL_BE;
+      if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
 
-				if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
+        stage_cur_val = -j;
+        *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
 
-					stage_cur_val = j;
-					*(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+      } else
 
-				} else --stage_max;
+        --stage_max;
 
-				if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
+      *(u32*)(out_buf + i) = orig;
 
-					stage_cur_val = -j;
-					*(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
+    }
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+  }
 
-				} else --stage_max;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-				*(u32*)(out_buf + i) = orig;
+  stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH32] += stage_max;
 
-			}
+skip_arith:
 
-		}
+  /**********************
+   * INTERESTING VALUES *
+   **********************/
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  stage_name = "interest 8/8";
+  stage_short = "int8";
+  stage_cur = 0;
+  stage_max = len * sizeof(interesting_8);
 
-		stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH32] += stage_max;
+  stage_val_type = STAGE_VAL_LE;
 
+  orig_hit_cnt = new_hit_cnt;
 
+  /* Setting 8-bit integers. */
 
-	skip_arith:
+  for (i = 0; i < len; ++i) {
 
-		/**********************
-		 * INTERESTING VALUES *
-		 **********************/
+    u8 orig = out_buf[i];
 
-		stage_name = "interest 8/8";
-		stage_short = "int8";
-		stage_cur = 0;
-		stage_max = len * sizeof(interesting_8);
+    /* Let's consult the effector map... */
 
+    if (!eff_map[EFF_APOS(i)]) {
 
+      stage_max -= sizeof(interesting_8);
+      continue;
 
-		stage_val_type = STAGE_VAL_LE;
+    }
 
-		orig_hit_cnt = new_hit_cnt;
+    stage_cur_byte = i;
 
-		/* Setting 8-bit integers. */
+    for (j = 0; j < sizeof(interesting_8); ++j) {
 
-		for (i = 0; i < len; ++i) {
+      /* Skip if the value could be a product of bitflips or arithmetics. */
 
-			u8 orig = out_buf[i];
+      if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
+          could_be_arith(orig, (u8)interesting_8[j], 1)) {
 
-			/* Let's consult the effector map... */
+        --stage_max;
+        continue;
 
-			if (!eff_map[EFF_APOS(i)]) {
-				stage_max -= sizeof(interesting_8);
-				continue;
-			}
+      }
 
-			stage_cur_byte = i;
+      stage_cur_val = interesting_8[j];
+      out_buf[i] = interesting_8[j];
 
-			for (j = 0; j < sizeof(interesting_8); ++j) {
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-				/* Skip if the value could be a product of bitflips or arithmetics. */
+      out_buf[i] = orig;
+      ++stage_cur;
 
-				if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
-					could_be_arith(orig, (u8)interesting_8[j], 1)) {
-					--stage_max;
-					continue;
-				}
+    }
 
-				stage_cur_val = interesting_8[j];
-				out_buf[i] = interesting_8[j];
+  }
 
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-				out_buf[i] = orig;
-				++stage_cur;
+  stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST8] += stage_max;
 
-			}
+  /* Setting 16-bit integers, both endians. */
 
-		}
+  if (no_arith || len < 2) goto skip_interest;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  stage_name = "interest 16/8";
+  stage_short = "int16";
+  stage_cur = 0;
+  stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
 
-		stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST8] += stage_max;
+  orig_hit_cnt = new_hit_cnt;
 
+  for (i = 0; i < len - 1; ++i) {
 
+    u16 orig = *(u16*)(out_buf + i);
 
-		/* Setting 16-bit integers, both endians. */
+    /* Let's consult the effector map... */
 
-		if (no_arith || len < 2) goto skip_interest;
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-		stage_name = "interest 16/8";
-		stage_short = "int16";
-		stage_cur = 0;
-		stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
+      stage_max -= sizeof(interesting_16);
+      continue;
 
+    }
 
-		orig_hit_cnt = new_hit_cnt;
+    stage_cur_byte = i;
 
-		for (i = 0; i < len - 1; ++i) {
+    for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
 
-			u16 orig = *(u16*)(out_buf + i);
+      stage_cur_val = interesting_16[j];
 
-			/* Let's consult the effector map... */
+      /* Skip if this could be a product of a bitflip, arithmetics,
+         or single-byte interesting value insertion. */
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				stage_max -= sizeof(interesting_16);
-				continue;
-			}
+      if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) &&
+          !could_be_arith(orig, (u16)interesting_16[j], 2) &&
+          !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
 
-			stage_cur_byte = i;
+        stage_val_type = STAGE_VAL_LE;
 
-			for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
+        *(u16*)(out_buf + i) = interesting_16[j];
 
-				stage_cur_val = interesting_16[j];
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				/* Skip if this could be a product of a bitflip, arithmetics,
-				   or single-byte interesting value insertion. */
+      } else
 
-				if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) &&
-					!could_be_arith(orig, (u16)interesting_16[j], 2) &&
-					!could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
+        --stage_max;
 
-					stage_val_type = STAGE_VAL_LE;
+      if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
+          !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
+          !could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
+          !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
 
-					*(u16*)(out_buf + i) = interesting_16[j];
+        stage_val_type = STAGE_VAL_BE;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+        *(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				} else --stage_max;
+      } else
 
-				if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
-					!could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
-					!could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
-					!could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
+        --stage_max;
 
-					stage_val_type = STAGE_VAL_BE;
+    }
 
-					*(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+    *(u16*)(out_buf + i) = orig;
 
-				} else --stage_max;
+  }
 
-			}
+  new_hit_cnt = queued_paths + unique_crashes;
 
-			*(u16*)(out_buf + i) = orig;
+  stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST16] += stage_max;
 
-		}
+  if (len < 4) goto skip_interest;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  /* Setting 32-bit integers, both endians. */
 
-		stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST16] += stage_max;
+  stage_name = "interest 32/8";
+  stage_short = "int32";
+  stage_cur = 0;
+  stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
 
+  orig_hit_cnt = new_hit_cnt;
 
+  for (i = 0; i < len - 3; ++i) {
 
+    u32 orig = *(u32*)(out_buf + i);
 
-		if (len < 4) goto skip_interest;
+    /* Let's consult the effector map... */
 
-		/* Setting 32-bit integers, both endians. */
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-		stage_name = "interest 32/8";
-		stage_short = "int32";
-		stage_cur = 0;
-		stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
+      stage_max -= sizeof(interesting_32) >> 1;
+      continue;
 
+    }
 
-		orig_hit_cnt = new_hit_cnt;
+    stage_cur_byte = i;
 
-		for (i = 0; i < len - 3; ++i) {
+    for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
 
-			u32 orig = *(u32*)(out_buf + i);
+      stage_cur_val = interesting_32[j];
 
-			/* Let's consult the effector map... */
+      /* Skip if this could be a product of a bitflip, arithmetics,
+         or word interesting value insertion. */
 
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				stage_max -= sizeof(interesting_32) >> 1;
-				continue;
-			}
+      if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) &&
+          !could_be_arith(orig, interesting_32[j], 4) &&
+          !could_be_interest(orig, interesting_32[j], 4, 0)) {
 
-			stage_cur_byte = i;
+        stage_val_type = STAGE_VAL_LE;
 
-			for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
+        *(u32*)(out_buf + i) = interesting_32[j];
 
-				stage_cur_val = interesting_32[j];
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				/* Skip if this could be a product of a bitflip, arithmetics,
-				   or word interesting value insertion. */
+      } else
 
-				if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) &&
-					!could_be_arith(orig, interesting_32[j], 4) &&
-					!could_be_interest(orig, interesting_32[j], 4, 0)) {
+        --stage_max;
 
-					stage_val_type = STAGE_VAL_LE;
+      if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
+          !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
+          !could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
+          !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
 
-					*(u32*)(out_buf + i) = interesting_32[j];
+        stage_val_type = STAGE_VAL_BE;
 
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+        *(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
 
-				} else --stage_max;
+      } else
 
-				if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
-					!could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
-					!could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
-					!could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
+        --stage_max;
 
-					stage_val_type = STAGE_VAL_BE;
+    }
 
-					*(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
+    *(u32*)(out_buf + i) = orig;
 
-				} else --stage_max;
+  }
 
-			}
+  new_hit_cnt = queued_paths + unique_crashes;
 
-			*(u32*)(out_buf + i) = orig;
+  stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST32] += stage_max;
 
-		}
+skip_interest:
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  /********************
+   * DICTIONARY STUFF *
+   ********************/
 
-		stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST32] += stage_max;
+  if (!extras_cnt) goto skip_user_extras;
 
+  /* Overwrite with user-supplied extras. */
 
+  stage_name = "user extras (over)";
+  stage_short = "ext_UO";
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
 
-	skip_interest:
+  stage_val_type = STAGE_VAL_NONE;
 
-		/********************
-		 * DICTIONARY STUFF *
-		 ********************/
+  orig_hit_cnt = new_hit_cnt;
 
-		if (!extras_cnt) goto skip_user_extras;
+  for (i = 0; i < len; ++i) {
 
-		/* Overwrite with user-supplied extras. */
+    u32 last_len = 0;
 
-		stage_name = "user extras (over)";
-		stage_short = "ext_UO";
-		stage_cur = 0;
-		stage_max = extras_cnt * len;
+    stage_cur_byte = i;
 
+    /* Extras are sorted by size, from smallest to largest. This means
+       that we don't have to worry about restoring the buffer in
+       between writes at a particular offset determined by the outer
+       loop. */
 
-		stage_val_type = STAGE_VAL_NONE;
+    for (j = 0; j < extras_cnt; ++j) {
 
-		orig_hit_cnt = new_hit_cnt;
+      /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
+         skip them if there's no room to insert the payload, if the token
+         is redundant, or if its entire span has no bytes set in the effector
+         map. */
 
-		for (i = 0; i < len; ++i) {
+      if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
+          extras[j].len > len - i ||
+          !memcmp(extras[j].data, out_buf + i, extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
 
-			u32 last_len = 0;
+        --stage_max;
+        continue;
 
-			stage_cur_byte = i;
+      }
 
-			/* Extras are sorted by size, from smallest to largest. This means
-			   that we don't have to worry about restoring the buffer in
-			   between writes at a particular offset determined by the outer
-			   loop. */
+      last_len = extras[j].len;
+      memcpy(out_buf + i, extras[j].data, last_len);
 
-			for (j = 0; j < extras_cnt; ++j) {
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-				/* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
-				   skip them if there's no room to insert the payload, if the token
-				   is redundant, or if its entire span has no bytes set in the effector
-				   map. */
+      ++stage_cur;
 
-				if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
-					extras[j].len > len - i ||
-					!memcmp(extras[j].data, out_buf + i, extras[j].len) ||
-					!memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
+    }
 
-					--stage_max;
-					continue;
+    /* Restore all the clobbered memory. */
+    memcpy(out_buf + i, in_buf + i, last_len);
 
-				}
+  }
 
-				last_len = extras[j].len;
-				memcpy(out_buf + i, extras[j].data, last_len);
+  new_hit_cnt = queued_paths + unique_crashes;
 
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+  stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_UO] += stage_max;
 
-				++stage_cur;
+  /* Insertion of user-supplied extras. */
 
-			}
+  stage_name = "user extras (insert)";
+  stage_short = "ext_UI";
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
 
-			/* Restore all the clobbered memory. */
-			memcpy(out_buf + i, in_buf + i, last_len);
+  orig_hit_cnt = new_hit_cnt;
 
-		}
+  ex_tmp = ck_alloc(len + MAX_DICT_FILE);
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  for (i = 0; i <= len; ++i) {
 
-		stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_UO] += stage_max;
+    stage_cur_byte = i;
 
-		/* Insertion of user-supplied extras. */
+    for (j = 0; j < extras_cnt; ++j) {
 
-		stage_name = "user extras (insert)";
-		stage_short = "ext_UI";
-		stage_cur = 0;
-		stage_max = extras_cnt * len;
+      if (len + extras[j].len > MAX_FILE) {
 
+        --stage_max;
+        continue;
 
+      }
 
+      /* Insert token */
+      memcpy(ex_tmp + i, extras[j].data, extras[j].len);
 
-		orig_hit_cnt = new_hit_cnt;
+      /* Copy tail */
+      memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
 
-		ex_tmp = ck_alloc(len + MAX_DICT_FILE);
+      if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
 
-		for (i = 0; i <= len; ++i) {
+        ck_free(ex_tmp);
+        goto abandon_entry;
 
-			stage_cur_byte = i;
+      }
 
-			for (j = 0; j < extras_cnt; ++j) {
+      ++stage_cur;
 
-				if (len + extras[j].len > MAX_FILE) {
-					--stage_max;
-					continue;
-				}
+    }
 
-				/* Insert token */
-				memcpy(ex_tmp + i, extras[j].data, extras[j].len);
+    /* Copy head */
+    ex_tmp[i] = out_buf[i];
 
-				/* Copy tail */
-				memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
+  }
 
-				if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
-					ck_free(ex_tmp);
-					goto abandon_entry;
-				}
+  ck_free(ex_tmp);
 
-				++stage_cur;
+  new_hit_cnt = queued_paths + unique_crashes;
 
-			}
+  stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_UI] += stage_max;
 
-			/* Copy head */
-			ex_tmp[i] = out_buf[i];
+skip_user_extras:
 
-		}
+  if (!a_extras_cnt) goto skip_extras;
 
-		ck_free(ex_tmp);
+  stage_name = "auto extras (over)";
+  stage_short = "ext_AO";
+  stage_cur = 0;
+  stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
 
-		new_hit_cnt = queued_paths + unique_crashes;
+  stage_val_type = STAGE_VAL_NONE;
 
-		stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_UI] += stage_max;
+  orig_hit_cnt = new_hit_cnt;
 
-	skip_user_extras:
+  for (i = 0; i < len; ++i) {
 
-		if (!a_extras_cnt) goto skip_extras;
+    u32 last_len = 0;
 
-		stage_name = "auto extras (over)";
-		stage_short = "ext_AO";
-		stage_cur = 0;
-		stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
+    stage_cur_byte = i;
 
+    for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
 
-		stage_val_type = STAGE_VAL_NONE;
+      /* See the comment in the earlier code; extras are sorted by size. */
 
-		orig_hit_cnt = new_hit_cnt;
+      if (a_extras[j].len > len - i ||
+          !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1,
+                  EFF_SPAN_ALEN(i, a_extras[j].len))) {
 
-		for (i = 0; i < len; ++i) {
+        --stage_max;
+        continue;
 
-			u32 last_len = 0;
+      }
 
-			stage_cur_byte = i;
+      last_len = a_extras[j].len;
+      memcpy(out_buf + i, a_extras[j].data, last_len);
 
-			for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-				/* See the comment in the earlier code; extras are sorted by size. */
+      ++stage_cur;
 
-				if (a_extras[j].len > len - i ||
-					!memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
-					!memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) {
+    }
 
-					--stage_max;
-					continue;
+    /* Restore all the clobbered memory. */
+    memcpy(out_buf + i, in_buf + i, last_len);
 
-				}
+  }
 
-				last_len = a_extras[j].len;
-				memcpy(out_buf + i, a_extras[j].data, last_len);
+  new_hit_cnt = queued_paths + unique_crashes;
 
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+  stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_AO] += stage_max;
 
-				++stage_cur;
+skip_extras:
 
-			}
+  /* If we made this to here without jumping to havoc_stage or abandon_entry,
+     we're properly done with deterministic steps and can mark it as such
+     in the .state/ directory. */
 
-			/* Restore all the clobbered memory. */
-			memcpy(out_buf + i, in_buf + i, last_len);
+  if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
 
-		}
+  /****************
+   * RANDOM HAVOC *
+   ****************/
 
-		new_hit_cnt = queued_paths + unique_crashes;
+havoc_stage:
+pacemaker_fuzzing:
 
-		stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_AO] += stage_max;
+  stage_cur_byte = -1;
 
-	skip_extras:
+  /* The havoc stage mutation code is also invoked when splicing files; if the
+     splice_cycle variable is set, generate different descriptions and such. */
 
-		/* If we made this to here without jumping to havoc_stage or abandon_entry,
-		   we're properly done with deterministic steps and can mark it as such
-		   in the .state/ directory. */
+  if (!splice_cycle) {
 
-		if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
+    stage_name = "MOpt-havoc";
+    stage_short = "MOpt_havoc";
+    stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
+                havoc_div / 100;
 
-		/****************
-		 * RANDOM HAVOC *
-		 ****************/
+  } else {
 
-	havoc_stage:
-	pacemaker_fuzzing:
+    static u8 tmp[32];
 
+    perf_score = orig_perf;
 
-		stage_cur_byte = -1;
+    sprintf(tmp, "MOpt-core-splice %u", splice_cycle);
+    stage_name = tmp;
+    stage_short = "MOpt_core_splice";
+    stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
 
-		/* The havoc stage mutation code is also invoked when splicing files; if the
-		   splice_cycle variable is set, generate different descriptions and such. */
+  }
 
-		if (!splice_cycle) {
+  s32 temp_len_puppet;
+  cur_ms_lv = get_cur_time();
 
-			stage_name = "MOpt-havoc";
-			stage_short = "MOpt_havoc";
-			stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-				perf_score / havoc_div / 100;
+  // for (; swarm_now < swarm_num; ++swarm_now)
+  {
 
-		} else {
+    if (key_puppet == 1) {
 
-			static u8 tmp[32];
+      if (unlikely(orig_hit_cnt_puppet == 0)) {
 
-			perf_score = orig_perf;
+        orig_hit_cnt_puppet = queued_paths + unique_crashes;
+        last_limit_time_start = get_cur_time();
+        SPLICE_CYCLES_puppet =
+            (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+             SPLICE_CYCLES_puppet_low);
 
-			sprintf(tmp, "MOpt-core-splice %u", splice_cycle);
-			stage_name = tmp;
-			stage_short = "MOpt_core_splice";
-			stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+      }
 
-		}
+    }
 
-		s32 temp_len_puppet;
-		cur_ms_lv = get_cur_time();
+    {
 
-		//for (; swarm_now < swarm_num; ++swarm_now)
-		{
-			if (key_puppet == 1) {
-				if (unlikely(orig_hit_cnt_puppet == 0)) {
-					orig_hit_cnt_puppet = queued_paths + unique_crashes;
-					last_limit_time_start = get_cur_time();
-					SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low);
-				}
-			}
-			{
 #ifndef IGNORE_FINDS
-			havoc_stage_puppet:
+    havoc_stage_puppet:
 #endif
 
-				stage_cur_byte = -1;
-
-				/* The havoc stage mutation code is also invoked when splicing files; if the
-				   splice_cycle variable is set, generate different descriptions and such. */
-
-				if (!splice_cycle) {
-					stage_name = "MOpt core avoc";
-					stage_short = "MOpt_core_havoc";
-					stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-						perf_score / havoc_div / 100;
-				} else {
-					static u8 tmp[32];
-					perf_score = orig_perf;
-					sprintf(tmp, "MOpt core splice %u", splice_cycle);
-					stage_name = tmp;
-					stage_short = "MOpt_core_splice";
-					stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
-				}
-
-				if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
-				temp_len = len;
-				orig_hit_cnt = queued_paths + unique_crashes;
-				havoc_queued = queued_paths;
-
-				for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-					u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
-					stage_cur_val = use_stacking;
-
-					for (i = 0; i < operator_num; ++i) {
-						core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet_v2[i];
-					}
-
-					for (i = 0; i < use_stacking; ++i) {
-
-						switch (select_algorithm()) {
-
-						case 0:
-							/* Flip a single bit somewhere. Spooky! */
-							FLIP_BIT(out_buf, UR(temp_len << 3));
-							core_operator_cycles_puppet_v2[STAGE_FLIP1] += 1;
-							break;
-
-
-						case 1:
-							if (temp_len < 2) break;
-							temp_len_puppet = UR(temp_len << 3);
-							FLIP_BIT(out_buf, temp_len_puppet);
-							FLIP_BIT(out_buf, temp_len_puppet + 1);
-							core_operator_cycles_puppet_v2[STAGE_FLIP2] += 1;
-							break;
-
-						case 2:
-							if (temp_len < 2) break;
-							temp_len_puppet = UR(temp_len << 3);
-							FLIP_BIT(out_buf, temp_len_puppet);
-							FLIP_BIT(out_buf, temp_len_puppet + 1);
-							FLIP_BIT(out_buf, temp_len_puppet + 2);
-							FLIP_BIT(out_buf, temp_len_puppet + 3);
-							core_operator_cycles_puppet_v2[STAGE_FLIP4] += 1;
-							break;
-
-						case 3:
-							if (temp_len < 4) break;
-							out_buf[UR(temp_len)] ^= 0xFF;
-							core_operator_cycles_puppet_v2[STAGE_FLIP8] += 1;
-							break;
-
-						case 4:
-							if (temp_len < 8) break;
-							*(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF;
-							core_operator_cycles_puppet_v2[STAGE_FLIP16] += 1;
-							break;
-
-						case 5:
-							if (temp_len < 8) break;
-							*(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF;
-							core_operator_cycles_puppet_v2[STAGE_FLIP32] += 1;
-							break;
-
-						case 6:
-							out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
-							out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
-							core_operator_cycles_puppet_v2[STAGE_ARITH8] += 1;
-							break;
-
-						case 7:
-							/* Randomly subtract from word, random endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 1);
-								*(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
-							} else {
-								u32 pos = UR(temp_len - 1);
-								u16 num = 1 + UR(ARITH_MAX);
-								*(u16*)(out_buf + pos) =
-									SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
-							}
-							/* Randomly add to word, random endian. */
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 1);
-								*(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
-							} else {
-								u32 pos = UR(temp_len - 1);
-								u16 num = 1 + UR(ARITH_MAX);
-								*(u16*)(out_buf + pos) =
-									SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
-							}
-							core_operator_cycles_puppet_v2[STAGE_ARITH16] += 1;
-							break;
-
-
-						case 8:
-							/* Randomly subtract from dword, random endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 3);
-								*(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
-							} else {
-								u32 pos = UR(temp_len - 3);
-								u32 num = 1 + UR(ARITH_MAX);
-								*(u32*)(out_buf + pos) =
-									SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
-							}
-							/* Randomly add to dword, random endian. */
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 3);
-								*(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
-							} else {
-								u32 pos = UR(temp_len - 3);
-								u32 num = 1 + UR(ARITH_MAX);
-								*(u32*)(out_buf + pos) =
-									SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
-							}
-							core_operator_cycles_puppet_v2[STAGE_ARITH32] += 1;
-							break;
-
-
-						case 9:
-							/* Set byte to interesting value. */
-							if (temp_len < 4) break;
-							out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
-							core_operator_cycles_puppet_v2[STAGE_INTEREST8] += 1;
-							break;
-
-						case 10:
-							/* Set word to interesting value, randomly choosing endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								*(u16*)(out_buf + UR(temp_len - 1)) =
-									interesting_16[UR(sizeof(interesting_16) >> 1)];
-							} else {
-								*(u16*)(out_buf + UR(temp_len - 1)) = SWAP16(
-									interesting_16[UR(sizeof(interesting_16) >> 1)]);
-							}
-							core_operator_cycles_puppet_v2[STAGE_INTEREST16] += 1;
-							break;
-
-
-						case 11:
-							/* Set dword to interesting value, randomly choosing endian. */
-
-							if (temp_len < 8) break;
-
-							if (UR(2)) {
-								*(u32*)(out_buf + UR(temp_len - 3)) =
-									interesting_32[UR(sizeof(interesting_32) >> 2)];
-							} else {
-								*(u32*)(out_buf + UR(temp_len - 3)) = SWAP32(
-									interesting_32[UR(sizeof(interesting_32) >> 2)]);
-							}
-							core_operator_cycles_puppet_v2[STAGE_INTEREST32] += 1;
-							break;
+      stage_cur_byte = -1;
+
+      /* The havoc stage mutation code is also invoked when splicing files; if
+         the splice_cycle variable is set, generate different descriptions and
+         such. */
+
+      if (!splice_cycle) {
+
+        stage_name = "MOpt core avoc";
+        stage_short = "MOpt_core_havoc";
+        stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+                    perf_score / havoc_div / 100;
+
+      } else {
+
+        static u8 tmp[32];
+        perf_score = orig_perf;
+        sprintf(tmp, "MOpt core splice %u", splice_cycle);
+        stage_name = tmp;
+        stage_short = "MOpt_core_splice";
+        stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+
+      }
+
+      if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
+      temp_len = len;
+      orig_hit_cnt = queued_paths + unique_crashes;
+      havoc_queued = queued_paths;
+
+      for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+        u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
+        stage_cur_val = use_stacking;
+
+        for (i = 0; i < operator_num; ++i) {
+
+          core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet_v2[i];
+
+        }
+
+        for (i = 0; i < use_stacking; ++i) {
+
+          switch (select_algorithm()) {
+
+            case 0:
+              /* Flip a single bit somewhere. Spooky! */
+              FLIP_BIT(out_buf, UR(temp_len << 3));
+              core_operator_cycles_puppet_v2[STAGE_FLIP1] += 1;
+              break;
+
+            case 1:
+              if (temp_len < 2) break;
+              temp_len_puppet = UR(temp_len << 3);
+              FLIP_BIT(out_buf, temp_len_puppet);
+              FLIP_BIT(out_buf, temp_len_puppet + 1);
+              core_operator_cycles_puppet_v2[STAGE_FLIP2] += 1;
+              break;
+
+            case 2:
+              if (temp_len < 2) break;
+              temp_len_puppet = UR(temp_len << 3);
+              FLIP_BIT(out_buf, temp_len_puppet);
+              FLIP_BIT(out_buf, temp_len_puppet + 1);
+              FLIP_BIT(out_buf, temp_len_puppet + 2);
+              FLIP_BIT(out_buf, temp_len_puppet + 3);
+              core_operator_cycles_puppet_v2[STAGE_FLIP4] += 1;
+              break;
+
+            case 3:
+              if (temp_len < 4) break;
+              out_buf[UR(temp_len)] ^= 0xFF;
+              core_operator_cycles_puppet_v2[STAGE_FLIP8] += 1;
+              break;
+
+            case 4:
+              if (temp_len < 8) break;
+              *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF;
+              core_operator_cycles_puppet_v2[STAGE_FLIP16] += 1;
+              break;
+
+            case 5:
+              if (temp_len < 8) break;
+              *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF;
+              core_operator_cycles_puppet_v2[STAGE_FLIP32] += 1;
+              break;
+
+            case 6:
+              out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
+              out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
+              core_operator_cycles_puppet_v2[STAGE_ARITH8] += 1;
+              break;
+
+            case 7:
+              /* Randomly subtract from word, random endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
+
+                u32 pos = UR(temp_len - 1);
+                *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+
+              } else {
+
+                u32 pos = UR(temp_len - 1);
+                u16 num = 1 + UR(ARITH_MAX);
+                *(u16*)(out_buf + pos) =
+                    SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
 
+              }
 
-						case 12:
+              /* Randomly add to word, random endian. */
+              if (UR(2)) {
 
-							/* Just set a random byte to a random value. Because,
-							   why not. We use XOR with 1-255 to eliminate the
-							   possibility of a no-op. */
+                u32 pos = UR(temp_len - 1);
+                *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
 
-							out_buf[UR(temp_len)] ^= 1 + UR(255);
-							core_operator_cycles_puppet_v2[STAGE_RANDOMBYTE] += 1;
-							break;
+              } else {
 
+                u32 pos = UR(temp_len - 1);
+                u16 num = 1 + UR(ARITH_MAX);
+                *(u16*)(out_buf + pos) =
+                    SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
 
-						case 13: {
+              }
 
-							/* Delete bytes. We're making this a bit more likely
-							   than insertion (the next option) in hopes of keeping
-							   files reasonably small. */
+              core_operator_cycles_puppet_v2[STAGE_ARITH16] += 1;
+              break;
 
-							u32 del_from, del_len;
+            case 8:
+              /* Randomly subtract from dword, random endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
 
-							if (temp_len < 2) break;
+                u32 pos = UR(temp_len - 3);
+                *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
 
-							/* Don't delete too much. */
+              } else {
 
-							del_len = choose_block_len(temp_len - 1);
+                u32 pos = UR(temp_len - 3);
+                u32 num = 1 + UR(ARITH_MAX);
+                *(u32*)(out_buf + pos) =
+                    SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
 
-							del_from = UR(temp_len - del_len + 1);
+              }
 
-							memmove(out_buf + del_from, out_buf + del_from + del_len,
-								temp_len - del_from - del_len);
+              /* Randomly add to dword, random endian. */
+              if (UR(2)) {
 
-							temp_len -= del_len;
-							core_operator_cycles_puppet_v2[STAGE_DELETEBYTE] += 1;
-							break;
+                u32 pos = UR(temp_len - 3);
+                *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
 
-						}
+              } else {
 
-						case 14:
+                u32 pos = UR(temp_len - 3);
+                u32 num = 1 + UR(ARITH_MAX);
+                *(u32*)(out_buf + pos) =
+                    SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
 
-							if (temp_len + HAVOC_BLK_XL < MAX_FILE) {
+              }
 
-								/* Clone bytes (75%) or insert a block of constant bytes (25%). */
+              core_operator_cycles_puppet_v2[STAGE_ARITH32] += 1;
+              break;
 
-								u8  actually_clone = UR(4);
-								u32 clone_from, clone_to, clone_len;
-								u8* new_buf;
+            case 9:
+              /* Set byte to interesting value. */
+              if (temp_len < 4) break;
+              out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
+              core_operator_cycles_puppet_v2[STAGE_INTEREST8] += 1;
+              break;
 
-								if (actually_clone) {
+            case 10:
+              /* Set word to interesting value, randomly choosing endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
 
-									clone_len = choose_block_len(temp_len);
-									clone_from = UR(temp_len - clone_len + 1);
+                *(u16*)(out_buf + UR(temp_len - 1)) =
+                    interesting_16[UR(sizeof(interesting_16) >> 1)];
 
-								} else {
+              } else {
 
-									clone_len = choose_block_len(HAVOC_BLK_XL);
-									clone_from = 0;
+                *(u16*)(out_buf + UR(temp_len - 1)) =
+                    SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]);
 
-								}
+              }
 
-								clone_to = UR(temp_len);
+              core_operator_cycles_puppet_v2[STAGE_INTEREST16] += 1;
+              break;
 
-								new_buf = ck_alloc_nozero(temp_len + clone_len);
+            case 11:
+              /* Set dword to interesting value, randomly choosing endian. */
 
-								/* Head */
+              if (temp_len < 8) break;
 
-								memcpy(new_buf, out_buf, clone_to);
+              if (UR(2)) {
 
-								/* Inserted part */
+                *(u32*)(out_buf + UR(temp_len - 3)) =
+                    interesting_32[UR(sizeof(interesting_32) >> 2)];
 
-								if (actually_clone)
-									memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
-								else
-									memset(new_buf + clone_to,
-										UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
+              } else {
 
-								/* Tail */
-								memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
-									temp_len - clone_to);
+                *(u32*)(out_buf + UR(temp_len - 3)) =
+                    SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]);
 
-								ck_free(out_buf);
-								out_buf = new_buf;
-								temp_len += clone_len;
-								core_operator_cycles_puppet_v2[STAGE_Clone75] += 1;
-							}
+              }
 
-							break;
+              core_operator_cycles_puppet_v2[STAGE_INTEREST32] += 1;
+              break;
 
-						case 15: {
+            case 12:
 
-							/* Overwrite bytes with a randomly selected chunk (75%) or fixed
-							   bytes (25%). */
+              /* Just set a random byte to a random value. Because,
+                 why not. We use XOR with 1-255 to eliminate the
+                 possibility of a no-op. */
 
-							u32 copy_from, copy_to, copy_len;
+              out_buf[UR(temp_len)] ^= 1 + UR(255);
+              core_operator_cycles_puppet_v2[STAGE_RANDOMBYTE] += 1;
+              break;
 
-							if (temp_len < 2) break;
+            case 13: {
 
-							copy_len = choose_block_len(temp_len - 1);
+              /* Delete bytes. We're making this a bit more likely
+                 than insertion (the next option) in hopes of keeping
+                 files reasonably small. */
 
-							copy_from = UR(temp_len - copy_len + 1);
-							copy_to = UR(temp_len - copy_len + 1);
+              u32 del_from, del_len;
 
-							if (UR(4)) {
+              if (temp_len < 2) break;
 
-								if (copy_from != copy_to)
-									memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
+              /* Don't delete too much. */
 
-							}
-							else memset(out_buf + copy_to,
-								UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
-							core_operator_cycles_puppet_v2[STAGE_OverWrite75] += 1;
-							break;
+              del_len = choose_block_len(temp_len - 1);
 
-						}
+              del_from = UR(temp_len - del_len + 1);
 
+              memmove(out_buf + del_from, out_buf + del_from + del_len,
+                      temp_len - del_from - del_len);
 
-						}
+              temp_len -= del_len;
+              core_operator_cycles_puppet_v2[STAGE_DELETEBYTE] += 1;
+              break;
 
-					}
+            }
+
+            case 14:
+
+              if (temp_len + HAVOC_BLK_XL < MAX_FILE) {
 
-					tmp_core_time += 1;
+                /* Clone bytes (75%) or insert a block of constant bytes (25%).
+                 */
 
-					u64 temp_total_found = queued_paths + unique_crashes;
+                u8  actually_clone = UR(4);
+                u32 clone_from, clone_to, clone_len;
+                u8* new_buf;
 
-					if (common_fuzz_stuff(argv, out_buf, temp_len))
-						goto abandon_entry_puppet;
+                if (actually_clone) {
 
-					/* out_buf might have been mangled a bit, so let's restore it to its
-					   original size and shape. */
+                  clone_len = choose_block_len(temp_len);
+                  clone_from = UR(temp_len - clone_len + 1);
 
-					if (temp_len < len) out_buf = ck_realloc(out_buf, len);
-					temp_len = len;
-					memcpy(out_buf, in_buf, len);
+                } else {
 
-					/* If we're finding new stuff, let's run for a bit longer, limits
-					   permitting. */
+                  clone_len = choose_block_len(HAVOC_BLK_XL);
+                  clone_from = 0;
 
-					if (queued_paths != havoc_queued) {
+                }
 
-						if (perf_score <= havoc_max_mult * 100) {
-							stage_max *= 2;
-							perf_score *= 2;
-						}
+                clone_to = UR(temp_len);
 
-						havoc_queued = queued_paths;
+                new_buf = ck_alloc_nozero(temp_len + clone_len);
 
-					}
+                /* Head */
 
-					if (unlikely(queued_paths + unique_crashes > temp_total_found))
-					{
-						u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found;
-						total_puppet_find = total_puppet_find + temp_temp_puppet;
-						for (i = 0; i < 16; ++i)
-						{
-							if (core_operator_cycles_puppet_v2[i] > core_operator_cycles_puppet_v3[i])
-								core_operator_finds_puppet_v2[i] += temp_temp_puppet;
-						}
-					}
+                memcpy(new_buf, out_buf, clone_to);
 
-				}
+                /* Inserted part */
 
-				new_hit_cnt = queued_paths + unique_crashes;
+                if (actually_clone)
+                  memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
+                else
+                  memset(new_buf + clone_to,
+                         UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
 
+                /* Tail */
+                memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
+                       temp_len - clone_to);
+
+                ck_free(out_buf);
+                out_buf = new_buf;
+                temp_len += clone_len;
+                core_operator_cycles_puppet_v2[STAGE_Clone75] += 1;
+
+              }
+
+              break;
+
+            case 15: {
+
+              /* Overwrite bytes with a randomly selected chunk (75%) or fixed
+                 bytes (25%). */
+
+              u32 copy_from, copy_to, copy_len;
+
+              if (temp_len < 2) break;
+
+              copy_len = choose_block_len(temp_len - 1);
+
+              copy_from = UR(temp_len - copy_len + 1);
+              copy_to = UR(temp_len - copy_len + 1);
+
+              if (UR(4)) {
+
+                if (copy_from != copy_to)
+                  memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
+
+              } else
+
+                memset(out_buf + copy_to,
+                       UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
+              core_operator_cycles_puppet_v2[STAGE_OverWrite75] += 1;
+              break;
+
+            }
+
+          }
+
+        }
+
+        tmp_core_time += 1;
+
+        u64 temp_total_found = queued_paths + unique_crashes;
+
+        if (common_fuzz_stuff(argv, out_buf, temp_len))
+          goto abandon_entry_puppet;
+
+        /* out_buf might have been mangled a bit, so let's restore it to its
+           original size and shape. */
+
+        if (temp_len < len) out_buf = ck_realloc(out_buf, len);
+        temp_len = len;
+        memcpy(out_buf, in_buf, len);
+
+        /* If we're finding new stuff, let's run for a bit longer, limits
+           permitting. */
+
+        if (queued_paths != havoc_queued) {
+
+          if (perf_score <= havoc_max_mult * 100) {
+
+            stage_max *= 2;
+            perf_score *= 2;
+
+          }
+
+          havoc_queued = queued_paths;
+
+        }
+
+        if (unlikely(queued_paths + unique_crashes > temp_total_found)) {
+
+          u64 temp_temp_puppet =
+              queued_paths + unique_crashes - temp_total_found;
+          total_puppet_find = total_puppet_find + temp_temp_puppet;
+          for (i = 0; i < 16; ++i) {
+
+            if (core_operator_cycles_puppet_v2[i] >
+                core_operator_cycles_puppet_v3[i])
+              core_operator_finds_puppet_v2[i] += temp_temp_puppet;
+
+          }
+
+        }
+
+      }
+
+      new_hit_cnt = queued_paths + unique_crashes;
 
 #ifndef IGNORE_FINDS
 
-				/************
-				 * SPLICING *
-				 ************/
+      /************
+       * SPLICING *
+       ************/
+
+    retry_splicing_puppet:
 
+      if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet &&
+          queued_paths > 1 && queue_cur->len > 1) {
 
-			retry_splicing_puppet:
+        struct queue_entry* target;
+        u32                 tid, split_at;
+        u8*                 new_buf;
+        s32                 f_diff, l_diff;
 
+        /* First of all, if we've modified in_buf for havoc, let's clean that
+           up... */
 
+        if (in_buf != orig_in) {
+
+          ck_free(in_buf);
+          in_buf = orig_in;
+          len = queue_cur->len;
+
+        }
 
-				if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet &&
-					queued_paths > 1 && queue_cur->len > 1) {
+        /* Pick a random queue entry and seek to it. Don't splice with yourself.
+         */
 
-					struct queue_entry* target;
-					u32 tid, split_at;
-					u8* new_buf;
-					s32 f_diff, l_diff;
+        do {
 
-					/* First of all, if we've modified in_buf for havoc, let's clean that
-					   up... */
+          tid = UR(queued_paths);
+
+        } while (tid == current_entry);
+
+        splicing_with = tid;
+        target = queue;
+
+        while (tid >= 100) {
+
+          target = target->next_100;
+          tid -= 100;
+
+        }
 
-					if (in_buf != orig_in) {
-						ck_free(in_buf);
-						in_buf = orig_in;
-						len = queue_cur->len;
-					}
+        while (tid--)
+          target = target->next;
 
-					/* Pick a random queue entry and seek to it. Don't splice with yourself. */
+        /* Make sure that the target has a reasonable length. */
 
-					do { tid = UR(queued_paths); } while (tid == current_entry);
+        while (target && (target->len < 2 || target == queue_cur)) {
 
-					splicing_with = tid;
-					target = queue;
+          target = target->next;
+          ++splicing_with;
 
-					while (tid >= 100) { target = target->next_100; tid -= 100; }
-					while (tid--) target = target->next;
+        }
 
-					/* Make sure that the target has a reasonable length. */
+        if (!target) goto retry_splicing_puppet;
 
-					while (target && (target->len < 2 || target == queue_cur)) {
-						target = target->next;
-						++splicing_with;
-					}
+        /* Read the testcase into a new buffer. */
 
-					if (!target) goto retry_splicing_puppet;
+        fd = open(target->fname, O_RDONLY);
 
-					/* Read the testcase into a new buffer. */
+        if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
 
-					fd = open(target->fname, O_RDONLY);
+        new_buf = ck_alloc_nozero(target->len);
 
-					if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
+        ck_read(fd, new_buf, target->len, target->fname);
 
-					new_buf = ck_alloc_nozero(target->len);
+        close(fd);
 
-					ck_read(fd, new_buf, target->len, target->fname);
+        /* Find a suitable splicing location, somewhere between the first and
+           the last differing byte. Bail out if the difference is just a single
+           byte or so. */
 
-					close(fd);
+        locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
 
-					/* Find a suitable splicin g location, somewhere between the first and
-					   the last differing byte. Bail out if the difference is just a single
-					   byte or so. */
+        if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
 
-					locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
+          ck_free(new_buf);
+          goto retry_splicing_puppet;
 
-					if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
-						ck_free(new_buf);
-						goto retry_splicing_puppet;
-					}
+        }
 
-					/* Split somewhere between the first and last differing byte. */
+        /* Split somewhere between the first and last differing byte. */
 
-					split_at = f_diff + UR(l_diff - f_diff);
+        split_at = f_diff + UR(l_diff - f_diff);
 
-					/* Do the thing. */
+        /* Do the thing. */
 
-					len = target->len;
-					memcpy(new_buf, in_buf, split_at);
-					in_buf = new_buf;
-					ck_free(out_buf);
-					out_buf = ck_alloc_nozero(len);
-					memcpy(out_buf, in_buf, len);
+        len = target->len;
+        memcpy(new_buf, in_buf, split_at);
+        in_buf = new_buf;
+        ck_free(out_buf);
+        out_buf = ck_alloc_nozero(len);
+        memcpy(out_buf, in_buf, len);
 
-					goto havoc_stage_puppet;
+        goto havoc_stage_puppet;
 
-				}
+      }
 
 #endif /* !IGNORE_FINDS */
 
-				ret_val = 0;
-			abandon_entry:
-			abandon_entry_puppet:
+      ret_val = 0;
+    abandon_entry:
+    abandon_entry_puppet:
 
-				if (splice_cycle >= SPLICE_CYCLES_puppet)
-					SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low);
+      if (splice_cycle >= SPLICE_CYCLES_puppet)
+        SPLICE_CYCLES_puppet =
+            (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+             SPLICE_CYCLES_puppet_low);
 
+      splicing_with = -1;
 
-				splicing_with = -1;
+      munmap(orig_in, queue_cur->len);
 
+      if (in_buf != orig_in) ck_free(in_buf);
+      ck_free(out_buf);
+      ck_free(eff_map);
 
-				munmap(orig_in, queue_cur->len);
+      if (key_puppet == 1) {
 
-				if (in_buf != orig_in) ck_free(in_buf);
-				ck_free(out_buf);
-				ck_free(eff_map);
+        if (unlikely(queued_paths + unique_crashes >
+                     ((queued_paths + unique_crashes) * limit_time_bound +
+                      orig_hit_cnt_puppet))) {
 
+          key_puppet = 0;
+          cur_ms_lv = get_cur_time();
+          new_hit_cnt = queued_paths + unique_crashes;
+          orig_hit_cnt_puppet = 0;
+          last_limit_time_start = 0;
 
-				if (key_puppet == 1)
-				{
-					if (unlikely(queued_paths + unique_crashes > ((queued_paths + unique_crashes)*limit_time_bound + orig_hit_cnt_puppet)))
-					{
-						key_puppet = 0;
-						cur_ms_lv = get_cur_time();
-						new_hit_cnt = queued_paths + unique_crashes;
-						orig_hit_cnt_puppet = 0;
-						last_limit_time_start = 0;
-					}
-				}
+        }
 
+      }
+
+      if (unlikely(tmp_core_time > period_core)) {
 
-				if (unlikely(tmp_core_time > period_core))
-				{
-					total_pacemaker_time += tmp_core_time;
-					tmp_core_time = 0;
-					temp_puppet_find = total_puppet_find;
-					new_hit_cnt = queued_paths + unique_crashes;
+        total_pacemaker_time += tmp_core_time;
+        tmp_core_time = 0;
+        temp_puppet_find = total_puppet_find;
+        new_hit_cnt = queued_paths + unique_crashes;
 
-					u64 temp_stage_finds_puppet = 0;
-					for (i = 0; i < operator_num; ++i)
-					{
+        u64 temp_stage_finds_puppet = 0;
+        for (i = 0; i < operator_num; ++i) {
 
-						core_operator_finds_puppet[i] = core_operator_finds_puppet_v2[i];
-						core_operator_cycles_puppet[i] = core_operator_cycles_puppet_v2[i];
-						temp_stage_finds_puppet += core_operator_finds_puppet[i];
-					}
+          core_operator_finds_puppet[i] = core_operator_finds_puppet_v2[i];
+          core_operator_cycles_puppet[i] = core_operator_cycles_puppet_v2[i];
+          temp_stage_finds_puppet += core_operator_finds_puppet[i];
 
-					key_module = 2;
+        }
 
-					old_hit_count = new_hit_cnt;
-				}
-				return ret_val;
-			}
-		}
+        key_module = 2;
+
+        old_hit_count = new_hit_cnt;
+
+      }
+
+      return ret_val;
+
+    }
 
+  }
 
 #undef FLIP_BIT
 
 }
 
-
 void pso_updating(void) {
 
-	g_now += 1;
-	if (g_now > g_max) g_now = 0;
-	w_now = (w_init - w_end)*(g_max - g_now) / (g_max)+w_end;
-	int tmp_swarm, i, j;
-	u64 temp_operator_finds_puppet = 0;
-	for (i = 0; i < operator_num; ++i)
-	{
-		operator_finds_puppet[i] = core_operator_finds_puppet[i];
-
-		for (j = 0; j < swarm_num; ++j)
-		{
-			operator_finds_puppet[i] = operator_finds_puppet[i] + stage_finds_puppet[j][i];
-		}
-		temp_operator_finds_puppet = temp_operator_finds_puppet + operator_finds_puppet[i];
-	}
-
-	for (i = 0; i < operator_num; ++i)
-	{
-		if (operator_finds_puppet[i])
-			G_best[i] = (double)((double)(operator_finds_puppet[i]) / (double)(temp_operator_finds_puppet));
-	}
-
-	for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm)
-	{
-		double x_temp = 0.0;
-		for (i = 0; i < operator_num; ++i)
-		{
-			probability_now[tmp_swarm][i] = 0.0;
-			v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
-			x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
-			if (x_now[tmp_swarm][i] > v_max)
-				x_now[tmp_swarm][i] = v_max;
-			else if (x_now[tmp_swarm][i] < v_min)
-				x_now[tmp_swarm][i] = v_min;
-			x_temp += x_now[tmp_swarm][i];
-		}
-
-		for (i = 0; i < operator_num; ++i)
-		{
-			x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
-			if (likely(i != 0))
-				probability_now[tmp_swarm][i] = probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
-			else
-				probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
-		}
-		if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || probability_now[tmp_swarm][operator_num - 1] > 1.01) FATAL("ERROR probability");
-	}
-	swarm_now = 0;
-	key_module = 0;
-}
+  g_now += 1;
+  if (g_now > g_max) g_now = 0;
+  w_now = (w_init - w_end) * (g_max - g_now) / (g_max) + w_end;
+  int tmp_swarm, i, j;
+  u64 temp_operator_finds_puppet = 0;
+  for (i = 0; i < operator_num; ++i) {
+
+    operator_finds_puppet[i] = core_operator_finds_puppet[i];
+
+    for (j = 0; j < swarm_num; ++j) {
+
+      operator_finds_puppet[i] =
+          operator_finds_puppet[i] + stage_finds_puppet[j][i];
+
+    }
+
+    temp_operator_finds_puppet =
+        temp_operator_finds_puppet + operator_finds_puppet[i];
+
+  }
+
+  for (i = 0; i < operator_num; ++i) {
+
+    if (operator_finds_puppet[i])
+      G_best[i] = (double)((double)(operator_finds_puppet[i]) /
+                           (double)(temp_operator_finds_puppet));
 
+  }
+
+  for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) {
+
+    double x_temp = 0.0;
+    for (i = 0; i < operator_num; ++i) {
+
+      probability_now[tmp_swarm][i] = 0.0;
+      v_now[tmp_swarm][i] =
+          w_now * v_now[tmp_swarm][i] +
+          RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) +
+          RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
+      x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
+      if (x_now[tmp_swarm][i] > v_max)
+        x_now[tmp_swarm][i] = v_max;
+      else if (x_now[tmp_swarm][i] < v_min)
+        x_now[tmp_swarm][i] = v_min;
+      x_temp += x_now[tmp_swarm][i];
+
+    }
+
+    for (i = 0; i < operator_num; ++i) {
+
+      x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
+      if (likely(i != 0))
+        probability_now[tmp_swarm][i] =
+            probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
+      else
+        probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
+
+    }
+
+    if (probability_now[tmp_swarm][operator_num - 1] < 0.99 ||
+        probability_now[tmp_swarm][operator_num - 1] > 1.01)
+      FATAL("ERROR probability");
+
+  }
+
+  swarm_now = 0;
+  key_module = 0;
+
+}
 
 /* larger change for MOpt implementation: the original fuzz_one was renamed
    to fuzz_one_original. All documentation references to fuzz_one therefore
    mean fuzz_one_original */
 
 u8 fuzz_one(char** argv) {
-	int key_val_lv = 0;
-	if (limit_time_sig == 0) {
-		key_val_lv = fuzz_one_original(argv);
-	} else {
-		if (key_module == 0)
-			key_val_lv = pilot_fuzzing(argv);
-		else if (key_module == 1)
-			key_val_lv = core_fuzzing(argv);
-		else if (key_module == 2)
-			pso_updating();
-	}
-
-	return key_val_lv;
+
+  int key_val_lv = 0;
+  if (limit_time_sig == 0) {
+
+    key_val_lv = fuzz_one_original(argv);
+
+  } else {
+
+    if (key_module == 0)
+      key_val_lv = pilot_fuzzing(argv);
+    else if (key_module == 1)
+      key_val_lv = core_fuzzing(argv);
+    else if (key_module == 2)
+      pso_updating();
+
+  }
+
+  return key_val_lv;
+
 }
 
diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c
index ed158e6c..e22291b5 100644
--- a/src/afl-fuzz-python.c
+++ b/src/afl-fuzz-python.c
@@ -26,45 +26,62 @@
 #ifdef USE_PYTHON
 
 int init_py() {
+
   Py_Initialize();
   u8* module_name = getenv("AFL_PYTHON_MODULE");
 
   if (module_name) {
+
     PyObject* py_name = PyString_FromString(module_name);
 
     py_module = PyImport_Import(py_name);
     Py_DECREF(py_name);
 
     if (py_module != NULL) {
+
       u8 py_notrim = 0;
       py_functions[PY_FUNC_INIT] = PyObject_GetAttrString(py_module, "init");
       py_functions[PY_FUNC_FUZZ] = PyObject_GetAttrString(py_module, "fuzz");
-      py_functions[PY_FUNC_INIT_TRIM] = PyObject_GetAttrString(py_module, "init_trim");
-      py_functions[PY_FUNC_POST_TRIM] = PyObject_GetAttrString(py_module, "post_trim");
+      py_functions[PY_FUNC_INIT_TRIM] =
+          PyObject_GetAttrString(py_module, "init_trim");
+      py_functions[PY_FUNC_POST_TRIM] =
+          PyObject_GetAttrString(py_module, "post_trim");
       py_functions[PY_FUNC_TRIM] = PyObject_GetAttrString(py_module, "trim");
 
       for (u8 py_idx = 0; py_idx < PY_FUNC_COUNT; ++py_idx) {
+
         if (!py_functions[py_idx] || !PyCallable_Check(py_functions[py_idx])) {
+
           if (py_idx >= PY_FUNC_INIT_TRIM && py_idx <= PY_FUNC_TRIM) {
+
             // Implementing the trim API is optional for now
-            if (PyErr_Occurred())
-              PyErr_Print();
+            if (PyErr_Occurred()) PyErr_Print();
             py_notrim = 1;
+
           } else {
-            if (PyErr_Occurred())
-              PyErr_Print();
-            fprintf(stderr, "Cannot find/call function with index %d in external Python module.\n", py_idx);
+
+            if (PyErr_Occurred()) PyErr_Print();
+            fprintf(stderr,
+                    "Cannot find/call function with index %d in external "
+                    "Python module.\n",
+                    py_idx);
             return 1;
+
           }
+
         }
 
       }
 
       if (py_notrim) {
+
         py_functions[PY_FUNC_INIT_TRIM] = NULL;
         py_functions[PY_FUNC_POST_TRIM] = NULL;
         py_functions[PY_FUNC_TRIM] = NULL;
-        WARNF("Python module does not implement trim API, standard trimming will be used.");
+        WARNF(
+            "Python module does not implement trim API, standard trimming will "
+            "be used.");
+
       }
 
       PyObject *py_args, *py_value;
@@ -73,9 +90,11 @@ int init_py() {
       py_args = PyTuple_New(1);
       py_value = PyInt_FromLong(UR(0xFFFFFFFF));
       if (!py_value) {
+
         Py_DECREF(py_args);
         fprintf(stderr, "Cannot convert argument\n");
         return 1;
+
       }
 
       PyTuple_SetItem(py_args, 0, py_value);
@@ -85,51 +104,68 @@ int init_py() {
       Py_DECREF(py_args);
 
       if (py_value == NULL) {
+
         PyErr_Print();
-        fprintf(stderr,"Call failed\n");
+        fprintf(stderr, "Call failed\n");
         return 1;
+
       }
+
     } else {
+
       PyErr_Print();
       fprintf(stderr, "Failed to load \"%s\"\n", module_name);
       return 1;
+
     }
+
   }
 
   return 0;
+
 }
 
 void finalize_py() {
+
   if (py_module != NULL) {
+
     u32 i;
     for (i = 0; i < PY_FUNC_COUNT; ++i)
       Py_XDECREF(py_functions[i]);
 
     Py_DECREF(py_module);
+
   }
 
   Py_Finalize();
+
 }
 
-void fuzz_py(char* buf, size_t buflen, char* add_buf, size_t add_buflen, char** ret, size_t* retlen) {
+void fuzz_py(char* buf, size_t buflen, char* add_buf, size_t add_buflen,
+             char** ret, size_t* retlen) {
 
   if (py_module != NULL) {
+
     PyObject *py_args, *py_value;
     py_args = PyTuple_New(2);
     py_value = PyByteArray_FromStringAndSize(buf, buflen);
     if (!py_value) {
+
       Py_DECREF(py_args);
       fprintf(stderr, "Cannot convert argument\n");
       return;
+
     }
 
     PyTuple_SetItem(py_args, 0, py_value);
 
     py_value = PyByteArray_FromStringAndSize(add_buf, add_buflen);
     if (!py_value) {
+
       Py_DECREF(py_args);
       fprintf(stderr, "Cannot convert argument\n");
       return;
+
     }
 
     PyTuple_SetItem(py_args, 1, py_value);
@@ -139,26 +175,35 @@ void fuzz_py(char* buf, size_t buflen, char* add_buf, size_t add_buflen, char**
     Py_DECREF(py_args);
 
     if (py_value != NULL) {
+
       *retlen = PyByteArray_Size(py_value);
       *ret = malloc(*retlen);
       memcpy(*ret, PyByteArray_AsString(py_value), *retlen);
       Py_DECREF(py_value);
+
     } else {
+
       PyErr_Print();
-      fprintf(stderr,"Call failed\n");
+      fprintf(stderr, "Call failed\n");
       return;
+
     }
+
   }
+
 }
 
 u32 init_trim_py(char* buf, size_t buflen) {
+
   PyObject *py_args, *py_value;
 
   py_args = PyTuple_New(1);
   py_value = PyByteArray_FromStringAndSize(buf, buflen);
   if (!py_value) {
+
     Py_DECREF(py_args);
     FATAL("Failed to convert arguments");
+
   }
 
   PyTuple_SetItem(py_args, 0, py_value);
@@ -167,24 +212,32 @@ u32 init_trim_py(char* buf, size_t buflen) {
   Py_DECREF(py_args);
 
   if (py_value != NULL) {
+
     u32 retcnt = PyInt_AsLong(py_value);
     Py_DECREF(py_value);
     return retcnt;
+
   } else {
+
     PyErr_Print();
     FATAL("Call failed");
+
   }
+
 }
 
 u32 post_trim_py(char success) {
+
   PyObject *py_args, *py_value;
 
   py_args = PyTuple_New(1);
 
   py_value = PyBool_FromLong(success);
   if (!py_value) {
+
     Py_DECREF(py_args);
     FATAL("Failed to convert arguments");
+
   }
 
   PyTuple_SetItem(py_args, 0, py_value);
@@ -193,16 +246,22 @@ u32 post_trim_py(char success) {
   Py_DECREF(py_args);
 
   if (py_value != NULL) {
+
     u32 retcnt = PyInt_AsLong(py_value);
     Py_DECREF(py_value);
     return retcnt;
+
   } else {
+
     PyErr_Print();
     FATAL("Call failed");
+
   }
+
 }
 
 void trim_py(char** ret, size_t* retlen) {
+
   PyObject *py_args, *py_value;
 
   py_args = PyTuple_New(0);
@@ -210,14 +269,19 @@ void trim_py(char** ret, size_t* retlen) {
   Py_DECREF(py_args);
 
   if (py_value != NULL) {
+
     *retlen = PyByteArray_Size(py_value);
     *ret = malloc(*retlen);
     memcpy(*ret, PyByteArray_AsString(py_value), *retlen);
     Py_DECREF(py_value);
+
   } else {
+
     PyErr_Print();
     FATAL("Call failed");
+
   }
+
 }
 
 u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) {
@@ -237,20 +301,24 @@ u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) {
   stage_max = init_trim_py(in_buf, q->len);
 
   if (not_on_tty && debug)
-    SAYF("[Python Trimming] START: Max %d iterations, %u bytes", stage_max, q->len);
+    SAYF("[Python Trimming] START: Max %d iterations, %u bytes", stage_max,
+         q->len);
+
+  while (stage_cur < stage_max) {
 
-  while(stage_cur < stage_max) {
     sprintf(tmp, "ptrim %s", DI(trim_exec));
 
     u32 cksum;
 
-    char* retbuf = NULL;
+    char*  retbuf = NULL;
     size_t retlen = 0;
 
     trim_py(&retbuf, &retlen);
 
     if (retlen > orig_len)
-      FATAL("Trimmed data returned by Python module is larger than original data");
+      FATAL(
+          "Trimmed data returned by Python module is larger than original "
+          "data");
 
     write_to_testcase(retbuf, retlen);
 
@@ -280,17 +348,23 @@ u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) {
       stage_cur = post_trim_py(1);
 
       if (not_on_tty && debug)
-        SAYF("[Python Trimming] SUCCESS: %d/%d iterations (now at %u bytes)", stage_cur, stage_max, q->len);
+        SAYF("[Python Trimming] SUCCESS: %d/%d iterations (now at %u bytes)",
+             stage_cur, stage_max, q->len);
+
     } else {
+
       /* Tell the Python module that the trimming was unsuccessful */
       stage_cur = post_trim_py(0);
       if (not_on_tty && debug)
-        SAYF("[Python Trimming] FAILURE: %d/%d iterations", stage_cur, stage_max);
+        SAYF("[Python Trimming] FAILURE: %d/%d iterations", stage_cur,
+             stage_max);
+
     }
 
-      /* Since this can be slow, update the screen every now and then. */
+    /* Since this can be slow, update the screen every now and then. */
+
+    if (!(trim_exec++ % stats_update_freq)) show_stats();
 
-      if (!(trim_exec++ % stats_update_freq)) show_stats();
   }
 
   if (not_on_tty && debug)
@@ -303,7 +377,7 @@ u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) {
 
     s32 fd;
 
-    unlink(q->fname); /* ignore errors */
+    unlink(q->fname);                                      /* ignore errors */
 
     fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);
 
@@ -317,8 +391,6 @@ u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) {
 
   }
 
-
-
 abort_trimming:
 
   bytes_trim_out += q->len;
@@ -327,3 +399,4 @@ abort_trimming:
 }
 
 #endif /* USE_PYTHON */
+
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index c1547b48..22a9ccb0 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -43,7 +43,6 @@ void mark_as_det_done(struct queue_entry* q) {
 
 }
 
-
 /* Mark as variable. Create symlinks if possible to make it easier to examine
    the files. */
 
@@ -69,7 +68,6 @@ void mark_as_variable(struct queue_entry* q) {
 
 }
 
-
 /* Mark / unmark as redundant (edge-only). This is not used for restoring state,
    but may be useful for post-processing datasets. */
 
@@ -102,18 +100,17 @@ void mark_as_redundant(struct queue_entry* q, u8 state) {
 
 }
 
-
 /* Append new test case to the queue. */
 
 void add_to_queue(u8* fname, u32 len, u8 passed_det) {
 
   struct queue_entry* q = ck_alloc(sizeof(struct queue_entry));
 
-  q->fname        = fname;
-  q->len          = len;
-  q->depth        = cur_depth + 1;
-  q->passed_det   = passed_det;
-  q->n_fuzz       = 1;
+  q->fname = fname;
+  q->len = len;
+  q->depth = cur_depth + 1;
+  q->passed_det = passed_det;
+  q->n_fuzz = 1;
 
   if (q->depth > max_depth) max_depth = q->depth;
 
@@ -122,7 +119,9 @@ void add_to_queue(u8* fname, u32 len, u8 passed_det) {
     queue_top->next = q;
     queue_top = q;
 
-  } else q_prev100 = queue = queue_top = q;
+  } else
+
+    q_prev100 = queue = queue_top = q;
 
   ++queued_paths;
   ++pending_not_fuzzed;
@@ -140,7 +139,6 @@ void add_to_queue(u8* fname, u32 len, u8 passed_det) {
 
 }
 
-
 /* Destroy the entire queue. */
 
 void destroy_queue(void) {
@@ -159,7 +157,6 @@ void destroy_queue(void) {
 
 }
 
-
 /* When we bump into a new path, we call this to see if the path appears
    more "favorable" than any of the existing ones. The purpose of the
    "favorables" is to have a minimal set of paths that trigger all the bits
@@ -170,12 +167,11 @@ void destroy_queue(void) {
    for every byte in the bitmap. We win that slot if there is no previous
    contender, or if the contender has a more favorable speed x size factor. */
 
-
 void update_bitmap_score(struct queue_entry* q) {
 
   u32 i;
   u64 fav_factor = q->exec_us * q->len;
-  u64 fuzz_p2      = next_p2 (q->n_fuzz);
+  u64 fuzz_p2 = next_p2(q->n_fuzz);
 
   /* For every byte set in trace_bits[], see if there is a previous winner,
      and how it compares to us. */
@@ -184,47 +180,53 @@ void update_bitmap_score(struct queue_entry* q) {
 
     if (trace_bits[i]) {
 
-       if (top_rated[i]) {
+      if (top_rated[i]) {
 
-         /* Faster-executing or smaller test cases are favored. */
-         u64 top_rated_fuzz_p2    = next_p2 (top_rated[i]->n_fuzz);
-         u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len;
+        /* Faster-executing or smaller test cases are favored. */
+        u64 top_rated_fuzz_p2 = next_p2(top_rated[i]->n_fuzz);
+        u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len;
 
-         if (fuzz_p2 > top_rated_fuzz_p2) {
-           continue;
-         } else if (fuzz_p2 == top_rated_fuzz_p2) {
-           if (fav_factor > top_rated_fav_factor)
-             continue;
-         }
+        if (fuzz_p2 > top_rated_fuzz_p2) {
 
-         if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue;
+          continue;
 
-         /* Looks like we're going to win. Decrease ref count for the
-            previous winner, discard its trace_bits[] if necessary. */
+        } else if (fuzz_p2 == top_rated_fuzz_p2) {
 
-         if (!--top_rated[i]->tc_ref) {
-           ck_free(top_rated[i]->trace_mini);
-           top_rated[i]->trace_mini = 0;
-         }
+          if (fav_factor > top_rated_fav_factor) continue;
 
-       }
+        }
 
-       /* Insert ourselves as the new winner. */
+        if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue;
 
-       top_rated[i] = q;
-       ++q->tc_ref;
+        /* Looks like we're going to win. Decrease ref count for the
+           previous winner, discard its trace_bits[] if necessary. */
 
-       if (!q->trace_mini) {
-         q->trace_mini = ck_alloc(MAP_SIZE >> 3);
-         minimize_bits(q->trace_mini, trace_bits);
-       }
+        if (!--top_rated[i]->tc_ref) {
 
-       score_changed = 1;
+          ck_free(top_rated[i]->trace_mini);
+          top_rated[i]->trace_mini = 0;
 
-     }
+        }
 
-}
+      }
+
+      /* Insert ourselves as the new winner. */
+
+      top_rated[i] = q;
+      ++q->tc_ref;
+
+      if (!q->trace_mini) {
 
+        q->trace_mini = ck_alloc(MAP_SIZE >> 3);
+        minimize_bits(q->trace_mini, trace_bits);
+
+      }
+
+      score_changed = 1;
+
+    }
+
+}
 
 /* The second part of the mechanism discussed above is a routine that
    goes over top_rated[] entries, and then sequentially grabs winners for
@@ -235,8 +237,8 @@ void update_bitmap_score(struct queue_entry* q) {
 void cull_queue(void) {
 
   struct queue_entry* q;
-  static u8 temp_v[MAP_SIZE >> 3];
-  u32 i;
+  static u8           temp_v[MAP_SIZE >> 3];
+  u32                 i;
 
   if (dumb_mode || !score_changed) return;
 
@@ -244,14 +246,16 @@ void cull_queue(void) {
 
   memset(temp_v, 255, MAP_SIZE >> 3);
 
-  queued_favored  = 0;
+  queued_favored = 0;
   pending_favored = 0;
 
   q = queue;
 
   while (q) {
+
     q->favored = 0;
     q = q->next;
+
   }
 
   /* Let's see if anything in the bitmap isn't captured in temp_v.
@@ -264,27 +268,29 @@ void cull_queue(void) {
 
       /* Remove all bits belonging to the current entry from temp_v. */
 
-      while (j--) 
+      while (j--)
         if (top_rated[i]->trace_mini[j])
           temp_v[j] &= ~top_rated[i]->trace_mini[j];
 
       top_rated[i]->favored = 1;
       ++queued_favored;
 
-      if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed) ++pending_favored;
+      if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed)
+        ++pending_favored;
 
     }
 
   q = queue;
 
   while (q) {
+
     mark_as_redundant(q, !q->favored);
     q = q->next;
+
   }
 
 }
 
-
 /* Calculate case desirability score to adjust the length of havoc fuzzing.
    A helper function for fuzz_one(). Maybe some of these constants should
    go into config.h. */
@@ -305,34 +311,51 @@ u32 calculate_score(struct queue_entry* q) {
   // Longer execution time means longer work on the input, the deeper in
   // coverage, the better the fuzzing, right? -mh
 
-  if (q->exec_us * 0.1 > avg_exec_us) perf_score = 10;
-  else if (q->exec_us * 0.25 > avg_exec_us) perf_score = 25;
-  else if (q->exec_us * 0.5 > avg_exec_us) perf_score = 50;
-  else if (q->exec_us * 0.75 > avg_exec_us) perf_score = 75;
-  else if (q->exec_us * 4 < avg_exec_us) perf_score = 300;
-  else if (q->exec_us * 3 < avg_exec_us) perf_score = 200;
-  else if (q->exec_us * 2 < avg_exec_us) perf_score = 150;
+  if (q->exec_us * 0.1 > avg_exec_us)
+    perf_score = 10;
+  else if (q->exec_us * 0.25 > avg_exec_us)
+    perf_score = 25;
+  else if (q->exec_us * 0.5 > avg_exec_us)
+    perf_score = 50;
+  else if (q->exec_us * 0.75 > avg_exec_us)
+    perf_score = 75;
+  else if (q->exec_us * 4 < avg_exec_us)
+    perf_score = 300;
+  else if (q->exec_us * 3 < avg_exec_us)
+    perf_score = 200;
+  else if (q->exec_us * 2 < avg_exec_us)
+    perf_score = 150;
 
   /* Adjust score based on bitmap size. The working theory is that better
      coverage translates to better targets. Multiplier from 0.25x to 3x. */
 
-  if (q->bitmap_size * 0.3 > avg_bitmap_size) perf_score *= 3;
-  else if (q->bitmap_size * 0.5 > avg_bitmap_size) perf_score *= 2;
-  else if (q->bitmap_size * 0.75 > avg_bitmap_size) perf_score *= 1.5;
-  else if (q->bitmap_size * 3 < avg_bitmap_size) perf_score *= 0.25;
-  else if (q->bitmap_size * 2 < avg_bitmap_size) perf_score *= 0.5;
-  else if (q->bitmap_size * 1.5 < avg_bitmap_size) perf_score *= 0.75;
+  if (q->bitmap_size * 0.3 > avg_bitmap_size)
+    perf_score *= 3;
+  else if (q->bitmap_size * 0.5 > avg_bitmap_size)
+    perf_score *= 2;
+  else if (q->bitmap_size * 0.75 > avg_bitmap_size)
+    perf_score *= 1.5;
+  else if (q->bitmap_size * 3 < avg_bitmap_size)
+    perf_score *= 0.25;
+  else if (q->bitmap_size * 2 < avg_bitmap_size)
+    perf_score *= 0.5;
+  else if (q->bitmap_size * 1.5 < avg_bitmap_size)
+    perf_score *= 0.75;
 
   /* Adjust score based on handicap. Handicap is proportional to how late
      in the game we learned about this path. Latecomers are allowed to run
      for a bit longer until they catch up with the rest. */
 
   if (q->handicap >= 4) {
+
     perf_score *= 4;
     q->handicap -= 4;
+
   } else if (q->handicap) {
+
     perf_score *= 2;
     --q->handicap;
+
   }
 
   /* Final adjustment based on input depth, under the assumption that fuzzing
@@ -341,11 +364,11 @@ u32 calculate_score(struct queue_entry* q) {
 
   switch (q->depth) {
 
-    case 0 ... 3:   break;
-    case 4 ... 7:   perf_score *= 2; break;
-    case 8 ... 13:  perf_score *= 3; break;
+    case 0 ... 3: break;
+    case 4 ... 7: perf_score *= 2; break;
+    case 8 ... 13: perf_score *= 3; break;
     case 14 ... 25: perf_score *= 4; break;
-    default:        perf_score *= 5;
+    default: perf_score *= 5;
 
   }
 
@@ -357,61 +380,69 @@ u32 calculate_score(struct queue_entry* q) {
 
   switch (schedule) {
 
-    case EXPLORE:
-      break;
+    case EXPLORE: break;
 
-    case EXPLOIT:
-      factor = MAX_FACTOR;
-      break;
+    case EXPLOIT: factor = MAX_FACTOR; break;
 
     case COE:
       fuzz_total = 0;
       n_paths = 0;
 
-      struct queue_entry *queue_it = queue;
+      struct queue_entry* queue_it = queue;
       while (queue_it) {
+
         fuzz_total += queue_it->n_fuzz;
-        n_paths ++;
+        n_paths++;
         queue_it = queue_it->next;
+
       }
 
       fuzz_mu = fuzz_total / n_paths;
       if (fuzz <= fuzz_mu) {
+
         if (q->fuzz_level < 16)
-          factor = ((u32) (1 << q->fuzz_level));
+          factor = ((u32)(1 << q->fuzz_level));
         else
           factor = MAX_FACTOR;
+
       } else {
+
         factor = 0;
+
       }
+
       break;
 
     case FAST:
       if (q->fuzz_level < 16) {
-         factor = ((u32) (1 << q->fuzz_level)) / (fuzz == 0 ? 1 : fuzz);
+
+        factor = ((u32)(1 << q->fuzz_level)) / (fuzz == 0 ? 1 : fuzz);
+
       } else
-        factor = MAX_FACTOR / (fuzz == 0 ? 1 : next_p2 (fuzz));
-      break;
 
-    case LIN:
-      factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz);
+        factor = MAX_FACTOR / (fuzz == 0 ? 1 : next_p2(fuzz));
       break;
 
+    case LIN: factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz); break;
+
     case QUAD:
       factor = q->fuzz_level * q->fuzz_level / (fuzz == 0 ? 1 : fuzz);
       break;
 
-    default:
-      PFATAL ("Unknown Power Schedule");
+    default: PFATAL("Unknown Power Schedule");
+
   }
-  if (factor > MAX_FACTOR)
-    factor = MAX_FACTOR;
+
+  if (factor > MAX_FACTOR) factor = MAX_FACTOR;
 
   perf_score *= factor / POWER_BETA;
 
   // MOpt mode
-  if (limit_time_sig != 0 && max_depth - q->depth < 3) perf_score *= 2;
-  else if (perf_score < 1) perf_score = 1; // Add a lower bound to AFLFast's energy assignment strategies
+  if (limit_time_sig != 0 && max_depth - q->depth < 3)
+    perf_score *= 2;
+  else if (perf_score < 1)
+    perf_score =
+        1;  // Add a lower bound to AFLFast's energy assignment strategies
 
   /* Make sure that we don't go over limit. */
 
@@ -420,3 +451,4 @@ u32 calculate_score(struct queue_entry* q) {
   return perf_score;
 
 }
+
diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c
index c14ecc87..4093d991 100644
--- a/src/afl-fuzz-run.c
+++ b/src/afl-fuzz-run.c
@@ -28,8 +28,8 @@
 u8 run_target(char** argv, u32 timeout) {
 
   static struct itimerval it;
-  static u32 prev_timed_out = 0;
-  static u64 exec_ms = 0;
+  static u32              prev_timed_out = 0;
+  static u64              exec_ms = 0;
 
   int status = 0;
   u32 tb4;
@@ -45,7 +45,7 @@ u8 run_target(char** argv, u32 timeout) {
 
   /* If we're running in "dumb" mode, we can't rely on the fork server
      logic compiled into the target program, so we will just keep calling
-     execve(). There is a bit of code duplication between here and 
+     execve(). There is a bit of code duplication between here and
      init_forkserver(), but c'est la vie. */
 
   if (dumb_mode == 1 || no_forkserver) {
@@ -64,11 +64,11 @@ u8 run_target(char** argv, u32 timeout) {
 
 #ifdef RLIMIT_AS
 
-        setrlimit(RLIMIT_AS, &r); /* Ignore errors */
+        setrlimit(RLIMIT_AS, &r);                          /* Ignore errors */
 
 #else
 
-        setrlimit(RLIMIT_DATA, &r); /* Ignore errors */
+        setrlimit(RLIMIT_DATA, &r);                        /* Ignore errors */
 
 #endif /* ^RLIMIT_AS */
 
@@ -76,7 +76,7 @@ u8 run_target(char** argv, u32 timeout) {
 
       r.rlim_max = r.rlim_cur = 0;
 
-      setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
+      setrlimit(RLIMIT_CORE, &r);                          /* Ignore errors */
 
       /* Isolate the process and configure standard descriptors. If out_file is
          specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
@@ -108,10 +108,12 @@ u8 run_target(char** argv, u32 timeout) {
 
       /* Set sane defaults for ASAN if nothing else specified. */
 
-      setenv("ASAN_OPTIONS", "abort_on_error=1:"
-                             "detect_leaks=0:"
-                             "symbolize=0:"
-                             "allocator_may_return_null=1", 0);
+      setenv("ASAN_OPTIONS",
+             "abort_on_error=1:"
+             "detect_leaks=0:"
+             "symbolize=0:"
+             "allocator_may_return_null=1",
+             0);
 
       setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
                              "symbolize=0:"
@@ -152,7 +154,8 @@ u8 run_target(char** argv, u32 timeout) {
 
   }
 
-  /* Configure timeout, as requested by user, then wait for child to terminate. */
+  /* Configure timeout, as requested by user, then wait for child to terminate.
+   */
 
   it.it_value.tv_sec = (timeout / 1000);
   it.it_value.tv_usec = (timeout % 1000) * 1000;
@@ -179,9 +182,10 @@ u8 run_target(char** argv, u32 timeout) {
   }
 
   if (!WIFSTOPPED(status)) child_pid = 0;
-  
+
   getitimer(ITIMER_REAL, &it);
-  exec_ms = (u64) timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000);
+  exec_ms =
+      (u64)timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000);
   if (slowest_exec_ms < exec_ms) slowest_exec_ms = exec_ms;
 
   it.it_value.tv_sec = 0;
@@ -223,8 +227,10 @@ u8 run_target(char** argv, u32 timeout) {
      must use a special exit code. */
 
   if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
+
     kill_signal = 0;
     return FAULT_CRASH;
+
   }
 
   if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG)
@@ -234,7 +240,6 @@ u8 run_target(char** argv, u32 timeout) {
 
 }
 
-
 /* Write modified data to file for testing. If out_file is set, the old file
    is unlinked and a new one is created. Otherwise, out_fd is rewound and
    truncated. */
@@ -245,20 +250,26 @@ void write_to_testcase(void* mem, u32 len) {
 
   if (out_file) {
 
-    unlink(out_file); /* Ignore errors. */
+    unlink(out_file);                                     /* Ignore errors. */
 
     fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
 
     if (fd < 0) PFATAL("Unable to create '%s'", out_file);
 
-  } else lseek(fd, 0, SEEK_SET);
+  } else
+
+    lseek(fd, 0, SEEK_SET);
 
   if (pre_save_handler) {
-    u8* new_data;
+
+    u8*    new_data;
     size_t new_size = pre_save_handler(mem, len, &new_data);
     ck_write(fd, new_data, new_size, out_file);
+
   } else {
+
     ck_write(fd, mem, len, out_file);
+
   }
 
   if (!out_file) {
@@ -266,10 +277,11 @@ void write_to_testcase(void* mem, u32 len) {
     if (ftruncate(fd, len)) PFATAL("ftruncate() failed");
     lseek(fd, 0, SEEK_SET);
 
-  } else close(fd);
+  } else
 
-}
+    close(fd);
 
+}
 
 /* The same, but with an adjustable gap. Used for trimming. */
 
@@ -280,17 +292,19 @@ void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
 
   if (out_file) {
 
-    unlink(out_file); /* Ignore errors. */
+    unlink(out_file);                                     /* Ignore errors. */
 
     fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
 
     if (fd < 0) PFATAL("Unable to create '%s'", out_file);
 
-  } else lseek(fd, 0, SEEK_SET);
+  } else
+
+    lseek(fd, 0, SEEK_SET);
 
   if (skip_at) ck_write(fd, mem, skip_at, out_file);
 
-  u8 *memu8 = mem;
+  u8* memu8 = mem;
   if (tail_len) ck_write(fd, memu8 + skip_at + skip_len, tail_len, out_file);
 
   if (!out_file) {
@@ -298,22 +312,23 @@ void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
     if (ftruncate(fd, len - skip_len)) PFATAL("ftruncate() failed");
     lseek(fd, 0, SEEK_SET);
 
-  } else close(fd);
+  } else
 
-}
+    close(fd);
 
+}
 
 /* Calibrate a new test case. This is done when processing the input directory
    to warn about flaky or otherwise problematic test cases early on; and when
    new paths are discovered to detect variable behavior and so on. */
 
-u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
-                         u32 handicap, u8 from_queue) {
+u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 handicap,
+                  u8 from_queue) {
 
   static u8 first_trace[MAP_SIZE];
 
-  u8  fault = 0, new_bits = 0, var_detected = 0,
-      first_run = (q->exec_cksum == 0);
+  u8 fault = 0, new_bits = 0, var_detected = 0,
+     first_run = (q->exec_cksum == 0);
 
   u64 start_us, stop_us;
 
@@ -326,19 +341,18 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
      to intermittent latency. */
 
   if (!from_queue || resuming_fuzz)
-    use_tmout = MAX(exec_tmout + CAL_TMOUT_ADD,
-                    exec_tmout * CAL_TMOUT_PERC / 100);
+    use_tmout =
+        MAX(exec_tmout + CAL_TMOUT_ADD, exec_tmout * CAL_TMOUT_PERC / 100);
 
   ++q->cal_failed;
 
   stage_name = "calibration";
-  stage_max  = fast_cal ? 3 : CAL_CYCLES;
+  stage_max = fast_cal ? 3 : CAL_CYCLES;
 
   /* Make sure the forkserver is up before we do anything, and let's not
      count its spin-up time toward binary calibration. */
 
-  if (dumb_mode != 1 && !no_forkserver && !forksrv_pid)
-    init_forkserver(argv);
+  if (dumb_mode != 1 && !no_forkserver && !forksrv_pid) init_forkserver(argv);
 
   if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE);
 
@@ -360,8 +374,10 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
     if (stop_soon || fault != crash_mode) goto abort_calibration;
 
     if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) {
+
       fault = FAULT_NOINST;
       goto abort_calibration;
+
     }
 
     cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
@@ -380,7 +396,7 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
           if (!var_bytes[i] && first_trace[i] != trace_bits[i]) {
 
             var_bytes[i] = 1;
-            stage_max    = CAL_CYCLES_LONG;
+            stage_max = CAL_CYCLES_LONG;
 
           }
 
@@ -401,16 +417,16 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
 
   stop_us = get_cur_time_us();
 
-  total_cal_us     += stop_us - start_us;
+  total_cal_us += stop_us - start_us;
   total_cal_cycles += stage_max;
 
   /* OK, let's collect some stats about the performance of this test case.
      This is used for fuzzing air time calculations in calculate_score(). */
 
-  q->exec_us     = (stop_us - start_us) / stage_max;
+  q->exec_us = (stop_us - start_us) / stage_max;
   q->bitmap_size = count_bytes(trace_bits);
-  q->handicap    = handicap;
-  q->cal_failed  = 0;
+  q->handicap = handicap;
+  q->cal_failed = 0;
 
   total_bitmap_size += q->bitmap_size;
   ++total_bitmap_entries;
@@ -426,8 +442,10 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
 abort_calibration:
 
   if (new_bits == 2 && !q->has_new_cov) {
+
     q->has_new_cov = 1;
     ++queued_with_cov;
+
   }
 
   /* Mark variable paths. */
@@ -437,15 +455,17 @@ abort_calibration:
     var_byte_count = count_bytes(var_bytes);
 
     if (!q->var_behavior) {
+
       mark_as_variable(q);
       ++queued_variable;
+
     }
 
   }
 
   stage_name = old_sn;
-  stage_cur  = old_sc;
-  stage_max  = old_sm;
+  stage_cur = old_sc;
+  stage_max = old_sm;
 
   if (!first_run) show_stats();
 
@@ -453,14 +473,13 @@ abort_calibration:
 
 }
 
-
 /* Grab interesting test cases from other fuzzers. */
 
 void sync_fuzzers(char** argv) {
 
-  DIR* sd;
+  DIR*           sd;
   struct dirent* sd_ent;
-  u32 sync_cnt = 0;
+  u32            sync_cnt = 0;
 
   sd = opendir(sync_dir);
   if (!sd) PFATAL("Unable to open '%s'", sync_dir);
@@ -468,16 +487,17 @@ void sync_fuzzers(char** argv) {
   stage_max = stage_cur = 0;
   cur_depth = 0;
 
-  /* Look at the entries created for every other fuzzer in the sync directory. */
+  /* Look at the entries created for every other fuzzer in the sync directory.
+   */
 
   while ((sd_ent = readdir(sd))) {
 
     static u8 stage_tmp[128];
 
-    DIR* qd;
+    DIR*           qd;
     struct dirent* qd_ent;
-    u8 *qd_path, *qd_synced_path;
-    u32 min_accept = 0, next_min_accept;
+    u8 *           qd_path, *qd_synced_path;
+    u32            min_accept = 0, next_min_accept;
 
     s32 id_fd;
 
@@ -490,8 +510,10 @@ void sync_fuzzers(char** argv) {
     qd_path = alloc_printf("%s/%s/queue", sync_dir, sd_ent->d_name);
 
     if (!(qd = opendir(qd_path))) {
+
       ck_free(qd_path);
       continue;
+
     }
 
     /* Retrieve the ID of the last seen test case. */
@@ -502,35 +524,34 @@ void sync_fuzzers(char** argv) {
 
     if (id_fd < 0) PFATAL("Unable to create '%s'", qd_synced_path);
 
-    if (read(id_fd, &min_accept, sizeof(u32)) > 0) 
-      lseek(id_fd, 0, SEEK_SET);
+    if (read(id_fd, &min_accept, sizeof(u32)) > 0) lseek(id_fd, 0, SEEK_SET);
 
     next_min_accept = min_accept;
 
-    /* Show stats */    
+    /* Show stats */
 
     sprintf(stage_tmp, "sync %u", ++sync_cnt);
     stage_name = stage_tmp;
-    stage_cur  = 0;
-    stage_max  = 0;
+    stage_cur = 0;
+    stage_max = 0;
 
-    /* For every file queued by this fuzzer, parse ID and see if we have looked at
-       it before; exec a test case if not. */
+    /* For every file queued by this fuzzer, parse ID and see if we have looked
+       at it before; exec a test case if not. */
 
     while ((qd_ent = readdir(qd))) {
 
-      u8* path;
-      s32 fd;
+      u8*         path;
+      s32         fd;
       struct stat st;
 
       if (qd_ent->d_name[0] == '.' ||
-          sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 || 
-          syncing_case < min_accept) continue;
+          sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 ||
+          syncing_case < min_accept)
+        continue;
 
       /* OK, sounds like a new one. Let's give it a try. */
 
-      if (syncing_case >= next_min_accept)
-        next_min_accept = syncing_case + 1;
+      if (syncing_case >= next_min_accept) next_min_accept = syncing_case + 1;
 
       path = alloc_printf("%s/%s", qd_path, qd_ent->d_name);
 
@@ -539,8 +560,10 @@ void sync_fuzzers(char** argv) {
       fd = open(path, O_RDONLY);
 
       if (fd < 0) {
-         ck_free(path);
-         continue;
+
+        ck_free(path);
+        continue;
+
       }
 
       if (fstat(fd, &st)) PFATAL("fstat() failed");
@@ -584,14 +607,13 @@ void sync_fuzzers(char** argv) {
     closedir(qd);
     ck_free(qd_path);
     ck_free(qd_synced_path);
-    
-  }  
+
+  }
 
   closedir(sd);
 
 }
 
-
 /* Trim all new test cases to save cycles when doing deterministic checks. The
    trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of
    file size, to keep the stage short and sweet. */
@@ -599,8 +621,7 @@ void sync_fuzzers(char** argv) {
 u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
 
 #ifdef USE_PYTHON
-  if (py_functions[PY_FUNC_TRIM])
-    return trim_case_python(argv, q, in_buf);
+  if (py_functions[PY_FUNC_TRIM]) return trim_case_python(argv, q, in_buf);
 #endif
 
   static u8 tmp[64];
@@ -664,9 +685,9 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
         u32 move_tail = q->len - remove_pos - trim_avail;
 
         q->len -= trim_avail;
-        len_p2  = next_p2(q->len);
+        len_p2 = next_p2(q->len);
 
-        memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail, 
+        memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail,
                 move_tail);
 
         /* Let's save a clean trace, which will be needed by
@@ -679,7 +700,9 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
 
         }
 
-      } else remove_pos += remove_len;
+      } else
+
+        remove_pos += remove_len;
 
       /* Since this can be slow, update the screen every now and then. */
 
@@ -699,7 +722,7 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
 
     s32 fd;
 
-    unlink(q->fname); /* ignore errors */
+    unlink(q->fname);                                      /* ignore errors */
 
     fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);
 
@@ -720,7 +743,6 @@ abort_trimming:
 
 }
 
-
 /* Write a modified test case, run program, process results. Handle
    error conditions, returning 1 if it's time to bail out. This is
    a helper function for fuzz_one(). */
@@ -745,20 +767,24 @@ u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) {
   if (fault == FAULT_TMOUT) {
 
     if (subseq_tmouts++ > TMOUT_LIMIT) {
+
       ++cur_skipped_paths;
       return 1;
+
     }
 
-  } else subseq_tmouts = 0;
+  } else
+
+    subseq_tmouts = 0;
 
   /* Users can hit us with SIGUSR1 to request the current input
      to be abandoned. */
 
   if (skip_requested) {
 
-     skip_requested = 0;
-     ++cur_skipped_paths;
-     return 1;
+    skip_requested = 0;
+    ++cur_skipped_paths;
+    return 1;
 
   }
 
diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
index 5dbd59ac..3614599d 100644
--- a/src/afl-fuzz-stats.c
+++ b/src/afl-fuzz-stats.c
@@ -26,11 +26,11 @@
 
 void write_stats_file(double bitmap_cvg, double stability, double eps) {
 
-  static double last_bcvg, last_stab, last_eps;
+  static double        last_bcvg, last_stab, last_eps;
   static struct rusage usage;
 
-  u8* fn = alloc_printf("%s/fuzzer_stats", out_dir);
-  s32 fd;
+  u8*   fn = alloc_printf("%s/fuzzer_stats", out_dir);
+  s32   fd;
   FILE* f;
 
   fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);
@@ -47,66 +47,74 @@ void write_stats_file(double bitmap_cvg, double stability, double eps) {
      where exec/sec stats and such are not readily available. */
 
   if (!bitmap_cvg && !stability && !eps) {
+
     bitmap_cvg = last_bcvg;
-    stability  = last_stab;
-    eps        = last_eps;
+    stability = last_stab;
+    eps = last_eps;
+
   } else {
+
     last_bcvg = bitmap_cvg;
     last_stab = stability;
-    last_eps  = eps;
+    last_eps = eps;
+
   }
 
-  fprintf(f, "start_time        : %llu\n"
-             "last_update       : %llu\n"
-             "fuzzer_pid        : %d\n"
-             "cycles_done       : %llu\n"
-             "execs_done        : %llu\n"
-             "execs_per_sec     : %0.02f\n"
-             "paths_total       : %u\n"
-             "paths_favored     : %u\n"
-             "paths_found       : %u\n"
-             "paths_imported    : %u\n"
-             "max_depth         : %u\n"
-             "cur_path          : %u\n" /* Must match find_start_position() */
-             "pending_favs      : %u\n"
-             "pending_total     : %u\n"
-             "variable_paths    : %u\n"
-             "stability         : %0.02f%%\n"
-             "bitmap_cvg        : %0.02f%%\n"
-             "unique_crashes    : %llu\n"
-             "unique_hangs      : %llu\n"
-             "last_path         : %llu\n"
-             "last_crash        : %llu\n"
-             "last_hang         : %llu\n"
-             "execs_since_crash : %llu\n"
-             "exec_timeout      : %u\n"
-             "slowest_exec_ms   : %llu\n"
-             "peak_rss_mb       : %lu\n"
-             "afl_banner        : %s\n"
-             "afl_version       : " VERSION "\n"
-             "target_mode       : %s%s%s%s%s%s%s%s\n"
-             "command_line      : %s\n",
-             start_time / 1000, get_cur_time() / 1000, getpid(),
-             queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps,
-             queued_paths, queued_favored, queued_discovered, queued_imported,
-             max_depth, current_entry, pending_favored, pending_not_fuzzed,
-             queued_variable, stability, bitmap_cvg, unique_crashes,
-             unique_hangs, last_path_time / 1000, last_crash_time / 1000,
-             last_hang_time / 1000, total_execs - last_crash_execs,
-             exec_tmout, slowest_exec_ms, (unsigned long int)usage.ru_maxrss, use_banner,
-             unicorn_mode ? "unicorn" : "", qemu_mode ? "qemu " : "", dumb_mode ? " dumb " : "",
-             no_forkserver ? "no_forksrv " : "", crash_mode ? "crash " : "",
-             persistent_mode ? "persistent " : "", deferred_mode ? "deferred " : "",
-             (unicorn_mode || qemu_mode || dumb_mode || no_forkserver || crash_mode ||
-              persistent_mode || deferred_mode) ? "" : "default",
-             orig_cmdline);
-             /* ignore errors */
+  fprintf(f,
+          "start_time        : %llu\n"
+          "last_update       : %llu\n"
+          "fuzzer_pid        : %d\n"
+          "cycles_done       : %llu\n"
+          "execs_done        : %llu\n"
+          "execs_per_sec     : %0.02f\n"
+          "paths_total       : %u\n"
+          "paths_favored     : %u\n"
+          "paths_found       : %u\n"
+          "paths_imported    : %u\n"
+          "max_depth         : %u\n"
+          "cur_path          : %u\n"    /* Must match find_start_position() */
+          "pending_favs      : %u\n"
+          "pending_total     : %u\n"
+          "variable_paths    : %u\n"
+          "stability         : %0.02f%%\n"
+          "bitmap_cvg        : %0.02f%%\n"
+          "unique_crashes    : %llu\n"
+          "unique_hangs      : %llu\n"
+          "last_path         : %llu\n"
+          "last_crash        : %llu\n"
+          "last_hang         : %llu\n"
+          "execs_since_crash : %llu\n"
+          "exec_timeout      : %u\n"
+          "slowest_exec_ms   : %llu\n"
+          "peak_rss_mb       : %lu\n"
+          "afl_banner        : %s\n"
+          "afl_version       : " VERSION
+          "\n"
+          "target_mode       : %s%s%s%s%s%s%s%s\n"
+          "command_line      : %s\n",
+          start_time / 1000, get_cur_time() / 1000, getpid(),
+          queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps, queued_paths,
+          queued_favored, queued_discovered, queued_imported, max_depth,
+          current_entry, pending_favored, pending_not_fuzzed, queued_variable,
+          stability, bitmap_cvg, unique_crashes, unique_hangs,
+          last_path_time / 1000, last_crash_time / 1000, last_hang_time / 1000,
+          total_execs - last_crash_execs, exec_tmout, slowest_exec_ms,
+          (unsigned long int)usage.ru_maxrss, use_banner,
+          unicorn_mode ? "unicorn" : "", qemu_mode ? "qemu " : "",
+          dumb_mode ? " dumb " : "", no_forkserver ? "no_forksrv " : "",
+          crash_mode ? "crash " : "", persistent_mode ? "persistent " : "",
+          deferred_mode ? "deferred " : "",
+          (unicorn_mode || qemu_mode || dumb_mode || no_forkserver ||
+           crash_mode || persistent_mode || deferred_mode)
+              ? ""
+              : "default",
+          orig_cmdline);
+  /* ignore errors */
 
   fclose(f);
 
 }
 
-
 /* Update the plot file if there is a reason to. */
 
 void maybe_update_plot_file(double bitmap_cvg, double eps) {
@@ -114,19 +122,20 @@ void maybe_update_plot_file(double bitmap_cvg, double eps) {
   static u32 prev_qp, prev_pf, prev_pnf, prev_ce, prev_md;
   static u64 prev_qc, prev_uc, prev_uh;
 
-  if (prev_qp == queued_paths && prev_pf == pending_favored && 
+  if (prev_qp == queued_paths && prev_pf == pending_favored &&
       prev_pnf == pending_not_fuzzed && prev_ce == current_entry &&
       prev_qc == queue_cycle && prev_uc == unique_crashes &&
-      prev_uh == unique_hangs && prev_md == max_depth) return;
+      prev_uh == unique_hangs && prev_md == max_depth)
+    return;
 
-  prev_qp  = queued_paths;
-  prev_pf  = pending_favored;
+  prev_qp = queued_paths;
+  prev_pf = pending_favored;
   prev_pnf = pending_not_fuzzed;
-  prev_ce  = current_entry;
-  prev_qc  = queue_cycle;
-  prev_uc  = unique_crashes;
-  prev_uh  = unique_hangs;
-  prev_md  = max_depth;
+  prev_ce = current_entry;
+  prev_qc = queue_cycle;
+  prev_uc = unique_crashes;
+  prev_uh = unique_hangs;
+  prev_md = max_depth;
 
   /* Fields in the file:
 
@@ -134,17 +143,16 @@ void maybe_update_plot_file(double bitmap_cvg, double eps) {
      favored_not_fuzzed, unique_crashes, unique_hangs, max_depth,
      execs_per_sec */
 
-  fprintf(plot_file, 
+  fprintf(plot_file,
           "%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f\n",
           get_cur_time() / 1000, queue_cycle - 1, current_entry, queued_paths,
           pending_not_fuzzed, pending_favored, bitmap_cvg, unique_crashes,
-          unique_hangs, max_depth, eps); /* ignore errors */
+          unique_hangs, max_depth, eps);                   /* ignore errors */
 
   fflush(plot_file);
 
 }
 
-
 /* Check terminal dimensions after resize. */
 
 static void check_term_size(void) {
@@ -160,15 +168,14 @@ static void check_term_size(void) {
 
 }
 
-
 /* A spiffy retro stats screen! This is called every stats_update_freq
    execve() calls, plus in several other circumstances. */
 
 void show_stats(void) {
 
-  static u64 last_stats_ms, last_plot_ms, last_ms, last_execs;
+  static u64    last_stats_ms, last_plot_ms, last_ms, last_execs;
   static double avg_exec;
-  double t_byte_ratio, stab_ratio;
+  double        t_byte_ratio, stab_ratio;
 
   u64 cur_ms;
   u32 t_bytes, t_bits;
@@ -194,14 +201,13 @@ void show_stats(void) {
 
   } else {
 
-    double cur_avg = ((double)(total_execs - last_execs)) * 1000 /
-                     (cur_ms - last_ms);
+    double cur_avg =
+        ((double)(total_execs - last_execs)) * 1000 / (cur_ms - last_ms);
 
     /* If there is a dramatic (5x+) jump in speed, reset the indicator
        more quickly. */
 
-    if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec)
-      avg_exec = cur_avg;
+    if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec) avg_exec = cur_avg;
 
     avg_exec = avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) +
                cur_avg * (1.0 / AVG_SMOOTHING);
@@ -249,7 +255,8 @@ void show_stats(void) {
   /* Honor AFL_EXIT_WHEN_DONE and AFL_BENCH_UNTIL_CRASH. */
 
   if (!dumb_mode && cycles_wo_finds > 100 && !pending_not_fuzzed &&
-      getenv("AFL_EXIT_WHEN_DONE")) stop_soon = 2;
+      getenv("AFL_EXIT_WHEN_DONE"))
+    stop_soon = 2;
 
   if (total_crashes && getenv("AFL_BENCH_UNTIL_CRASH")) stop_soon = 2;
 
@@ -276,7 +283,8 @@ void show_stats(void) {
 
   if (term_too_small) {
 
-    SAYF(cBRI "Your terminal is too small to display the UI.\n"
+    SAYF(cBRI
+         "Your terminal is too small to display the UI.\n"
          "Please resize terminal window to at least 79x24.\n" cRST);
 
     return;
@@ -285,38 +293,41 @@ void show_stats(void) {
 
   /* Let's start by drawing a centered banner. */
 
-  banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner) + strlen(power_name) + 3 + 5;
+  banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner) +
+               strlen(power_name) + 3 + 5;
   banner_pad = (79 - banner_len) / 2;
   memset(tmp, ' ', banner_pad);
 
 #ifdef HAVE_AFFINITY
-  sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN
-          " (%s) " cPIN "[%s]" cBLU " {%d}",  crash_mode ? cPIN "peruvian were-rabbit" :
-          cYEL "american fuzzy lop", use_banner, power_name, cpu_aff);
+  sprintf(tmp + banner_pad,
+          "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]" cBLU " {%d}",
+          crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
+          use_banner, power_name, cpu_aff);
 #else
-  sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN
-          " (%s) " cPIN "[%s]",  crash_mode ? cPIN "peruvian were-rabbit" :
-          cYEL "american fuzzy lop", use_banner, power_name);
+  sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]",
+          crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
+          use_banner, power_name);
 #endif /* HAVE_AFFINITY */
 
   SAYF("\n%s\n", tmp);
 
   /* "Handy" shortcuts for drawing boxes... */
 
-#define bSTG    bSTART cGRA
-#define bH2     bH bH
-#define bH5     bH2 bH2 bH
-#define bH10    bH5 bH5
-#define bH20    bH10 bH10
-#define bH30    bH20 bH10
-#define SP5     "     "
-#define SP10    SP5 SP5
-#define SP20    SP10 SP10
+#define bSTG bSTART cGRA
+#define bH2 bH bH
+#define bH5 bH2 bH2 bH
+#define bH10 bH5 bH5
+#define bH20 bH10 bH10
+#define bH30 bH20 bH10
+#define SP5 "     "
+#define SP10 SP5 SP5
+#define SP20 SP10 SP10
 
   /* Lord, forgive me this. */
 
-  SAYF(SET_G1 bSTG bLT bH bSTOP cCYA " process timing " bSTG bH30 bH5 bH bHB
-       bH bSTOP cCYA " overall results " bSTG bH2 bH2 bRT "\n");
+  SAYF(SET_G1 bSTG bLT bH bSTOP cCYA
+       " process timing " bSTG bH30 bH5 bH bHB bH bSTOP cCYA
+       " overall results " bSTG bH2 bH2 bRT "\n");
 
   if (dumb_mode) {
 
@@ -327,29 +338,34 @@ void show_stats(void) {
     u64 min_wo_finds = (cur_ms - last_path_time) / 1000 / 60;
 
     /* First queue cycle: don't stop now! */
-    if (queue_cycle == 1 || min_wo_finds < 15) strcpy(tmp, cMGN); else
+    if (queue_cycle == 1 || min_wo_finds < 15)
+      strcpy(tmp, cMGN);
+    else
 
-    /* Subsequent cycles, but we're still making finds. */
-    if (cycles_wo_finds < 25 || min_wo_finds < 30) strcpy(tmp, cYEL); else
+        /* Subsequent cycles, but we're still making finds. */
+        if (cycles_wo_finds < 25 || min_wo_finds < 30)
+      strcpy(tmp, cYEL);
+    else
 
-    /* No finds for a long time and no test cases to try. */
-    if (cycles_wo_finds > 100 && !pending_not_fuzzed && min_wo_finds > 120)
+        /* No finds for a long time and no test cases to try. */
+        if (cycles_wo_finds > 100 && !pending_not_fuzzed && min_wo_finds > 120)
       strcpy(tmp, cLGN);
 
     /* Default: cautiously OK to stop? */
-    else strcpy(tmp, cLBL);
+    else
+      strcpy(tmp, cLBL);
 
   }
 
   SAYF(bV bSTOP "        run time : " cRST "%-33s " bSTG bV bSTOP
-       "  cycles done : %s%-5s " bSTG bV "\n",
+                "  cycles done : %s%-5s " bSTG              bV "\n",
        DTD(cur_ms, start_time), tmp, DI(queue_cycle - 1));
 
   /* We want to warn people about not seeing new paths after a full cycle,
      except when resuming fuzzing or running in non-instrumented mode. */
 
   if (!dumb_mode && (last_path_time || resuming_fuzz || queue_cycle == 1 ||
-      in_bitmap || crash_mode)) {
+                     in_bitmap || crash_mode)) {
 
     SAYF(bV bSTOP "   last new path : " cRST "%-33s ",
          DTD(cur_ms, last_path_time));
@@ -359,12 +375,12 @@ void show_stats(void) {
     if (dumb_mode)
 
       SAYF(bV bSTOP "   last new path : " cPIN "n/a" cRST
-           " (non-instrumented mode)       ");
+                    " (non-instrumented mode)       ");
 
-     else
+    else
 
       SAYF(bV bSTOP "   last new path : " cRST "none yet " cLRD
-           "(odd, check syntax!)     ");
+                    "(odd, check syntax!)     ");
 
   }
 
@@ -378,18 +394,18 @@ void show_stats(void) {
           (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
 
   SAYF(bV bSTOP " last uniq crash : " cRST "%-33s " bSTG bV bSTOP
-       " uniq crashes : %s%-6s" bSTG bV "\n",
-       DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST,
-       tmp);
+                " uniq crashes : %s%-6s" bSTG               bV "\n",
+       DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST, tmp);
 
   sprintf(tmp, "%s%s", DI(unique_hangs),
-         (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
+          (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
 
   SAYF(bV bSTOP "  last uniq hang : " cRST "%-33s " bSTG bV bSTOP
-       "   uniq hangs : " cRST "%-6s" bSTG bV "\n",
+                "   uniq hangs : " cRST "%-6s" bSTG         bV "\n",
        DTD(cur_ms, last_hang_time), tmp);
 
-  SAYF(bVR bH bSTOP cCYA " cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA
+  SAYF(bVR bH bSTOP            cCYA
+       " cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA
        " map coverage " bSTG bH bHT bH20 bH2 bVL "\n");
 
   /* This gets funny because we want to print several variable-length variables
@@ -402,23 +418,24 @@ void show_stats(void) {
 
   SAYF(bV bSTOP "  now processing : " cRST "%-16s " bSTG bV bSTOP, tmp);
 
-  sprintf(tmp, "%0.02f%% / %0.02f%%", ((double)queue_cur->bitmap_size) *
-          100 / MAP_SIZE, t_byte_ratio);
+  sprintf(tmp, "%0.02f%% / %0.02f%%",
+          ((double)queue_cur->bitmap_size) * 100 / MAP_SIZE, t_byte_ratio);
 
-  SAYF("    map density : %s%-21s" bSTG bV "\n", t_byte_ratio > 70 ? cLRD :
-       ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST), tmp);
+  SAYF("    map density : %s%-21s" bSTG bV "\n",
+       t_byte_ratio > 70 ? cLRD : ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST),
+       tmp);
 
   sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths),
           ((double)cur_skipped_paths * 100) / queued_paths);
 
   SAYF(bV bSTOP " paths timed out : " cRST "%-16s " bSTG bV, tmp);
 
-  sprintf(tmp, "%0.02f bits/tuple",
-          t_bytes ? (((double)t_bits) / t_bytes) : 0);
+  sprintf(tmp, "%0.02f bits/tuple", t_bytes ? (((double)t_bits) / t_bytes) : 0);
 
   SAYF(bSTOP " count coverage : " cRST "%-21s" bSTG bV "\n", tmp);
 
-  SAYF(bVR bH bSTOP cCYA " stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA
+  SAYF(bVR bH bSTOP            cCYA
+       " stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA
        " findings in depth " bSTG bH10 bH5 bH2 bH2 bVL "\n");
 
   sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored),
@@ -427,7 +444,8 @@ void show_stats(void) {
   /* Yeah... it's still going on... halp? */
 
   SAYF(bV bSTOP "  now trying : " cRST "%-20s " bSTG bV bSTOP
-       " favored paths : " cRST "%-22s" bSTG bV "\n", stage_name, tmp);
+                " favored paths : " cRST "%-22s" bSTG   bV "\n",
+       stage_name, tmp);
 
   if (!stage_max) {
 
@@ -453,14 +471,14 @@ void show_stats(void) {
   if (crash_mode) {
 
     SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP
-         "   new crashes : %s%-22s" bSTG bV "\n", DI(total_execs),
-         unique_crashes ? cLRD : cRST, tmp);
+                  "   new crashes : %s%-22s" bSTG         bV "\n",
+         DI(total_execs), unique_crashes ? cLRD : cRST, tmp);
 
   } else {
 
     SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP
-         " total crashes : %s%-22s" bSTG bV "\n", DI(total_execs),
-         unique_crashes ? cLRD : cRST, tmp);
+                  " total crashes : %s%-22s" bSTG         bV "\n",
+         DI(total_execs), unique_crashes ? cLRD : cRST, tmp);
 
   }
 
@@ -468,8 +486,8 @@ void show_stats(void) {
 
   if (avg_exec < 100) {
 
-    sprintf(tmp, "%s/sec (%s)", DF(avg_exec), avg_exec < 20 ?
-            "zzzz..." : "slow!");
+    sprintf(tmp, "%s/sec (%s)", DF(avg_exec),
+            avg_exec < 20 ? "zzzz..." : "slow!");
 
     SAYF(bV bSTOP "  exec speed : " cLRD "%-20s ", tmp);
 
@@ -483,12 +501,13 @@ void show_stats(void) {
   sprintf(tmp, "%s (%s%s unique)", DI(total_tmouts), DI(unique_tmouts),
           (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
 
-  SAYF (bSTG bV bSTOP "  total tmouts : " cRST "%-22s" bSTG bV "\n", tmp);
+  SAYF(bSTG bV bSTOP "  total tmouts : " cRST "%-22s" bSTG bV "\n", tmp);
 
   /* Aaaalmost there... hold on! */
 
-  SAYF(bVR bH cCYA bSTOP " fuzzing strategy yields " bSTG bH10 bHT bH10
-       bH5 bHB bH bSTOP cCYA " path geometry " bSTG bH5 bH2 bVL "\n");
+  SAYF(bVR bH cCYA                      bSTOP
+       " fuzzing strategy yields " bSTG bH10 bHT bH10 bH5 bHB bH bSTOP cCYA
+       " path geometry " bSTG bH5 bH2 bVL "\n");
 
   if (skip_deterministic) {
 
@@ -496,66 +515,77 @@ void show_stats(void) {
 
   } else {
 
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-            DI(stage_finds[STAGE_FLIP1]), DI(stage_cycles[STAGE_FLIP1]),
-            DI(stage_finds[STAGE_FLIP2]), DI(stage_cycles[STAGE_FLIP2]),
-            DI(stage_finds[STAGE_FLIP4]), DI(stage_cycles[STAGE_FLIP4]));
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_FLIP1]),
+            DI(stage_cycles[STAGE_FLIP1]), DI(stage_finds[STAGE_FLIP2]),
+            DI(stage_cycles[STAGE_FLIP2]), DI(stage_finds[STAGE_FLIP4]),
+            DI(stage_cycles[STAGE_FLIP4]));
 
   }
 
-  SAYF(bV bSTOP "   bit flips : " cRST "%-36s " bSTG bV bSTOP "    levels : "
-       cRST "%-10s" bSTG bV "\n", tmp, DI(max_depth));
+  SAYF(bV bSTOP "   bit flips : " cRST "%-36s " bSTG bV bSTOP
+                "    levels : " cRST "%-10s" bSTG       bV "\n",
+       tmp, DI(max_depth));
 
   if (!skip_deterministic)
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-            DI(stage_finds[STAGE_FLIP8]), DI(stage_cycles[STAGE_FLIP8]),
-            DI(stage_finds[STAGE_FLIP16]), DI(stage_cycles[STAGE_FLIP16]),
-            DI(stage_finds[STAGE_FLIP32]), DI(stage_cycles[STAGE_FLIP32]));
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_FLIP8]),
+            DI(stage_cycles[STAGE_FLIP8]), DI(stage_finds[STAGE_FLIP16]),
+            DI(stage_cycles[STAGE_FLIP16]), DI(stage_finds[STAGE_FLIP32]),
+            DI(stage_cycles[STAGE_FLIP32]));
 
-  SAYF(bV bSTOP "  byte flips : " cRST "%-36s " bSTG bV bSTOP "   pending : "
-       cRST "%-10s" bSTG bV "\n", tmp, DI(pending_not_fuzzed));
+  SAYF(bV bSTOP "  byte flips : " cRST "%-36s " bSTG bV bSTOP
+                "   pending : " cRST "%-10s" bSTG       bV "\n",
+       tmp, DI(pending_not_fuzzed));
 
   if (!skip_deterministic)
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-            DI(stage_finds[STAGE_ARITH8]), DI(stage_cycles[STAGE_ARITH8]),
-            DI(stage_finds[STAGE_ARITH16]), DI(stage_cycles[STAGE_ARITH16]),
-            DI(stage_finds[STAGE_ARITH32]), DI(stage_cycles[STAGE_ARITH32]));
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_ARITH8]),
+            DI(stage_cycles[STAGE_ARITH8]), DI(stage_finds[STAGE_ARITH16]),
+            DI(stage_cycles[STAGE_ARITH16]), DI(stage_finds[STAGE_ARITH32]),
+            DI(stage_cycles[STAGE_ARITH32]));
 
-  SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP "  pend fav : "
-       cRST "%-10s" bSTG bV "\n", tmp, DI(pending_favored));
+  SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP
+                "  pend fav : " cRST "%-10s" bSTG       bV "\n",
+       tmp, DI(pending_favored));
 
   if (!skip_deterministic)
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-            DI(stage_finds[STAGE_INTEREST8]), DI(stage_cycles[STAGE_INTEREST8]),
-            DI(stage_finds[STAGE_INTEREST16]), DI(stage_cycles[STAGE_INTEREST16]),
-            DI(stage_finds[STAGE_INTEREST32]), DI(stage_cycles[STAGE_INTEREST32]));
+    sprintf(
+        tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_INTEREST8]),
+        DI(stage_cycles[STAGE_INTEREST8]), DI(stage_finds[STAGE_INTEREST16]),
+        DI(stage_cycles[STAGE_INTEREST16]), DI(stage_finds[STAGE_INTEREST32]),
+        DI(stage_cycles[STAGE_INTEREST32]));
 
-  SAYF(bV bSTOP "  known ints : " cRST "%-36s " bSTG bV bSTOP " own finds : "
-       cRST "%-10s" bSTG bV "\n", tmp, DI(queued_discovered));
+  SAYF(bV bSTOP "  known ints : " cRST "%-36s " bSTG bV bSTOP
+                " own finds : " cRST "%-10s" bSTG       bV "\n",
+       tmp, DI(queued_discovered));
 
   if (!skip_deterministic)
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-            DI(stage_finds[STAGE_EXTRAS_UO]), DI(stage_cycles[STAGE_EXTRAS_UO]),
-            DI(stage_finds[STAGE_EXTRAS_UI]), DI(stage_cycles[STAGE_EXTRAS_UI]),
-            DI(stage_finds[STAGE_EXTRAS_AO]), DI(stage_cycles[STAGE_EXTRAS_AO]));
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_EXTRAS_UO]),
+            DI(stage_cycles[STAGE_EXTRAS_UO]), DI(stage_finds[STAGE_EXTRAS_UI]),
+            DI(stage_cycles[STAGE_EXTRAS_UI]), DI(stage_finds[STAGE_EXTRAS_AO]),
+            DI(stage_cycles[STAGE_EXTRAS_AO]));
 
   SAYF(bV bSTOP "  dictionary : " cRST "%-36s " bSTG bV bSTOP
-       "  imported : " cRST "%-10s" bSTG bV "\n", tmp,
-       sync_id ? DI(queued_imported) : (u8*)"n/a");
+                "  imported : " cRST "%-10s" bSTG       bV "\n",
+       tmp, sync_id ? DI(queued_imported) : (u8*)"n/a");
 
-  sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-          DI(stage_finds[STAGE_HAVOC]), DI(stage_cycles[STAGE_HAVOC]),
-          DI(stage_finds[STAGE_SPLICE]), DI(stage_cycles[STAGE_SPLICE]),
-          DI(stage_finds[STAGE_PYTHON]), DI(stage_cycles[STAGE_PYTHON]));
+  sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_HAVOC]),
+          DI(stage_cycles[STAGE_HAVOC]), DI(stage_finds[STAGE_SPLICE]),
+          DI(stage_cycles[STAGE_SPLICE]), DI(stage_finds[STAGE_PYTHON]),
+          DI(stage_cycles[STAGE_PYTHON]));
 
   SAYF(bV bSTOP "       havoc : " cRST "%-36s " bSTG bV bSTOP, tmp);
 
-  if (t_bytes) sprintf(tmp, "%0.02f%%", stab_ratio);
-    else strcpy(tmp, "n/a");
-
-  SAYF(" stability : %s%-10s" bSTG bV "\n", (stab_ratio < 85 && var_byte_count > 40)
-       ? cLRD : ((queued_variable && (!persistent_mode || var_byte_count > 20))
-       ? cMGN : cRST), tmp);
+  if (t_bytes)
+    sprintf(tmp, "%0.02f%%", stab_ratio);
+  else
+    strcpy(tmp, "n/a");
+
+  SAYF(" stability : %s%-10s" bSTG bV "\n",
+       (stab_ratio < 85 && var_byte_count > 40)
+           ? cLRD
+           : ((queued_variable && (!persistent_mode || var_byte_count > 20))
+                  ? cMGN
+                  : cRST),
+       tmp);
 
   if (!bytes_trim_out) {
 
@@ -582,18 +612,26 @@ void show_stats(void) {
 
     sprintf(tmp2, "%0.02f%%",
             ((double)(blocks_eff_total - blocks_eff_select)) * 100 /
-            blocks_eff_total);
+                blocks_eff_total);
 
     strcat(tmp, tmp2);
 
   }
+
   if (custom_mutator) {
-    sprintf(tmp, "%s/%s", DI(stage_finds[STAGE_CUSTOM_MUTATOR]), DI(stage_cycles[STAGE_CUSTOM_MUTATOR]));
-    SAYF(bV bSTOP " custom mut. : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n"
-             bLB bH30 bH20 bH2 bH bRB bSTOP cRST RESET_G1, tmp);
+
+    sprintf(tmp, "%s/%s", DI(stage_finds[STAGE_CUSTOM_MUTATOR]),
+            DI(stage_cycles[STAGE_CUSTOM_MUTATOR]));
+    SAYF(bV bSTOP " custom mut. : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB
+                  "\n" bLB bH30 bH20 bH2 bH bRB bSTOP cRST RESET_G1,
+         tmp);
+
   } else {
-    SAYF(bV bSTOP "        trim : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n"
-       bLB bH30 bH20 bH2 bRB bSTOP cRST RESET_G1, tmp);
+
+    SAYF(bV bSTOP "        trim : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB
+                  "\n" bLB bH30 bH20 bH2 bRB bSTOP cRST RESET_G1,
+         tmp);
+
   }
 
   /* Provide some CPU utilization stats. */
@@ -601,7 +639,7 @@ void show_stats(void) {
   if (cpu_core_count) {
 
     double cur_runnable = get_runnable_processes();
-    u32 cur_utilization = cur_runnable * 100 / cpu_core_count;
+    u32    cur_utilization = cur_runnable * 100 / cpu_core_count;
 
     u8* cpu_color = cCYA;
 
@@ -618,25 +656,26 @@ void show_stats(void) {
 
     if (cpu_aff >= 0) {
 
-      SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, 
-           MIN(cpu_aff, 999), cpu_color,
-           MIN(cur_utilization, 999));
+      SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, MIN(cpu_aff, 999),
+           cpu_color, MIN(cur_utilization, 999));
 
     } else {
 
-      SAYF(SP10 cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST,
-           cpu_color, MIN(cur_utilization, 999));
- 
-   }
+      SAYF(SP10 cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST, cpu_color,
+           MIN(cur_utilization, 999));
+
+    }
 
 #else
 
-    SAYF(SP10 cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST,
-         cpu_color, MIN(cur_utilization, 999));
+    SAYF(SP10 cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST, cpu_color,
+         MIN(cur_utilization, 999));
 
 #endif /* ^HAVE_AFFINITY */
 
-  } else SAYF("\r");
+  } else
+
+    SAYF("\r");
 
   /* Hallelujah! */
 
@@ -644,7 +683,6 @@ void show_stats(void) {
 
 }
 
-
 /* Display quick statistics at the end of processing the input directory,
    plus a bunch of warnings. Some calibration stuff also ended up here,
    along with several hardcoded constants. Maybe clean up eventually. */
@@ -652,10 +690,10 @@ void show_stats(void) {
 void show_init_stats(void) {
 
   struct queue_entry* q = queue;
-  u32 min_bits = 0, max_bits = 0;
-  u64 min_us = 0, max_us = 0;
-  u64 avg_us = 0;
-  u32 max_len = 0;
+  u32                 min_bits = 0, max_bits = 0;
+  u64                 min_us = 0, max_us = 0;
+  u64                 avg_us = 0;
+  u32                 max_len = 0;
 
   if (total_cal_cycles) avg_us = total_cal_us / total_cal_cycles;
 
@@ -681,9 +719,12 @@ void show_init_stats(void) {
 
   /* Let's keep things moving with slow binaries. */
 
-  if (avg_us > 50000) havoc_div = 10;     /* 0-19 execs/sec   */
-  else if (avg_us > 20000) havoc_div = 5; /* 20-49 execs/sec  */
-  else if (avg_us > 10000) havoc_div = 2; /* 50-100 execs/sec */
+  if (avg_us > 50000)
+    havoc_div = 10;                                     /* 0-19 execs/sec   */
+  else if (avg_us > 20000)
+    havoc_div = 5;                                      /* 20-49 execs/sec  */
+  else if (avg_us > 10000)
+    havoc_div = 2;                                      /* 50-100 execs/sec */
 
   if (!resuming_fuzz) {
 
@@ -698,7 +739,9 @@ void show_init_stats(void) {
       WARNF(cLRD "Some test cases look useless. Consider using a smaller set.");
 
     if (queued_paths > 100)
-      WARNF(cLRD "You probably have far too many input files! Consider trimming down.");
+      WARNF(cLRD
+            "You probably have far too many input files! Consider trimming "
+            "down.");
     else if (queued_paths > 20)
       WARNF("You have lots of input files; try starting small.");
 
@@ -706,11 +749,13 @@ void show_init_stats(void) {
 
   OKF("Here are some useful stats:\n\n"
 
-      cGRA "    Test case count : " cRST "%u favored, %u variable, %u total\n"
-      cGRA "       Bitmap range : " cRST "%u to %u bits (average: %0.02f bits)\n"
-      cGRA "        Exec timing : " cRST "%s to %s us (average: %s us)\n",
-      queued_favored, queued_variable, queued_paths, min_bits, max_bits, 
-      ((double)total_bitmap_size) / (total_bitmap_entries ? total_bitmap_entries : 1),
+      cGRA "    Test case count : " cRST
+      "%u favored, %u variable, %u total\n" cGRA "       Bitmap range : " cRST
+      "%u to %u bits (average: %0.02f bits)\n" cGRA
+      "        Exec timing : " cRST "%s to %s us (average: %s us)\n",
+      queued_favored, queued_variable, queued_paths, min_bits, max_bits,
+      ((double)total_bitmap_size) /
+          (total_bitmap_entries ? total_bitmap_entries : 1),
       DI(min_us), DI(max_us), DI(avg_us));
 
   if (!timeout_given) {
@@ -722,16 +767,19 @@ void show_init_stats(void) {
        random scheduler jitter is less likely to have any impact, and because
        our patience is wearing thin =) */
 
-    if (avg_us > 50000) exec_tmout = avg_us * 2 / 1000;
-    else if (avg_us > 10000) exec_tmout = avg_us * 3 / 1000;
-    else exec_tmout = avg_us * 5 / 1000;
+    if (avg_us > 50000)
+      exec_tmout = avg_us * 2 / 1000;
+    else if (avg_us > 10000)
+      exec_tmout = avg_us * 3 / 1000;
+    else
+      exec_tmout = avg_us * 5 / 1000;
 
     exec_tmout = MAX(exec_tmout, max_us / 1000);
     exec_tmout = (exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;
 
     if (exec_tmout > EXEC_TIMEOUT) exec_tmout = EXEC_TIMEOUT;
 
-    ACTF("No -t option specified, so I'll use exec timeout of %u ms.", 
+    ACTF("No -t option specified, so I'll use exec timeout of %u ms.",
          exec_tmout);
 
     timeout_given = 1;
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index 2242dd6b..685840c6 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -27,53 +27,62 @@
 static void usage(u8* argv0) {
 
 #ifdef USE_PYTHON
-#define PHYTON_SUPPORT \
-       "Compiled with Python 2.7 module support, see docs/python_mutators.txt\n"
+#  define PHYTON_SUPPORT\
+  "Compiled with Python 2.7 module support, see docs/python_mutators.txt\n"
 #else
-#define PHYTON_SUPPORT ""
+#  define PHYTON_SUPPORT ""
 #endif
 
-  SAYF("\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n"
-
-       "Required parameters:\n"
-       "  -i dir        - input directory with test cases\n"
-       "  -o dir        - output directory for fuzzer findings\n\n"
-
-       "Execution control settings:\n"
-       "  -p schedule   - power schedules recompute a seed's performance score.\n"
-       "                  <explore (default), fast, coe, lin, quad, or exploit>\n"
-       "                  see docs/power_schedules.txt\n"
-       "  -f file       - location read by the fuzzed program (stdin)\n"
-       "  -t msec       - timeout for each run (auto-scaled, 50-%d ms)\n"
-       "  -m megs       - memory limit for child process (%d MB)\n"
-       "  -Q            - use binary-only instrumentation (QEMU mode)\n"
-       "  -U            - use Unicorn-based instrumentation (Unicorn mode)\n\n"
-       "  -L minutes    - use MOpt(imize) mode and set the limit time for entering the\n"
-       "                  pacemaker mode (minutes of no new paths, 0 = immediately).\n"
-       "                  a recommended value is 10-60. see docs/README.MOpt\n\n"
- 
-       "Fuzzing behavior settings:\n"
-       "  -d            - quick & dirty mode (skips deterministic steps)\n"
-       "  -n            - fuzz without instrumentation (dumb mode)\n"
-       "  -x dir        - optional fuzzer dictionary (see README)\n\n"
-
-       "Testing settings:\n"
-       "  -s seed       - use a fixed seed for the RNG\n"
-       "  -V seconds    - fuzz for a maximum total time of seconds then terminate\n"
-       "  -E execs      - fuzz for a maximum number of total executions then terminate\n\n"
-
-       "Other stuff:\n"
-       "  -T text       - text banner to show on the screen\n"
-       "  -M / -S id    - distributed mode (see parallel_fuzzing.txt)\n"
-       "  -B bitmap.txt - mutate a specific test case, use the out/fuzz_bitmap file\n"
-       "  -C            - crash exploration mode (the peruvian rabbit thing)\n"
-       "  -e ext        - File extension for the temporarily generated test case\n\n"
-
-       PHYTON_SUPPORT
-
-       "For additional tips, please consult %s/README\n\n",
-
-       argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);
+  SAYF(
+      "\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n"
+
+      "Required parameters:\n"
+      "  -i dir        - input directory with test cases\n"
+      "  -o dir        - output directory for fuzzer findings\n\n"
+
+      "Execution control settings:\n"
+      "  -p schedule   - power schedules recompute a seed's performance "
+      "score.\n"
+      "                  <explore (default), fast, coe, lin, quad, or "
+      "exploit>\n"
+      "                  see docs/power_schedules.txt\n"
+      "  -f file       - location read by the fuzzed program (stdin)\n"
+      "  -t msec       - timeout for each run (auto-scaled, 50-%d ms)\n"
+      "  -m megs       - memory limit for child process (%d MB)\n"
+      "  -Q            - use binary-only instrumentation (QEMU mode)\n"
+      "  -U            - use Unicorn-based instrumentation (Unicorn mode)\n\n"
+      "  -L minutes    - use MOpt(imize) mode and set the limit time for "
+      "entering the\n"
+      "                  pacemaker mode (minutes of no new paths, 0 = "
+      "immediately).\n"
+      "                  a recommended value is 10-60. see docs/README.MOpt\n\n"
+
+      "Fuzzing behavior settings:\n"
+      "  -d            - quick & dirty mode (skips deterministic steps)\n"
+      "  -n            - fuzz without instrumentation (dumb mode)\n"
+      "  -x dir        - optional fuzzer dictionary (see README)\n\n"
+
+      "Testing settings:\n"
+      "  -s seed       - use a fixed seed for the RNG\n"
+      "  -V seconds    - fuzz for a maximum total time of seconds then "
+      "terminate\n"
+      "  -E execs      - fuzz for a maximum number of total executions then "
+      "terminate\n\n"
+
+      "Other stuff:\n"
+      "  -T text       - text banner to show on the screen\n"
+      "  -M / -S id    - distributed mode (see parallel_fuzzing.txt)\n"
+      "  -B bitmap.txt - mutate a specific test case, use the out/fuzz_bitmap "
+      "file\n"
+      "  -C            - crash exploration mode (the peruvian rabbit thing)\n"
+      "  -e ext        - File extension for the temporarily generated test "
+      "case\n\n"
+
+      PHYTON_SUPPORT
+
+      "For additional tips, please consult %s/README\n\n",
+
+      argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);
 
   exit(1);
 #undef PHYTON_SUPPORT
@@ -82,65 +91,90 @@ static void usage(u8* argv0) {
 
 #ifndef AFL_LIB
 
-static int stricmp(char const *a, char const *b) {
+static int stricmp(char const* a, char const* b) {
+
   for (;; ++a, ++b) {
+
     int d;
     d = tolower(*a) - tolower(*b);
-    if (d != 0 || !*a)
-      return d;
+    if (d != 0 || !*a) return d;
+
   }
+
 }
 
 /* Main entry point */
 
 int main(int argc, char** argv) {
 
-  s32 opt;
-  u64 prev_queued = 0;
-  u32 sync_interval_cnt = 0, seek_to;
-  u8  *extras_dir = 0;
-  u8  mem_limit_given = 0;
-  u8  exit_1 = !!getenv("AFL_BENCH_JUST_ONE");
+  s32    opt;
+  u64    prev_queued = 0;
+  u32    sync_interval_cnt = 0, seek_to;
+  u8*    extras_dir = 0;
+  u8     mem_limit_given = 0;
+  u8     exit_1 = !!getenv("AFL_BENCH_JUST_ONE");
   char** use_argv;
-  s64 init_seed;
+  s64    init_seed;
 
-  struct timeval tv;
+  struct timeval  tv;
   struct timezone tz;
 
-  SAYF(cCYA "afl-fuzz" VERSION cRST " based on afl by <lcamtuf@google.com> and a big online community\n");
+  SAYF(cCYA
+       "afl-fuzz" VERSION cRST
+       " based on afl by <lcamtuf@google.com> and a big online community\n");
 
   doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;
 
   gettimeofday(&tv, &tz);
   init_seed = tv.tv_sec ^ tv.tv_usec ^ getpid();
 
-  while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:QUe:p:s:V:E:L:")) > 0)
+  while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:QUe:p:s:V:E:L:")) >
+         0)
 
     switch (opt) {
 
       case 's': {
+
         init_seed = strtoul(optarg, 0L, 10);
         fixed_seed = 1;
         break;
+
       }
 
-      case 'p': /* Power schedule */
+      case 'p':                                           /* Power schedule */
 
         if (!stricmp(optarg, "fast")) {
+
           schedule = FAST;
+
         } else if (!stricmp(optarg, "coe")) {
+
           schedule = COE;
+
         } else if (!stricmp(optarg, "exploit")) {
+
           schedule = EXPLOIT;
+
         } else if (!stricmp(optarg, "lin")) {
+
           schedule = LIN;
+
         } else if (!stricmp(optarg, "quad")) {
+
           schedule = QUAD;
-        } else if (!stricmp(optarg, "explore") || !stricmp(optarg, "default") || !stricmp(optarg, "normal") || !stricmp(optarg, "afl")) {
+
+        } else if (!stricmp(optarg, "explore") || !stricmp(optarg, "default") ||
+
+                   !stricmp(optarg, "normal") || !stricmp(optarg, "afl")) {
+
           schedule = EXPLORE;
+
         } else {
+
           FATAL("Unknown -p power schedule");
+
         }
+
         break;
 
       case 'e':
@@ -151,7 +185,7 @@ int main(int argc, char** argv) {
 
         break;
 
-      case 'i': /* input dir */
+      case 'i':                                                /* input dir */
 
         if (in_dir) FATAL("Multiple -i options not supported");
         in_dir = optarg;
@@ -160,115 +194,121 @@ int main(int argc, char** argv) {
 
         break;
 
-      case 'o': /* output dir */
+      case 'o':                                               /* output dir */
 
         if (out_dir) FATAL("Multiple -o options not supported");
         out_dir = optarg;
         break;
 
-      case 'M': { /* master sync ID */
+      case 'M': {                                         /* master sync ID */
 
-          u8* c;
+        u8* c;
 
-          if (sync_id) FATAL("Multiple -S or -M options not supported");
-          sync_id = ck_strdup(optarg);
+        if (sync_id) FATAL("Multiple -S or -M options not supported");
+        sync_id = ck_strdup(optarg);
 
-          if ((c = strchr(sync_id, ':'))) {
+        if ((c = strchr(sync_id, ':'))) {
 
-            *c = 0;
+          *c = 0;
 
-            if (sscanf(c + 1, "%u/%u", &master_id, &master_max) != 2 ||
-                !master_id || !master_max || master_id > master_max ||
-                master_max > 1000000) FATAL("Bogus master ID passed to -M");
+          if (sscanf(c + 1, "%u/%u", &master_id, &master_max) != 2 ||
+              !master_id || !master_max || master_id > master_max ||
+              master_max > 1000000)
+            FATAL("Bogus master ID passed to -M");
 
-          }
+        }
 
-          force_deterministic = 1;
+        force_deterministic = 1;
 
-        }
+      }
 
-        break;
+      break;
 
-      case 'S': 
+      case 'S':
 
         if (sync_id) FATAL("Multiple -S or -M options not supported");
         sync_id = ck_strdup(optarg);
         break;
 
-      case 'f': /* target file */
+      case 'f':                                              /* target file */
 
         if (out_file) FATAL("Multiple -f options not supported");
         out_file = optarg;
         break;
 
-      case 'x': /* dictionary */
+      case 'x':                                               /* dictionary */
 
         if (extras_dir) FATAL("Multiple -x options not supported");
         extras_dir = optarg;
         break;
 
-      case 't': { /* timeout */
+      case 't': {                                                /* timeout */
 
-          u8 suffix = 0;
+        u8 suffix = 0;
 
-          if (timeout_given) FATAL("Multiple -t options not supported");
+        if (timeout_given) FATAL("Multiple -t options not supported");
 
-          if (sscanf(optarg, "%u%c", &exec_tmout, &suffix) < 1 ||
-              optarg[0] == '-') FATAL("Bad syntax used for -t");
+        if (sscanf(optarg, "%u%c", &exec_tmout, &suffix) < 1 ||
+            optarg[0] == '-')
+          FATAL("Bad syntax used for -t");
 
-          if (exec_tmout < 5) FATAL("Dangerously low value of -t");
+        if (exec_tmout < 5) FATAL("Dangerously low value of -t");
 
-          if (suffix == '+') timeout_given = 2; else timeout_given = 1;
+        if (suffix == '+')
+          timeout_given = 2;
+        else
+          timeout_given = 1;
 
-          break;
+        break;
 
       }
 
-      case 'm': { /* mem limit */
+      case 'm': {                                              /* mem limit */
 
-          u8 suffix = 'M';
+        u8 suffix = 'M';
 
-          if (mem_limit_given) FATAL("Multiple -m options not supported");
-          mem_limit_given = 1;
+        if (mem_limit_given) FATAL("Multiple -m options not supported");
+        mem_limit_given = 1;
 
-          if (!strcmp(optarg, "none")) {
+        if (!strcmp(optarg, "none")) {
 
-            mem_limit = 0;
-            break;
+          mem_limit = 0;
+          break;
 
-          }
+        }
 
-          if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
-              optarg[0] == '-') FATAL("Bad syntax used for -m");
+        if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
+            optarg[0] == '-')
+          FATAL("Bad syntax used for -m");
 
-          switch (suffix) {
+        switch (suffix) {
 
-            case 'T': mem_limit *= 1024 * 1024; break;
-            case 'G': mem_limit *= 1024; break;
-            case 'k': mem_limit /= 1024; break;
-            case 'M': break;
+          case 'T': mem_limit *= 1024 * 1024; break;
+          case 'G': mem_limit *= 1024; break;
+          case 'k': mem_limit /= 1024; break;
+          case 'M': break;
 
-            default:  FATAL("Unsupported suffix or bad syntax for -m");
+          default: FATAL("Unsupported suffix or bad syntax for -m");
 
-          }
+        }
 
-          if (mem_limit < 5) FATAL("Dangerously low value of -m");
+        if (mem_limit < 5) FATAL("Dangerously low value of -m");
 
-          if (sizeof(rlim_t) == 4 && mem_limit > 2000)
-            FATAL("Value of -m out of range on 32-bit systems");
+        if (sizeof(rlim_t) == 4 && mem_limit > 2000)
+          FATAL("Value of -m out of range on 32-bit systems");
 
-        }
+      }
 
-        break;
+      break;
 
-      case 'd': /* skip deterministic */
+      case 'd':                                       /* skip deterministic */
 
         if (skip_deterministic) FATAL("Multiple -d options not supported");
         skip_deterministic = 1;
         use_splicing = 1;
         break;
 
-      case 'B': /* load bitmap */
+      case 'B':                                              /* load bitmap */
 
         /* This is a secret undocumented option! It is useful if you find
            an interesting test case during a normal fuzzing process, and want
@@ -287,26 +327,29 @@ int main(int argc, char** argv) {
         read_bitmap(in_bitmap);
         break;
 
-      case 'C': /* crash mode */
+      case 'C':                                               /* crash mode */
 
         if (crash_mode) FATAL("Multiple -C options not supported");
         crash_mode = FAULT_CRASH;
         break;
 
-      case 'n': /* dumb mode */
+      case 'n':                                                /* dumb mode */
 
         if (dumb_mode) FATAL("Multiple -n options not supported");
-        if (getenv("AFL_DUMB_FORKSRV")) dumb_mode = 2; else dumb_mode = 1;
+        if (getenv("AFL_DUMB_FORKSRV"))
+          dumb_mode = 2;
+        else
+          dumb_mode = 1;
 
         break;
 
-      case 'T': /* banner */
+      case 'T':                                                   /* banner */
 
         if (use_banner) FATAL("Multiple -T options not supported");
         use_banner = optarg;
         break;
 
-      case 'Q': /* QEMU mode */
+      case 'Q':                                                /* QEMU mode */
 
         if (qemu_mode) FATAL("Multiple -Q options not supported");
         qemu_mode = 1;
@@ -315,7 +358,7 @@ int main(int argc, char** argv) {
 
         break;
 
-      case 'U': /* Unicorn mode */
+      case 'U':                                             /* Unicorn mode */
 
         if (unicorn_mode) FATAL("Multiple -U options not supported");
         unicorn_mode = 1;
@@ -325,115 +368,132 @@ int main(int argc, char** argv) {
         break;
 
       case 'V': {
-           most_time_key = 1;
-           if (sscanf(optarg, "%llu", &most_time) < 1 || optarg[0] == '-')
-             FATAL("Bad syntax used for -V");
-        }
-        break;
+
+        most_time_key = 1;
+        if (sscanf(optarg, "%llu", &most_time) < 1 || optarg[0] == '-')
+          FATAL("Bad syntax used for -V");
+
+      } break;
 
       case 'E': {
-           most_execs_key = 1;
-           if (sscanf(optarg, "%llu", &most_execs) < 1 || optarg[0] == '-')
-             FATAL("Bad syntax used for -E");
-        }
-        break;
 
-      case 'L': { /* MOpt mode */
+        most_execs_key = 1;
+        if (sscanf(optarg, "%llu", &most_execs) < 1 || optarg[0] == '-')
+          FATAL("Bad syntax used for -E");
 
-              if (limit_time_sig)  FATAL("Multiple -L options not supported");
-              limit_time_sig = 1;
-              havoc_max_mult = HAVOC_MAX_MULT_MOPT;
+      } break;
 
-			if (sscanf(optarg, "%llu", &limit_time_puppet) < 1 ||
-				optarg[0] == '-') FATAL("Bad syntax used for -L");
+      case 'L': {                                              /* MOpt mode */
 
-			u64 limit_time_puppet2 = limit_time_puppet * 60 * 1000;
+        if (limit_time_sig) FATAL("Multiple -L options not supported");
+        limit_time_sig = 1;
+        havoc_max_mult = HAVOC_MAX_MULT_MOPT;
 
-			if (limit_time_puppet2 < limit_time_puppet ) FATAL("limit_time overflow");
-				limit_time_puppet = limit_time_puppet2;
+        if (sscanf(optarg, "%llu", &limit_time_puppet) < 1 || optarg[0] == '-')
+          FATAL("Bad syntax used for -L");
 
-			SAYF("limit_time_puppet %llu\n",limit_time_puppet);
-			swarm_now = 0;
+        u64 limit_time_puppet2 = limit_time_puppet * 60 * 1000;
 
-			if (limit_time_puppet == 0 )
-			    key_puppet = 1;
+        if (limit_time_puppet2 < limit_time_puppet)
+          FATAL("limit_time overflow");
+        limit_time_puppet = limit_time_puppet2;
 
-			int i;
-			int tmp_swarm = 0;
+        SAYF("limit_time_puppet %llu\n", limit_time_puppet);
+        swarm_now = 0;
 
-			if (g_now > g_max) g_now = 0;
-			w_now = (w_init - w_end)*(g_max - g_now) / (g_max)+w_end;
+        if (limit_time_puppet == 0) key_puppet = 1;
 
-			for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) {
-				double total_puppet_temp = 0.0;
-				swarm_fitness[tmp_swarm] = 0.0;
+        int i;
+        int tmp_swarm = 0;
 
-				for (i = 0; i < operator_num; ++i) {
-					stage_finds_puppet[tmp_swarm][i] = 0;
-					probability_now[tmp_swarm][i] = 0.0;
-					x_now[tmp_swarm][i] = ((double)(random() % 7000)*0.0001 + 0.1);
-					total_puppet_temp += x_now[tmp_swarm][i];
-					v_now[tmp_swarm][i] = 0.1;
-					L_best[tmp_swarm][i] = 0.5;
-					G_best[i] = 0.5;
-					eff_best[tmp_swarm][i] = 0.0;
+        if (g_now > g_max) g_now = 0;
+        w_now = (w_init - w_end) * (g_max - g_now) / (g_max) + w_end;
 
-				}
+        for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) {
 
-				for (i = 0; i < operator_num; ++i) {
-					stage_cycles_puppet_v2[tmp_swarm][i] = stage_cycles_puppet[tmp_swarm][i];
-					stage_finds_puppet_v2[tmp_swarm][i] = stage_finds_puppet[tmp_swarm][i];
-					x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / total_puppet_temp;
-				}
+          double total_puppet_temp = 0.0;
+          swarm_fitness[tmp_swarm] = 0.0;
 
-				double x_temp = 0.0;
+          for (i = 0; i < operator_num; ++i) {
 
-				for (i = 0; i < operator_num; ++i) {
-					probability_now[tmp_swarm][i] = 0.0;
-					v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
+            stage_finds_puppet[tmp_swarm][i] = 0;
+            probability_now[tmp_swarm][i] = 0.0;
+            x_now[tmp_swarm][i] = ((double)(random() % 7000) * 0.0001 + 0.1);
+            total_puppet_temp += x_now[tmp_swarm][i];
+            v_now[tmp_swarm][i] = 0.1;
+            L_best[tmp_swarm][i] = 0.5;
+            G_best[i] = 0.5;
+            eff_best[tmp_swarm][i] = 0.0;
 
-					x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
+          }
+
+          for (i = 0; i < operator_num; ++i) {
+
+            stage_cycles_puppet_v2[tmp_swarm][i] =
+                stage_cycles_puppet[tmp_swarm][i];
+            stage_finds_puppet_v2[tmp_swarm][i] =
+                stage_finds_puppet[tmp_swarm][i];
+            x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / total_puppet_temp;
+
+          }
+
+          double x_temp = 0.0;
+
+          for (i = 0; i < operator_num; ++i) {
 
-					if (x_now[tmp_swarm][i] > v_max)
-						x_now[tmp_swarm][i] = v_max;
-					else if (x_now[tmp_swarm][i] < v_min)
-						x_now[tmp_swarm][i] = v_min;
+            probability_now[tmp_swarm][i] = 0.0;
+            v_now[tmp_swarm][i] =
+                w_now * v_now[tmp_swarm][i] +
+                RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) +
+                RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
 
-					x_temp += x_now[tmp_swarm][i];
-				}
+            x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
 
-				for (i = 0; i < operator_num; ++i) {
-					x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
-					if (likely(i != 0))
-						probability_now[tmp_swarm][i] = probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
-					else
-						probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
-				}
-				if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || probability_now[tmp_swarm][operator_num - 1] > 1.01)
-                                    FATAL("ERROR probability");
-			}
+            if (x_now[tmp_swarm][i] > v_max)
+              x_now[tmp_swarm][i] = v_max;
+            else if (x_now[tmp_swarm][i] < v_min)
+              x_now[tmp_swarm][i] = v_min;
+
+            x_temp += x_now[tmp_swarm][i];
+
+          }
+
+          for (i = 0; i < operator_num; ++i) {
+
+            x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
+            if (likely(i != 0))
+              probability_now[tmp_swarm][i] =
+                  probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
+            else
+              probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
+
+          }
 
-			for (i = 0; i < operator_num; ++i) {
-				core_operator_finds_puppet[i] = 0;
-				core_operator_finds_puppet_v2[i] = 0;
-				core_operator_cycles_puppet[i] = 0;
-				core_operator_cycles_puppet_v2[i] = 0;
-				core_operator_cycles_puppet_v3[i] = 0;
-			}
+          if (probability_now[tmp_swarm][operator_num - 1] < 0.99 ||
+              probability_now[tmp_swarm][operator_num - 1] > 1.01)
+            FATAL("ERROR probability");
+
+        }
+
+        for (i = 0; i < operator_num; ++i) {
+
+          core_operator_finds_puppet[i] = 0;
+          core_operator_finds_puppet_v2[i] = 0;
+          core_operator_cycles_puppet[i] = 0;
+          core_operator_cycles_puppet_v2[i] = 0;
+          core_operator_cycles_puppet_v3[i] = 0;
 
         }
-        break;
 
-      default:
+      } break;
 
-        usage(argv[0]);
+      default: usage(argv[0]);
 
     }
 
   if (optind == argc || !in_dir || !out_dir) usage(argv[0]);
 
-  if (fixed_seed)
-    OKF("Running with fixed seed: %u", (u32)init_seed);
+  if (fixed_seed) OKF("Running with fixed seed: %u", (u32)init_seed);
   srandom((u32)init_seed);
   setup_signal_handlers();
   check_asan_opts();
@@ -446,28 +506,39 @@ int main(int argc, char** argv) {
     FATAL("Input and output directories can't be the same");
 
   if ((tmp_dir = getenv("AFL_TMPDIR")) != NULL) {
+
     char tmpfile[strlen(tmp_dir + 16)];
     sprintf(tmpfile, "%s/%s", tmp_dir, ".cur_input");
-    if (access(tmpfile, F_OK) != -1) // there is still a race condition here, but well ...
-      FATAL("TMP_DIR already has an existing temporary input file: %s", tmpfile);
+    if (access(tmpfile, F_OK) !=
+        -1)  // there is still a race condition here, but well ...
+      FATAL("TMP_DIR already has an existing temporary input file: %s",
+            tmpfile);
+
   } else
+
     tmp_dir = out_dir;
 
   if (dumb_mode) {
 
     if (crash_mode) FATAL("-C and -n are mutually exclusive");
-    if (qemu_mode)  FATAL("-Q and -n are mutually exclusive");
+    if (qemu_mode) FATAL("-Q and -n are mutually exclusive");
     if (unicorn_mode) FATAL("-U and -n are mutually exclusive");
 
   }
-  
+
   if (getenv("AFL_NO_UI") && getenv("AFL_FORCE_UI"))
     FATAL("AFL_NO_UI and AFL_FORCE_UI are mutually exclusive");
-  
-  if (strchr(argv[optind], '/') == NULL) WARNF(cLRD "Target binary called without a prefixed path, make sure you are fuzzing the right binary: " cRST "%s", argv[optind]);
 
-  OKF("afl++ is maintained by Marc \"van Hauser\" Heuse, Heiko \"hexcoder\" Eissfeldt and Andrea Fioraldi");
-  OKF("afl++ is open source, get it at https://github.com/vanhauser-thc/AFLplusplus");
+  if (strchr(argv[optind], '/') == NULL)
+    WARNF(cLRD
+          "Target binary called without a prefixed path, make sure you are "
+          "fuzzing the right binary: " cRST "%s",
+          argv[optind]);
+
+  OKF("afl++ is maintained by Marc \"van Hauser\" Heuse, Heiko \"hexcoder\" "
+      "Eissfeldt and Andrea Fioraldi");
+  OKF("afl++ is open source, get it at "
+      "https://github.com/vanhauser-thc/AFLplusplus");
   OKF("Power schedules from github.com/mboehme/aflfast");
   OKF("Python Mutator and llvm_mode whitelisting from github.com/choller/afl");
   OKF("afl-tmin fork server patch from github.com/nccgroup/TriforceAFL");
@@ -475,32 +546,42 @@ int main(int argc, char** argv) {
   ACTF("Getting to work...");
 
   switch (schedule) {
-    case FAST:    OKF ("Using exponential power schedule (FAST)"); break;
-    case COE:     OKF ("Using cut-off exponential power schedule (COE)"); break;
-    case EXPLOIT: OKF ("Using exploitation-based constant power schedule (EXPLOIT)"); break;
-    case LIN:     OKF ("Using linear power schedule (LIN)"); break;
-    case QUAD:    OKF ("Using quadratic power schedule (QUAD)"); break;
-    case EXPLORE: OKF ("Using exploration-based constant power schedule (EXPLORE)"); break;
-    default : FATAL ("Unknown power schedule"); break;
+
+    case FAST: OKF("Using exponential power schedule (FAST)"); break;
+    case COE: OKF("Using cut-off exponential power schedule (COE)"); break;
+    case EXPLOIT:
+      OKF("Using exploitation-based constant power schedule (EXPLOIT)");
+      break;
+    case LIN: OKF("Using linear power schedule (LIN)"); break;
+    case QUAD: OKF("Using quadratic power schedule (QUAD)"); break;
+    case EXPLORE:
+      OKF("Using exploration-based constant power schedule (EXPLORE)");
+      break;
+    default: FATAL("Unknown power schedule"); break;
+
   }
 
-  if (getenv("AFL_NO_FORKSRV"))    no_forkserver    = 1;
-  if (getenv("AFL_NO_CPU_RED"))    no_cpu_meter_red = 1;
-  if (getenv("AFL_NO_ARITH"))      no_arith         = 1;
-  if (getenv("AFL_SHUFFLE_QUEUE")) shuffle_queue    = 1;
-  if (getenv("AFL_FAST_CAL"))      fast_cal         = 1;
+  if (getenv("AFL_NO_FORKSRV")) no_forkserver = 1;
+  if (getenv("AFL_NO_CPU_RED")) no_cpu_meter_red = 1;
+  if (getenv("AFL_NO_ARITH")) no_arith = 1;
+  if (getenv("AFL_SHUFFLE_QUEUE")) shuffle_queue = 1;
+  if (getenv("AFL_FAST_CAL")) fast_cal = 1;
 
   if (getenv("AFL_HANG_TMOUT")) {
+
     hang_tmout = atoi(getenv("AFL_HANG_TMOUT"));
     if (!hang_tmout) FATAL("Invalid value of AFL_HANG_TMOUT");
+
   }
 
   if (dumb_mode == 2 && no_forkserver)
     FATAL("AFL_DUMB_FORKSRV and AFL_NO_FORKSRV are mutually exclusive");
 
   if (getenv("AFL_PRELOAD")) {
+
     setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);
     setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);
+
   }
 
   if (getenv("AFL_LD_PRELOAD"))
@@ -511,31 +592,33 @@ int main(int argc, char** argv) {
   fix_up_banner(argv[optind]);
 
   check_if_tty();
-  if (getenv("AFL_FORCE_UI"))
-    not_on_tty = 0;
+  if (getenv("AFL_FORCE_UI")) not_on_tty = 0;
 
   if (getenv("AFL_CAL_FAST")) {
+
     /* Use less calibration cycles, for slow applications */
     cal_cycles = 3;
     cal_cycles_long = 5;
+
   }
 
-  if (getenv("AFL_DEBUG"))
-    debug = 1;
+  if (getenv("AFL_DEBUG")) debug = 1;
 
   if (getenv("AFL_PYTHON_ONLY")) {
+
     /* This ensures we don't proceed to havoc/splice */
     python_only = 1;
 
     /* Ensure we also skip all deterministic steps */
     skip_deterministic = 1;
+
   }
 
   get_core_count();
 
-#ifdef HAVE_AFFINITY
+#  ifdef HAVE_AFFINITY
   bind_to_free_cpu();
-#endif /* HAVE_AFFINITY */
+#  endif /* HAVE_AFFINITY */
 
   check_crash_handling();
   check_cpu_governor();
@@ -552,13 +635,12 @@ int main(int argc, char** argv) {
 
   setup_dirs_fds();
 
-#ifdef USE_PYTHON
-  if (init_py())
-    FATAL("Failed to initialize Python module");
-#else
+#  ifdef USE_PYTHON
+  if (init_py()) FATAL("Failed to initialize Python module");
+#  else
   if (getenv("AFL_PYTHON_MODULE"))
-     FATAL("Your AFL binary was built without Python support");
-#endif
+    FATAL("Your AFL binary was built without Python support");
+#  endif
 
   setup_cmdline_file(argv + optind);
 
@@ -574,24 +656,33 @@ int main(int argc, char** argv) {
   /* If we don't have a file name chosen yet, use a safe default. */
 
   if (!out_file) {
+
     u32 i = optind + 1;
     while (argv[i]) {
 
       u8* aa_loc = strstr(argv[i], "@@");
 
       if (aa_loc && !out_file) {
+
         if (file_extension) {
+
           out_file = alloc_printf("%s/.cur_input.%s", out_dir, file_extension);
+
         } else {
+
           out_file = alloc_printf("%s/.cur_input", out_dir);
+
         }
+
         detect_file_args(argv + optind + 1, out_file);
-	break;
+        break;
+
       }
 
       ++i;
 
     }
+
   }
 
   if (!out_file) setup_stdio_file();
@@ -621,9 +712,11 @@ int main(int argc, char** argv) {
   /* Woop woop woop */
 
   if (!not_on_tty) {
+
     sleep(4);
     start_time += 4000;
     if (stop_soon) goto stop_fuzzing;
+
   }
 
   // real start time, we reset, so this works correctly with -V
@@ -638,21 +731,25 @@ int main(int argc, char** argv) {
     if (!queue_cur) {
 
       ++queue_cycle;
-      current_entry     = 0;
+      current_entry = 0;
       cur_skipped_paths = 0;
-      queue_cur         = queue;
+      queue_cur = queue;
 
       while (seek_to) {
+
         ++current_entry;
         --seek_to;
         queue_cur = queue_cur->next;
+
       }
 
       show_stats();
 
       if (not_on_tty) {
+
         ACTF("Entering queue cycle %llu.", queue_cycle);
         fflush(stdout);
+
       }
 
       /* If we had a full queue cycle with no new finds, try
@@ -660,9 +757,14 @@ int main(int argc, char** argv) {
 
       if (queued_paths == prev_queued) {
 
-        if (use_splicing) ++cycles_wo_finds; else use_splicing = 1;
+        if (use_splicing)
+          ++cycles_wo_finds;
+        else
+          use_splicing = 1;
 
-      } else cycles_wo_finds = 0;
+      } else
+
+        cycles_wo_finds = 0;
 
       prev_queued = queued_paths;
 
@@ -674,9 +776,8 @@ int main(int argc, char** argv) {
     skipped_fuzz = fuzz_one(use_argv);
 
     if (!stop_soon && sync_id && !skipped_fuzz) {
-      
-      if (!(sync_interval_cnt++ % SYNC_INTERVAL))
-        sync_fuzzers(use_argv);
+
+      if (!(sync_interval_cnt++ % SYNC_INTERVAL)) sync_fuzzers(use_argv);
 
     }
 
@@ -688,18 +789,28 @@ int main(int argc, char** argv) {
     ++current_entry;
 
     if (most_time_key == 1) {
+
       u64 cur_ms_lv = get_cur_time();
-      if (most_time * 1000 < cur_ms_lv  - start_time) {
+      if (most_time * 1000 < cur_ms_lv - start_time) {
+
         most_time_key = 2;
         break;
+
       }
+
     }
+
     if (most_execs_key == 1) {
+
       if (most_execs <= total_execs) {
+
         most_execs_key = 2;
         break;
+
       }
+
     }
+
   }
 
   if (queue_cur) show_stats();
@@ -708,19 +819,20 @@ int main(int argc, char** argv) {
    * ATTENTION - the following 10 lines were copied from a PR to Google's afl
    * repository - and slightly fixed.
    * These lines have nothing to do with the purpose of original PR though.
-   * Looks like when an exit condition was completed (AFL_BENCH_JUST_ONE, 
+   * Looks like when an exit condition was completed (AFL_BENCH_JUST_ONE,
    * AFL_EXIT_WHEN_DONE or AFL_BENCH_UNTIL_CRASH) the child and forkserver
    * where not killed?
    */
-  /* if we stopped programmatically, we kill the forkserver and the current runner. 
-     if we stopped manually, this is done by the signal handler */
-  if (stop_soon == 2){
+  /* if we stopped programmatically, we kill the forkserver and the current
+     runner. if we stopped manually, this is done by the signal handler */
+  if (stop_soon == 2) {
+
     if (child_pid > 0) kill(child_pid, SIGKILL);
     if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL);
-    /* Now that we've killed the forkserver, we wait for it to be able to get rusage stats. */
-    if (waitpid(forksrv_pid, NULL, 0) <= 0) {
-      WARNF("error waitpid\n");
-    }
+    /* Now that we've killed the forkserver, we wait for it to be able to get
+     * rusage stats. */
+    if (waitpid(forksrv_pid, NULL, 0) <= 0) { WARNF("error waitpid\n"); }
+
   }
 
   write_bitmap();
@@ -732,8 +844,7 @@ stop_fuzzing:
   SAYF(CURSOR_SHOW cLRD "\n\n+++ Testing aborted %s +++\n" cRST,
        stop_soon == 2 ? "programmatically" : "by user");
 
-  if (most_time_key == 2)
-    SAYF(cYEL "[!] " cRST "Time limit was reached\n");
+  if (most_time_key == 2) SAYF(cYEL "[!] " cRST "Time limit was reached\n");
   if (most_execs_key == 2)
     SAYF(cYEL "[!] " cRST "Execution limit was reached\n");
 
@@ -742,8 +853,9 @@ stop_fuzzing:
   if (queue_cycle == 1 && get_cur_time() - start_time > 30 * 60 * 1000) {
 
     SAYF("\n" cYEL "[!] " cRST
-           "Stopped during the first cycle, results may be incomplete.\n"
-           "    (For info on resuming, see %s/README)\n", doc_path);
+         "Stopped during the first cycle, results may be incomplete.\n"
+         "    (For info on resuming, see %s/README)\n",
+         doc_path);
 
   }
 
@@ -755,9 +867,9 @@ stop_fuzzing:
 
   alloc_report();
 
-#ifdef USE_PYTHON
+#  ifdef USE_PYTHON
   finalize_py();
-#endif
+#  endif
 
   OKF("We're done here. Have a nice day!\n");
 
@@ -766,3 +878,4 @@ stop_fuzzing:
 }
 
 #endif /* !AFL_LIB */
+
diff --git a/src/afl-gcc.c b/src/afl-gcc.c
index f6ededeb..750f9b72 100644
--- a/src/afl-gcc.c
+++ b/src/afl-gcc.c
@@ -43,19 +43,18 @@
 #include <stdlib.h>
 #include <string.h>
 
-static u8*  as_path;                /* Path to the AFL 'as' wrapper      */
-static u8** cc_params;              /* Parameters passed to the real CC  */
-static u32  cc_par_cnt = 1;         /* Param count, including argv0      */
-static u8   be_quiet,               /* Quiet mode                        */
-            clang_mode;             /* Invoked as afl-clang*?            */
-
+static u8*  as_path;                   /* Path to the AFL 'as' wrapper      */
+static u8** cc_params;                 /* Parameters passed to the real CC  */
+static u32  cc_par_cnt = 1;            /* Param count, including argv0      */
+static u8   be_quiet,                  /* Quiet mode                        */
+    clang_mode;                        /* Invoked as afl-clang*?            */
 
 /* Try to find our "fake" GNU assembler in AFL_PATH or at the location derived
    from argv[0]. If that fails, abort. */
 
 static void find_as(u8* argv0) {
 
-  u8 *afl_path = getenv("AFL_PATH");
+  u8* afl_path = getenv("AFL_PATH");
   u8 *slash, *tmp;
 
   if (afl_path) {
@@ -63,9 +62,11 @@ static void find_as(u8* argv0) {
     tmp = alloc_printf("%s/as", afl_path);
 
     if (!access(tmp, X_OK)) {
+
       as_path = afl_path;
       ck_free(tmp);
       return;
+
     }
 
     ck_free(tmp);
@@ -76,7 +77,7 @@ static void find_as(u8* argv0) {
 
   if (slash) {
 
-    u8 *dir;
+    u8* dir;
 
     *slash = 0;
     dir = ck_strdup(argv0);
@@ -85,9 +86,11 @@ static void find_as(u8* argv0) {
     tmp = alloc_printf("%s/afl-as", dir);
 
     if (!access(tmp, X_OK)) {
+
       as_path = dir;
       ck_free(tmp);
       return;
+
     }
 
     ck_free(tmp);
@@ -96,21 +99,22 @@ static void find_as(u8* argv0) {
   }
 
   if (!access(AFL_PATH "/as", X_OK)) {
+
     as_path = AFL_PATH;
     return;
+
   }
 
   FATAL("Unable to find AFL wrapper binary for 'as'. Please set AFL_PATH");
- 
-}
 
+}
 
 /* Copy argv to cc_params, making the necessary edits. */
 
 static void edit_params(u32 argc, char** argv) {
 
-  u8 fortify_set = 0, asan_set = 0;
-  u8 *name;
+  u8  fortify_set = 0, asan_set = 0;
+  u8* name;
 
 #if defined(__FreeBSD__) && defined(__x86_64__)
   u8 m32_set = 0;
@@ -119,7 +123,10 @@ static void edit_params(u32 argc, char** argv) {
   cc_params = ck_alloc((argc + 128) * sizeof(u8*));
 
   name = strrchr(argv[0], '/');
-  if (!name) name = argv[0]; else name++;
+  if (!name)
+    name = argv[0];
+  else
+    name++;
 
   if (!strncmp(name, "afl-clang", 9)) {
 
@@ -128,11 +135,15 @@ static void edit_params(u32 argc, char** argv) {
     setenv(CLANG_ENV_VAR, "1", 1);
 
     if (!strcmp(name, "afl-clang++")) {
+
       u8* alt_cxx = getenv("AFL_CXX");
       cc_params[0] = alt_cxx ? alt_cxx : (u8*)"clang++";
+
     } else {
+
       u8* alt_cc = getenv("AFL_CC");
       cc_params[0] = alt_cc ? alt_cc : (u8*)"clang";
+
     }
 
   } else {
@@ -145,16 +156,22 @@ static void edit_params(u32 argc, char** argv) {
 
 #ifdef __APPLE__
 
-    if (!strcmp(name, "afl-g++")) cc_params[0] = getenv("AFL_CXX");
-    else if (!strcmp(name, "afl-gcj")) cc_params[0] = getenv("AFL_GCJ");
-    else cc_params[0] = getenv("AFL_CC");
+    if (!strcmp(name, "afl-g++"))
+      cc_params[0] = getenv("AFL_CXX");
+    else if (!strcmp(name, "afl-gcj"))
+      cc_params[0] = getenv("AFL_GCJ");
+    else
+      cc_params[0] = getenv("AFL_CC");
 
     if (!cc_params[0]) {
 
       SAYF("\n" cLRD "[-] " cRST
-           "On Apple systems, 'gcc' is usually just a wrapper for clang. Please use the\n"
-           "    'afl-clang' utility instead of 'afl-gcc'. If you really have GCC installed,\n"
-           "    set AFL_CC or AFL_CXX to specify the correct path to that compiler.\n");
+           "On Apple systems, 'gcc' is usually just a wrapper for clang. "
+           "Please use the\n"
+           "    'afl-clang' utility instead of 'afl-gcc'. If you really have "
+           "GCC installed,\n"
+           "    set AFL_CC or AFL_CXX to specify the correct path to that "
+           "compiler.\n");
 
       FATAL("AFL_CC or AFL_CXX required on MacOS X");
 
@@ -163,14 +180,20 @@ static void edit_params(u32 argc, char** argv) {
 #else
 
     if (!strcmp(name, "afl-g++")) {
+
       u8* alt_cxx = getenv("AFL_CXX");
       cc_params[0] = alt_cxx ? alt_cxx : (u8*)"g++";
+
     } else if (!strcmp(name, "afl-gcj")) {
+
       u8* alt_cc = getenv("AFL_GCJ");
       cc_params[0] = alt_cc ? alt_cc : (u8*)"gcj";
+
     } else {
+
       u8* alt_cc = getenv("AFL_CC");
       cc_params[0] = alt_cc ? alt_cc : (u8*)"gcc";
+
     }
 
 #endif /* __APPLE__ */
@@ -178,13 +201,20 @@ static void edit_params(u32 argc, char** argv) {
   }
 
   while (--argc) {
+
     u8* cur = *(++argv);
 
     if (!strncmp(cur, "-B", 2)) {
 
       if (!be_quiet) WARNF("-B is already set, overriding");
 
-      if (!cur[2] && argc > 1) { argc--; argv++; }
+      if (!cur[2] && argc > 1) {
+
+        argc--;
+        argv++;
+
+      }
+
       continue;
 
     }
@@ -197,8 +227,8 @@ static void edit_params(u32 argc, char** argv) {
     if (!strcmp(cur, "-m32")) m32_set = 1;
 #endif
 
-    if (!strcmp(cur, "-fsanitize=address") ||
-        !strcmp(cur, "-fsanitize=memory")) asan_set = 1;
+    if (!strcmp(cur, "-fsanitize=address") || !strcmp(cur, "-fsanitize=memory"))
+      asan_set = 1;
 
     if (strstr(cur, "FORTIFY_SOURCE")) fortify_set = 1;
 
@@ -209,15 +239,13 @@ static void edit_params(u32 argc, char** argv) {
   cc_params[cc_par_cnt++] = "-B";
   cc_params[cc_par_cnt++] = as_path;
 
-  if (clang_mode)
-    cc_params[cc_par_cnt++] = "-no-integrated-as";
+  if (clang_mode) cc_params[cc_par_cnt++] = "-no-integrated-as";
 
   if (getenv("AFL_HARDEN")) {
 
     cc_params[cc_par_cnt++] = "-fstack-protector-all";
 
-    if (!fortify_set)
-      cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2";
+    if (!fortify_set) cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2";
 
   }
 
@@ -229,8 +257,7 @@ static void edit_params(u32 argc, char** argv) {
 
   } else if (getenv("AFL_USE_ASAN")) {
 
-    if (getenv("AFL_USE_MSAN"))
-      FATAL("ASAN and MSAN are mutually exclusive");
+    if (getenv("AFL_USE_MSAN")) FATAL("ASAN and MSAN are mutually exclusive");
 
     if (getenv("AFL_HARDEN"))
       FATAL("ASAN and AFL_HARDEN are mutually exclusive");
@@ -240,8 +267,7 @@ static void edit_params(u32 argc, char** argv) {
 
   } else if (getenv("AFL_USE_MSAN")) {
 
-    if (getenv("AFL_USE_ASAN"))
-      FATAL("ASAN and MSAN are mutually exclusive");
+    if (getenv("AFL_USE_ASAN")) FATAL("ASAN and MSAN are mutually exclusive");
 
     if (getenv("AFL_HARDEN"))
       FATAL("MSAN and AFL_HARDEN are mutually exclusive");
@@ -249,11 +275,10 @@ static void edit_params(u32 argc, char** argv) {
     cc_params[cc_par_cnt++] = "-U_FORTIFY_SOURCE";
     cc_params[cc_par_cnt++] = "-fsanitize=memory";
 
-
   }
 
 #ifdef USEMMAP
-    cc_params[cc_par_cnt++] = "-lrt";
+  cc_params[cc_par_cnt++] = "-lrt";
 #endif
 
   if (!getenv("AFL_DONT_OPTIMIZE")) {
@@ -264,12 +289,11 @@ static void edit_params(u32 argc, char** argv) {
        works OK. This has nothing to do with us, but let's avoid triggering
        that bug. */
 
-    if (!clang_mode || !m32_set)
-      cc_params[cc_par_cnt++] = "-g";
+    if (!clang_mode || !m32_set) cc_params[cc_par_cnt++] = "-g";
 
 #else
 
-      cc_params[cc_par_cnt++] = "-g";
+    cc_params[cc_par_cnt++] = "-g";
 
 #endif
 
@@ -300,7 +324,6 @@ static void edit_params(u32 argc, char** argv) {
 
 }
 
-
 /* Main entry point */
 
 int main(int argc, char** argv) {
@@ -308,23 +331,33 @@ int main(int argc, char** argv) {
   if (isatty(2) && !getenv("AFL_QUIET")) {
 
     SAYF(cCYA "afl-cc" VERSION cRST " by <lcamtuf@google.com>\n");
-    SAYF(cYEL "[!] " cBRI "NOTE: " cRST "afl-gcc is deprecated, llvm_mode is much faster and has more options\n");
+    SAYF(cYEL "[!] " cBRI "NOTE: " cRST
+              "afl-gcc is deprecated, llvm_mode is much faster and has more "
+              "options\n");
+
+  } else
 
-  } else be_quiet = 1;
+    be_quiet = 1;
 
   if (argc < 2) {
 
-    SAYF("\n"
-         "This is a helper application for afl-fuzz. It serves as a drop-in replacement\n"
-         "for gcc or clang, letting you recompile third-party code with the required\n"
-         "runtime instrumentation. A common use pattern would be one of the following:\n\n"
+    SAYF(
+        "\n"
+        "This is a helper application for afl-fuzz. It serves as a drop-in "
+        "replacement\n"
+        "for gcc or clang, letting you recompile third-party code with the "
+        "required\n"
+        "runtime instrumentation. A common use pattern would be one of the "
+        "following:\n\n"
 
-         "  CC=%s/afl-gcc ./configure\n"
-         "  CXX=%s/afl-g++ ./configure\n\n"
+        "  CC=%s/afl-gcc ./configure\n"
+        "  CXX=%s/afl-g++ ./configure\n\n"
 
-         "You can specify custom next-stage toolchain via AFL_CC, AFL_CXX, and AFL_AS.\n"
-         "Setting AFL_HARDEN enables hardening optimizations in the compiled code.\n\n",
-         BIN_PATH, BIN_PATH);
+        "You can specify custom next-stage toolchain via AFL_CC, AFL_CXX, and "
+        "AFL_AS.\n"
+        "Setting AFL_HARDEN enables hardening optimizations in the compiled "
+        "code.\n\n",
+        BIN_PATH, BIN_PATH);
 
     exit(1);
 
@@ -341,3 +374,4 @@ int main(int argc, char** argv) {
   return 0;
 
 }
+
diff --git a/src/afl-gotcpu.c b/src/afl-gotcpu.c
index fa629eb7..5aa9b35c 100644
--- a/src/afl-gotcpu.c
+++ b/src/afl-gotcpu.c
@@ -31,7 +31,7 @@
 #endif
 
 #ifdef __ANDROID__
-  #include "android-ashmem.h"
+#  include "android-ashmem.h"
 #endif
 #include <stdio.h>
 #include <stdlib.h>
@@ -51,12 +51,11 @@
 #  define HAVE_AFFINITY 1
 #endif /* __linux__ */
 
-
 /* Get unix time in microseconds. */
 
 static u64 get_cur_time_us(void) {
 
-  struct timeval tv;
+  struct timeval  tv;
   struct timezone tz;
 
   gettimeofday(&tv, &tz);
@@ -65,7 +64,6 @@ static u64 get_cur_time_us(void) {
 
 }
 
-
 /* Get CPU usage in microseconds. */
 
 static u64 get_cpu_usage_us(void) {
@@ -79,7 +77,6 @@ static u64 get_cpu_usage_us(void) {
 
 }
 
-
 /* Measure preemption rate. */
 
 static u32 measure_preemption(u32 target_ms) {
@@ -96,14 +93,17 @@ repeat_loop:
 
   v1 = CTEST_BUSY_CYCLES;
 
-  while (v1--) v2++;
+  while (v1--)
+    v2++;
   sched_yield();
 
   en_t = get_cur_time_us();
 
   if (en_t - st_t < target_ms * 1000) {
+
     loop_repeats++;
     goto repeat_loop;
+
   }
 
   /* Let's see what percentage of this time we actually had a chance to
@@ -111,22 +111,20 @@ repeat_loop:
 
   en_c = get_cpu_usage_us();
 
-  real_delta  = (en_t - st_t) / 1000;
+  real_delta = (en_t - st_t) / 1000;
   slice_delta = (en_c - st_c) / 1000;
 
   return real_delta * 100 / slice_delta;
 
 }
 
-
 /* Do the benchmark thing. */
 
 int main(int argc, char** argv) {
 
 #ifdef HAVE_AFFINITY
 
-  u32 cpu_cnt = sysconf(_SC_NPROCESSORS_ONLN),
-      idle_cpus = 0, maybe_cpus = 0, i;
+  u32 cpu_cnt = sysconf(_SC_NPROCESSORS_ONLN), idle_cpus = 0, maybe_cpus = 0, i;
 
   SAYF(cCYA "afl-gotcpu" VERSION cRST " by <lcamtuf@google.com>\n");
 
@@ -142,7 +140,7 @@ int main(int argc, char** argv) {
     if (!fr) {
 
       cpu_set_t c;
-      u32 util_perc;
+      u32       util_perc;
 
       CPU_ZERO(&c);
       CPU_SET(i, &c);
@@ -159,7 +157,7 @@ int main(int argc, char** argv) {
 
       } else if (util_perc < 250) {
 
-        SAYF("    Core #%u: " cYEL "CAUTION " cRST "(%u%%)\n", i, util_perc); 
+        SAYF("    Core #%u: " cYEL "CAUTION " cRST "(%u%%)\n", i, util_perc);
         exit(1);
 
       }
@@ -255,3 +253,4 @@ int main(int argc, char** argv) {
 #endif /* ^HAVE_AFFINITY */
 
 }
+
diff --git a/src/afl-sharedmem.c b/src/afl-sharedmem.c
index ce3b76e6..9c7ac7c3 100644
--- a/src/afl-sharedmem.c
+++ b/src/afl-sharedmem.c
@@ -5,7 +5,7 @@
 #define AFL_MAIN
 
 #ifdef __ANDROID__
-  #include "android-ashmem.h"
+#  include "android-ashmem.h"
 #endif
 #include "config.h"
 #include "types.h"
@@ -32,68 +32,79 @@
 #include <sys/mman.h>
 
 #ifndef USEMMAP
- #include <sys/ipc.h>
- #include <sys/shm.h>
+#  include <sys/ipc.h>
+#  include <sys/shm.h>
 #endif
 
-extern unsigned char*trace_bits;
+extern unsigned char *trace_bits;
 
 #ifdef USEMMAP
 /* ================ Proteas ================ */
-int g_shm_fd = -1;
+int            g_shm_fd = -1;
 unsigned char *g_shm_base = NULL;
-char g_shm_file_path[L_tmpnam];
+char           g_shm_file_path[L_tmpnam];
 /* ========================================= */
 #else
-static s32 shm_id;                    /* ID of the SHM region              */
+static s32 shm_id;                     /* ID of the SHM region              */
 #endif
 
 /* Get rid of shared memory (atexit handler). */
 
 void remove_shm(void) {
+
 #ifdef USEMMAP
   if (g_shm_base != NULL) {
+
     munmap(g_shm_base, MAP_SIZE);
     g_shm_base = NULL;
+
   }
 
   if (g_shm_fd != -1) {
+
     close(g_shm_fd);
     g_shm_fd = -1;
+
   }
+
 #else
   shmctl(shm_id, IPC_RMID, NULL);
 #endif
-}
 
+}
 
 /* Configure shared memory. */
 
 void setup_shm(unsigned char dumb_mode) {
+
 #ifdef USEMMAP
   /* generate random file name for multi instance */
 
-  /* thanks to f*cking glibc we can not use tmpnam securely, it generates a security warning that cannot be suppressed */
+  /* thanks to f*cking glibc we can not use tmpnam securely, it generates a
+   * security warning that cannot be suppressed */
   /* so we do this worse workaround */
   snprintf(g_shm_file_path, L_tmpnam, "/afl_%d_%ld", getpid(), random());
 
   /* create the shared memory segment as if it was a file */
   g_shm_fd = shm_open(g_shm_file_path, O_CREAT | O_RDWR | O_EXCL, 0600);
-  if (g_shm_fd == -1) {
-    PFATAL("shm_open() failed");
-  }
+  if (g_shm_fd == -1) { PFATAL("shm_open() failed"); }
 
   /* configure the size of the shared memory segment */
   if (ftruncate(g_shm_fd, MAP_SIZE)) {
+
     PFATAL("setup_shm(): ftruncate() failed");
+
   }
 
   /* map the shared memory segment to the address space of the process */
-  g_shm_base = mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, g_shm_fd, 0);
+  g_shm_base =
+      mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, g_shm_fd, 0);
   if (g_shm_base == MAP_FAILED) {
+
     close(g_shm_fd);
     g_shm_fd = -1;
     PFATAL("mmap() failed");
+
   }
 
   atexit(remove_shm);
@@ -108,7 +119,7 @@ void setup_shm(unsigned char dumb_mode) {
   trace_bits = g_shm_base;
 
   if (!trace_bits) PFATAL("mmap() failed");
-  
+
 #else
   u8* shm_str;
 
@@ -132,9 +143,10 @@ void setup_shm(unsigned char dumb_mode) {
   ck_free(shm_str);
 
   trace_bits = shmat(shm_id, NULL, 0);
-  
+
   if (!trace_bits) PFATAL("shmat() failed");
 
 #endif
+
 }
 
diff --git a/src/afl-showmap.c b/src/afl-showmap.c
index ee00bf22..ac3d687d 100644
--- a/src/afl-showmap.c
+++ b/src/afl-showmap.c
@@ -24,7 +24,7 @@
 #define AFL_MAIN
 
 #ifdef __ANDROID__
-  #include "android-ashmem.h"
+#  include "android-ashmem.h"
 #endif
 #include "config.h"
 #include "types.h"
@@ -51,61 +51,54 @@
 #include <sys/types.h>
 #include <sys/resource.h>
 
-static s32 child_pid;                 /* PID of the tested program         */
+static s32 child_pid;                  /* PID of the tested program         */
 
-       u8* trace_bits;                /* SHM with instrumentation bitmap   */
+u8* trace_bits;                        /* SHM with instrumentation bitmap   */
 
-static u8 *out_file,                  /* Trace output file                 */
-          *doc_path,                  /* Path to docs                      */
-          *target_path,               /* Path to target binary             */
-          *at_file;                   /* Substitution string for @@        */
+static u8 *out_file,                   /* Trace output file                 */
+    *doc_path,                         /* Path to docs                      */
+    *target_path,                      /* Path to target binary             */
+    *at_file;                          /* Substitution string for @@        */
 
-static u32 exec_tmout;                /* Exec timeout (ms)                 */
+static u32 exec_tmout;                 /* Exec timeout (ms)                 */
 
-static u32 total, highest;            /* tuple content information         */
+static u32 total, highest;             /* tuple content information         */
 
-static u64 mem_limit = MEM_LIMIT;     /* Memory limit (MB)                 */
+static u64 mem_limit = MEM_LIMIT;      /* Memory limit (MB)                 */
 
-static u8  quiet_mode,                /* Hide non-essential messages?      */
-           edges_only,                /* Ignore hit counts?                */
-           raw_instr_output,          /* Do not apply AFL filters          */
-           cmin_mode,                 /* Generate output in afl-cmin mode? */
-           binary_mode,               /* Write output as a binary map      */
-           keep_cores;                /* Allow coredumps?                  */
+static u8 quiet_mode,                  /* Hide non-essential messages?      */
+    edges_only,                        /* Ignore hit counts?                */
+    raw_instr_output,                  /* Do not apply AFL filters          */
+    cmin_mode,                         /* Generate output in afl-cmin mode? */
+    binary_mode,                       /* Write output as a binary map      */
+    keep_cores;                        /* Allow coredumps?                  */
 
-static volatile u8
-           stop_soon,                 /* Ctrl-C pressed?                   */
-           child_timed_out,           /* Child timed out?                  */
-           child_crashed;             /* Child crashed?                    */
+static volatile u8 stop_soon,          /* Ctrl-C pressed?                   */
+    child_timed_out,                   /* Child timed out?                  */
+    child_crashed;                     /* Child crashed?                    */
 
 /* Classify tuple counts. Instead of mapping to individual bits, as in
    afl-fuzz.c, we map to more user-friendly numbers between 1 and 8. */
 
 static const u8 count_class_human[256] = {
 
-  [0]           = 0,
-  [1]           = 1,
-  [2]           = 2,
-  [3]           = 3,
-  [4 ... 7]     = 4,
-  [8 ... 15]    = 5,
-  [16 ... 31]   = 6,
-  [32 ... 127]  = 7,
-  [128 ... 255] = 8
+    [0] = 0,          [1] = 1,        [2] = 2,         [3] = 3,
+    [4 ... 7] = 4,    [8 ... 15] = 5, [16 ... 31] = 6, [32 ... 127] = 7,
+    [128 ... 255] = 8
 
 };
 
 static const u8 count_class_binary[256] = {
 
-  [0]           = 0,
-  [1]           = 1,
-  [2]           = 2,
-  [3]           = 4,
-  [4 ... 7]     = 8,
-  [8 ... 15]    = 16,
-  [16 ... 31]   = 32,
-  [32 ... 127]  = 64,
-  [128 ... 255] = 128
+    [0] = 0,
+    [1] = 1,
+    [2] = 2,
+    [3] = 4,
+    [4 ... 7] = 8,
+    [8 ... 15] = 16,
+    [16 ... 31] = 32,
+    [32 ... 127] = 64,
+    [128 ... 255] = 128
 
 };
 
@@ -116,22 +109,25 @@ static void classify_counts(u8* mem, const u8* map) {
   if (edges_only) {
 
     while (i--) {
+
       if (*mem) *mem = 1;
       mem++;
+
     }
 
   } else if (!raw_instr_output) {
 
     while (i--) {
+
       *mem = map[*mem];
       mem++;
+
     }
 
   }
 
 }
 
-
 /* Write results. */
 
 static u32 write_results(void) {
@@ -139,8 +135,8 @@ static u32 write_results(void) {
   s32 fd;
   u32 i, ret = 0;
 
-  u8  cco = !!getenv("AFL_CMIN_CRASHES_ONLY"),
-      caa = !!getenv("AFL_CMIN_ALLOW_ANY");
+  u8 cco = !!getenv("AFL_CMIN_CRASHES_ONLY"),
+     caa = !!getenv("AFL_CMIN_ALLOW_ANY");
 
   if (!strncmp(out_file, "/dev/", 5)) {
 
@@ -154,7 +150,7 @@ static u32 write_results(void) {
 
   } else {
 
-    unlink(out_file); /* Ignore errors */
+    unlink(out_file);                                      /* Ignore errors */
     fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
     if (fd < 0) PFATAL("Unable to create '%s'", out_file);
 
@@ -164,7 +160,7 @@ static u32 write_results(void) {
 
     for (i = 0; i < MAP_SIZE; i++)
       if (trace_bits[i]) ret++;
-    
+
     ck_write(fd, trace_bits, MAP_SIZE, out_file);
     close(fd);
 
@@ -178,10 +174,9 @@ static u32 write_results(void) {
 
       if (!trace_bits[i]) continue;
       ret++;
-      
+
       total += trace_bits[i];
-      if (highest < trace_bits[i])
-        highest = trace_bits[i];
+      if (highest < trace_bits[i]) highest = trace_bits[i];
 
       if (cmin_mode) {
 
@@ -190,10 +185,12 @@ static u32 write_results(void) {
 
         fprintf(f, "%u%u\n", trace_bits[i], i);
 
-      } else fprintf(f, "%06u:%u\n", i, trace_bits[i]);
+      } else
+
+        fprintf(f, "%06u:%u\n", i, trace_bits[i]);
 
     }
-  
+
     fclose(f);
 
   }
@@ -202,7 +199,6 @@ static u32 write_results(void) {
 
 }
 
-
 /* Handle timeout signal. */
 
 static void handle_timeout(int sig) {
@@ -212,16 +208,14 @@ static void handle_timeout(int sig) {
 
 }
 
-
 /* Execute target application. */
 
 static void run_target(char** argv) {
 
   static struct itimerval it;
-  int status = 0;
+  int                     status = 0;
 
-  if (!quiet_mode)
-    SAYF("-- Program output begins --\n" cRST);
+  if (!quiet_mode) SAYF("-- Program output begins --\n" cRST);
 
   MEM_BARRIER();
 
@@ -238,8 +232,10 @@ static void run_target(char** argv) {
       s32 fd = open("/dev/null", O_RDWR);
 
       if (fd < 0 || dup2(fd, 1) < 0 || dup2(fd, 2) < 0) {
+
         *(u32*)trace_bits = EXEC_FAIL_SIG;
         PFATAL("Descriptor initialization failed");
+
       }
 
       close(fd);
@@ -252,20 +248,22 @@ static void run_target(char** argv) {
 
 #ifdef RLIMIT_AS
 
-      setrlimit(RLIMIT_AS, &r); /* Ignore errors */
+      setrlimit(RLIMIT_AS, &r);                            /* Ignore errors */
 
 #else
 
-      setrlimit(RLIMIT_DATA, &r); /* Ignore errors */
+      setrlimit(RLIMIT_DATA, &r);                          /* Ignore errors */
 
 #endif /* ^RLIMIT_AS */
 
     }
 
-    if (!keep_cores) r.rlim_max = r.rlim_cur = 0;
-    else r.rlim_max = r.rlim_cur = RLIM_INFINITY;
+    if (!keep_cores)
+      r.rlim_max = r.rlim_cur = 0;
+    else
+      r.rlim_max = r.rlim_cur = RLIM_INFINITY;
 
-    setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
+    setrlimit(RLIMIT_CORE, &r);                            /* Ignore errors */
 
     if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0);
 
@@ -304,14 +302,12 @@ static void run_target(char** argv) {
   if (*(u32*)trace_bits == EXEC_FAIL_SIG)
     FATAL("Unable to execute '%s'", argv[0]);
 
-  classify_counts(trace_bits, binary_mode ?
-                  count_class_binary : count_class_human);
+  classify_counts(trace_bits,
+                  binary_mode ? count_class_binary : count_class_human);
 
-  if (!quiet_mode)
-    SAYF(cRST "-- Program output ends --\n");
+  if (!quiet_mode) SAYF(cRST "-- Program output ends --\n");
 
-  if (!child_timed_out && !stop_soon && WIFSIGNALED(status))
-    child_crashed = 1;
+  if (!child_timed_out && !stop_soon && WIFSIGNALED(status)) child_crashed = 1;
 
   if (!quiet_mode) {
 
@@ -320,14 +316,13 @@ static void run_target(char** argv) {
     else if (stop_soon)
       SAYF(cLRD "\n+++ Program aborted by user +++\n" cRST);
     else if (child_crashed)
-      SAYF(cLRD "\n+++ Program killed by signal %u +++\n" cRST, WTERMSIG(status));
+      SAYF(cLRD "\n+++ Program killed by signal %u +++\n" cRST,
+           WTERMSIG(status));
 
   }
 
-
 }
 
-
 /* Handle Ctrl-C and the like. */
 
 static void handle_stop_sig(int sig) {
@@ -338,15 +333,16 @@ static void handle_stop_sig(int sig) {
 
 }
 
-
 /* Do basic preparations - persistent fds, filenames, etc. */
 
 static void set_up_environment(void) {
 
-  setenv("ASAN_OPTIONS", "abort_on_error=1:"
-                         "detect_leaks=0:"
-                         "symbolize=0:"
-                         "allocator_may_return_null=1", 0);
+  setenv("ASAN_OPTIONS",
+         "abort_on_error=1:"
+         "detect_leaks=0:"
+         "symbolize=0:"
+         "allocator_may_return_null=1",
+         0);
 
   setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
                          "symbolize=0:"
@@ -355,21 +351,22 @@ static void set_up_environment(void) {
                          "msan_track_origins=0", 0);
 
   if (getenv("AFL_PRELOAD")) {
+
     setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);
     setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);
+
   }
 
 }
 
-
 /* Setup signal handlers, duh. */
 
 static void setup_signal_handlers(void) {
 
   struct sigaction sa;
 
-  sa.sa_handler   = NULL;
-  sa.sa_flags     = SA_RESTART;
+  sa.sa_handler = NULL;
+  sa.sa_flags = SA_RESTART;
   sa.sa_sigaction = NULL;
 
   sigemptyset(&sa.sa_mask);
@@ -388,7 +385,6 @@ static void setup_signal_handlers(void) {
 
 }
 
-
 /* Show banner. */
 
 static void show_banner(void) {
@@ -403,42 +399,43 @@ static void usage(u8* argv0) {
 
   show_banner();
 
-  SAYF("\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
+  SAYF(
+      "\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
 
-       "Required parameters:\n\n"
+      "Required parameters:\n\n"
 
-       "  -o file       - file to write the trace data to\n\n"
+      "  -o file       - file to write the trace data to\n\n"
 
-       "Execution control settings:\n\n"
+      "Execution control settings:\n\n"
 
-       "  -t msec       - timeout for each run (none)\n"
-       "  -m megs       - memory limit for child process (%d MB)\n"
-       "  -Q            - use binary-only instrumentation (QEMU mode)\n"
-       "  -U            - use Unicorn-based instrumentation (Unicorn mode)\n"
-       "                  (Not necessary, here for consistency with other afl-* tools)\n\n"  
+      "  -t msec       - timeout for each run (none)\n"
+      "  -m megs       - memory limit for child process (%d MB)\n"
+      "  -Q            - use binary-only instrumentation (QEMU mode)\n"
+      "  -U            - use Unicorn-based instrumentation (Unicorn mode)\n"
+      "                  (Not necessary, here for consistency with other afl-* "
+      "tools)\n\n"
 
-       "Other settings:\n\n"
+      "Other settings:\n\n"
 
-       "  -q            - sink program's output and don't show messages\n"
-       "  -e            - show edge coverage only, ignore hit counts\n"
-       "  -r            - show real tuple values instead of AFL filter values\n"
-       "  -c            - allow core dumps\n\n"
+      "  -q            - sink program's output and don't show messages\n"
+      "  -e            - show edge coverage only, ignore hit counts\n"
+      "  -r            - show real tuple values instead of AFL filter values\n"
+      "  -c            - allow core dumps\n\n"
 
-       "This tool displays raw tuple data captured by AFL instrumentation.\n"
-       "For additional help, consult %s/README.\n\n" cRST,
+      "This tool displays raw tuple data captured by AFL instrumentation.\n"
+      "For additional help, consult %s/README.\n\n" cRST,
 
-       argv0, MEM_LIMIT, doc_path);
+      argv0, MEM_LIMIT, doc_path);
 
   exit(1);
 
 }
 
-
 /* Find binary. */
 
 static void find_binary(u8* fname) {
 
-  u8* env_path = 0;
+  u8*         env_path = 0;
   struct stat st;
 
   if (strchr(fname, '/') || !(env_path = getenv("PATH"))) {
@@ -461,7 +458,9 @@ static void find_binary(u8* fname) {
         memcpy(cur_elem, env_path, delim - env_path);
         delim++;
 
-      } else cur_elem = ck_strdup(env_path);
+      } else
+
+        cur_elem = ck_strdup(env_path);
 
       env_path = delim;
 
@@ -473,7 +472,8 @@ static void find_binary(u8* fname) {
       ck_free(cur_elem);
 
       if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
-          (st.st_mode & 0111) && st.st_size >= 4) break;
+          (st.st_mode & 0111) && st.st_size >= 4)
+        break;
 
       ck_free(target_path);
       target_path = 0;
@@ -486,13 +486,12 @@ static void find_binary(u8* fname) {
 
 }
 
-
 /* Fix up argv for QEMU. */
 
 static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
   char** new_argv = ck_alloc(sizeof(char*) * (argc + 4));
-  u8 *tmp, *cp, *rsl, *own_copy;
+  u8 *   tmp, *cp, *rsl, *own_copy;
 
   memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc);
 
@@ -507,8 +506,7 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
     cp = alloc_printf("%s/afl-qemu-trace", tmp);
 
-    if (access(cp, X_OK))
-      FATAL("Unable to find '%s'", tmp);
+    if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp);
 
     target_path = new_argv[0] = cp;
     return new_argv;
@@ -532,7 +530,9 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
     }
 
-  } else ck_free(own_copy);
+  } else
+
+    ck_free(own_copy);
 
   if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) {
 
@@ -556,7 +556,7 @@ int main(int argc, char** argv) {
 
   doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;
 
-  while ((opt = getopt(argc,argv,"+o:m:t:A:eqZQUbcr")) > 0)
+  while ((opt = getopt(argc, argv, "+o:m:t:A:eqZQUbcr")) > 0)
 
     switch (opt) {
 
@@ -568,40 +568,41 @@ int main(int argc, char** argv) {
 
       case 'm': {
 
-          u8 suffix = 'M';
+        u8 suffix = 'M';
 
-          if (mem_limit_given) FATAL("Multiple -m options not supported");
-          mem_limit_given = 1;
+        if (mem_limit_given) FATAL("Multiple -m options not supported");
+        mem_limit_given = 1;
 
-          if (!strcmp(optarg, "none")) {
+        if (!strcmp(optarg, "none")) {
 
-            mem_limit = 0;
-            break;
+          mem_limit = 0;
+          break;
 
-          }
+        }
 
-          if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
-              optarg[0] == '-') FATAL("Bad syntax used for -m");
+        if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
+            optarg[0] == '-')
+          FATAL("Bad syntax used for -m");
 
-          switch (suffix) {
+        switch (suffix) {
 
-            case 'T': mem_limit *= 1024 * 1024; break;
-            case 'G': mem_limit *= 1024; break;
-            case 'k': mem_limit /= 1024; break;
-            case 'M': break;
+          case 'T': mem_limit *= 1024 * 1024; break;
+          case 'G': mem_limit *= 1024; break;
+          case 'k': mem_limit /= 1024; break;
+          case 'M': break;
 
-            default:  FATAL("Unsupported suffix or bad syntax for -m");
+          default: FATAL("Unsupported suffix or bad syntax for -m");
 
-          }
+        }
 
-          if (mem_limit < 5) FATAL("Dangerously low value of -m");
+        if (mem_limit < 5) FATAL("Dangerously low value of -m");
 
-          if (sizeof(rlim_t) == 4 && mem_limit > 2000)
-            FATAL("Value of -m out of range on 32-bit systems");
+        if (sizeof(rlim_t) == 4 && mem_limit > 2000)
+          FATAL("Value of -m out of range on 32-bit systems");
 
-        }
+      }
 
-        break;
+      break;
 
       case 't':
 
@@ -609,6 +610,7 @@ int main(int argc, char** argv) {
         timeout_given = 1;
 
         if (strcmp(optarg, "none")) {
+
           exec_tmout = atoi(optarg);
 
           if (exec_tmout < 20 || optarg[0] == '-')
@@ -636,7 +638,7 @@ int main(int argc, char** argv) {
         /* This is an undocumented option to write data in the syntax expected
            by afl-cmin. Nobody else should have any use for this. */
 
-        cmin_mode  = 1;
+        cmin_mode = 1;
         quiet_mode = 1;
         break;
 
@@ -675,7 +677,7 @@ int main(int argc, char** argv) {
         if (keep_cores) FATAL("Multiple -c options not supported");
         keep_cores = 1;
         break;
-      
+
       case 'r':
 
         if (raw_instr_output) FATAL("Multiple -r options not supported");
@@ -683,9 +685,7 @@ int main(int argc, char** argv) {
         raw_instr_output = 1;
         break;
 
-      default:
-
-        usage(argv[0]);
+      default: usage(argv[0]);
 
     }
 
@@ -699,8 +699,10 @@ int main(int argc, char** argv) {
   find_binary(argv[optind]);
 
   if (!quiet_mode) {
+
     show_banner();
     ACTF("Executing '%s'...\n", target_path);
+
   }
 
   detect_file_args(argv + optind, at_file);
@@ -717,7 +719,8 @@ int main(int argc, char** argv) {
   if (!quiet_mode) {
 
     if (!tcnt) FATAL("No instrumentation detected" cRST);
-    OKF("Captured %u tuples (highest value %u, total values %u) in '%s'." cRST, tcnt, highest, total, out_file);
+    OKF("Captured %u tuples (highest value %u, total values %u) in '%s'." cRST,
+        tcnt, highest, total, out_file);
 
   }
 
diff --git a/src/afl-tmin.c b/src/afl-tmin.c
index 529720ca..9decdb4d 100644
--- a/src/afl-tmin.c
+++ b/src/afl-tmin.c
@@ -22,7 +22,7 @@
 #define AFL_MAIN
 
 #ifdef __ANDROID__
-  #include "android-ashmem.h"
+#  include "android-ashmem.h"
 #endif
 
 #include "config.h"
@@ -51,72 +51,71 @@
 #include <sys/types.h>
 #include <sys/resource.h>
 
-s32 forksrv_pid,                      /* PID of the fork server           */
-    child_pid;                        /* PID of the tested program        */
+s32 forksrv_pid,                        /* PID of the fork server           */
+    child_pid;                          /* PID of the tested program        */
 
-s32 fsrv_ctl_fd,                      /* Fork server control pipe (write) */
-    fsrv_st_fd;                       /* Fork server status pipe (read)   */
+s32 fsrv_ctl_fd,                        /* Fork server control pipe (write) */
+    fsrv_st_fd;                         /* Fork server status pipe (read)   */
 
-       u8 *trace_bits;                /* SHM with instrumentation bitmap   */
-static u8 *mask_bitmap;               /* Mask for trace bits (-B)          */
+u8*        trace_bits;                 /* SHM with instrumentation bitmap   */
+static u8* mask_bitmap;                /* Mask for trace bits (-B)          */
 
-       u8 *in_file,                   /* Minimizer input test case         */
-          *output_file,               /* Minimizer output file             */
-          *out_file,                  /* Targeted program input file       */
-          *target_path,               /* Path to target binary             */
-          *doc_path;                  /* Path to docs                      */
+u8 *in_file,                           /* Minimizer input test case         */
+    *output_file,                      /* Minimizer output file             */
+    *out_file,                         /* Targeted program input file       */
+    *target_path,                      /* Path to target binary             */
+    *doc_path;                         /* Path to docs                      */
 
-       s32 out_fd;                    /* Persistent fd for out_file         */
+s32 out_fd;                           /* Persistent fd for out_file         */
 
-static u8* in_data;                   /* Input data for trimming           */
+static u8* in_data;                    /* Input data for trimming           */
 
-static u32 in_len,                    /* Input data length                 */
-           orig_cksum,                /* Original checksum                 */
-           total_execs,               /* Total number of execs             */
-           missed_hangs,              /* Misses due to hangs               */
-           missed_crashes,            /* Misses due to crashes             */
-           missed_paths;              /* Misses due to exec path diffs     */
-       u32 exec_tmout = EXEC_TIMEOUT; /* Exec timeout (ms)                 */
+static u32 in_len,                     /* Input data length                 */
+    orig_cksum,                        /* Original checksum                 */
+    total_execs,                       /* Total number of execs             */
+    missed_hangs,                      /* Misses due to hangs               */
+    missed_crashes,                    /* Misses due to crashes             */
+    missed_paths;                      /* Misses due to exec path diffs     */
+u32 exec_tmout = EXEC_TIMEOUT;         /* Exec timeout (ms)                 */
 
-       u64 mem_limit = MEM_LIMIT;     /* Memory limit (MB)                 */
+u64 mem_limit = MEM_LIMIT;             /* Memory limit (MB)                 */
 
-       s32 dev_null_fd = -1;          /* FD to /dev/null                   */
+s32 dev_null_fd = -1;                  /* FD to /dev/null                   */
 
-static u8  crash_mode,                /* Crash-centric mode?               */
-           exit_crash,                /* Treat non-zero exit as crash?     */
-           edges_only,                /* Ignore hit counts?                */
-           exact_mode,                /* Require path match for crashes?   */
-           use_stdin = 1;             /* Use stdin for program input?      */
+static u8 crash_mode,                  /* Crash-centric mode?               */
+    exit_crash,                        /* Treat non-zero exit as crash?     */
+    edges_only,                        /* Ignore hit counts?                */
+    exact_mode,                        /* Require path match for crashes?   */
+    use_stdin = 1;                     /* Use stdin for program input?      */
 
-static volatile u8
-           stop_soon;                 /* Ctrl-C pressed?                   */
+static volatile u8 stop_soon;          /* Ctrl-C pressed?                   */
 
 /*
  * forkserver section
  */
 
 /* we only need this to use afl-forkserver */
-FILE *plot_file;
-u8 uses_asan;
-s32 out_fd = -1, out_dir_fd = -1, dev_urandom_fd = -1;
+FILE* plot_file;
+u8    uses_asan;
+s32   out_fd = -1, out_dir_fd = -1, dev_urandom_fd = -1;
 
 /* we import this as we need this information */
 extern u8 child_timed_out;
 
-
-/* Classify tuple counts. This is a slow & naive version, but good enough here. */
+/* Classify tuple counts. This is a slow & naive version, but good enough here.
+ */
 
 static const u8 count_class_lookup[256] = {
 
-  [0]           = 0,
-  [1]           = 1,
-  [2]           = 2,
-  [3]           = 4,
-  [4 ... 7]     = 8,
-  [8 ... 15]    = 16,
-  [16 ... 31]   = 32,
-  [32 ... 127]  = 64,
-  [128 ... 255] = 128
+    [0] = 0,
+    [1] = 1,
+    [2] = 2,
+    [3] = 4,
+    [4 ... 7] = 8,
+    [8 ... 15] = 16,
+    [16 ... 31] = 32,
+    [32 ... 127] = 64,
+    [128 ... 255] = 128
 
 };
 
@@ -127,22 +126,25 @@ static void classify_counts(u8* mem) {
   if (edges_only) {
 
     while (i--) {
+
       if (*mem) *mem = 1;
       mem++;
+
     }
 
   } else {
 
     while (i--) {
+
       *mem = count_class_lookup[*mem];
       mem++;
+
     }
 
   }
 
 }
 
-
 /* Apply mask to classified bitmap (if set). */
 
 static void apply_mask(u32* mem, u32* mask) {
@@ -161,25 +163,26 @@ static void apply_mask(u32* mem, u32* mask) {
 
 }
 
-
 /* See if any bytes are set in the bitmap. */
 
 static inline u8 anything_set(void) {
 
   u32* ptr = (u32*)trace_bits;
-  u32  i   = (MAP_SIZE >> 2);
+  u32  i = (MAP_SIZE >> 2);
 
-  while (i--) if (*(ptr++)) return 1;
+  while (i--)
+    if (*(ptr++)) return 1;
 
   return 0;
 
 }
 
-
 /* Get rid of temp files (atexit handler). */
 
 static void at_exit_handler(void) {
-  if (out_file) unlink(out_file); /* Ignore errors */
+
+  if (out_file) unlink(out_file);                          /* Ignore errors */
+
 }
 
 /* Read initial file. */
@@ -187,17 +190,16 @@ static void at_exit_handler(void) {
 static void read_initial_file(void) {
 
   struct stat st;
-  s32 fd = open(in_file, O_RDONLY);
+  s32         fd = open(in_file, O_RDONLY);
 
   if (fd < 0) PFATAL("Unable to open '%s'", in_file);
 
-  if (fstat(fd, &st) || !st.st_size)
-    FATAL("Zero-sized input file.");
+  if (fstat(fd, &st) || !st.st_size) FATAL("Zero-sized input file.");
 
   if (st.st_size >= TMIN_MAX_FILE)
     FATAL("Input file is too large (%u MB max)", TMIN_MAX_FILE / 1024 / 1024);
 
-  in_len  = st.st_size;
+  in_len = st.st_size;
   in_data = ck_alloc_nozero(in_len);
 
   ck_read(fd, in_data, in_len, in_file);
@@ -208,14 +210,13 @@ static void read_initial_file(void) {
 
 }
 
-
 /* Write output file. */
 
 static s32 write_to_file(u8* path, u8* mem, u32 len) {
 
   s32 ret;
 
-  unlink(path); /* Ignore errors */
+  unlink(path);                                            /* Ignore errors */
 
   ret = open(path, O_RDWR | O_CREAT | O_EXCL, 0600);
 
@@ -239,13 +240,15 @@ static void write_to_testcase(void* mem, u32 len) {
 
   if (!use_stdin) {
 
-    unlink(out_file); /* Ignore errors. */
+    unlink(out_file);                                     /* Ignore errors. */
 
     fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
 
     if (fd < 0) PFATAL("Unable to create '%s'", out_file);
 
-  } else lseek(fd, 0, SEEK_SET);
+  } else
+
+    lseek(fd, 0, SEEK_SET);
 
   ck_write(fd, mem, len, out_file);
 
@@ -254,11 +257,11 @@ static void write_to_testcase(void* mem, u32 len) {
     if (ftruncate(fd, len)) PFATAL("ftruncate() failed");
     lseek(fd, 0, SEEK_SET);
 
-  } else close(fd);
-
-}
+  } else
 
+    close(fd);
 
+}
 
 /* Handle timeout signal. */
 /*
@@ -277,11 +280,13 @@ static void handle_timeout(int sig) {
   }
 
 }
+
 */
 
 /* start the app and it's forkserver */
 /*
 static void init_forkserver(char **argv) {
+
   static struct itimerval it;
   int st_pipe[2], ctl_pipe[2];
   int status = 0;
@@ -348,7 +353,7 @@ static void init_forkserver(char **argv) {
 
   }
 
-  // Close the unneeded endpoints. 
+  // Close the unneeded endpoints.
 
   close(ctl_pipe[0]);
   close(st_pipe[1]);
@@ -378,8 +383,10 @@ static void init_forkserver(char **argv) {
   // Otherwise, try to figure out what went wrong.
 
   if (rlen == 4) {
+
     ACTF("All right - fork server is up.");
     return;
+
   }
 
   if (waitpid(forksrv_pid, &status, 0) <= 0)
@@ -398,6 +405,7 @@ static void init_forkserver(char **argv) {
     SAYF(cLRD "\n+++ Program killed by signal %u +++\n" cRST, WTERMSIG(status));
 
 }
+
 */
 
 /* Execute target application. Returns 0 if the changes are a dud, or
@@ -406,8 +414,8 @@ static void init_forkserver(char **argv) {
 static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
 
   static struct itimerval it;
-  static u32 prev_timed_out = 0;
-  int status = 0;
+  static u32              prev_timed_out = 0;
+  int                     status = 0;
 
   u32 cksum;
 
@@ -440,8 +448,10 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
   /* Configure timeout, wait for child, cancel timeout. */
 
   if (exec_tmout) {
+
     it.it_value.tv_sec = (exec_tmout / 1000);
     it.it_value.tv_usec = (exec_tmout % 1000) * 1000;
+
   }
 
   setitimer(ITIMER_REAL, &it, NULL);
@@ -508,9 +518,9 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
 
   } else
 
-  /* Handle non-crashing inputs appropriately. */
+      /* Handle non-crashing inputs appropriately. */
 
-  if (crash_mode) {
+      if (crash_mode) {
 
     missed_paths++;
     return 0;
@@ -522,24 +532,23 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
   if (first_run) orig_cksum = cksum;
 
   if (orig_cksum == cksum) return 1;
-  
+
   missed_paths++;
   return 0;
 
 }
 
-
 /* Find first power of two greater or equal to val. */
 
 static u32 next_p2(u32 val) {
 
   u32 ret = 1;
-  while (val > ret) ret <<= 1;
+  while (val > ret)
+    ret <<= 1;
   return ret;
 
 }
 
-
 /* Actually minimize! */
 
 static void minimize(char** argv) {
@@ -557,8 +566,8 @@ static void minimize(char** argv) {
    * BLOCK NORMALIZATION *
    ***********************/
 
-  set_len    = next_p2(in_len / TMIN_SET_STEPS);
-  set_pos    = 0;
+  set_len = next_p2(in_len / TMIN_SET_STEPS);
+  set_pos = 0;
 
   if (set_len < TMIN_SET_MIN_SIZE) set_len = TMIN_SET_MIN_SIZE;
 
@@ -575,14 +584,14 @@ static void minimize(char** argv) {
 
       memcpy(tmp_buf, in_data, in_len);
       memset(tmp_buf + set_pos, '0', use_len);
-  
-      u8  res;
+
+      u8 res;
       res = run_target(argv, tmp_buf, in_len, 0);
 
       if (res) {
 
         memset(in_data + set_pos, '0', use_len);
-/*        changed_any = 1; value is not used */
+        /*        changed_any = 1; value is not used */
         alpha_del0 += use_len;
 
       }
@@ -615,11 +624,11 @@ next_pass:
 next_del_blksize:
 
   if (!del_len) del_len = 1;
-  del_pos  = 0;
+  del_pos = 0;
   prev_del = 1;
 
-  SAYF(cGRA "    Block length = %u, remaining size = %u\n" cRST,
-       del_len, in_len);
+  SAYF(cGRA "    Block length = %u, remaining size = %u\n" cRST, del_len,
+       in_len);
 
   while (del_pos < in_len) {
 
@@ -634,8 +643,8 @@ next_del_blksize:
        very end of the buffer (tail_len > 0), and the current block is the same
        as the previous one... skip this step as a no-op. */
 
-    if (!prev_del && tail_len && !memcmp(in_data + del_pos - del_len,
-        in_data + del_pos, del_len)) {
+    if (!prev_del && tail_len &&
+        !memcmp(in_data + del_pos - del_len, in_data + del_pos, del_len)) {
 
       del_pos += del_len;
       continue;
@@ -656,11 +665,13 @@ next_del_blksize:
 
       memcpy(in_data, tmp_buf, del_pos + tail_len);
       prev_del = 1;
-      in_len   = del_pos + tail_len;
+      in_len = del_pos + tail_len;
 
       changed_any = 1;
 
-    } else del_pos += del_len;
+    } else
+
+      del_pos += del_len;
 
   }
 
@@ -674,7 +685,8 @@ next_del_blksize:
   OKF("Block removal complete, %u bytes deleted.", stage_o_len - in_len);
 
   if (!in_len && changed_any)
-    WARNF(cLRD "Down to zero bytes - check the command line and mem limit!" cRST);
+    WARNF(cLRD
+          "Down to zero bytes - check the command line and mem limit!" cRST);
 
   if (cur_pass > 1 && !changed_any) goto finalize_all;
 
@@ -682,15 +694,17 @@ next_del_blksize:
    * ALPHABET MINIMIZATION *
    *************************/
 
-  alpha_size   = 0;
-  alpha_del1   = 0;
+  alpha_size = 0;
+  alpha_del1 = 0;
   syms_removed = 0;
 
   memset(alpha_map, 0, sizeof(alpha_map));
 
   for (i = 0; i < in_len; i++) {
+
     if (!alpha_map[in_data[i]]) alpha_size++;
     alpha_map[in_data[i]]++;
+
   }
 
   ACTF(cBRI "Stage #2: " cRST "Minimizing symbols (%u code point%s)...",
@@ -699,14 +713,14 @@ next_del_blksize:
   for (i = 0; i < 256; i++) {
 
     u32 r;
-    u8 res;
+    u8  res;
 
     if (i == '0' || !alpha_map[i]) continue;
 
     memcpy(tmp_buf, in_data, in_len);
 
     for (r = 0; r < in_len; r++)
-      if (tmp_buf[r] == i) tmp_buf[r] = '0'; 
+      if (tmp_buf[r] == i) tmp_buf[r] = '0';
 
     res = run_target(argv, tmp_buf, in_len, 0);
 
@@ -724,8 +738,8 @@ next_del_blksize:
   alpha_d_total += alpha_del1;
 
   OKF("Symbol minimization finished, %u symbol%s (%u byte%s) replaced.",
-      syms_removed, syms_removed == 1 ? "" : "s",
-      alpha_del1, alpha_del1 == 1 ? "" : "s");
+      syms_removed, syms_removed == 1 ? "" : "s", alpha_del1,
+      alpha_del1 == 1 ? "" : "s");
 
   /**************************
    * CHARACTER MINIMIZATION *
@@ -752,36 +766,34 @@ next_del_blksize:
       alpha_del2++;
       changed_any = 1;
 
-    } else tmp_buf[i] = orig;
+    } else
+
+      tmp_buf[i] = orig;
 
   }
 
   alpha_d_total += alpha_del2;
 
-  OKF("Character minimization done, %u byte%s replaced.",
-      alpha_del2, alpha_del2 == 1 ? "" : "s");
+  OKF("Character minimization done, %u byte%s replaced.", alpha_del2,
+      alpha_del2 == 1 ? "" : "s");
 
   if (changed_any) goto next_pass;
 
 finalize_all:
 
-  SAYF("\n"
-       cGRA "     File size reduced by : " cRST "%0.02f%% (to %u byte%s)\n"
-       cGRA "    Characters simplified : " cRST "%0.02f%%\n"
-       cGRA "     Number of execs done : " cRST "%u\n"
-       cGRA "          Fruitless execs : " cRST "path=%u crash=%u hang=%s%u\n\n",
+  SAYF("\n" cGRA "     File size reduced by : " cRST
+       "%0.02f%% (to %u byte%s)\n" cGRA "    Characters simplified : " cRST
+       "%0.02f%%\n" cGRA "     Number of execs done : " cRST "%u\n" cGRA
+       "          Fruitless execs : " cRST "path=%u crash=%u hang=%s%u\n\n",
        100 - ((double)in_len) * 100 / orig_len, in_len, in_len == 1 ? "" : "s",
-       ((double)(alpha_d_total)) * 100 / (in_len ? in_len : 1),
-       total_execs, missed_paths, missed_crashes, missed_hangs ? cLRD : "",
-       missed_hangs);
+       ((double)(alpha_d_total)) * 100 / (in_len ? in_len : 1), total_execs,
+       missed_paths, missed_crashes, missed_hangs ? cLRD : "", missed_hangs);
 
   if (total_execs > 50 && missed_hangs * 10 > total_execs)
     WARNF(cLRD "Frequent timeouts - results may be skewed." cRST);
 
 }
 
-
-
 /* Handle Ctrl-C and the like. */
 
 static void handle_stop_sig(int sig) {
@@ -792,7 +804,6 @@ static void handle_stop_sig(int sig) {
 
 }
 
-
 /* Do basic preparations - persistent fds, filenames, etc. */
 
 static void set_up_environment(void) {
@@ -823,7 +834,6 @@ static void set_up_environment(void) {
 
   if (out_fd < 0) PFATAL("Unable to create '%s'", out_file);
 
-
   /* Set sane defaults... */
 
   x = getenv("ASAN_OPTIONS");
@@ -843,18 +853,20 @@ static void set_up_environment(void) {
   if (x) {
 
     if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR)))
-      FATAL("Custom MSAN_OPTIONS set without exit_code="
-            STRINGIFY(MSAN_ERROR) " - please fix!");
+      FATAL("Custom MSAN_OPTIONS set without exit_code=" STRINGIFY(
+          MSAN_ERROR) " - please fix!");
 
     if (!strstr(x, "symbolize=0"))
       FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!");
 
   }
 
-  setenv("ASAN_OPTIONS", "abort_on_error=1:"
-                         "detect_leaks=0:"
-                         "symbolize=0:"
-                         "allocator_may_return_null=1", 0);
+  setenv("ASAN_OPTIONS",
+         "abort_on_error=1:"
+         "detect_leaks=0:"
+         "symbolize=0:"
+         "allocator_may_return_null=1",
+         0);
 
   setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
                          "symbolize=0:"
@@ -863,21 +875,22 @@ static void set_up_environment(void) {
                          "msan_track_origins=0", 0);
 
   if (getenv("AFL_PRELOAD")) {
+
     setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);
     setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);
+
   }
 
 }
 
-
 /* Setup signal handlers, duh. */
 
 static void setup_signal_handlers(void) {
 
   struct sigaction sa;
 
-  sa.sa_handler   = NULL;
-  sa.sa_flags     = SA_RESTART;
+  sa.sa_handler = NULL;
+  sa.sa_flags = SA_RESTART;
   sa.sa_sigaction = NULL;
 
   sigemptyset(&sa.sa_mask);
@@ -896,46 +909,46 @@ static void setup_signal_handlers(void) {
 
 }
 
-
 /* Display usage hints. */
 
 static void usage(u8* argv0) {
 
-  SAYF("\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
+  SAYF(
+      "\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
 
-       "Required parameters:\n\n"
+      "Required parameters:\n\n"
 
-       "  -i file       - input test case to be shrunk by the tool\n"
-       "  -o file       - final output location for the minimized data\n\n"
+      "  -i file       - input test case to be shrunk by the tool\n"
+      "  -o file       - final output location for the minimized data\n\n"
 
-       "Execution control settings:\n\n"
+      "Execution control settings:\n\n"
 
-       "  -f file       - input file read by the tested program (stdin)\n"
-       "  -t msec       - timeout for each run (%d ms)\n"
-       "  -m megs       - memory limit for child process (%d MB)\n"
-       "  -Q            - use binary-only instrumentation (QEMU mode)\n"
-       "  -U            - use Unicorn-based instrumentation (Unicorn mode)\n\n"
-       "                  (Not necessary, here for consistency with other afl-* tools)\n\n"
+      "  -f file       - input file read by the tested program (stdin)\n"
+      "  -t msec       - timeout for each run (%d ms)\n"
+      "  -m megs       - memory limit for child process (%d MB)\n"
+      "  -Q            - use binary-only instrumentation (QEMU mode)\n"
+      "  -U            - use Unicorn-based instrumentation (Unicorn mode)\n\n"
+      "                  (Not necessary, here for consistency with other afl-* "
+      "tools)\n\n"
 
-       "Minimization settings:\n\n"
+      "Minimization settings:\n\n"
 
-       "  -e            - solve for edge coverage only, ignore hit counts\n"
-       "  -x            - treat non-zero exit codes as crashes\n\n"
+      "  -e            - solve for edge coverage only, ignore hit counts\n"
+      "  -x            - treat non-zero exit codes as crashes\n\n"
 
-       "For additional tips, please consult %s/README.\n\n",
+      "For additional tips, please consult %s/README.\n\n",
 
-       argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);
+      argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);
 
   exit(1);
 
 }
 
-
 /* Find binary. */
 
 static void find_binary(u8* fname) {
 
-  u8* env_path = 0;
+  u8*         env_path = 0;
   struct stat st;
 
   if (strchr(fname, '/') || !(env_path = getenv("PATH"))) {
@@ -958,7 +971,9 @@ static void find_binary(u8* fname) {
         memcpy(cur_elem, env_path, delim - env_path);
         delim++;
 
-      } else cur_elem = ck_strdup(env_path);
+      } else
+
+        cur_elem = ck_strdup(env_path);
 
       env_path = delim;
 
@@ -970,7 +985,8 @@ static void find_binary(u8* fname) {
       ck_free(cur_elem);
 
       if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
-          (st.st_mode & 0111) && st.st_size >= 4) break;
+          (st.st_mode & 0111) && st.st_size >= 4)
+        break;
 
       ck_free(target_path);
       target_path = 0;
@@ -983,13 +999,12 @@ static void find_binary(u8* fname) {
 
 }
 
-
 /* Fix up argv for QEMU. */
 
 static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
   char** new_argv = ck_alloc(sizeof(char*) * (argc + 4));
-  u8 *tmp, *cp, *rsl, *own_copy;
+  u8 *   tmp, *cp, *rsl, *own_copy;
 
   memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc);
 
@@ -1004,8 +1019,7 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
     cp = alloc_printf("%s/afl-qemu-trace", tmp);
 
-    if (access(cp, X_OK))
-      FATAL("Unable to find '%s'", tmp);
+    if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp);
 
     target_path = new_argv[0] = cp;
     return new_argv;
@@ -1029,7 +1043,9 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
     }
 
-  } else ck_free(own_copy);
+  } else
+
+    ck_free(own_copy);
 
   if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) {
 
@@ -1056,8 +1072,6 @@ static void read_bitmap(u8* fname) {
 
 }
 
-
-
 /* Main entry point */
 
 int main(int argc, char** argv) {
@@ -1070,7 +1084,7 @@ int main(int argc, char** argv) {
 
   SAYF(cCYA "afl-tmin" VERSION cRST " by <lcamtuf@google.com>\n");
 
-  while ((opt = getopt(argc,argv,"+i:o:f:m:t:B:xeQU")) > 0)
+  while ((opt = getopt(argc, argv, "+i:o:f:m:t:B:xeQU")) > 0)
 
     switch (opt) {
 
@@ -1090,7 +1104,7 @@ int main(int argc, char** argv) {
 
         if (out_file) FATAL("Multiple -f options not supported");
         use_stdin = 0;
-        out_file   = optarg;
+        out_file = optarg;
         break;
 
       case 'e':
@@ -1107,40 +1121,41 @@ int main(int argc, char** argv) {
 
       case 'm': {
 
-          u8 suffix = 'M';
+        u8 suffix = 'M';
 
-          if (mem_limit_given) FATAL("Multiple -m options not supported");
-          mem_limit_given = 1;
+        if (mem_limit_given) FATAL("Multiple -m options not supported");
+        mem_limit_given = 1;
 
-          if (!strcmp(optarg, "none")) {
+        if (!strcmp(optarg, "none")) {
 
-            mem_limit = 0;
-            break;
+          mem_limit = 0;
+          break;
 
-          }
+        }
 
-          if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
-              optarg[0] == '-') FATAL("Bad syntax used for -m");
+        if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
+            optarg[0] == '-')
+          FATAL("Bad syntax used for -m");
 
-          switch (suffix) {
+        switch (suffix) {
 
-            case 'T': mem_limit *= 1024 * 1024; break;
-            case 'G': mem_limit *= 1024; break;
-            case 'k': mem_limit /= 1024; break;
-            case 'M': break;
+          case 'T': mem_limit *= 1024 * 1024; break;
+          case 'G': mem_limit *= 1024; break;
+          case 'k': mem_limit /= 1024; break;
+          case 'M': break;
 
-            default:  FATAL("Unsupported suffix or bad syntax for -m");
+          default: FATAL("Unsupported suffix or bad syntax for -m");
 
-          }
+        }
 
-          if (mem_limit < 5) FATAL("Dangerously low value of -m");
+        if (mem_limit < 5) FATAL("Dangerously low value of -m");
 
-          if (sizeof(rlim_t) == 4 && mem_limit > 2000)
-            FATAL("Value of -m out of range on 32-bit systems");
+        if (sizeof(rlim_t) == 4 && mem_limit > 2000)
+          FATAL("Value of -m out of range on 32-bit systems");
 
-        }
+      }
 
-        break;
+      break;
 
       case 't':
 
@@ -1170,7 +1185,7 @@ int main(int argc, char** argv) {
         unicorn_mode = 1;
         break;
 
-      case 'B': /* load bitmap */
+      case 'B':                                              /* load bitmap */
 
         /* This is a secret undocumented option! It is speculated to be useful
            if you have a baseline "boring" input file and another "interesting"
@@ -1190,9 +1205,7 @@ int main(int argc, char** argv) {
         read_bitmap(optarg);
         break;
 
-      default:
-
-        usage(argv[0]);
+      default: usage(argv[0]);
 
     }
 
@@ -1230,15 +1243,16 @@ int main(int argc, char** argv) {
 
   if (!crash_mode) {
 
-     OKF("Program terminates normally, minimizing in " 
-         cCYA "instrumented" cRST " mode.");
+    OKF("Program terminates normally, minimizing in " cCYA "instrumented" cRST
+        " mode.");
 
-     if (!anything_set()) FATAL("No instrumentation detected.");
+    if (!anything_set()) FATAL("No instrumentation detected.");
 
   } else {
 
-     OKF("Program exits with a signal, minimizing in " cMGN "%scrash" cRST
-         " mode.", exact_mode ? "EXACT " : "");
+    OKF("Program exits with a signal, minimizing in " cMGN "%scrash" cRST
+        " mode.",
+        exact_mode ? "EXACT " : "");
 
   }