-rw-r--r--  .clang-format  148
-rwxr-xr-x  .custom-format.py  119
-rw-r--r--  .gitignore  1
-rw-r--r--  Makefile  66
-rw-r--r--  TODO  8
-rw-r--r--  afl-fuzz.c  12660
-rw-r--r--  android-ashmem.h  81
l---------[-rw-r--r--]  config.h  364
-rw-r--r--  debug.h  251
-rw-r--r--  docs/ChangeLog  13
-rw-r--r--  docs/env_variables.txt  3
-rw-r--r--  include/afl-as.h (renamed from afl-as.h)  95
-rw-r--r--  include/afl-fuzz.h  639
-rw-r--r--  include/alloc-inl.h (renamed from alloc-inl.h)  239
-rw-r--r--  include/android-ashmem.h  80
-rw-r--r--  include/common.h (renamed from afl-common.h)  1
-rw-r--r--  include/config.h  370
-rw-r--r--  include/debug.h  285
-rw-r--r--  include/forkserver.h  25
-rw-r--r--  include/hash.h (renamed from hash.h)  25
-rw-r--r--  include/sharedmem.h (renamed from sharedmem.h)  6
-rw-r--r--  include/types.h  96
-rw-r--r--  libdislocator/Makefile  2
-rw-r--r--  libdislocator/libdislocator.so.c  67
-rw-r--r--  libtokencap/Makefile  2
-rw-r--r--  libtokencap/libtokencap.so.c  66
-rw-r--r--  llvm_mode/LLVMInsTrim.so.cc  550
-rw-r--r--  llvm_mode/Makefile  4
-rw-r--r--  llvm_mode/MarkNodes.cc  416
-rw-r--r--  llvm_mode/MarkNodes.h  13
-rw-r--r--  llvm_mode/afl-clang-fast.c  188
-rw-r--r--  llvm_mode/afl-llvm-pass.so.cc  298
-rw-r--r--  llvm_mode/afl-llvm-rt.o.c  59
-rw-r--r--  llvm_mode/compare-transform-pass.so.cc  286
-rw-r--r--  llvm_mode/split-compares-pass.so.cc  338
-rw-r--r--  llvm_mode/split-switches-pass.so.cc  294
-rw-r--r--  qemu_mode/libcompcov/Makefile  2
-rw-r--r--  qemu_mode/libcompcov/compcovtest.cc  32
-rw-r--r--  qemu_mode/libcompcov/libcompcov.so.c  199
-rw-r--r--  qemu_mode/libcompcov/pmparser.h  456
-rw-r--r--  qemu_mode/patches/afl-qemu-common.h  20
-rw-r--r--  qemu_mode/patches/afl-qemu-cpu-inl.h  164
-rw-r--r--  qemu_mode/patches/afl-qemu-cpu-translate-inl.h  84
-rw-r--r--  qemu_mode/patches/afl-qemu-tcg-inl.h  522
-rw-r--r--  qemu_mode/patches/afl-qemu-translate-inl.h  13
-rw-r--r--  src/afl-analyze.c (renamed from afl-analyze.c)  366
-rw-r--r--  src/afl-as.c (renamed from afl-as.c)  164
-rw-r--r--  src/afl-common.c (renamed from afl-common.c)  29
-rw-r--r--  src/afl-forkserver.c  430
-rw-r--r--  src/afl-fuzz-bitmap.c  708
-rw-r--r--  src/afl-fuzz-extras.c  485
-rw-r--r--  src/afl-fuzz-globals.c  257
-rw-r--r--  src/afl-fuzz-init.c  2064
-rw-r--r--  src/afl-fuzz-misc.c  183
-rw-r--r--  src/afl-fuzz-one.c  6024
-rw-r--r--  src/afl-fuzz-python.c  402
-rw-r--r--  src/afl-fuzz-queue.c  454
-rw-r--r--  src/afl-fuzz-run.c  801
-rw-r--r--  src/afl-fuzz-stats.c  802
-rw-r--r--  src/afl-fuzz.c  881
-rw-r--r--  src/afl-gcc.c (renamed from afl-gcc.c)  128
-rw-r--r--  src/afl-gotcpu.c (renamed from afl-gotcpu.c)  27
-rw-r--r--  src/afl-sharedmem.c (renamed from sharedmem.c)  42
-rw-r--r--  src/afl-showmap.c (renamed from afl-showmap.c)  265
-rw-r--r--  src/afl-tmin.c (renamed from afl-tmin.c)  436
-rw-r--r--  test-instr.c  7
l---------[-rw-r--r--]  types.h  92
-rw-r--r--  unicorn_mode/patches/afl-unicorn-common.h  20
-rw-r--r--  unicorn_mode/patches/afl-unicorn-cpu-inl.h  80
-rw-r--r--  unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h  31
-rw-r--r--  unicorn_mode/patches/afl-unicorn-tcg-op-inl.h  33
-rw-r--r--  unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h  43
72 files changed, 18779 insertions, 16125 deletions
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 00000000..ef4cb190
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,148 @@
+---
+Language:        Cpp
+# BasedOnStyle:  Google
+AccessModifierOffset: -1
+AlignAfterOpenBracket: Align
+AlignConsecutiveAssignments: false
+AlignConsecutiveDeclarations: true
+AlignEscapedNewlines: Left
+AlignOperands:   true
+AlignTrailingComments: true
+AllowAllParametersOfDeclarationOnNextLine: true
+AllowShortBlocksOnASingleLine: true
+AllowShortCaseLabelsOnASingleLine: true
+AllowShortFunctionsOnASingleLine: false
+AllowShortIfStatementsOnASingleLine: true
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: true
+AlwaysBreakTemplateDeclarations: Yes
+BinPackArguments: true
+BinPackParameters: true
+BraceWrapping:   
+  AfterClass:      false
+  AfterControlStatement: false
+  AfterEnum:       false
+  AfterFunction:   false
+  AfterNamespace:  false
+  AfterObjCDeclaration: false
+  AfterStruct:     false
+  AfterUnion:      false
+  AfterExternBlock: false
+  BeforeCatch:     false
+  BeforeElse:      false
+  IndentBraces:    false
+  SplitEmptyFunction: true
+  SplitEmptyRecord: true
+  SplitEmptyNamespace: true
+BreakBeforeBinaryOperators: None
+BreakBeforeBraces: Attach
+BreakBeforeInheritanceComma: false
+BreakInheritanceList: BeforeColon
+BreakBeforeTernaryOperators: true
+BreakConstructorInitializersBeforeComma: false
+BreakConstructorInitializers: BeforeColon
+BreakAfterJavaFieldAnnotations: false
+BreakStringLiterals: true
+ColumnLimit:     80
+CommentPragmas:  '^ IWYU pragma:'
+CompactNamespaces: false
+ConstructorInitializerAllOnOneLineOrOnePerLine: true
+ConstructorInitializerIndentWidth: 4
+ContinuationIndentWidth: 4
+Cpp11BracedListStyle: true
+DerivePointerAlignment: true
+DisableFormat:   false
+ExperimentalAutoDetectBinPacking: false
+FixNamespaceComments: true
+ForEachMacros:   
+  - foreach
+  - Q_FOREACH
+  - BOOST_FOREACH
+IncludeBlocks:   Preserve
+IncludeCategories: 
+  - Regex:           '^<ext/.*\.h>'
+    Priority:        2
+  - Regex:           '^<.*\.h>'
+    Priority:        1
+  - Regex:           '^<.*'
+    Priority:        2
+  - Regex:           '.*'
+    Priority:        3
+IncludeIsMainRegex: '([-_](test|unittest))?$'
+IndentCaseLabels: true
+IndentPPDirectives: None
+IndentWidth:     2
+IndentWrappedFunctionNames: false
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
+KeepEmptyLinesAtTheStartOfBlocks: false
+MacroBlockBegin: ''
+MacroBlockEnd:   ''
+MaxEmptyLinesToKeep: 1
+NamespaceIndentation: None
+ObjCBinPackProtocolList: Never
+ObjCBlockIndentWidth: 2
+ObjCSpaceAfterProperty: false
+ObjCSpaceBeforeProtocolList: true
+PenaltyBreakAssignment: 2
+PenaltyBreakBeforeFirstCallParameter: 1
+PenaltyBreakComment: 300
+PenaltyBreakFirstLessLess: 120
+PenaltyBreakString: 1000
+PenaltyBreakTemplateDeclaration: 10
+PenaltyExcessCharacter: 1000000
+PenaltyReturnTypeOnItsOwnLine: 200
+PointerAlignment: Right
+RawStringFormats: 
+  - Language:        Cpp
+    Delimiters:      
+      - cc
+      - CC
+      - cpp
+      - Cpp
+      - CPP
+      - 'c++'
+      - 'C++'
+    CanonicalDelimiter: ''
+    BasedOnStyle:    google
+  - Language:        TextProto
+    Delimiters:      
+      - pb
+      - PB
+      - proto
+      - PROTO
+    EnclosingFunctions: 
+      - EqualsProto
+      - EquivToProto
+      - PARSE_PARTIAL_TEXT_PROTO
+      - PARSE_TEST_PROTO
+      - PARSE_TEXT_PROTO
+      - ParseTextOrDie
+      - ParseTextProtoOrDie
+    CanonicalDelimiter: ''
+    BasedOnStyle:    google
+ReflowComments:  true
+SortIncludes:    false
+SortUsingDeclarations: true
+SpaceAfterCStyleCast: false
+SpaceAfterTemplateKeyword: true
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeCpp11BracedList: false
+SpaceBeforeCtorInitializerColon: true
+SpaceBeforeInheritanceColon: true
+SpaceBeforeParens: ControlStatements
+SpaceBeforeRangeBasedForLoopColon: true
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 2
+SpacesInAngles:  false
+SpacesInContainerLiterals: true
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+Standard:        Auto
+TabWidth:        8
+UseTab:          Never
+...
+
diff --git a/.custom-format.py b/.custom-format.py
new file mode 100755
index 00000000..32b8f7c9
--- /dev/null
+++ b/.custom-format.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python3
+
+import subprocess
+import sys
+import os
+import re
+
+# string_re = re.compile('(\\"(\\\\.|[^"\\\\])*\\")') # future use
+
+with open(".clang-format") as f:
+    fmt = f.read()
+
+CLANG_FORMAT_BIN = os.getenv("CLANG_FORMAT_BIN")
+if CLANG_FORMAT_BIN is None:
+    p = subprocess.Popen(["clang-format", "--version"], stdout=subprocess.PIPE)
+    o, _ = p.communicate()
+    o = str(o, "utf-8")
+    o = o[len("clang-format version "):].strip()
+    o = o[:o.find(".")]
+    o = int(o)
+    if o < 7:
+        if subprocess.call(['which', 'clang-format-7'], stdout=subprocess.PIPE) == 0:
+            CLANG_FORMAT_BIN = 'clang-format-7'
+        elif subprocess.call(['which', 'clang-format-8'], stdout=subprocess.PIPE) == 0:
+            CLANG_FORMAT_BIN = 'clang-format-8'
+        elif subprocess.call(['which', 'clang-format-9'], stdout=subprocess.PIPE) == 0:
+            CLANG_FORMAT_BIN = 'clang-format-9'
+        elif subprocess.call(['which', 'clang-format-10'], stdout=subprocess.PIPE) == 0:
+            CLANG_FORMAT_BIN = 'clang-format-10'
+        else:
+            print ("clang-format 7 or above is needed. Aborted.")
+            exit(1)
+    else:
+        CLANG_FORMAT_BIN = 'clang-format'
+            
+COLUMN_LIMIT = 80
+for line in fmt.split("\n"):
+    line = line.split(":")
+    if line[0].strip() == "ColumnLimit":
+        COLUMN_LIMIT = int(line[1].strip())
+
+
+def custom_format(filename):
+    p = subprocess.Popen([CLANG_FORMAT_BIN, filename], stdout=subprocess.PIPE)
+    src, _ = p.communicate()
+    src = str(src, "utf-8")
+
+    macro_indent = 0
+    in_define = False
+    last_line = None
+    out = ""
+    
+    for line in src.split("\n"):
+        if line.startswith("#"):
+            i = macro_indent
+            if line.startswith("#end") and macro_indent > 0:
+                macro_indent -= 1
+                i -= 1
+            elif line.startswith("#el") and macro_indent > 0:
+                i -= 1
+            elif line.startswith("#if") and not (line.startswith("#ifndef") and (line.endswith("_H") or line.endswith("H_"))):
+                macro_indent += 1
+            elif line.startswith("#define"):
+                in_define = True
+            r = "#" + (i * "  ") + line[1:]
+            if i != 0 and line.endswith("\\"):
+                r = r[:-1]
+                while r[-1].isspace() and len(r) != (len(line)-1):
+                    r = r[:-1]
+                r += "\\"
+            if len(r) <= COLUMN_LIMIT:
+                line = r
+        
+        elif "/*" in line and not line.strip().startswith("/*") and line.endswith("*/") and len(line) < (COLUMN_LIMIT-2):
+            cmt_start = line.rfind("/*")
+            line = line[:cmt_start] + " " * (COLUMN_LIMIT-2 - len(line)) + line[cmt_start:]
+
+        define_padding = 0
+        if last_line is not None and in_define and last_line.endswith("\\"):
+            last_line = last_line[:-1]
+            define_padding = max(0, len(last_line[last_line.rfind("\n")+1:]))
+
+        if last_line is not None and last_line.strip().endswith("{") and line.strip() != "":
+            line = (" " * define_padding + "\\" if in_define else "") + "\n" + line
+        elif last_line is not None and last_line.strip().startswith("}") and line.strip() != "":
+            line = (" " * define_padding + "\\" if in_define else "") + "\n" + line
+        elif line.strip().startswith("}") and last_line is not None and last_line.strip() != "":
+            line = (" " * define_padding + "\\" if in_define else "") + "\n" + line
+
+        if not line.endswith("\\"):
+            in_define = False
+
+        out += line + "\n"
+        last_line = line
+
+    return (out)
+
+args = sys.argv[1:]
+if len(args) == 0:
+    print ("Usage: ./.custom-format.py [-i] <filename>")
+    print ()
+    print (" The -i option, if specified, lets the script modify the source")
+    print (" files in place. By default the results are written to stdout.")
+    print()
+    exit(1)
+
+in_place = False
+if args[0] == "-i":
+    in_place = True
+    args = args[1:]
+
+for filename in args:
+    code = custom_format(filename)
+    if in_place:
+        with open(filename, "w") as f:
+            f.write(code)
+    else:
+        print(code)
+
diff --git a/.gitignore b/.gitignore
index 2ee40f62..e4d2346e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,5 @@
 *.o
 *.so
-.gitignore
 afl-analyze
 afl-as
 afl-clang
diff --git a/Makefile b/Makefile
index e6e3af85..edf3d99b 100644
--- a/Makefile
+++ b/Makefile
@@ -17,7 +17,7 @@
 #TEST_MMAP=1
 
 PROGNAME    = afl
-VERSION     = $(shell grep '^\#define VERSION ' config.h | cut -d '"' -f2)
+VERSION     = $(shell grep '^\#define VERSION ' include/config.h | cut -d '"' -f2)
 
 PREFIX     ?= /usr/local
 BIN_PATH    = $(PREFIX)/bin
@@ -31,9 +31,11 @@ PROGS       = afl-gcc afl-fuzz afl-showmap afl-tmin afl-gotcpu afl-analyze
 SH_PROGS    = afl-plot afl-cmin afl-whatsup afl-system-config
 
 CFLAGS     ?= -O3 -funroll-loops
-CFLAGS     += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign \
+CFLAGS     += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign -I include/ \
 	      -DAFL_PATH=\"$(HELPER_PATH)\" -DDOC_PATH=\"$(DOC_PATH)\" \
-	      -DBIN_PATH=\"$(BIN_PATH)\"
+	      -DBIN_PATH=\"$(BIN_PATH)\" -Wno-unused-function
+
+AFL_FUZZ_FILES = $(wildcard src/afl-fuzz*.c)
 
 PYTHON_INCLUDE	?= /usr/include/python2.7
 
@@ -47,7 +49,7 @@ else
   TEST_CC   = afl-clang
 endif
 
-COMM_HDR    = alloc-inl.h config.h debug.h types.h
+COMM_HDR    = include/alloc-inl.h include/config.h include/debug.h include/types.h
 
 
 ifeq "$(shell echo '\#include <Python.h>@int main() {return 0; }' | tr @ '\n' | $(CC) -x c - -o .test -I$(PYTHON_INCLUDE) -lpython2.7 2>/dev/null && echo 1 || echo 0 )" "1"
@@ -123,34 +125,54 @@ endif
 ready:
 	@echo "[+] Everything seems to be working, ready to compile."
 
-afl-gcc: afl-gcc.c $(COMM_HDR) | test_x86
-	$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
+afl-gcc: src/afl-gcc.c $(COMM_HDR) | test_x86
+	$(CC) $(CFLAGS) src/$@.c -o $@ $(LDFLAGS)
 	set -e; for i in afl-g++ afl-clang afl-clang++; do ln -sf afl-gcc $$i; done
 
-afl-as: afl-as.c afl-as.h $(COMM_HDR) | test_x86
-	$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
+afl-as: src/afl-as.c include/afl-as.h $(COMM_HDR) | test_x86
+	$(CC) $(CFLAGS) src/$@.c -o $@ $(LDFLAGS)
 	ln -sf afl-as as
 
-afl-common.o : afl-common.c
-	$(CC) $(CFLAGS) -c afl-common.c
+afl-common.o : src/afl-common.c include/common.h
+	$(CC) $(CFLAGS) -c src/afl-common.c
+
+afl-forkserver.o : src/afl-forkserver.c include/forkserver.h
+	$(CC) $(CFLAGS) -c src/afl-forkserver.c
+
+afl-sharedmem.o : src/afl-sharedmem.c include/sharedmem.h
+	$(CC) $(CFLAGS) -c src/afl-sharedmem.c
+
+afl-fuzz: include/afl-fuzz.h $(AFL_FUZZ_FILES) afl-common.o afl-sharedmem.o afl-forkserver.o $(COMM_HDR) | test_x86
+	$(CC) $(CFLAGS) $(AFL_FUZZ_FILES) afl-common.o afl-sharedmem.o afl-forkserver.o -o $@ $(LDFLAGS) $(PYFLAGS)
 
-sharedmem.o : sharedmem.c
-	$(CC) $(CFLAGS) -c sharedmem.c
+afl-showmap: src/afl-showmap.c afl-common.o afl-sharedmem.o $(COMM_HDR) | test_x86
+	$(CC) $(CFLAGS) src/$@.c afl-common.o afl-sharedmem.o -o $@ $(LDFLAGS)
 
-afl-fuzz: afl-fuzz.c afl-common.o sharedmem.o $(COMM_HDR) | test_x86
-	$(CC) $(CFLAGS) $@.c afl-common.o sharedmem.o -o $@ $(LDFLAGS) $(PYFLAGS)
+afl-tmin: src/afl-tmin.c afl-common.o afl-sharedmem.o afl-forkserver.o $(COMM_HDR) | test_x86
+	$(CC) $(CFLAGS) src/$@.c afl-common.o afl-sharedmem.o afl-forkserver.o -o $@ $(LDFLAGS)
 
-afl-showmap: afl-showmap.c afl-common.o sharedmem.o $(COMM_HDR) | test_x86
-	$(CC) $(CFLAGS) $@.c afl-common.o sharedmem.o -o $@ $(LDFLAGS)
+afl-analyze: src/afl-analyze.c afl-common.o afl-sharedmem.o $(COMM_HDR) | test_x86
+	$(CC) $(CFLAGS) src/$@.c afl-common.o afl-sharedmem.o -o $@ $(LDFLAGS)
 
-afl-tmin: afl-tmin.c afl-common.o sharedmem.o $(COMM_HDR) | test_x86
-	$(CC) $(CFLAGS) $@.c afl-common.o sharedmem.o -o $@ $(LDFLAGS)
+afl-gotcpu: src/afl-gotcpu.c $(COMM_HDR) | test_x86
+	$(CC) $(CFLAGS) src/$@.c -o $@ $(LDFLAGS)
 
-afl-analyze: afl-analyze.c afl-common.o sharedmem.o $(COMM_HDR) | test_x86
-	$(CC) $(CFLAGS) $@.c afl-common.o sharedmem.o -o $@ $(LDFLAGS)
 
-afl-gotcpu: afl-gotcpu.c $(COMM_HDR) | test_x86
-	$(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS)
+code-format:
+	./.custom-format.py -i src/*.c
+	./.custom-format.py -i include/*.h
+	./.custom-format.py -i libdislocator/*.c 
+	./.custom-format.py -i libtokencap/*.c 
+	./.custom-format.py -i llvm_mode/*.c
+	./.custom-format.py -i llvm_mode/*.h
+	./.custom-format.py -i llvm_mode/*.cc
+	./.custom-format.py -i qemu_mode/patches/*.h
+	./.custom-format.py -i qemu_mode/libcompcov/*.c
+	./.custom-format.py -i qemu_mode/libcompcov/*.cc
+	./.custom-format.py -i qemu_mode/libcompcov/*.h
+	./.custom-format.py -i unicorn_mode/patches/*.h
+	./.custom-format.py -i *.h
+	./.custom-format.py -i *.c
 
 
 ifndef AFL_NO_X86
diff --git a/TODO b/TODO
index cb95f899..c2cf10a5 100644
--- a/TODO
+++ b/TODO
@@ -1,8 +1,9 @@
 Roadmap 2.53d:
 ==============
- - indent all the code: .clang-format
 
- - update docs/sister_projects.txt
+all:
+ - indent all the code: .clang-format?
+   (vh: tried, but the variable definitions look very ugly then, what to do?)
 
 afl-fuzz:
  - put mutator, scheduler, forkserver and input channels in individual files
@@ -18,8 +19,9 @@ gcc_plugin:
  - neverZero
 
 qemu_mode:
+ - update to 4.x
  - deferred mode with AFL_DEFERRED_QEMU=0xaddress
-   @andrea - dont we have that already with AFL_ENTRYPOINT?
+   (vh: @andrea - don't we have that already with AFL_ENTRYPOINT?)
 
 unit testing and/or a large testcase campaign
 
diff --git a/afl-fuzz.c b/afl-fuzz.c
deleted file mode 100644
index 0e252bea..00000000
--- a/afl-fuzz.c
+++ /dev/null
@@ -1,12660 +0,0 @@
-/*
-   american fuzzy lop - fuzzer code
-   --------------------------------
-
-   Written and maintained by Michal Zalewski <lcamtuf@google.com>
-
-   Forkserver design by Jann Horn <jannhorn@googlemail.com>
-
-   Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at:
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-   This is the real deal: the program takes an instrumented binary and
-   attempts a variety of basic fuzzing tricks, paying close attention to
-   how they affect the execution path.
-
- */
-
-#define AFL_MAIN
-#define MESSAGES_TO_STDOUT
-
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#endif
-#define _FILE_OFFSET_BITS 64
-
-#ifdef __ANDROID__
-  #include "android-ashmem.h"
-#endif
-#include "config.h"
-#include "types.h"
-#include "debug.h"
-#include "alloc-inl.h"
-#include "hash.h"
-#include "sharedmem.h"
-#include "afl-common.h"
-
-#include <stdio.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <string.h>
-#include <time.h>
-#include <errno.h>
-#include <signal.h>
-#include <dirent.h>
-#include <ctype.h>
-#include <fcntl.h>
-#include <termios.h>
-#include <dlfcn.h>
-#include <sched.h>
-
-#include <sys/wait.h>
-#include <sys/time.h>
-#include <sys/shm.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/resource.h>
-#include <sys/mman.h>
-#include <sys/ioctl.h>
-#include <sys/file.h>
-
-#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)
-#  include <sys/sysctl.h>
-#  define HAVE_ARC4RANDOM 1
-#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */
-
-/* For systems that have sched_setaffinity; right now just Linux, but one
-   can hope... */
-
-#ifdef __linux__
-#  define HAVE_AFFINITY 1
-#endif /* __linux__ */
-
-/* A toggle to export some variables when building as a library. Not very
-   useful for the general public. */
-
-#ifdef AFL_LIB
-#  define EXP_ST
-#else
-#  define EXP_ST static
-#endif /* ^AFL_LIB */
-
-/* MOpt:
-   Lots of globals, but mostly for the status UI and other things where it
-   really makes no sense to haul them around as function parameters. */
-EXP_ST u64 limit_time_puppet = 0;
-u64 orig_hit_cnt_puppet = 0;
-u64 last_limit_time_start = 0;
-u64 tmp_pilot_time = 0;
-u64 total_pacemaker_time = 0;
-u64 total_puppet_find = 0;
-u64 temp_puppet_find = 0;
-u64 most_time_key = 0;
-u64 most_time = 0;
-u64 most_execs_key = 0;
-u64 most_execs = 0;
-u64 old_hit_count = 0;
-int SPLICE_CYCLES_puppet;
-int limit_time_sig = 0;
-int key_puppet = 0;
-int key_module = 0;
-double w_init = 0.9;
-double w_end = 0.3;
-double w_now;
-int g_now = 0;
-int g_max = 5000;
-#define operator_num 16
-#define swarm_num 5
-#define period_core  500000
-u64 tmp_core_time = 0;
-int swarm_now = 0 ;
-double x_now[swarm_num][operator_num],
-       L_best[swarm_num][operator_num],
-       eff_best[swarm_num][operator_num],
-       G_best[operator_num],
-       v_now[swarm_num][operator_num],
-       probability_now[swarm_num][operator_num],
-       swarm_fitness[swarm_num];
-
- static u64 stage_finds_puppet[swarm_num][operator_num],           /* Patterns found per fuzz stage    */
-            stage_finds_puppet_v2[swarm_num][operator_num],
-            stage_cycles_puppet_v2[swarm_num][operator_num],
-            stage_cycles_puppet_v3[swarm_num][operator_num],
-            stage_cycles_puppet[swarm_num][operator_num],
-            operator_finds_puppet[operator_num],
-            core_operator_finds_puppet[operator_num],
-            core_operator_finds_puppet_v2[operator_num],
-            core_operator_cycles_puppet[operator_num],
-            core_operator_cycles_puppet_v2[operator_num],
-            core_operator_cycles_puppet_v3[operator_num];          /* Execs per fuzz stage             */
-
-#define RAND_C (rand()%1000*0.001)
-#define v_max 1
-#define v_min 0.05
-#define limit_time_bound 1.1
-#define SPLICE_CYCLES_puppet_up 25
-#define SPLICE_CYCLES_puppet_low 5
-#define STAGE_RANDOMBYTE 12
-#define STAGE_DELETEBYTE 13
-#define STAGE_Clone75 14
-#define STAGE_OverWrite75 15
-#define period_pilot 50000
-double period_pilot_tmp = 5000.0;
-int key_lv = 0;
-
-EXP_ST u8 *in_dir,                    /* Input directory with test cases  */
-          *out_file,                  /* File to fuzz, if any             */
-          *out_dir,                   /* Working & output directory       */
-          *tmp_dir       ,            /* Temporary directory for input    */
-          *sync_dir,                  /* Synchronization directory        */
-          *sync_id,                   /* Fuzzer ID                        */
-          *power_name,                /* Power schedule name              */
-          *use_banner,                /* Display banner                   */
-          *in_bitmap,                 /* Input bitmap                     */
-          *doc_path,                  /* Path to documentation dir        */
-          *target_path,               /* Path to target binary            */
-          *file_extension,            /* File extension                   */
-          *orig_cmdline;              /* Original command line            */
-
-EXP_ST u32 exec_tmout = EXEC_TIMEOUT; /* Configurable exec timeout (ms)   */
-static u32 hang_tmout = EXEC_TIMEOUT; /* Timeout used for hang det (ms)   */
-
-EXP_ST u64 mem_limit  = MEM_LIMIT;    /* Memory cap for child (MB)        */
-
-EXP_ST u8  cal_cycles = CAL_CYCLES;   /* Calibration cycles defaults      */
-EXP_ST u8  cal_cycles_long = CAL_CYCLES_LONG;
-EXP_ST u8  debug,                     /* Debug mode                       */
-           python_only;               /* Python-only mode                 */
-
-static u32 stats_update_freq = 1;     /* Stats update frequency (execs)   */
-
-enum {
-  /* 00 */ EXPLORE,                   /* AFL default, Exploration-based constant schedule */
-  /* 01 */ FAST,                      /* Exponential schedule             */
-  /* 02 */ COE,                       /* Cut-Off Exponential schedule     */
-  /* 03 */ LIN,                       /* Linear schedule                  */
-  /* 04 */ QUAD,                      /* Quadratic schedule               */
-  /* 05 */ EXPLOIT                    /* AFL's exploitation-based const.  */
-};
-
-char *power_names[] = {
-  "explore",
-  "fast",
-  "coe",
-  "lin",
-  "quad",
-  "exploit"
-};
-
-static u8 schedule = EXPLORE;         /* Power schedule (default: EXPLORE)*/
-static u8 havoc_max_mult = HAVOC_MAX_MULT;
-
-EXP_ST u8  skip_deterministic,        /* Skip deterministic stages?       */
-           force_deterministic,       /* Force deterministic stages?      */
-           use_splicing,              /* Recombine input files?           */
-           dumb_mode,                 /* Run in non-instrumented mode?    */
-           score_changed,             /* Scoring for favorites changed?   */
-           kill_signal,               /* Signal that killed the child     */
-           resuming_fuzz,             /* Resuming an older fuzzing job?   */
-           timeout_given,             /* Specific timeout given?          */
-           not_on_tty,                /* stdout is not a tty              */
-           term_too_small,            /* terminal dimensions too small    */
-           uses_asan,                 /* Target uses ASAN?                */
-           no_forkserver,             /* Disable forkserver?              */
-           crash_mode,                /* Crash mode! Yeah!                */
-           in_place_resume,           /* Attempt in-place resume?         */
-           auto_changed,              /* Auto-generated tokens changed?   */
-           no_cpu_meter_red,          /* Feng shui on the status screen   */
-           no_arith,                  /* Skip most arithmetic ops         */
-           shuffle_queue,             /* Shuffle input queue?             */
-           bitmap_changed = 1,        /* Time to update bitmap?           */
-           qemu_mode,                 /* Running in QEMU mode?            */
-           unicorn_mode,              /* Running in Unicorn mode?         */
-           skip_requested,            /* Skip request, via SIGUSR1        */
-           run_over10m,               /* Run time over 10 minutes?        */
-           persistent_mode,           /* Running in persistent mode?      */
-           deferred_mode,             /* Deferred forkserver mode?        */
-           fixed_seed,                /* do not reseed                    */
-           fast_cal;                  /* Try to calibrate faster?         */
-
-static s32 out_fd,                    /* Persistent fd for out_file       */
-#ifndef HAVE_ARC4RANDOM
-           dev_urandom_fd = -1,       /* Persistent fd for /dev/urandom   */
-#endif
-           dev_null_fd = -1,          /* Persistent fd for /dev/null      */
-           fsrv_ctl_fd,               /* Fork server control pipe (write) */
-           fsrv_st_fd;                /* Fork server status pipe (read)   */
-
-static s32 forksrv_pid,               /* PID of the fork server           */
-           child_pid = -1,            /* PID of the fuzzed program        */
-           out_dir_fd = -1;           /* FD of the lock file              */
-
-       u8* trace_bits;                /* SHM with instrumentation bitmap  */
-
-EXP_ST u8  virgin_bits[MAP_SIZE],     /* Regions yet untouched by fuzzing */
-           virgin_tmout[MAP_SIZE],    /* Bits we haven't seen in tmouts   */
-           virgin_crash[MAP_SIZE];    /* Bits we haven't seen in crashes  */
-
-static u8  var_bytes[MAP_SIZE];       /* Bytes that appear to be variable */
-
-static volatile u8 stop_soon,         /* Ctrl-C pressed?                  */
-                   clear_screen = 1,  /* Window resized?                  */
-                   child_timed_out;   /* Traced process timed out?        */
-
-EXP_ST u32 queued_paths,              /* Total number of queued testcases */
-           queued_variable,           /* Testcases with variable behavior */
-           queued_at_start,           /* Total number of initial inputs   */
-           queued_discovered,         /* Items discovered during this run */
-           queued_imported,           /* Items imported via -S            */
-           queued_favored,            /* Paths deemed favorable           */
-           queued_with_cov,           /* Paths with new coverage bytes    */
-           pending_not_fuzzed,        /* Queued but not done yet          */
-           pending_favored,           /* Pending favored paths            */
-           cur_skipped_paths,         /* Abandoned inputs in cur cycle    */
-           cur_depth,                 /* Current path depth               */
-           max_depth,                 /* Max path depth                   */
-           useless_at_start,          /* Number of useless starting paths */
-           var_byte_count,            /* Bitmap bytes with var behavior   */
-           current_entry,             /* Current queue entry ID           */
-           havoc_div = 1;             /* Cycle count divisor for havoc    */
-
-EXP_ST u64 total_crashes,             /* Total number of crashes          */
-           unique_crashes,            /* Crashes with unique signatures   */
-           total_tmouts,              /* Total number of timeouts         */
-           unique_tmouts,             /* Timeouts with unique signatures  */
-           unique_hangs,              /* Hangs with unique signatures     */
-           total_execs,               /* Total execve() calls             */
-           start_time,                /* Unix start time (ms)             */
-           last_path_time,            /* Time for most recent path (ms)   */
-           last_crash_time,           /* Time for most recent crash (ms)  */
-           last_hang_time,            /* Time for most recent hang (ms)   */
-           last_crash_execs,          /* Exec counter at last crash       */
-           queue_cycle,               /* Queue round counter              */
-           cycles_wo_finds,           /* Cycles without any new paths     */
-           trim_execs,                /* Execs done to trim input files   */
-           bytes_trim_in,             /* Bytes coming into the trimmer    */
-           bytes_trim_out,            /* Bytes coming out of the trimmer  */
-           blocks_eff_total,          /* Blocks subject to effector maps  */
-           blocks_eff_select;         /* Blocks selected as fuzzable      */
-
-static u32 subseq_tmouts;             /* Number of timeouts in a row      */
-
-static u8 *stage_name = "init",       /* Name of the current fuzz stage   */
-          *stage_short,               /* Short stage name                 */
-          *syncing_party;             /* Currently syncing with...        */
-
-static s32 stage_cur, stage_max;      /* Stage progression                */
-static s32 splicing_with = -1;        /* Splicing with which test case?   */
-
-static u32 master_id, master_max;     /* Master instance job splitting    */
-
-static u32 syncing_case;              /* Syncing with case #...           */
-
-static s32 stage_cur_byte,            /* Byte offset of current stage op  */
-           stage_cur_val;             /* Value used for stage op          */
-
-static u8  stage_val_type;            /* Value type (STAGE_VAL_*)         */
-
-static u64 stage_finds[32],           /* Patterns found per fuzz stage    */
-           stage_cycles[32];          /* Execs per fuzz stage             */
-
-#ifndef HAVE_ARC4RANDOM
-static u32 rand_cnt;                  /* Random number counter            */
-#endif
-
-static u64 total_cal_us,              /* Total calibration time (us)      */
-           total_cal_cycles;          /* Total calibration cycles         */
-
-static u64 total_bitmap_size,         /* Total bit count for all bitmaps  */
-           total_bitmap_entries;      /* Number of bitmaps counted        */
-
-static s32 cpu_core_count;            /* CPU core count                   */
-
-#ifdef HAVE_AFFINITY
-
-static s32 cpu_aff = -1;       	      /* Selected CPU core                */
-
-#endif /* HAVE_AFFINITY */
-
-static FILE* plot_file;               /* Gnuplot output file              */
-
-struct queue_entry {
-
-  u8* fname;                          /* File name for the test case      */
-  u32 len;                            /* Input length                     */
-
-  u8  cal_failed,                     /* Calibration failed?              */
-      trim_done,                      /* Trimmed?                         */
-      was_fuzzed,                     /* historical, but needed for MOpt  */
-      passed_det,                     /* Deterministic stages passed?     */
-      has_new_cov,                    /* Triggers new coverage?           */
-      var_behavior,                   /* Variable behavior?               */
-      favored,                        /* Currently favored?               */
-      fs_redundant;                   /* Marked as redundant in the fs?   */
-
-  u32 bitmap_size,                    /* Number of bits set in bitmap     */
-      fuzz_level,                     /* Number of fuzzing iterations     */
-      exec_cksum;                     /* Checksum of the execution trace  */
-
-  u64 exec_us,                        /* Execution time (us)              */
-      handicap,                       /* Number of queue cycles behind    */
-      n_fuzz,                         /* Number of fuzz runs; does not overflow */
-      depth;                          /* Path depth                       */
-
-  u8* trace_mini;                     /* Trace bytes, if kept             */
-  u32 tc_ref;                         /* Trace bytes ref count            */
-
-  struct queue_entry *next,           /* Next element, if any             */
-                     *next_100;       /* 100 elements ahead               */
-
-};
-
-static struct queue_entry *queue,     /* Fuzzing queue (linked list)      */
-                          *queue_cur, /* Current offset within the queue  */
-                          *queue_top, /* Top of the list                  */
-                          *q_prev100; /* Previous 100 marker              */
-
-static struct queue_entry*
-  top_rated[MAP_SIZE];                /* Top entries for bitmap bytes     */
-
-struct extra_data {
-  u8* data;                           /* Dictionary token data            */
-  u32 len;                            /* Dictionary token length          */
-  u32 hit_cnt;                        /* Use count in the corpus          */
-};
-
-static struct extra_data* extras;     /* Extra tokens to fuzz with        */
-static u32 extras_cnt;                /* Total number of tokens read      */
-
-static struct extra_data* a_extras;   /* Automatically selected extras    */
-static u32 a_extras_cnt;              /* Total number of tokens available */
-
-static u8* (*post_handler)(u8* buf, u32* len);
-
-/* hooks for the custom mutator function */
-static size_t (*custom_mutator)(u8 *data, size_t size, u8* mutated_out, size_t max_size, unsigned int seed);
-static size_t (*pre_save_handler)(u8 *data, size_t size, u8 **new_data);
-
-
-/* Interesting values, as per config.h */
-
-static s8  interesting_8[]  = { INTERESTING_8 };
-static s16 interesting_16[] = { INTERESTING_8, INTERESTING_16 };
-static s32 interesting_32[] = { INTERESTING_8, INTERESTING_16, INTERESTING_32 };
-
-/* Fuzzing stages */
-
-enum {
-  /* 00 */ STAGE_FLIP1,
-  /* 01 */ STAGE_FLIP2,
-  /* 02 */ STAGE_FLIP4,
-  /* 03 */ STAGE_FLIP8,
-  /* 04 */ STAGE_FLIP16,
-  /* 05 */ STAGE_FLIP32,
-  /* 06 */ STAGE_ARITH8,
-  /* 07 */ STAGE_ARITH16,
-  /* 08 */ STAGE_ARITH32,
-  /* 09 */ STAGE_INTEREST8,
-  /* 10 */ STAGE_INTEREST16,
-  /* 11 */ STAGE_INTEREST32,
-  /* 12 */ STAGE_EXTRAS_UO,
-  /* 13 */ STAGE_EXTRAS_UI,
-  /* 14 */ STAGE_EXTRAS_AO,
-  /* 15 */ STAGE_HAVOC,
-  /* 16 */ STAGE_SPLICE,
-  /* 17 */ STAGE_PYTHON,
-  /* 18 */ STAGE_CUSTOM_MUTATOR
-};
-
-/* Stage value types */
-
-enum {
-  /* 00 */ STAGE_VAL_NONE,
-  /* 01 */ STAGE_VAL_LE,
-  /* 02 */ STAGE_VAL_BE
-};
-
-/* Execution status fault codes */
-
-enum {
-  /* 00 */ FAULT_NONE,
-  /* 01 */ FAULT_TMOUT,
-  /* 02 */ FAULT_CRASH,
-  /* 03 */ FAULT_ERROR,
-  /* 04 */ FAULT_NOINST,
-  /* 05 */ FAULT_NOBITS
-};
-
-
-static inline u32 UR(u32 limit);
-
-/* Python stuff */
-#ifdef USE_PYTHON
-#include <Python.h>
-
-static PyObject *py_module;
-
-enum {
-  /* 00 */ PY_FUNC_INIT,
-  /* 01 */ PY_FUNC_FUZZ,
-  /* 02 */ PY_FUNC_INIT_TRIM,
-  /* 03 */ PY_FUNC_POST_TRIM,
-  /* 04 */ PY_FUNC_TRIM,
-  PY_FUNC_COUNT
-};
-
-static PyObject *py_functions[PY_FUNC_COUNT];
-
-static int init_py() {
-  Py_Initialize();
-  u8* module_name = getenv("AFL_PYTHON_MODULE");
-  u8 py_notrim = 0;
-
-  if (module_name) {
-    PyObject* py_name = PyString_FromString(module_name);
-
-    py_module = PyImport_Import(py_name);
-    Py_DECREF(py_name);
-
-    if (py_module != NULL) {
-      py_functions[PY_FUNC_INIT] = PyObject_GetAttrString(py_module, "init");
-      py_functions[PY_FUNC_FUZZ] = PyObject_GetAttrString(py_module, "fuzz");
-      py_functions[PY_FUNC_INIT_TRIM] = PyObject_GetAttrString(py_module, "init_trim");
-      py_functions[PY_FUNC_POST_TRIM] = PyObject_GetAttrString(py_module, "post_trim");
-      py_functions[PY_FUNC_TRIM] = PyObject_GetAttrString(py_module, "trim");
-
-      for (u8 py_idx = 0; py_idx < PY_FUNC_COUNT; ++py_idx) {
-        if (!py_functions[py_idx] || !PyCallable_Check(py_functions[py_idx])) {
-          if (py_idx >= PY_FUNC_INIT_TRIM && py_idx <= PY_FUNC_TRIM) {
-            // Implementing the trim API is optional for now
-            if (PyErr_Occurred())
-              PyErr_Print();
-            py_notrim = 1;
-          } else {
-            if (PyErr_Occurred())
-              PyErr_Print();
-            fprintf(stderr, "Cannot find/call function with index %d in external Python module.\n", py_idx);
-            return 1;
-          }
-        }
-
-      }
-
-      if (py_notrim) {
-        py_functions[PY_FUNC_INIT_TRIM] = NULL;
-        py_functions[PY_FUNC_POST_TRIM] = NULL;
-        py_functions[PY_FUNC_TRIM] = NULL;
-        WARNF("Python module does not implement trim API, standard trimming will be used.");
-      }
-
-      PyObject *py_args, *py_value;
-
-      /* Provide the init function a seed for the Python RNG */
-      py_args = PyTuple_New(1);
-      py_value = PyInt_FromLong(UR(0xFFFFFFFF));
-      if (!py_value) {
-        Py_DECREF(py_args);
-        fprintf(stderr, "Cannot convert argument\n");
-        return 1;
-      }
-
-      PyTuple_SetItem(py_args, 0, py_value);
-
-      py_value = PyObject_CallObject(py_functions[PY_FUNC_INIT], py_args);
-
-      Py_DECREF(py_args);
-
-      if (py_value == NULL) {
-        PyErr_Print();
-        fprintf(stderr,"Call failed\n");
-        return 1;
-      }
-    } else {
-      PyErr_Print();
-      fprintf(stderr, "Failed to load \"%s\"\n", module_name);
-      return 1;
-    }
-  }
-
-  return 0;
-}
-
-static void finalize_py() {
-  if (py_module != NULL) {
-    u32 i;
-    for (i = 0; i < PY_FUNC_COUNT; ++i)
-      Py_XDECREF(py_functions[i]);
-
-    Py_DECREF(py_module);
-  }
-
-  Py_Finalize();
-}
-
-static void fuzz_py(char* buf, size_t buflen, char* add_buf, size_t add_buflen, char** ret, size_t* retlen) {
-  PyObject *py_args, *py_value;
-
-  if (py_module != NULL) {
-    py_args = PyTuple_New(2);
-    py_value = PyByteArray_FromStringAndSize(buf, buflen);
-    if (!py_value) {
-      Py_DECREF(py_args);
-      fprintf(stderr, "Cannot convert argument\n");
-      return;
-    }
-
-    PyTuple_SetItem(py_args, 0, py_value);
-
-    py_value = PyByteArray_FromStringAndSize(add_buf, add_buflen);
-    if (!py_value) {
-      Py_DECREF(py_args);
-      fprintf(stderr, "Cannot convert argument\n");
-      return;
-    }
-
-    PyTuple_SetItem(py_args, 1, py_value);
-
-    py_value = PyObject_CallObject(py_functions[PY_FUNC_FUZZ], py_args);
-
-    Py_DECREF(py_args);
-
-    if (py_value != NULL) {
-      *retlen = PyByteArray_Size(py_value);
-      *ret = malloc(*retlen);
-      memcpy(*ret, PyByteArray_AsString(py_value), *retlen);
-      Py_DECREF(py_value);
-    } else {
-      PyErr_Print();
-      fprintf(stderr,"Call failed\n");
-      return;
-    }
-  }
-}
-static u32 init_trim_py(char* buf, size_t buflen) {
-  PyObject *py_args, *py_value;
-
-  py_args = PyTuple_New(1);
-  py_value = PyByteArray_FromStringAndSize(buf, buflen);
-  if (!py_value) {
-    Py_DECREF(py_args);
-    FATAL("Failed to convert arguments");
-  }
-
-  PyTuple_SetItem(py_args, 0, py_value);
-
-  py_value = PyObject_CallObject(py_functions[PY_FUNC_INIT_TRIM], py_args);
-  Py_DECREF(py_args);
-
-  if (py_value != NULL) {
-    u32 retcnt = PyInt_AsLong(py_value);
-    Py_DECREF(py_value);
-    return retcnt;
-  } else {
-    PyErr_Print();
-    FATAL("Call failed");
-  }
-}
-static u32 post_trim_py(char success) {
-  PyObject *py_args, *py_value;
-
-  py_args = PyTuple_New(1);
-
-  py_value = PyBool_FromLong(success);
-  if (!py_value) {
-    Py_DECREF(py_args);
-    FATAL("Failed to convert arguments");
-  }
-
-  PyTuple_SetItem(py_args, 0, py_value);
-
-  py_value = PyObject_CallObject(py_functions[PY_FUNC_POST_TRIM], py_args);
-  Py_DECREF(py_args);
-
-  if (py_value != NULL) {
-    u32 retcnt = PyInt_AsLong(py_value);
-    Py_DECREF(py_value);
-    return retcnt;
-  } else {
-    PyErr_Print();
-    FATAL("Call failed");
-  }
-}
-
-static void trim_py(char** ret, size_t* retlen) {
-  PyObject *py_args, *py_value;
-
-  py_args = PyTuple_New(0);
-  py_value = PyObject_CallObject(py_functions[PY_FUNC_TRIM], py_args);
-  Py_DECREF(py_args);
-
-  if (py_value != NULL) {
-    *retlen = PyByteArray_Size(py_value);
-    *ret = malloc(*retlen);
-    memcpy(*ret, PyByteArray_AsString(py_value), *retlen);
-    Py_DECREF(py_value);
-  } else {
-    PyErr_Print();
-    FATAL("Call failed");
-  }
-}
-
-#endif /* USE_PYTHON */
-
-
-int select_algorithm(void) {
-
-  int i_puppet, j_puppet;
-
-  double sele = ((double)(UR(10000))*0.0001);
-  j_puppet = 0;
-  for (i_puppet = 0; i_puppet < operator_num; ++i_puppet) {
-      if (unlikely(i_puppet == 0)) {
-          if (sele < probability_now[swarm_now][i_puppet])
-            break;
-      } else {
-          if (sele < probability_now[swarm_now][i_puppet]) {
-              j_puppet =1;
-              break;
-          }
-      }
-  }
-  if (j_puppet ==1 && sele < probability_now[swarm_now][i_puppet-1])
-    FATAL("error select_algorithm");
-  return i_puppet;
-}
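
select_algorithm() above is a roulette-wheel pick: probability_now[swarm_now][] holds cumulative probabilities, so the first slot whose cumulative value exceeds a uniform draw wins with a chance proportional to its own weight. A minimal self-contained sketch of the same pattern, with made-up weights (illustration only, not AFL code):

  #include <stdio.h>
  #include <stdlib.h>

  /* Roulette-wheel selection over a cumulative probability table. */
  static int roulette_pick(const double* cum, int n) {
    double r = (double)(rand() % 10000) * 0.0001;   /* uniform in [0, 1) */
    for (int i = 0; i < n; ++i)
      if (r < cum[i]) return i;
    return n - 1;                            /* guard against fp rounding */
  }

  int main(void) {
    /* Hypothetical cumulative table for 4 operators with weights
       0.1, 0.2, 0.3 and 0.4. */
    double cum[4] = { 0.1, 0.3, 0.6, 1.0 };
    int    hits[4] = { 0 };
    for (int i = 0; i < 100000; ++i) ++hits[roulette_pick(cum, 4)];
    for (int i = 0; i < 4; ++i) printf("op %d: %d\n", i, hits[i]);
    return 0;
  }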
-
-
-/* Get unix time in milliseconds */
-
-static u64 get_cur_time(void) {
-
-  struct timeval tv;
-  struct timezone tz;
-
-  gettimeofday(&tv, &tz);
-
-  return (tv.tv_sec * 1000ULL) + (tv.tv_usec / 1000);
-
-}
-
-
-/* Get unix time in microseconds */
-
-static u64 get_cur_time_us(void) {
-
-  struct timeval tv;
-  struct timezone tz;
-
-  gettimeofday(&tv, &tz);
-
-  return (tv.tv_sec * 1000000ULL) + tv.tv_usec;
-
-}
-
-
-/* Generate a random number (from 0 to limit - 1). This may
-   have slight bias. */
-
-static inline u32 UR(u32 limit) {
-#ifdef HAVE_ARC4RANDOM
-  if (fixed_seed) {
-    return random() % limit;
-  }
-
-  /* Since the limit is not necessarily a power of 2, we rely on
-     arc4random_uniform() to keep the result uniform. */
-  return arc4random_uniform(limit);
-#else
-  if (!fixed_seed && unlikely(!rand_cnt--)) {
-    u32 seed[2];
-
-    ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom");
-    srandom(seed[0]);
-    rand_cnt = (RESEED_RNG / 2) + (seed[1] % RESEED_RNG);
-  }
-
-  return random() % limit;
-#endif
-}
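
The bias mentioned above comes from random() % limit: when limit does not divide the generator's range, low residues are slightly over-represented. arc4random_uniform() removes this via rejection sampling; a self-contained sketch of that trick follows (the rand32() source is improvised purely so the sketch compiles, and is not AFL code):

  #include <stdint.h>
  #include <stdlib.h>

  /* Improvised 32-bit source; random() only yields 31 bits per call. */
  static uint32_t rand32(void) {
    return ((uint32_t)random() << 16) ^ (uint32_t)random();
  }

  static uint32_t uniform_below(uint32_t limit) {
    if (limit < 2) return 0;
    /* (2^32 - limit) % limit: the smallest raw value we may keep so
       that the accepted range is an exact multiple of limit. */
    uint32_t min = (0u - limit) % limit;
    uint32_t r;
    do { r = rand32(); } while (r < min);   /* rejects < 50% of draws */
    return r % limit;
  }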
-
-
-/* Shuffle an array of pointers. Might be slightly biased. */
-
-static void shuffle_ptrs(void** ptrs, u32 cnt) {
-
-  u32 i;
-
-  for (i = 0; i < cnt - 2; ++i) {
-
-    u32 j = i + UR(cnt - i);
-    void *s = ptrs[i];
-    ptrs[i] = ptrs[j];
-    ptrs[j] = s;
-
-  }
-
-}
-
-
-#ifdef HAVE_AFFINITY
-
-/* Build a list of processes bound to specific cores. Returns -1 if nothing
-   can be found. Assumes an upper bound of 4k CPUs. */
-
-static void bind_to_free_cpu(void) {
-
-  DIR* d;
-  struct dirent* de;
-  cpu_set_t c;
-
-  u8 cpu_used[4096] = { 0 };
-  u32 i;
-
-  if (cpu_core_count < 2) return;
-
-  if (getenv("AFL_NO_AFFINITY")) {
-
-    WARNF("Not binding to a CPU core (AFL_NO_AFFINITY set).");
-    return;
-
-  }
-
-  d = opendir("/proc");
-
-  if (!d) {
-
-    WARNF("Unable to access /proc - can't scan for free CPU cores.");
-    return;
-
-  }
-
-  ACTF("Checking CPU core loadout...");
-
-  /* Introduce some jitter, in case multiple AFL tasks are doing the same
-     thing at the same time... */
-
-  usleep(R(1000) * 250);
-
-  /* Scan all /proc/<pid>/status entries, checking for Cpus_allowed_list.
-     Flag all processes bound to a specific CPU using cpu_used[]. This will
-     fail for some exotic binding setups, but is likely good enough in almost
-     all real-world use cases. */
-
-  while ((de = readdir(d))) {
-
-    u8* fn;
-    FILE* f;
-    u8 tmp[MAX_LINE];
-    u8 has_vmsize = 0;
-
-    if (!isdigit(de->d_name[0])) continue;
-
-    fn = alloc_printf("/proc/%s/status", de->d_name);
-
-    if (!(f = fopen(fn, "r"))) {
-      ck_free(fn);
-      continue;
-    }
-
-    while (fgets(tmp, MAX_LINE, f)) {
-
-      u32 hval;
-
-      /* Processes without VmSize are probably kernel tasks. */
-
-      if (!strncmp(tmp, "VmSize:\t", 8)) has_vmsize = 1;
-
-      if (!strncmp(tmp, "Cpus_allowed_list:\t", 19) &&
-          !strchr(tmp, '-') && !strchr(tmp, ',') &&
-          sscanf(tmp + 19, "%u", &hval) == 1 && hval < sizeof(cpu_used) &&
-          has_vmsize) {
-
-        cpu_used[hval] = 1;
-        break;
-
-      }
-
-    }
-
-    ck_free(fn);
-    fclose(f);
-
-  }
-
-  closedir(d);
-
-  for (i = 0; i < cpu_core_count; ++i) if (!cpu_used[i]) break;
-
-  if (i == cpu_core_count) {
-
-    SAYF("\n" cLRD "[-] " cRST
-         "Uh-oh, looks like all %u CPU cores on your system are allocated to\n"
-         "    other instances of afl-fuzz (or similar CPU-locked tasks). Starting\n"
-         "    another fuzzer on this machine is probably a bad plan, but if you are\n"
-         "    absolutely sure, you can set AFL_NO_AFFINITY and try again.\n",
-         cpu_core_count);
-
-    FATAL("No more free CPU cores");
-
-  }
-
-  OKF("Found a free CPU core, binding to #%u.", i);
-
-  cpu_aff = i;
-
-  CPU_ZERO(&c);
-  CPU_SET(i, &c);
-
-  if (sched_setaffinity(0, sizeof(c), &c))
-    PFATAL("sched_setaffinity failed");
-
-}
-
-#endif /* HAVE_AFFINITY */
-
-#ifndef IGNORE_FINDS
-
-/* Helper function to compare buffers; returns first and last differing offset. We
-   use this to find reasonable locations for splicing two files. */
-
-static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {
-
-  s32 f_loc = -1;
-  s32 l_loc = -1;
-  u32 pos;
-
-  for (pos = 0; pos < len; ++pos) {
-
-    if (*(ptr1++) != *(ptr2++)) {
-
-      if (f_loc == -1) f_loc = pos;
-      l_loc = pos;
-
-    }
-
-  }
-
-  *first = f_loc;
-  *last = l_loc;
-
-  return;
-
-}
-
-#endif /* !IGNORE_FINDS */
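
The splicing stage consumes the offsets from locate_diffs(): it crosses over two queue entries at a point strictly between the first and last differing byte, so the child is guaranteed to differ from both parents. A hedged sketch of that use (rand() stands in for AFL's UR(); equal-length buffers assumed for brevity):

  #include <stdlib.h>
  #include <string.h>

  static unsigned char* splice_bufs(const unsigned char* a,
                                    const unsigned char* b,
                                    int first, int last, unsigned len) {
    /* Mirrors the shape of afl-fuzz's sanity check: we need at least
       two differing, distinct positions to pick a point between. */
    if (first < 0 || last < 2 || first == last) return NULL;

    unsigned split = first + rand() % (last - first);  /* in [first, last) */
    unsigned char* out = malloc(len);

    if (!out) return NULL;
    memcpy(out, a, split);                        /* head from parent a */
    memcpy(out + split, b + split, len - split);  /* tail from parent b */
    return out;
  }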
-
-
-/* Describe integer. Uses 12 cyclic static buffers for return values. The value
-   returned should be five characters or less for all the integers we reasonably
-   expect to see. */
-
-static u8* DI(u64 val) {
-
-  static u8 tmp[12][16];
-  static u8 cur;
-
-  cur = (cur + 1) % 12;
-
-#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \
-    if (val < (_divisor) * (_limit_mult)) { \
-      sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \
-      return tmp[cur]; \
-    } \
-  } while (0)
-
-  /* 0-9999 */
-  CHK_FORMAT(1, 10000, "%llu", u64);
-
-  /* 10.0k - 99.9k */
-  CHK_FORMAT(1000, 99.95, "%0.01fk", double);
-
-  /* 100k - 999k */
-  CHK_FORMAT(1000, 1000, "%lluk", u64);
-
-  /* 1.00M - 9.99M */
-  CHK_FORMAT(1000 * 1000, 9.995, "%0.02fM", double);
-
-  /* 10.0M - 99.9M */
-  CHK_FORMAT(1000 * 1000, 99.95, "%0.01fM", double);
-
-  /* 100M - 999M */
-  CHK_FORMAT(1000 * 1000, 1000, "%lluM", u64);
-
-  /* 1.00G - 9.99G */
-  CHK_FORMAT(1000LL * 1000 * 1000, 9.995, "%0.02fG", double);
-
-  /* 10.0G - 99.9G */
-  CHK_FORMAT(1000LL * 1000 * 1000, 99.95, "%0.01fG", double);
-
-  /* 100G - 999G */
-  CHK_FORMAT(1000LL * 1000 * 1000, 1000, "%lluG", u64);
-
-  /* 1.00T - 9.99T */
-  CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 9.995, "%0.02fT", double);
-
-  /* 10.0T - 99.9T */
-  CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 99.95, "%0.01fT", double);
-
-  /* 100T+ */
-  strcpy(tmp[cur], "infty");
-  return tmp[cur];
-
-}
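
The 12 rotating static buffers are what let several DI() calls coexist in a single printf() argument list without clobbering each other. A minimal illustration of the pattern (hypothetical helper, not AFL code):

  #include <stdio.h>

  static const char* slot_str(int val) {
    static char buf[4][16];
    static int  cur;
    cur = (cur + 1) % 4;                      /* rotate to a fresh slot */
    snprintf(buf[cur], sizeof(buf[cur]), "<%d>", val);
    return buf[cur];
  }

  int main(void) {
    /* All three results stay valid because each call used its own
       slot; the cost is that the helper is not thread-safe. */
    printf("%s %s %s\n", slot_str(1), slot_str(2), slot_str(3));
    return 0;
  }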
-
-
-/* Describe float. Similar to the above, except with a single 
-   static buffer. */
-
-static u8* DF(double val) {
-
-  static u8 tmp[16];
-
-  if (val < 99.995) {
-    sprintf(tmp, "%0.02f", val);
-    return tmp;
-  }
-
-  if (val < 999.95) {
-    sprintf(tmp, "%0.01f", val);
-    return tmp;
-  }
-
-  return DI((u64)val);
-
-}
-
-
-/* Describe integer as memory size. */
-
-static u8* DMS(u64 val) {
-
-  static u8 tmp[12][16];
-  static u8 cur;
-
-  cur = (cur + 1) % 12;
-
-  /* 0-9999 */
-  CHK_FORMAT(1, 10000, "%llu B", u64);
-
-  /* 10.0k - 99.9k */
-  CHK_FORMAT(1024, 99.95, "%0.01f kB", double);
-
-  /* 100k - 999k */
-  CHK_FORMAT(1024, 1000, "%llu kB", u64);
-
-  /* 1.00M - 9.99M */
-  CHK_FORMAT(1024 * 1024, 9.995, "%0.02f MB", double);
-
-  /* 10.0M - 99.9M */
-  CHK_FORMAT(1024 * 1024, 99.95, "%0.01f MB", double);
-
-  /* 100M - 999M */
-  CHK_FORMAT(1024 * 1024, 1000, "%llu MB", u64);
-
-  /* 1.00G - 9.99G */
-  CHK_FORMAT(1024LL * 1024 * 1024, 9.995, "%0.02f GB", double);
-
-  /* 10.0G - 99.9G */
-  CHK_FORMAT(1024LL * 1024 * 1024, 99.95, "%0.01f GB", double);
-
-  /* 100G - 999G */
-  CHK_FORMAT(1024LL * 1024 * 1024, 1000, "%llu GB", u64);
-
-  /* 1.00T - 9.99T */
-  CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 9.995, "%0.02f TB", double);
-
-  /* 10.0T - 99.9T */
-  CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 99.95, "%0.01f TB", double);
-
-#undef CHK_FORMAT
-
-  /* 100T+ */
-  strcpy(tmp[cur], "infty");
-  return tmp[cur];
-
-}
-
-
-/* Describe time delta. Returns one static buffer, 34 chars or less. */
-
-static u8* DTD(u64 cur_ms, u64 event_ms) {
-
-  static u8 tmp[64];
-  u64 delta;
-  s32 t_d, t_h, t_m, t_s;
-
-  if (!event_ms) return "none seen yet";
-
-  delta = cur_ms - event_ms;
-
-  t_d = delta / 1000 / 60 / 60 / 24;
-  t_h = (delta / 1000 / 60 / 60) % 24;
-  t_m = (delta / 1000 / 60) % 60;
-  t_s = (delta / 1000) % 60;
-
-  sprintf(tmp, "%s days, %u hrs, %u min, %u sec", DI(t_d), t_h, t_m, t_s);
-  return tmp;
-
-}
-
-
-/* Mark deterministic checks as done for a particular queue entry. We use the
-   .state file to avoid repeating deterministic fuzzing when resuming aborted
-   scans. */
-
-static void mark_as_det_done(struct queue_entry* q) {
-
-  u8* fn = strrchr(q->fname, '/');
-  s32 fd;
-
-  fn = alloc_printf("%s/queue/.state/deterministic_done/%s", out_dir, fn + 1);
-
-  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
-  if (fd < 0) PFATAL("Unable to create '%s'", fn);
-  close(fd);
-
-  ck_free(fn);
-
-  q->passed_det = 1;
-
-}
-
-
-/* Mark as variable. Create symlinks if possible to make it easier to examine
-   the files. */
-
-static void mark_as_variable(struct queue_entry* q) {
-
-  u8 *fn = strrchr(q->fname, '/') + 1, *ldest;
-
-  ldest = alloc_printf("../../%s", fn);
-  fn = alloc_printf("%s/queue/.state/variable_behavior/%s", out_dir, fn);
-
-  if (symlink(ldest, fn)) {
-
-    s32 fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
-    if (fd < 0) PFATAL("Unable to create '%s'", fn);
-    close(fd);
-
-  }
-
-  ck_free(ldest);
-  ck_free(fn);
-
-  q->var_behavior = 1;
-
-}
-
-
-/* Mark / unmark as redundant (edge-only). This is not used for restoring state,
-   but may be useful for post-processing datasets. */
-
-static void mark_as_redundant(struct queue_entry* q, u8 state) {
-
-  u8* fn;
-  s32 fd;
-
-  if (state == q->fs_redundant) return;
-
-  q->fs_redundant = state;
-
-  fn = strrchr(q->fname, '/');
-  fn = alloc_printf("%s/queue/.state/redundant_edges/%s", out_dir, fn + 1);
-
-  if (state) {
-
-    fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
-    if (fd < 0) PFATAL("Unable to create '%s'", fn);
-    close(fd);
-
-  } else {
-
-    if (unlink(fn)) PFATAL("Unable to remove '%s'", fn);
-
-  }
-
-  ck_free(fn);
-
-}
-
-
-/* Append new test case to the queue. */
-
-static void add_to_queue(u8* fname, u32 len, u8 passed_det) {
-
-  struct queue_entry* q = ck_alloc(sizeof(struct queue_entry));
-
-  q->fname        = fname;
-  q->len          = len;
-  q->depth        = cur_depth + 1;
-  q->passed_det   = passed_det;
-  q->n_fuzz       = 1;
-
-  if (q->depth > max_depth) max_depth = q->depth;
-
-  if (queue_top) {
-
-    queue_top->next = q;
-    queue_top = q;
-
-  } else q_prev100 = queue = queue_top = q;
-
-  ++queued_paths;
-  ++pending_not_fuzzed;
-
-  cycles_wo_finds = 0;
-
-  if (!(queued_paths % 100)) {
-
-    q_prev100->next_100 = q;
-    q_prev100 = q;
-
-  }
-
-  last_path_time = get_cur_time();
-
-}
-
-
-/* Destroy the entire queue. */
-
-EXP_ST void destroy_queue(void) {
-
-  struct queue_entry *q = queue, *n;
-
-  while (q) {
-
-    n = q->next;
-    ck_free(q->fname);
-    ck_free(q->trace_mini);
-    ck_free(q);
-    q = n;
-
-  }
-
-}
-
-
-/* Write bitmap to file. The bitmap is useful mostly for the secret
-   -B option, to focus a separate fuzzing session on a particular
-   interesting input without rediscovering all the others. */
-
-EXP_ST void write_bitmap(void) {
-
-  u8* fname;
-  s32 fd;
-
-  if (!bitmap_changed) return;
-  bitmap_changed = 0;
-
-  fname = alloc_printf("%s/fuzz_bitmap", out_dir);
-  fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);
-
-  if (fd < 0) PFATAL("Unable to open '%s'", fname);
-
-  ck_write(fd, virgin_bits, MAP_SIZE, fname);
-
-  close(fd);
-  ck_free(fname);
-
-}
-
-
-/* Read bitmap from file. This is for the -B option again. */
-
-EXP_ST void read_bitmap(u8* fname) {
-
-  s32 fd = open(fname, O_RDONLY);
-
-  if (fd < 0) PFATAL("Unable to open '%s'", fname);
-
-  ck_read(fd, virgin_bits, MAP_SIZE, fname);
-
-  close(fd);
-
-}
-
-
-/* Check if the current execution path brings anything new to the table.
-   Update virgin bits to reflect the finds. Returns 1 if the only change is
-   the hit-count for a particular tuple; 2 if there are new tuples seen. 
-   Updates the map, so subsequent calls will always return 0.
-
-   This function is called after every exec() on a fairly large buffer, so
-   it needs to be fast. We do this in 32-bit and 64-bit flavors. */
-
-static inline u8 has_new_bits(u8* virgin_map) {
-
-#ifdef __x86_64__
-
-  u64* current = (u64*)trace_bits;
-  u64* virgin  = (u64*)virgin_map;
-
-  u32  i = (MAP_SIZE >> 3);
-
-#else
-
-  u32* current = (u32*)trace_bits;
-  u32* virgin  = (u32*)virgin_map;
-
-  u32  i = (MAP_SIZE >> 2);
-
-#endif /* ^__x86_64__ */
-
-  u8   ret = 0;
-
-  while (i--) {
-
-    /* Optimize for (*current & *virgin) == 0 - i.e., no bits in current bitmap
-       that have not been already cleared from the virgin map - since this will
-       almost always be the case. */
-
-    if (unlikely(*current) && unlikely(*current & *virgin)) {
-
-      if (likely(ret < 2)) {
-
-        u8* cur = (u8*)current;
-        u8* vir = (u8*)virgin;
-
-        /* Looks like we have not found any new bytes yet; see if any non-zero
-           bytes in current[] are pristine in virgin[]. */
-
-#ifdef __x86_64__
-
-        if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
-            (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) ||
-            (cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) ||
-            (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) ret = 2;
-        else ret = 1;
-
-#else
-
-        if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
-            (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) ret = 2;
-        else ret = 1;
-
-#endif /* ^__x86_64__ */
-
-      }
-
-      *virgin &= ~*current;
-
-    }
-
-    ++current;
-    ++virgin;
-
-  }
-
-  if (ret && virgin_map == virgin_bits) bitmap_changed = 1;
-
-  return ret;
-
-}
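
Stripped of the word-sized fast path, the contract of has_new_bits() is easier to see byte by byte: clear the trace's bits out of the virgin map, and report 2 if any touched byte was still 0xff (a never-seen tuple), or 1 if only new hit-count bits were consumed. A byte-wise reference sketch (uint8_t/uint32_t stand in for AFL's u8/u32):

  #include <stdint.h>

  static uint8_t has_new_bits_slow(uint8_t* virgin_map,
                                   const uint8_t* trace,
                                   uint32_t map_size) {
    uint8_t ret = 0;
    for (uint32_t i = 0; i < map_size; ++i) {
      if (trace[i] && (trace[i] & virgin_map[i])) {
        if (virgin_map[i] == 0xff) ret = 2;   /* tuple never seen at all */
        else if (ret < 2) ret = 1;            /* only a new count bucket */
        virgin_map[i] &= (uint8_t)~trace[i];  /* consume the novelty     */
      }
    }
    return ret;
  }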
-
-
-/* Count the number of bits set in the provided bitmap. Used for the status
-   screen several times every second, does not have to be fast. */
-
-static u32 count_bits(u8* mem) {
-
-  u32* ptr = (u32*)mem;
-  u32  i   = (MAP_SIZE >> 2);
-  u32  ret = 0;
-
-  while (i--) {
-
-    u32 v = *(ptr++);
-
-    /* This gets called on the inverse, virgin bitmap; optimize for sparse
-       data. */
-
-    if (v == 0xffffffff) {
-      ret += 32;
-      continue;
-    }
-
-    v -= ((v >> 1) & 0x55555555);
-    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
-    ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24;
-
-  }
-
-  return ret;
-
-}
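
The three bit-twiddling lines above are the classic SWAR popcount: fold the word into 2-bit sums, then 4-bit sums, then use one multiply to accumulate the four byte totals into the top byte. A standalone check against a naive loop:

  #include <stdio.h>
  #include <stdint.h>

  static uint32_t popcount_swar(uint32_t v) {
    v -= (v >> 1) & 0x55555555;                      /* 2-bit partial sums */
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);  /* 4-bit partial sums */
    return (((v + (v >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
  }

  int main(void) {
    uint32_t tests[] = { 0, 1, 0xffffffff, 0x80000001, 0xdeadbeef };
    for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); ++i) {
      uint32_t naive = 0;
      for (uint32_t t = tests[i]; t; t >>= 1) naive += t & 1;
      printf("%08x: swar=%u naive=%u\n", tests[i],
             popcount_swar(tests[i]), naive);
    }
    return 0;
  }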
-
-
-#define FF(_b)  (0xff << ((_b) << 3))
-
-/* Count the number of bytes set in the bitmap. Called fairly sporadically,
-   mostly to update the status screen or calibrate and examine confirmed
-   new paths. */
-
-static u32 count_bytes(u8* mem) {
-
-  u32* ptr = (u32*)mem;
-  u32  i   = (MAP_SIZE >> 2);
-  u32  ret = 0;
-
-  while (i--) {
-
-    u32 v = *(ptr++);
-
-    if (!v) continue;
-    if (v & FF(0)) ++ret;
-    if (v & FF(1)) ++ret;
-    if (v & FF(2)) ++ret;
-    if (v & FF(3)) ++ret;
-
-  }
-
-  return ret;
-
-}
-
-
-/* Count the number of non-255 bytes set in the bitmap. Used strictly for the
-   status screen, several calls per second or so. */
-
-static u32 count_non_255_bytes(u8* mem) {
-
-  u32* ptr = (u32*)mem;
-  u32  i   = (MAP_SIZE >> 2);
-  u32  ret = 0;
-
-  while (i--) {
-
-    u32 v = *(ptr++);
-
-    /* This is called on the virgin bitmap, so optimize for the most likely
-       case. */
-
-    if (v == 0xffffffff) continue;
-    if ((v & FF(0)) != FF(0)) ++ret;
-    if ((v & FF(1)) != FF(1)) ++ret;
-    if ((v & FF(2)) != FF(2)) ++ret;
-    if ((v & FF(3)) != FF(3)) ++ret;
-
-  }
-
-  return ret;
-
-}
-
-
-/* Destructively simplify trace by eliminating hit count information
-   and replacing it with 0x80 or 0x01 depending on whether the tuple
-   is hit or not. Called on every new crash or timeout, should be
-   reasonably fast. */
-
-static const u8 simplify_lookup[256] = { 
-
-  [0]         = 1,
-  [1 ... 255] = 128
-
-};
-
-#ifdef __x86_64__
-
-static void simplify_trace(u64* mem) {
-
-  u32 i = MAP_SIZE >> 3;
-
-  while (i--) {
-
-    /* Optimize for sparse bitmaps. */
-
-    if (unlikely(*mem)) {
-
-      u8* mem8 = (u8*)mem;
-
-      mem8[0] = simplify_lookup[mem8[0]];
-      mem8[1] = simplify_lookup[mem8[1]];
-      mem8[2] = simplify_lookup[mem8[2]];
-      mem8[3] = simplify_lookup[mem8[3]];
-      mem8[4] = simplify_lookup[mem8[4]];
-      mem8[5] = simplify_lookup[mem8[5]];
-      mem8[6] = simplify_lookup[mem8[6]];
-      mem8[7] = simplify_lookup[mem8[7]];
-
-    } else *mem = 0x0101010101010101ULL;
-
-    ++mem;
-
-  }
-
-}
-
-#else
-
-static void simplify_trace(u32* mem) {
-
-  u32 i = MAP_SIZE >> 2;
-
-  while (i--) {
-
-    /* Optimize for sparse bitmaps. */
-
-    if (unlikely(*mem)) {
-
-      u8* mem8 = (u8*)mem;
-
-      mem8[0] = simplify_lookup[mem8[0]];
-      mem8[1] = simplify_lookup[mem8[1]];
-      mem8[2] = simplify_lookup[mem8[2]];
-      mem8[3] = simplify_lookup[mem8[3]];
-
-    } else *mem = 0x01010101;
-
-    ++mem;
-  }
-
-}
-
-#endif /* ^__x86_64__ */
-
-
-/* Destructively classify execution counts in a trace. This is used as a
-   preprocessing step for any newly acquired traces. Called on every exec,
-   must be fast. */
-
-static const u8 count_class_lookup8[256] = {
-
-  [0]           = 0,
-  [1]           = 1,
-  [2]           = 2,
-  [3]           = 4,
-  [4 ... 7]     = 8,
-  [8 ... 15]    = 16,
-  [16 ... 31]   = 32,
-  [32 ... 127]  = 64,
-  [128 ... 255] = 128
-
-};
-
-static u16 count_class_lookup16[65536];
-
-
-EXP_ST void init_count_class16(void) {
-
-  u32 b1, b2;
-
-  for (b1 = 0; b1 < 256; b1++) 
-    for (b2 = 0; b2 < 256; b2++)
-      count_class_lookup16[(b1 << 8) + b2] = 
-        (count_class_lookup8[b1] << 8) |
-        count_class_lookup8[b2];
-
-}
-
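-/* Illustrative example (not in the original source): classifying two raw
-   hit counts at once through the table built above. The raw byte pair
-   (3, 9) maps to (4, 16) - the "3 hits" and "8-15 hits" buckets - so:
-
-     count_class_lookup16[(3 << 8) | 9] == ((4 << 8) | 16)   */
-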
-
-#ifdef __x86_64__
-
-static inline void classify_counts(u64* mem) {
-
-  u32 i = MAP_SIZE >> 3;
-
-  while (i--) {
-
-    /* Optimize for sparse bitmaps. */
-
-    if (unlikely(*mem)) {
-
-      u16* mem16 = (u16*)mem;
-
-      mem16[0] = count_class_lookup16[mem16[0]];
-      mem16[1] = count_class_lookup16[mem16[1]];
-      mem16[2] = count_class_lookup16[mem16[2]];
-      mem16[3] = count_class_lookup16[mem16[3]];
-
-    }
-
-    ++mem;
-
-  }
-
-}
-
-#else
-
-static inline void classify_counts(u32* mem) {
-
-  u32 i = MAP_SIZE >> 2;
-
-  while (i--) {
-
-    /* Optimize for sparse bitmaps. */
-
-    if (unlikely(*mem)) {
-
-      u16* mem16 = (u16*)mem;
-
-      mem16[0] = count_class_lookup16[mem16[0]];
-      mem16[1] = count_class_lookup16[mem16[1]];
-
-    }
-
-    ++mem;
-
-  }
-
-}
-
-#endif /* ^__x86_64__ */
-
-
-/* Compact trace bytes into a smaller bitmap. We effectively just drop the
-   count information here. This is called only sporadically, for some
-   new paths. */
-
-static void minimize_bits(u8* dst, u8* src) {
-
-  u32 i = 0;
-
-  while (i < MAP_SIZE) {
-
-    if (*(src++)) dst[i >> 3] |= 1 << (i & 7);
-    ++i;
-
-  }
-
-}
-
-
-
-/* Find first power of two greater or equal to val (assuming val under
-   2^63). */
-
-static u64 next_p2(u64 val) {
-
-  u64 ret = 1;
-  while (val > ret) ret <<= 1;
-  return ret;
-
-}
-
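-/* Quick sanity examples for next_p2() (illustrative, not from the original
-   source): next_p2(1) == 1, next_p2(5) == 8, next_p2(8) == 8 and
-   next_p2(9) == 16; note that next_p2(0) yields 1 here. */
-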
-
-/* When we bump into a new path, we call this to see if the path appears
-   more "favorable" than any of the existing ones. The purpose of the
-   "favorables" is to have a minimal set of paths that trigger all the bits
-   seen in the bitmap so far, and focus on fuzzing them at the expense of
-   the rest.
-
-   The first step of the process is to maintain a list of top_rated[] entries
-   for every byte in the bitmap. We win that slot if there is no previous
-   contender, or if the contender has a more favorable speed x size factor. */
-
-
-static void update_bitmap_score(struct queue_entry* q) {
-
-  u32 i;
-  u64 fav_factor = q->exec_us * q->len;
-  u64 fuzz_p2    = next_p2(q->n_fuzz);
-
-  /* For every byte set in trace_bits[], see if there is a previous winner,
-     and how it compares to us. */
-
-  for (i = 0; i < MAP_SIZE; ++i)
-
-    if (trace_bits[i]) {
-
-       if (top_rated[i]) {
-
-         /* Rarely-exercised paths (lower fuzz_p2) win outright; ties are
-            broken in favor of faster-executing or smaller test cases. */
-
-         u64 top_rated_fuzz_p2    = next_p2(top_rated[i]->n_fuzz);
-         u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len;
-
-         if (fuzz_p2 > top_rated_fuzz_p2) continue;
-
-         if (fuzz_p2 == top_rated_fuzz_p2 &&
-             fav_factor > top_rated_fav_factor) continue;
-
-         /* Looks like we're going to win. Decrease ref count for the
-            previous winner, discard its trace_bits[] if necessary. */
-
-         if (!--top_rated[i]->tc_ref) {
-           ck_free(top_rated[i]->trace_mini);
-           top_rated[i]->trace_mini = 0;
-         }
-
-       }
-
-       /* Insert ourselves as the new winner. */
-
-       top_rated[i] = q;
-       ++q->tc_ref;
-
-       if (!q->trace_mini) {
-         q->trace_mini = ck_alloc(MAP_SIZE >> 3);
-         minimize_bits(q->trace_mini, trace_bits);
-       }
-
-       score_changed = 1;
-
-     }
-
-}
-
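-/* Illustrative numbers (not from the original source): with equal fuzz_p2,
-   a candidate with exec_us = 80 and len = 40 (fav_factor = 3200) displaces
-   a previous winner with exec_us = 100 and len = 50 (fav_factor = 5000),
-   since a lower speed x size product is considered more favorable. */
-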
-
-/* The second part of the mechanism discussed above is a routine that
-   goes over top_rated[] entries, and then sequentially grabs winners for
-   previously-unseen bytes (temp_v) and marks them as favored, at least
-   until the next run. The favored entries are given more air time during
-   all fuzzing steps. */
-
-static void cull_queue(void) {
-
-  struct queue_entry* q;
-  static u8 temp_v[MAP_SIZE >> 3];
-  u32 i;
-
-  if (dumb_mode || !score_changed) return;
-
-  score_changed = 0;
-
-  memset(temp_v, 255, MAP_SIZE >> 3);
-
-  queued_favored  = 0;
-  pending_favored = 0;
-
-  q = queue;
-
-  while (q) {
-    q->favored = 0;
-    q = q->next;
-  }
-
-  /* Let's see if anything in the bitmap isn't captured in temp_v.
-     If yes, and if it has a top_rated[] contender, let's use it. */
-
-  for (i = 0; i < MAP_SIZE; ++i)
-    if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {
-
-      u32 j = MAP_SIZE >> 3;
-
-      /* Remove all bits belonging to the current entry from temp_v. */
-
-      while (j--) 
-        if (top_rated[i]->trace_mini[j])
-          temp_v[j] &= ~top_rated[i]->trace_mini[j];
-
-      top_rated[i]->favored = 1;
-      ++queued_favored;
-
-      if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed)
-        ++pending_favored;
-
-    }
-
-  q = queue;
-
-  while (q) {
-    mark_as_redundant(q, !q->favored);
-    q = q->next;
-  }
-
-}
-
-
-/* Load postprocessor, if available. */
-
-static void setup_post(void) {
-
-  void* dh;
-  u8* fn = getenv("AFL_POST_LIBRARY");
-  u32 tlen = 6;
-
-  if (!fn) return;
-
-  ACTF("Loading postprocessor from '%s'...", fn);
-
-  dh = dlopen(fn, RTLD_NOW);
-  if (!dh) FATAL("%s", dlerror());
-
-  post_handler = dlsym(dh, "afl_postprocess");
-  if (!post_handler) FATAL("Symbol 'afl_postprocess' not found.");
-
-  /* Do a quick test. It's better to segfault now than later =) */
-
-  post_handler("hello", &tlen);
-
-  OKF("Postprocessor installed successfully.");
-
-}
-
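-/* A minimal AFL_POST_LIBRARY payload might look like the sketch below
-   (illustrative; the signature is inferred from the dlsym() lookup and the
-   test call above, and the code would live in a separately-built shared
-   object rather than in this file):
-
-     u8* afl_postprocess(u8* buf, u32* len) {
-       // e.g. recompute a checksum field inside buf here
-       return buf;
-     }
-*/
-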
-static void setup_custom_mutator(void) {
-  void* dh;
-  u8* fn = getenv("AFL_CUSTOM_MUTATOR_LIBRARY");
-
-  if (!fn) return;
-
-  ACTF("Loading custom mutator library from '%s'...", fn);
-
-  dh = dlopen(fn, RTLD_NOW);
-  if (!dh) FATAL("%s", dlerror());
-
-  custom_mutator = dlsym(dh, "afl_custom_mutator");
-  if (!custom_mutator) FATAL("Symbol 'afl_custom_mutator' not found.");
-
-  pre_save_handler = dlsym(dh, "afl_pre_save_handler");
-//  if (!pre_save_handler) WARNF("Symbol 'afl_pre_save_handler' not found.");
-
-  OKF("Custom mutator installed successfully.");
-}
-
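-/* Sketch of an AFL_CUSTOM_MUTATOR_LIBRARY payload (illustrative; the
-   afl_pre_save_handler signature is inferred from its call site in
-   write_to_testcase(), and the body is a placeholder):
-
-     size_t afl_pre_save_handler(u8* data, size_t size, u8** new_data) {
-       *new_data = data;  // e.g. re-encode the buffer before it is written
-       return size;
-     }
-*/
-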
-
-/* Read all testcases from the input directory, then queue them for testing.
-   Called at startup. */
-
-static void read_testcases(void) {
-
-  struct dirent **nl;
-  s32 nl_cnt;
-  u32 i;
-  u8* fn;
-
-  /* Auto-detect non-in-place resumption attempts. */
-
-  fn = alloc_printf("%s/queue", in_dir);
-  if (!access(fn, F_OK)) in_dir = fn; else ck_free(fn);
-
-  ACTF("Scanning '%s'...", in_dir);
-
-  /* We use scandir() + alphasort() rather than readdir() because otherwise,
-     the ordering of test cases would vary somewhat randomly and would be
-     difficult to control. */
-
-  nl_cnt = scandir(in_dir, &nl, NULL, alphasort);
-
-  if (nl_cnt < 0) {
-
-    if (errno == ENOENT || errno == ENOTDIR)
-
-      SAYF("\n" cLRD "[-] " cRST
-           "The input directory does not seem to be valid - try again. The fuzzer needs\n"
-           "    one or more test case to start with - ideally, a small file under 1 kB\n"
-           "    or so. The cases must be stored as regular files directly in the input\n"
-           "    directory.\n");
-
-    PFATAL("Unable to open '%s'", in_dir);
-
-  }
-
-  if (shuffle_queue && nl_cnt > 1) {
-
-    ACTF("Shuffling queue...");
-    shuffle_ptrs((void**)nl, nl_cnt);
-
-  }
-
-  for (i = 0; i < nl_cnt; ++i) {
-
-    struct stat st;
-
-    u8* fn = alloc_printf("%s/%s", in_dir, nl[i]->d_name);
-    u8* dfn = alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name);
-
-    u8  passed_det = 0;
-
-    free(nl[i]); /* not tracked */
- 
-    if (lstat(fn, &st) || access(fn, R_OK))
-      PFATAL("Unable to access '%s'", fn);
-
-    /* This also takes care of . and .. */
-
-    if (!S_ISREG(st.st_mode) || !st.st_size || strstr(fn, "/README.txt")) {
-
-      ck_free(fn);
-      ck_free(dfn);
-      continue;
-
-    }
-
-    if (st.st_size > MAX_FILE) 
-      FATAL("Test case '%s' is too big (%s, limit is %s)", fn,
-            DMS(st.st_size), DMS(MAX_FILE));
-
-    /* Check for metadata that indicates that deterministic fuzzing
-       is complete for this entry. We don't want to repeat deterministic
-       fuzzing when resuming aborted scans, because it would be pointless
-       and probably very time-consuming. */
-
-    if (!access(dfn, F_OK)) passed_det = 1;
-    ck_free(dfn);
-
-    add_to_queue(fn, st.st_size, passed_det);
-
-  }
-
-  free(nl); /* not tracked */
-
-  if (!queued_paths) {
-
-    SAYF("\n" cLRD "[-] " cRST
-         "Looks like there are no valid test cases in the input directory! The fuzzer\n"
-         "    needs one or more test case to start with - ideally, a small file under\n"
-         "    1 kB or so. The cases must be stored as regular files directly in the\n"
-         "    input directory.\n");
-
-    FATAL("No usable test cases in '%s'", in_dir);
-
-  }
-
-  last_path_time = 0;
-  queued_at_start = queued_paths;
-
-}
-
-
-/* Helper function for load_extras. */
-
-static int compare_extras_len(const void* p1, const void* p2) {
-  struct extra_data *e1 = (struct extra_data*)p1,
-                    *e2 = (struct extra_data*)p2;
-
-  return e1->len - e2->len;
-}
-
-static int compare_extras_use_d(const void* p1, const void* p2) {
-  struct extra_data *e1 = (struct extra_data*)p1,
-                    *e2 = (struct extra_data*)p2;
-
-  return e2->hit_cnt - e1->hit_cnt;
-}
-
-
-/* Read extras from a file, sort by size. */
-
-static void load_extras_file(u8* fname, u32* min_len, u32* max_len,
-                             u32 dict_level) {
-
-  FILE* f;
-  u8  buf[MAX_LINE];
-  u8  *lptr;
-  u32 cur_line = 0;
-
-  f = fopen(fname, "r");
-
-  if (!f) PFATAL("Unable to open '%s'", fname);
-
-  while ((lptr = fgets(buf, MAX_LINE, f))) {
-
-    u8 *rptr, *wptr;
-    u32 klen = 0;
-
-    ++cur_line;
-
-    /* Trim on left and right. */
-
-    while (isspace(*lptr)) ++lptr;
-
-    rptr = lptr + strlen(lptr) - 1;
-    while (rptr >= lptr && isspace(*rptr)) --rptr;
-    ++rptr;
-    *rptr = 0;
-
-    /* Skip empty lines and comments. */
-
-    if (!*lptr || *lptr == '#') continue;
-
-    /* All other lines must end with '"', which we can consume. */
-
-    --rptr;
-
-    if (rptr < lptr || *rptr != '"')
-      FATAL("Malformed name=\"value\" pair in line %u.", cur_line);
-
-    *rptr = 0;
-
-    /* Skip alphanumerics and underscores (label). */
-
-    while (isalnum(*lptr) || *lptr == '_') ++lptr;
-
-    /* If @number follows, parse that. */
-
-    if (*lptr == '@') {
-
-      ++lptr;
-      if (atoi(lptr) > dict_level) continue;
-      while (isdigit(*lptr)) ++lptr;
-
-    }
-
-    /* Skip whitespace and = signs. */
-
-    while (isspace(*lptr) || *lptr == '=') ++lptr;
-
-    /* Consume opening '"'. */
-
-    if (*lptr != '"')
-      FATAL("Malformed name=\"keyword\" pair in line %u.", cur_line);
-
-    ++lptr;
-
-    if (!*lptr) FATAL("Empty keyword in line %u.", cur_line);
-
-    /* Okay, let's allocate memory and copy data between "...", handling
-       \xNN escaping, \\, and \". */
-
-    extras = ck_realloc_block(extras, (extras_cnt + 1) *
-               sizeof(struct extra_data));
-
-    wptr = extras[extras_cnt].data = ck_alloc(rptr - lptr);
-
-    while (*lptr) {
-
-      const char* hexdigits = "0123456789abcdef";
-
-      switch (*lptr) {
-
-        case 1 ... 31:
-        case 128 ... 255:
-          FATAL("Non-printable characters in line %u.", cur_line);
-
-        case '\\':
-
-          ++lptr;
-
-          if (*lptr == '\\' || *lptr == '"') {
-            *(wptr++) = *(lptr++);
-            klen++;
-            break;
-          }
-
-          if (*lptr != 'x' || !isxdigit(lptr[1]) || !isxdigit(lptr[2]))
-            FATAL("Invalid escaping (not \\xNN) in line %u.", cur_line);
-
-          *(wptr++) =
-            ((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) |
-            (strchr(hexdigits, tolower(lptr[2])) - hexdigits);
-
-          lptr += 3;
-          ++klen;
-
-          break;
-
-        default:
-
-          *(wptr++) = *(lptr++);
-          ++klen;
-
-      }
-
-    }
-
-    extras[extras_cnt].len = klen;
-
-    if (extras[extras_cnt].len > MAX_DICT_FILE)
-      FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line,
-            DMS(klen), DMS(MAX_DICT_FILE));
-
-    if (*min_len > klen) *min_len = klen;
-    if (*max_len < klen) *max_len = klen;
-
-    ++extras_cnt;
-
-  }
-
-  fclose(f);
-
-}
-
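-/* Example of a dictionary file accepted by the parser above (illustrative,
-   hypothetical entries):
-
-     # comments and blank lines are skipped
-     header_magic="PNG"
-     chunk_ihdr@2="IHDR\x00\x00"
-
-   The optional "@N" suffix causes an entry to be skipped unless the
-   dictionary argument carried a level of at least N, e.g. "dict@2". */
-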
-
-/* Read extras from the extras directory and sort them by size. */
-
-static void load_extras(u8* dir) {
-
-  DIR* d;
-  struct dirent* de;
-  u32 min_len = MAX_DICT_FILE, max_len = 0, dict_level = 0;
-  u8* x;
-
-  /* If the name carries an "@level" suffix, extract the level and continue. */
-
-  if ((x = strchr(dir, '@'))) {
-
-    *x = 0;
-    dict_level = atoi(x + 1);
-
-  }
-
-  ACTF("Loading extra dictionary from '%s' (level %u)...", dir, dict_level);
-
-  d = opendir(dir);
-
-  if (!d) {
-
-    if (errno == ENOTDIR) {
-      load_extras_file(dir, &min_len, &max_len, dict_level);
-      goto check_and_sort;
-    }
-
-    PFATAL("Unable to open '%s'", dir);
-
-  }
-
-  if (x) FATAL("Dictionary levels not supported for directories.");
-
-  while ((de = readdir(d))) {
-
-    struct stat st;
-    u8* fn = alloc_printf("%s/%s", dir, de->d_name);
-    s32 fd;
-
-    if (lstat(fn, &st) || access(fn, R_OK))
-      PFATAL("Unable to access '%s'", fn);
-
-    /* This also takes care of . and .. */
-    if (!S_ISREG(st.st_mode) || !st.st_size) {
-
-      ck_free(fn);
-      continue;
-
-    }
-
-    if (st.st_size > MAX_DICT_FILE)
-      FATAL("Extra '%s' is too big (%s, limit is %s)", fn,
-            DMS(st.st_size), DMS(MAX_DICT_FILE));
-
-    if (min_len > st.st_size) min_len = st.st_size;
-    if (max_len < st.st_size) max_len = st.st_size;
-
-    extras = ck_realloc_block(extras, (extras_cnt + 1) *
-               sizeof(struct extra_data));
-
-    extras[extras_cnt].data = ck_alloc(st.st_size);
-    extras[extras_cnt].len  = st.st_size;
-
-    fd = open(fn, O_RDONLY);
-
-    if (fd < 0) PFATAL("Unable to open '%s'", fn);
-
-    ck_read(fd, extras[extras_cnt].data, st.st_size, fn);
-
-    close(fd);
-    ck_free(fn);
-
-    ++extras_cnt;
-
-  }
-
-  closedir(d);
-
-check_and_sort:
-
-  if (!extras_cnt) FATAL("No usable files in '%s'", dir);
-
-  qsort(extras, extras_cnt, sizeof(struct extra_data), compare_extras_len);
-
-  OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt,
-      DMS(min_len), DMS(max_len));
-
-  if (max_len > 32)
-    WARNF("Some tokens are relatively large (%s) - consider trimming.",
-          DMS(max_len));
-
-  if (extras_cnt > MAX_DET_EXTRAS)
-    WARNF("More than %u tokens - will use them probabilistically.",
-          MAX_DET_EXTRAS);
-
-}
-
-
-
-
-/* Helper function for maybe_add_auto() */
-
-static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) {
-
-  while (len--) if (tolower(*(m1++)) ^ tolower(*(m2++))) return 1;
-  return 0;
-
-}
-
-
-/* Maybe add automatic extra. */
-
-static void maybe_add_auto(u8* mem, u32 len) {
-
-  u32 i;
-
-  /* Allow users to specify that they don't want auto dictionaries. */
-
-  if (!MAX_AUTO_EXTRAS || !USE_AUTO_EXTRAS) return;
-
-  /* Skip runs of identical bytes. */
-
-  for (i = 1; i < len; ++i)
-    if (mem[0] ^ mem[i]) break;
-
-  if (i == len) return;
-
-  /* Reject builtin interesting values. */
-
-  if (len == 2) {
-
-    i = sizeof(interesting_16) >> 1;
-
-    while (i--) 
-      if (*((u16*)mem) == interesting_16[i] ||
-          *((u16*)mem) == SWAP16(interesting_16[i])) return;
-
-  }
-
-  if (len == 4) {
-
-    i = sizeof(interesting_32) >> 2;
-
-    while (i--) 
-      if (*((u32*)mem) == interesting_32[i] ||
-          *((u32*)mem) == SWAP32(interesting_32[i])) return;
-
-  }
-
-  /* Reject anything that matches existing extras. Do a case-insensitive
-     match. We optimize by exploiting the fact that extras[] are sorted
-     by size. */
-
-  for (i = 0; i < extras_cnt; ++i)
-    if (extras[i].len >= len) break;
-
-  for (; i < extras_cnt && extras[i].len == len; ++i)
-    if (!memcmp_nocase(extras[i].data, mem, len)) return;
-
-  /* Last but not least, check a_extras[] for matches. There are no
-     guarantees of a particular sort order. */
-
-  auto_changed = 1;
-
-  for (i = 0; i < a_extras_cnt; ++i) {
-
-    if (a_extras[i].len == len && !memcmp_nocase(a_extras[i].data, mem, len)) {
-
-      a_extras[i].hit_cnt++;
-      goto sort_a_extras;
-
-    }
-
-  }
-
-  /* At this point, looks like we're dealing with a new entry. So, let's
-     append it if we have room. Otherwise, let's randomly evict some other
-     entry from the bottom half of the list. */
-
-  if (a_extras_cnt < MAX_AUTO_EXTRAS) {
-
-    a_extras = ck_realloc_block(a_extras, (a_extras_cnt + 1) *
-                                sizeof(struct extra_data));
-
-    a_extras[a_extras_cnt].data = ck_memdup(mem, len);
-    a_extras[a_extras_cnt].len  = len;
-    ++a_extras_cnt;
-
-  } else {
-
-    i = MAX_AUTO_EXTRAS / 2 +
-        UR((MAX_AUTO_EXTRAS + 1) / 2);
-
-    ck_free(a_extras[i].data);
-
-    a_extras[i].data    = ck_memdup(mem, len);
-    a_extras[i].len     = len;
-    a_extras[i].hit_cnt = 0;
-
-  }
-
-sort_a_extras:
-
-  /* First, sort all auto extras by use count, descending order. */
-
-  qsort(a_extras, a_extras_cnt, sizeof(struct extra_data),
-        compare_extras_use_d);
-
-  /* Then, sort the top USE_AUTO_EXTRAS entries by size. */
-
-  qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt),
-        sizeof(struct extra_data), compare_extras_len);
-
-}
-
-
-/* Save automatically generated extras. */
-
-static void save_auto(void) {
-
-  u32 i;
-
-  if (!auto_changed) return;
-  auto_changed = 0;
-
-  for (i = 0; i < MIN(USE_AUTO_EXTRAS, a_extras_cnt); ++i) {
-
-    u8* fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", out_dir, i);
-    s32 fd;
-
-    fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);
-
-    if (fd < 0) PFATAL("Unable to create '%s'", fn);
-
-    ck_write(fd, a_extras[i].data, a_extras[i].len, fn);
-
-    close(fd);
-    ck_free(fn);
-
-  }
-
-}
-
-
-/* Load automatically generated extras. */
-
-static void load_auto(void) {
-
-  u32 i;
-
-  for (i = 0; i < USE_AUTO_EXTRAS; ++i) {
-
-    u8  tmp[MAX_AUTO_EXTRA + 1];
-    u8* fn = alloc_printf("%s/.state/auto_extras/auto_%06u", in_dir, i);
-    s32 fd, len;
-
-    fd = open(fn, O_RDONLY, 0600);
-
-    if (fd < 0) {
-
-      if (errno != ENOENT) PFATAL("Unable to open '%s'", fn);
-      ck_free(fn);
-      break;
-
-    }
-
-    /* We read one byte more to cheaply detect tokens that are too
-       long (and skip them). */
-
-    len = read(fd, tmp, MAX_AUTO_EXTRA + 1);
-
-    if (len < 0) PFATAL("Unable to read from '%s'", fn);
-
-    if (len >= MIN_AUTO_EXTRA && len <= MAX_AUTO_EXTRA)
-      maybe_add_auto(tmp, len);
-
-    close(fd);
-    ck_free(fn);
-
-  }
-
-  if (i) OKF("Loaded %u auto-discovered dictionary tokens.", i);
-  else OKF("No auto-generated dictionary tokens to reuse.");
-
-}
-
-
-/* Destroy extras. */
-
-static void destroy_extras(void) {
-
-  u32 i;
-
-  for (i = 0; i < extras_cnt; ++i) 
-    ck_free(extras[i].data);
-
-  ck_free(extras);
-
-  for (i = 0; i < a_extras_cnt; ++i) 
-    ck_free(a_extras[i].data);
-
-  ck_free(a_extras);
-
-}
-
-
-/* Spin up fork server (instrumented mode only). The idea is explained here:
-
-   http://lcamtuf.blogspot.com/2014/10/fuzzing-binaries-without-execve.html
-
-   In essence, the instrumentation allows us to skip execve(), and just keep
-   cloning a stopped child. So, we just execute once, and then send commands
-   through a pipe. The other part of this logic is in afl-as.h. */
-
-EXP_ST void init_forkserver(char** argv) {
-
-  static struct itimerval it;
-  int st_pipe[2], ctl_pipe[2];
-  int status;
-  s32 rlen;
-
-  ACTF("Spinning up the fork server...");
-
-  if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed");
-
-  forksrv_pid = fork();
-
-  if (forksrv_pid < 0) PFATAL("fork() failed");
-
-  if (!forksrv_pid) {
-
-    /* CHILD PROCESS */
-
-    struct rlimit r;
-
-    /* Umpf. On OpenBSD, the default fd limit for root users is set to
-       soft 128. Let's try to fix that... */
-
-    if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) {
-
-      r.rlim_cur = FORKSRV_FD + 2;
-      setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */
-
-    }
-
-    if (mem_limit) {
-
-      r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
-
-#ifdef RLIMIT_AS
-
-      setrlimit(RLIMIT_AS, &r); /* Ignore errors */
-
-#else
-
-      /* This takes care of OpenBSD, which doesn't have RLIMIT_AS, but
-         according to reliable sources, RLIMIT_DATA covers anonymous
-         maps - so we should be getting good protection against OOM bugs. */
-
-      setrlimit(RLIMIT_DATA, &r); /* Ignore errors */
-
-#endif /* ^RLIMIT_AS */
-
-
-    }
-
-    /* Dumping cores is slow and can lead to anomalies if SIGKILL is delivered
-       before the dump is complete. */
-
-    r.rlim_max = r.rlim_cur = 0;
-
-    setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
-
-    /* Isolate the process and configure standard descriptors. If out_file is
-       specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
-
-    setsid();
-
-    if (!getenv("AFL_DEBUG_CHILD_OUTPUT")) {
-      dup2(dev_null_fd, 1);
-      dup2(dev_null_fd, 2);
-    }
-
-    if (out_file) {
-
-      dup2(dev_null_fd, 0);
-
-    } else {
-
-      dup2(out_fd, 0);
-      close(out_fd);
-
-    }
-
-    /* Set up control and status pipes, close the unneeded original fds. */
-
-    if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) PFATAL("dup2() failed");
-    if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) PFATAL("dup2() failed");
-
-    close(ctl_pipe[0]);
-    close(ctl_pipe[1]);
-    close(st_pipe[0]);
-    close(st_pipe[1]);
-
-    close(out_dir_fd);
-    close(dev_null_fd);
-#ifndef HAVE_ARC4RANDOM
-    close(dev_urandom_fd);
-#endif
-    close(fileno(plot_file));
-
-    /* This should improve performance a bit, since it stops the linker from
-       doing extra work post-fork(). */
-
-    if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0);
-
-    /* Set sane defaults for ASAN if nothing else specified. */
-
-    setenv("ASAN_OPTIONS", "abort_on_error=1:"
-                           "detect_leaks=0:"
-                           "symbolize=0:"
-                           "allocator_may_return_null=1", 0);
-
-    /* MSAN is tricky, because it doesn't support abort_on_error=1 at this
-       point. So, we do this in a very hacky way. */
-
-    setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
-                           "symbolize=0:"
-                           "abort_on_error=1:"
-                           "allocator_may_return_null=1:"
-                           "msan_track_origins=0", 0);
-
-    execv(target_path, argv);
-
-    /* Use a distinctive bitmap signature to tell the parent about execv()
-       falling through. */
-
-    *(u32*)trace_bits = EXEC_FAIL_SIG;
-    exit(0);
-
-  }
-
-  /* PARENT PROCESS */
-
-  /* Close the unneeded endpoints. */
-
-  close(ctl_pipe[0]);
-  close(st_pipe[1]);
-
-  fsrv_ctl_fd = ctl_pipe[1];
-  fsrv_st_fd  = st_pipe[0];
-
-  /* Wait for the fork server to come up, but don't wait too long. */
-
-  it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000);
-  it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;
-
-  setitimer(ITIMER_REAL, &it, NULL);
-
-  rlen = read(fsrv_st_fd, &status, 4);
-
-  it.it_value.tv_sec = 0;
-  it.it_value.tv_usec = 0;
-
-  setitimer(ITIMER_REAL, &it, NULL);
-
-  /* If we have a four-byte "hello" message from the server, we're all set.
-     Otherwise, try to figure out what went wrong. */
-
-  if (rlen == 4) {
-    OKF("All right - fork server is up.");
-    return;
-  }
-
-  if (child_timed_out)
-    FATAL("Timeout while initializing fork server (adjusting -t may help)");
-
-  if (waitpid(forksrv_pid, &status, 0) <= 0)
-    PFATAL("waitpid() failed");
-
-  if (WIFSIGNALED(status)) {
-
-    if (mem_limit && mem_limit < 500 && uses_asan) {
-
-      SAYF("\n" cLRD "[-] " cRST
-           "Whoops, the target binary crashed suddenly, before receiving any input\n"
-           "    from the fuzzer! Since it seems to be built with ASAN and you have a\n"
-           "    restrictive memory limit configured, this is expected; please read\n"
-           "    %s/notes_for_asan.txt for help.\n", doc_path);
-
-    } else if (!mem_limit) {
-
-      SAYF("\n" cLRD "[-] " cRST
-           "Whoops, the target binary crashed suddenly, before receiving any input\n"
-           "    from the fuzzer! There are several probable explanations:\n\n"
-
-           "    - The binary is just buggy and explodes entirely on its own. If so, you\n"
-           "      need to fix the underlying problem or find a better replacement.\n\n"
-
-#ifdef __APPLE__
-
-           "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
-           "      break afl-fuzz performance optimizations when running platform-specific\n"
-           "      targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"
-
-#endif /* __APPLE__ */
-
-           "    - Less likely, there is a horrible bug in the fuzzer. If other options\n"
-           "      fail, poke <afl-users@googlegroups.com> for troubleshooting tips.\n");
-
-    } else {
-
-      SAYF("\n" cLRD "[-] " cRST
-           "Whoops, the target binary crashed suddenly, before receiving any input\n"
-           "    from the fuzzer! There are several probable explanations:\n\n"
-
-           "    - The current memory limit (%s) is too restrictive, causing the\n"
-           "      target to hit an OOM condition in the dynamic linker. Try bumping up\n"
-           "      the limit with the -m setting in the command line. A simple way confirm\n"
-           "      this diagnosis would be:\n\n"
-
-#ifdef RLIMIT_AS
-           "      ( ulimit -Sv $[%llu << 10]; /path/to/fuzzed_app )\n\n"
-#else
-           "      ( ulimit -Sd $[%llu << 10]; /path/to/fuzzed_app )\n\n"
-#endif /* ^RLIMIT_AS */
-
-           "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
-           "      estimate the required amount of virtual memory for the binary.\n\n"
-
-           "    - The binary is just buggy and explodes entirely on its own. If so, you\n"
-           "      need to fix the underlying problem or find a better replacement.\n\n"
-
-#ifdef __APPLE__
-
-           "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
-           "      break afl-fuzz performance optimizations when running platform-specific\n"
-           "      targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"
-
-#endif /* __APPLE__ */
-
-           "    - Less likely, there is a horrible bug in the fuzzer. If other options\n"
-           "      fail, poke <afl-users@googlegroups.com> for troubleshooting tips.\n",
-           DMS(mem_limit << 20), mem_limit - 1);
-
-    }
-
-    FATAL("Fork server crashed with signal %d", WTERMSIG(status));
-
-  }
-
-  if (*(u32*)trace_bits == EXEC_FAIL_SIG)
-    FATAL("Unable to execute target application ('%s')", argv[0]);
-
-  if (mem_limit && mem_limit < 500 && uses_asan) {
-
-    SAYF("\n" cLRD "[-] " cRST
-           "Hmm, looks like the target binary terminated before we could complete a\n"
-           "    handshake with the injected code. Since it seems to be built with ASAN and\n"
-           "    you have a restrictive memory limit configured, this is expected; please\n"
-           "    read %s/notes_for_asan.txt for help.\n", doc_path);
-
-  } else if (!mem_limit) {
-
-    SAYF("\n" cLRD "[-] " cRST
-         "Hmm, looks like the target binary terminated before we could complete a\n"
-         "    handshake with the injected code. Perhaps there is a horrible bug in the\n"
-         "    fuzzer. Poke <afl-users@googlegroups.com> for troubleshooting tips.\n");
-
-  } else {
-
-    SAYF("\n" cLRD "[-] " cRST
-         "Hmm, looks like the target binary terminated before we could complete a\n"
-         "    handshake with the injected code. There are %s probable explanations:\n\n"
-
-         "%s"
-         "    - The current memory limit (%s) is too restrictive, causing an OOM\n"
-         "      fault in the dynamic linker. This can be fixed with the -m option. A\n"
-         "      simple way to confirm the diagnosis may be:\n\n"
-
-#ifdef RLIMIT_AS
-         "      ( ulimit -Sv $[%llu << 10]; /path/to/fuzzed_app )\n\n"
-#else
-         "      ( ulimit -Sd $[%llu << 10]; /path/to/fuzzed_app )\n\n"
-#endif /* ^RLIMIT_AS */
-
-         "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
-         "      estimate the required amount of virtual memory for the binary.\n\n"
-
-         "    - Less likely, there is a horrible bug in the fuzzer. If other options\n"
-         "      fail, poke <afl-users@googlegroups.com> for troubleshooting tips.\n",
-         getenv(DEFER_ENV_VAR) ? "three" : "two",
-         getenv(DEFER_ENV_VAR) ?
-         "    - You are using deferred forkserver, but __AFL_INIT() is never\n"
-         "      reached before the program terminates.\n\n" : "",
-         DMS(mem_limit << 20), mem_limit - 1);
-
-  }
-
-  FATAL("Fork server handshake failed");
-
-}
-
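-/* Summary of the resulting wire protocol, as implemented above and in
-   run_target() below (derived from the code, not an original comment):
-   the server writes a 4-byte "hello" once it is up; for every execution,
-   the fuzzer writes 4 bytes (the previous run's timed-out flag) to
-   fsrv_ctl_fd, reads the 4-byte child PID from fsrv_st_fd, and then reads
-   the 4-byte waitpid() status from the same descriptor. */
-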
-
-/* Execute target application, monitoring for timeouts. Return status
-   information. The called program will update trace_bits[]. */
-
-static u8 run_target(char** argv, u32 timeout) {
-
-  static struct itimerval it;
-  static u32 prev_timed_out = 0;
-
-  int status = 0;
-  u32 tb4;
-
-  child_timed_out = 0;
-
-  /* After this memset, trace_bits[] are effectively volatile, so we
-     must prevent any earlier operations from venturing into that
-     territory. */
-
-  memset(trace_bits, 0, MAP_SIZE);
-  MEM_BARRIER();
-
-  /* If we're running in "dumb" mode, we can't rely on the fork server
-     logic compiled into the target program, so we will just keep calling
-     execve(). There is a bit of code duplication between here and 
-     init_forkserver(), but c'est la vie. */
-
-  if (dumb_mode == 1 || no_forkserver) {
-
-    child_pid = fork();
-
-    if (child_pid < 0) PFATAL("fork() failed");
-
-    if (!child_pid) {
-
-      struct rlimit r;
-
-      if (mem_limit) {
-
-        r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
-
-#ifdef RLIMIT_AS
-
-        setrlimit(RLIMIT_AS, &r); /* Ignore errors */
-
-#else
-
-        setrlimit(RLIMIT_DATA, &r); /* Ignore errors */
-
-#endif /* ^RLIMIT_AS */
-
-      }
-
-      r.rlim_max = r.rlim_cur = 0;
-
-      setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
-
-      /* Isolate the process and configure standard descriptors. If out_file is
-         specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
-
-      setsid();
-
-      dup2(dev_null_fd, 1);
-      dup2(dev_null_fd, 2);
-
-      if (out_file) {
-
-        dup2(dev_null_fd, 0);
-
-      } else {
-
-        dup2(out_fd, 0);
-        close(out_fd);
-
-      }
-
-      /* On Linux, it would be faster to use O_CLOEXEC - a possible TODO. */
-
-      close(dev_null_fd);
-      close(out_dir_fd);
-#ifndef HAVE_ARC4RANDOM
-      close(dev_urandom_fd);
-#endif
-      close(fileno(plot_file));
-
-      /* Set sane defaults for ASAN if nothing else specified. */
-
-      setenv("ASAN_OPTIONS", "abort_on_error=1:"
-                             "detect_leaks=0:"
-                             "symbolize=0:"
-                             "allocator_may_return_null=1", 0);
-
-      setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
-                             "symbolize=0:"
-                             "msan_track_origins=0", 0);
-
-      execv(target_path, argv);
-
-      /* Use a distinctive bitmap value to tell the parent about execv()
-         falling through. */
-
-      *(u32*)trace_bits = EXEC_FAIL_SIG;
-      exit(0);
-
-    }
-
-  } else {
-
-    s32 res;
-
-    /* In non-dumb mode, we have the fork server up and running, so simply
-       tell it to have at it, and then read back PID. */
-
-    if ((res = write(fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
-
-      if (stop_soon) return 0;
-      RPFATAL(res, "Unable to request new process from fork server (OOM?)");
-
-    }
-
-    if ((res = read(fsrv_st_fd, &child_pid, 4)) != 4) {
-
-      if (stop_soon) return 0;
-      RPFATAL(res, "Unable to request new process from fork server (OOM?)");
-
-    }
-
-    if (child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
-
-  }
-
-  /* Configure timeout, as requested by user, then wait for child to terminate. */
-
-  it.it_value.tv_sec = (timeout / 1000);
-  it.it_value.tv_usec = (timeout % 1000) * 1000;
-
-  setitimer(ITIMER_REAL, &it, NULL);
-
-  /* The SIGALRM handler simply kills the child_pid and sets child_timed_out. */
-
-  if (dumb_mode == 1 || no_forkserver) {
-
-    if (waitpid(child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
-
-  } else {
-
-    s32 res;
-
-    if ((res = read(fsrv_st_fd, &status, 4)) != 4) {
-
-      if (stop_soon) return 0;
-      RPFATAL(res, "Unable to communicate with fork server (OOM?)");
-
-    }
-
-  }
-
-  if (!WIFSTOPPED(status)) child_pid = 0;
-
-  it.it_value.tv_sec = 0;
-  it.it_value.tv_usec = 0;
-
-  setitimer(ITIMER_REAL, &it, NULL);
-
-  ++total_execs;
-
-  /* Any subsequent operations on trace_bits must not be moved by the
-     compiler below this point. Past this location, trace_bits[] behave
-     very normally and do not have to be treated as volatile. */
-
-  MEM_BARRIER();
-
-  tb4 = *(u32*)trace_bits;
-
-#ifdef __x86_64__
-  classify_counts((u64*)trace_bits);
-#else
-  classify_counts((u32*)trace_bits);
-#endif /* ^__x86_64__ */
-
-  prev_timed_out = child_timed_out;
-
-  /* Report outcome to caller. */
-
-  if (WIFSIGNALED(status) && !stop_soon) {
-
-    kill_signal = WTERMSIG(status);
-
-    if (child_timed_out && kill_signal == SIGKILL) return FAULT_TMOUT;
-
-    return FAULT_CRASH;
-
-  }
-
-  /* A somewhat nasty hack for MSAN, which doesn't support abort_on_error and
-     must use a special exit code. */
-
-  if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
-    kill_signal = 0;
-    return FAULT_CRASH;
-  }
-
-  if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG)
-    return FAULT_ERROR;
-
-  return FAULT_NONE;
-
-}
-
-
-/* Write modified data to file for testing. If out_file is set, the old file
-   is unlinked and a new one is created. Otherwise, out_fd is rewound and
-   truncated. */
-
-static void write_to_testcase(void* mem, u32 len) {
-
-  s32 fd = out_fd;
-
-  if (out_file) {
-
-    unlink(out_file); /* Ignore errors. */
-
-    fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
-
-    if (fd < 0) PFATAL("Unable to create '%s'", out_file);
-
-  } else lseek(fd, 0, SEEK_SET);
-
-  if (pre_save_handler) {
-
-    u8* new_data;
-    size_t new_size = pre_save_handler(mem, len, &new_data);
-    ck_write(fd, new_data, new_size, out_file);
-
-    /* Keep the ftruncate() below in sync with what was actually written. */
-    len = (u32)new_size;
-
-  } else {
-
-    ck_write(fd, mem, len, out_file);
-
-  }
-
-  if (!out_file) {
-
-    if (ftruncate(fd, len)) PFATAL("ftruncate() failed");
-    lseek(fd, 0, SEEK_SET);
-
-  } else close(fd);
-
-}
-
-
-/* The same, but with an adjustable gap. Used for trimming. */
-
-static void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
-
-  s32 fd = out_fd;
-  u32 tail_len = len - skip_at - skip_len;
-
-  if (out_file) {
-
-    unlink(out_file); /* Ignore errors. */
-
-    fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
-
-    if (fd < 0) PFATAL("Unable to create '%s'", out_file);
-
-  } else lseek(fd, 0, SEEK_SET);
-
-  if (skip_at) ck_write(fd, mem, skip_at, out_file);
-
-  if (tail_len) ck_write(fd, mem + skip_at + skip_len, tail_len, out_file);
-
-  if (!out_file) {
-
-    if (ftruncate(fd, len - skip_len)) PFATAL("ftruncate() failed");
-    lseek(fd, 0, SEEK_SET);
-
-  } else close(fd);
-
-}
-
-
-static void show_stats(void);
-
-/* Calibrate a new test case. This is done when processing the input directory
-   to warn about flaky or otherwise problematic test cases early on; and when
-   new paths are discovered to detect variable behavior and so on. */
-
-static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
-                         u32 handicap, u8 from_queue) {
-
-  static u8 first_trace[MAP_SIZE];
-
-  u8  fault = 0, new_bits = 0, var_detected = 0,
-      first_run = (q->exec_cksum == 0);
-
-  u64 start_us, stop_us;
-
-  s32 old_sc = stage_cur, old_sm = stage_max;
-  u32 use_tmout = exec_tmout;
-  u8* old_sn = stage_name;
-
-  /* Be a bit more generous about timeouts when resuming sessions, or when
-     trying to calibrate already-added finds. This helps avoid trouble due
-     to intermittent latency. */
-
-  if (!from_queue || resuming_fuzz)
-    use_tmout = MAX(exec_tmout + CAL_TMOUT_ADD,
-                    exec_tmout * CAL_TMOUT_PERC / 100);
-
-  ++q->cal_failed;
-
-  stage_name = "calibration";
-  stage_max  = fast_cal ? 3 : CAL_CYCLES;
-
-  /* Make sure the forkserver is up before we do anything, and let's not
-     count its spin-up time toward binary calibration. */
-
-  if (dumb_mode != 1 && !no_forkserver && !forksrv_pid)
-    init_forkserver(argv);
-
-  if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE);
-
-  start_us = get_cur_time_us();
-
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-    u32 cksum;
-
-    if (!first_run && !(stage_cur % stats_update_freq)) show_stats();
-
-    write_to_testcase(use_mem, q->len);
-
-    fault = run_target(argv, use_tmout);
-
-    /* stop_soon is set by the handler for Ctrl+C. When it's pressed,
-       we want to bail out quickly. */
-
-    if (stop_soon || fault != crash_mode) goto abort_calibration;
-
-    if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) {
-      fault = FAULT_NOINST;
-      goto abort_calibration;
-    }
-
-    cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
-
-    if (q->exec_cksum != cksum) {
-
-      u8 hnb = has_new_bits(virgin_bits);
-      if (hnb > new_bits) new_bits = hnb;
-
-      if (q->exec_cksum) {
-
-        u32 i;
-
-        for (i = 0; i < MAP_SIZE; ++i) {
-
-          if (!var_bytes[i] && first_trace[i] != trace_bits[i]) {
-
-            var_bytes[i] = 1;
-            stage_max    = CAL_CYCLES_LONG;
-
-          }
-
-        }
-
-        var_detected = 1;
-
-      } else {
-
-        q->exec_cksum = cksum;
-        memcpy(first_trace, trace_bits, MAP_SIZE);
-
-      }
-
-    }
-
-  }
-
-  stop_us = get_cur_time_us();
-
-  total_cal_us     += stop_us - start_us;
-  total_cal_cycles += stage_max;
-
-  /* OK, let's collect some stats about the performance of this test case.
-     This is used for fuzzing air time calculations in calculate_score(). */
-
-  q->exec_us     = (stop_us - start_us) / stage_max;
-  q->bitmap_size = count_bytes(trace_bits);
-  q->handicap    = handicap;
-  q->cal_failed  = 0;
-
-  total_bitmap_size += q->bitmap_size;
-  ++total_bitmap_entries;
-
-  update_bitmap_score(q);
-
-  /* If this case didn't result in new output from the instrumentation, tell
-     parent. This is a non-critical problem, but something to warn the user
-     about. */
-
-  if (!dumb_mode && first_run && !fault && !new_bits) fault = FAULT_NOBITS;
-
-abort_calibration:
-
-  if (new_bits == 2 && !q->has_new_cov) {
-    q->has_new_cov = 1;
-    ++queued_with_cov;
-  }
-
-  /* Mark variable paths. */
-
-  if (var_detected) {
-
-    var_byte_count = count_bytes(var_bytes);
-
-    if (!q->var_behavior) {
-      mark_as_variable(q);
-      ++queued_variable;
-    }
-
-  }
-
-  stage_name = old_sn;
-  stage_cur  = old_sc;
-  stage_max  = old_sm;
-
-  if (!first_run) show_stats();
-
-  return fault;
-
-}
-
-
-/* Examine map coverage. Called once, for first test case. */
-
-static void check_map_coverage(void) {
-
-  u32 i;
-
-  if (count_bytes(trace_bits) < 100) return;
-
-  for (i = (1 << (MAP_SIZE_POW2 - 1)); i < MAP_SIZE; ++i)
-    if (trace_bits[i]) return;
-
-  WARNF("Recompile binary with newer version of afl to improve coverage!");
-
-}
-
-
-/* Perform dry run of all test cases to confirm that the app is working as
-   expected. This is done only for the initial inputs, and only once. */
-
-static void perform_dry_run(char** argv) {
-
-  struct queue_entry* q = queue;
-  u32 cal_failures = 0;
-  u8* skip_crashes = getenv("AFL_SKIP_CRASHES");
-
-  while (q) {
-
-    u8* use_mem;
-    u8  res;
-    s32 fd;
-
-    u8* fn = strrchr(q->fname, '/') + 1;
-
-    ACTF("Attempting dry run with '%s'...", fn);
-
-    fd = open(q->fname, O_RDONLY);
-    if (fd < 0) PFATAL("Unable to open '%s'", q->fname);
-
-    use_mem = ck_alloc_nozero(q->len);
-
-    if (read(fd, use_mem, q->len) != q->len)
-      FATAL("Short read from '%s'", q->fname);
-
-    close(fd);
-
-    res = calibrate_case(argv, q, use_mem, 0, 1);
-    ck_free(use_mem);
-
-    if (stop_soon) return;
-
-    if (res == crash_mode || res == FAULT_NOBITS)
-      SAYF(cGRA "    len = %u, map size = %u, exec speed = %llu us\n" cRST, 
-           q->len, q->bitmap_size, q->exec_us);
-
-    switch (res) {
-
-      case FAULT_NONE:
-
-        if (q == queue) check_map_coverage();
-
-        if (crash_mode) FATAL("Test case '%s' does *NOT* crash", fn);
-
-        break;
-
-      case FAULT_TMOUT:
-
-        if (timeout_given) {
-
-          /* The -t nn+ syntax in the command line sets timeout_given to '2' and
-             instructs afl-fuzz to tolerate but skip queue entries that time
-             out. */
-
-          if (timeout_given > 1) {
-            WARNF("Test case results in a timeout (skipping)");
-            q->cal_failed = CAL_CHANCES;
-            ++cal_failures;
-            break;
-          }
-
-          SAYF("\n" cLRD "[-] " cRST
-               "The program took more than %u ms to process one of the initial test cases.\n"
-               "    Usually, the right thing to do is to relax the -t option - or to delete it\n"
-               "    altogether and allow the fuzzer to auto-calibrate. That said, if you know\n"
-               "    what you are doing and want to simply skip the unruly test cases, append\n"
-               "    '+' at the end of the value passed to -t ('-t %u+').\n", exec_tmout,
-               exec_tmout);
-
-          FATAL("Test case '%s' results in a timeout", fn);
-
-        } else {
-
-          SAYF("\n" cLRD "[-] " cRST
-               "The program took more than %u ms to process one of the initial test cases.\n"
-               "    This is bad news; raising the limit with the -t option is possible, but\n"
-               "    will probably make the fuzzing process extremely slow.\n\n"
-
-               "    If this test case is just a fluke, the other option is to just avoid it\n"
-               "    altogether, and find one that is less of a CPU hog.\n", exec_tmout);
-
-          FATAL("Test case '%s' results in a timeout", fn);
-
-        }
-
-      case FAULT_CRASH:  
-
-        if (crash_mode) break;
-
-        if (skip_crashes) {
-          WARNF("Test case results in a crash (skipping)");
-          q->cal_failed = CAL_CHANCES;
-          ++cal_failures;
-          break;
-        }
-
-        if (mem_limit) {
-
-          SAYF("\n" cLRD "[-] " cRST
-               "Oops, the program crashed with one of the test cases provided. There are\n"
-               "    several possible explanations:\n\n"
-
-               "    - The test case causes known crashes under normal working conditions. If\n"
-               "      so, please remove it. The fuzzer should be seeded with interesting\n"
-               "      inputs - but not ones that cause an outright crash.\n\n"
-
-               "    - The current memory limit (%s) is too low for this program, causing\n"
-               "      it to die due to OOM when parsing valid files. To fix this, try\n"
-               "      bumping it up with the -m setting in the command line. If in doubt,\n"
-               "      try something along the lines of:\n\n"
-
-#ifdef RLIMIT_AS
-               "      ( ulimit -Sv $[%llu << 10]; /path/to/binary [...] <testcase )\n\n"
-#else
-               "      ( ulimit -Sd $[%llu << 10]; /path/to/binary [...] <testcase )\n\n"
-#endif /* ^RLIMIT_AS */
-
-               "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
-               "      estimate the required amount of virtual memory for the binary. Also,\n"
-               "      if you are using ASAN, see %s/notes_for_asan.txt.\n\n"
-
-#ifdef __APPLE__
-  
-               "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
-               "      break afl-fuzz performance optimizations when running platform-specific\n"
-               "      binaries. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"
-
-#endif /* __APPLE__ */
-
-               "    - Least likely, there is a horrible bug in the fuzzer. If other options\n"
-               "      fail, poke <afl-users@googlegroups.com> for troubleshooting tips.\n",
-               DMS(mem_limit << 20), mem_limit - 1, doc_path);
-
-        } else {
-
-          SAYF("\n" cLRD "[-] " cRST
-               "Oops, the program crashed with one of the test cases provided. There are\n"
-               "    several possible explanations:\n\n"
-
-               "    - The test case causes known crashes under normal working conditions. If\n"
-               "      so, please remove it. The fuzzer should be seeded with interesting\n"
-               "      inputs - but not ones that cause an outright crash.\n\n"
-
-#ifdef __APPLE__
-  
-               "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n"
-               "      break afl-fuzz performance optimizations when running platform-specific\n"
-               "      binaries. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"
-
-#endif /* __APPLE__ */
-
-               "    - Least likely, there is a horrible bug in the fuzzer. If other options\n"
-               "      fail, poke <afl-users@googlegroups.com> for troubleshooting tips.\n");
-
-        }
-
-        FATAL("Test case '%s' results in a crash", fn);
-
-      case FAULT_ERROR:
-
-        FATAL("Unable to execute target application ('%s')", argv[0]);
-
-      case FAULT_NOINST:
-
-        FATAL("No instrumentation detected");
-
-      case FAULT_NOBITS: 
-
-        ++useless_at_start;
-
-        if (!in_bitmap && !shuffle_queue)
-          WARNF("No new instrumentation output, test case may be useless.");
-
-        break;
-
-    }
-
-    if (q->var_behavior) WARNF("Instrumentation output varies across runs.");
-
-    q = q->next;
-
-  }
-
-  if (cal_failures) {
-
-    if (cal_failures == queued_paths)
-      FATAL("All test cases time out%s, giving up!",
-            skip_crashes ? " or crash" : "");
-
-    WARNF("Skipped %u test cases (%0.02f%%) due to timeouts%s.", cal_failures,
-          ((double)cal_failures) * 100 / queued_paths,
-          skip_crashes ? " or crashes" : "");
-
-    if (cal_failures * 5 > queued_paths)
-      WARNF(cLRD "High percentage of rejected test cases, check settings!");
-
-  }
-
-  OKF("All test cases processed.");
-
-}
-
-
-/* Helper function: link() if possible, copy otherwise. */
-
-static void link_or_copy(u8* old_path, u8* new_path) {
-
-  s32 i = link(old_path, new_path);
-  s32 sfd, dfd;
-  u8* tmp;
-
-  if (!i) return;
-
-  sfd = open(old_path, O_RDONLY);
-  if (sfd < 0) PFATAL("Unable to open '%s'", old_path);
-
-  dfd = open(new_path, O_WRONLY | O_CREAT | O_EXCL, 0600);
-  if (dfd < 0) PFATAL("Unable to create '%s'", new_path);
-
-  tmp = ck_alloc(64 * 1024);
-
-  while ((i = read(sfd, tmp, 64 * 1024)) > 0) 
-    ck_write(dfd, tmp, i, new_path);
-
-  if (i < 0) PFATAL("read() failed");
-
-  ck_free(tmp);
-  close(sfd);
-  close(dfd);
-
-}
-
-
-static void nuke_resume_dir(void);
-
-/* Create hard links for input test cases in the output directory, choosing
-   good names and pivoting accordingly. */
-
-static void pivot_inputs(void) {
-
-  struct queue_entry* q = queue;
-  u32 id = 0;
-
-  ACTF("Creating hard links for all input files...");
-
-  while (q) {
-
-    u8  *nfn, *rsl = strrchr(q->fname, '/');
-    u32 orig_id;
-
-    if (!rsl) rsl = q->fname; else ++rsl;
-
-    /* If the original file name conforms to the syntax and the recorded
-       ID matches the one we'd assign, just use the original file name.
-       This is valuable for resuming fuzzing runs. */
-
-#ifndef SIMPLE_FILES
-#  define CASE_PREFIX "id:"
-#else
-#  define CASE_PREFIX "id_"
-#endif /* ^!SIMPLE_FILES */
-
-    if (!strncmp(rsl, CASE_PREFIX, 3) &&
-        sscanf(rsl + 3, "%06u", &orig_id) == 1 && orig_id == id) {
-
-      u8* src_str;
-      u32 src_id;
-
-      resuming_fuzz = 1;
-      nfn = alloc_printf("%s/queue/%s", out_dir, rsl);
-
-      /* Since we're at it, let's also try to find parent and figure out the
-         appropriate depth for this entry. */
-
-      src_str = strchr(rsl + 3, ':');
-
-      if (src_str && sscanf(src_str + 1, "%06u", &src_id) == 1) {
-
-        struct queue_entry* s = queue;
-        while (src_id-- && s) s = s->next;
-        if (s) q->depth = s->depth + 1;
-
-        if (max_depth < q->depth) max_depth = q->depth;
-
-      }
-
-    } else {
-
-      /* No dice - invent a new name, capturing the original one as a
-         substring. */
-
-#ifndef SIMPLE_FILES
-
-      u8* use_name = strstr(rsl, ",orig:");
-
-      if (use_name) use_name += 6; else use_name = rsl;
-      nfn = alloc_printf("%s/queue/id:%06u,orig:%s", out_dir, id, use_name);
-
-#else
-
-      nfn = alloc_printf("%s/queue/id_%06u", out_dir, id);
-
-#endif /* ^!SIMPLE_FILES */
-
-    }
-
-    /* Pivot to the new queue entry. */
-
-    link_or_copy(q->fname, nfn);
-    ck_free(q->fname);
-    q->fname = nfn;
-
-    /* Make sure that the passed_det value carries over, too. */
-
-    if (q->passed_det) mark_as_det_done(q);
-
-    q = q->next;
-    ++id;
-
-  }
-
-  if (in_place_resume) nuke_resume_dir();
-
-}
-
-
-#ifndef SIMPLE_FILES
-
-/* Construct a file name for a new test case, capturing the operation
-   that led to its discovery. Uses a static buffer. */
-
-static u8* describe_op(u8 hnb) {
-
-  static u8 ret[256];
-
-  if (syncing_party) {
-
-    sprintf(ret, "sync:%s,src:%06u", syncing_party, syncing_case);
-
-  } else {
-
-    sprintf(ret, "src:%06u", current_entry);
-
-    sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - start_time);
-
-    if (splicing_with >= 0)
-      sprintf(ret + strlen(ret), "+%06u", splicing_with);
-
-    sprintf(ret + strlen(ret), ",op:%s", stage_short);
-
-    if (stage_cur_byte >= 0) {
-
-      sprintf(ret + strlen(ret), ",pos:%u", stage_cur_byte);
-
-      if (stage_val_type != STAGE_VAL_NONE)
-        sprintf(ret + strlen(ret), ",val:%s%+d", 
-                (stage_val_type == STAGE_VAL_BE) ? "be:" : "",
-                stage_cur_val);
-
-    } else sprintf(ret + strlen(ret), ",rep:%u", stage_cur_val);
-
-  }
-
-  if (hnb == 2) strcat(ret, ",+cov");
-
-  return ret;
-
-}
-
-#endif /* !SIMPLE_FILES */
-
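-/* For illustration (hypothetical values, not from the original source), a
-   queue entry named via describe_op() could end up as:
-
-     id:000042,src:000007,time:123456,op:havoc,rep:16,+cov
-
-   i.e. entry 42, derived from entry 7, found 123.456 s in via havoc. */
-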
-
-/* Write a message accompanying the crash directory :-) */
-
-static void write_crash_readme(void) {
-
-  u8* fn = alloc_printf("%s/crashes/README.txt", out_dir);
-  s32 fd;
-  FILE* f;
-
-  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
-  ck_free(fn);
-
-  /* Do not die on errors here - that would be impolite. */
-
-  if (fd < 0) return;
-
-  f = fdopen(fd, "w");
-
-  if (!f) {
-    close(fd);
-    return;
-  }
-
-  fprintf(f, "Command line used to find this crash:\n\n"
-
-             "%s\n\n"
-
-             "If you can't reproduce a bug outside of afl-fuzz, be sure to set the same\n"
-             "memory limit. The limit used for this fuzzing session was %s.\n\n"
-
-             "Need a tool to minimize test cases before investigating the crashes or sending\n"
-             "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n"
-
-             "Found any cool bugs in open-source tools using afl-fuzz? If yes, please drop\n"
-             "an mail at <afl-users@googlegroups.com> once the issues are fixed\n\n"
-
-             "  https://github.com/vanhauser-thc/AFLplusplus\n\n",
-
-             orig_cmdline, DMS(mem_limit << 20)); /* ignore errors */
-
-  fclose(f);
-
-}
-
-
-/* Check if the result of an execve() during routine fuzzing is interesting,
-   save or queue the input test case for further analysis if so. Returns 1 if
-   entry is saved, 0 otherwise. */
-
-static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
-
-  if (len == 0) return 0;
-
-  u8  *fn = "";
-  u8  hnb;
-  s32 fd;
-  u8  keeping = 0, res;
-
-  /* Update path frequency. */
-  u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
-
-  struct queue_entry* q = queue;
-  while (q) {
-    if (q->exec_cksum == cksum)
-      q->n_fuzz = q->n_fuzz + 1;
-
-    q = q->next;
-
-  }
-
-  if (fault == crash_mode) {
-
-    /* Keep only if there are new bits in the map, add to queue for
-       future fuzzing, etc. */
-
-    if (!(hnb = has_new_bits(virgin_bits))) {
-      if (crash_mode) ++total_crashes;
-      return 0;
-    }    
-
-#ifndef SIMPLE_FILES
-
-    fn = alloc_printf("%s/queue/id:%06u,%s", out_dir, queued_paths,
-                      describe_op(hnb));
-
-#else
-
-    fn = alloc_printf("%s/queue/id_%06u", out_dir, queued_paths);
-
-#endif /* ^!SIMPLE_FILES */
-
-    add_to_queue(fn, len, 0);
-
-    if (hnb == 2) {
-      queue_top->has_new_cov = 1;
-      ++queued_with_cov;
-    }
-
-    queue_top->exec_cksum = cksum;
-
-    /* Try to calibrate inline; this also calls update_bitmap_score() when
-       successful. */
-
-    res = calibrate_case(argv, queue_top, mem, queue_cycle - 1, 0);
-
-    if (res == FAULT_ERROR)
-      FATAL("Unable to execute target application");
-
-    fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
-    if (fd < 0) PFATAL("Unable to create '%s'", fn);
-    ck_write(fd, mem, len, fn);
-    close(fd);
-
-    keeping = 1;
-
-  }
-
-  switch (fault) {
-
-    case FAULT_TMOUT:
-
-      /* Timeouts are not very interesting, but we're still obliged to keep
-         a handful of samples. We use the presence of new bits in the
-         hang-specific bitmap as a signal of uniqueness. In "dumb" mode, we
-         just keep everything. */
-
-      ++total_tmouts;
-
-      if (unique_hangs >= KEEP_UNIQUE_HANG) return keeping;
-
-      if (!dumb_mode) {
-
-#ifdef __x86_64__
-        simplify_trace((u64*)trace_bits);
-#else
-        simplify_trace((u32*)trace_bits);
-#endif /* ^__x86_64__ */
-
-        if (!has_new_bits(virgin_tmout)) return keeping;
-
-      }
-
-      ++unique_tmouts;
-
-      /* Before saving, we make sure that it's a genuine hang by re-running
-         the target with a more generous timeout (unless the default timeout
-         is already generous). */
-
-      if (exec_tmout < hang_tmout) {
-
-        u8 new_fault;
-        write_to_testcase(mem, len);
-        new_fault = run_target(argv, hang_tmout);
-
-        /* A corner case that one user reported bumping into: increasing the
-           timeout actually uncovers a crash. Make sure we don't discard it if
-           so. */
-
-        if (!stop_soon && new_fault == FAULT_CRASH) goto keep_as_crash;
-
-        if (stop_soon || new_fault != FAULT_TMOUT) return keeping;
-
-      }
-
-#ifndef SIMPLE_FILES
-
-      fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir,
-                        unique_hangs, describe_op(0));
-
-#else
-
-      fn = alloc_printf("%s/hangs/id_%06llu", out_dir,
-                        unique_hangs);
-
-#endif /* ^!SIMPLE_FILES */
-
-      ++unique_hangs;
-
-      last_hang_time = get_cur_time();
-
-      break;
-
-    case FAULT_CRASH:
-
-keep_as_crash:
-
-      /* This is handled in a manner roughly similar to timeouts,
-         except for slightly different limits and no need to re-run test
-         cases. */
-
-      ++total_crashes;
-
-      if (unique_crashes >= KEEP_UNIQUE_CRASH) return keeping;
-
-      if (!dumb_mode) {
-
-#ifdef __x86_64__
-        simplify_trace((u64*)trace_bits);
-#else
-        simplify_trace((u32*)trace_bits);
-#endif /* ^__x86_64__ */
-
-        if (!has_new_bits(virgin_crash)) return keeping;
-
-      }
-
-      if (!unique_crashes) write_crash_readme();
-
-#ifndef SIMPLE_FILES
-
-      fn = alloc_printf("%s/crashes/id:%06llu,sig:%02u,%s", out_dir,
-                        unique_crashes, kill_signal, describe_op(0));
-
-#else
-
-      fn = alloc_printf("%s/crashes/id_%06llu_%02u", out_dir, unique_crashes,
-                        kill_signal);
-
-#endif /* ^!SIMPLE_FILES */
-
-      ++unique_crashes;
-
-      last_crash_time = get_cur_time();
-      last_crash_execs = total_execs;
-
-      break;
-
-    case FAULT_ERROR: FATAL("Unable to execute target application");
-
-    default: return keeping;
-
-  }
-
-  /* If we're here, we apparently want to save the crash or hang
-     test case, too. */
-
-  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
-  if (fd < 0) PFATAL("Unable to create '%s'", fn);
-  ck_write(fd, mem, len, fn);
-  close(fd);
-
-  ck_free(fn);
-
-  return keeping;
-
-}
-
-
-/* When resuming, try to find the queue position to start from. This works
-   only when we can find the original fuzzer_stats. */
-
-static u32 find_start_position(void) {
-
-  static u8 tmp[4096]; /* Ought to be enough for anybody. */
-
-  u8  *fn, *off;
-  s32 fd, i;
-  u32 ret;
-
-  if (!resuming_fuzz) return 0;
-
-  if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir);
-  else fn = alloc_printf("%s/../fuzzer_stats", in_dir);
-
-  fd = open(fn, O_RDONLY);
-  ck_free(fn);
-
-  if (fd < 0) return 0;
-
-  i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */
-  close(fd);
-
-  off = strstr(tmp, "cur_path          : ");
-  if (!off) return 0;
-
-  ret = atoi(off + 20);
-  if (ret >= queued_paths) ret = 0;
-  return ret;
-
-}
-
-
-/* The same, but for timeouts. The idea is that when resuming sessions without
-   -t given, we don't want to keep auto-scaling the timeout over and over
-   again; left unchecked, random flukes would make it grow indefinitely. */
-
-static void find_timeout(void) {
-
-  static u8 tmp[4096]; /* Ought to be enough for anybody. */
-
-  u8  *fn, *off;
-  s32 fd, i;
-  u32 ret;
-
-  if (!resuming_fuzz) return;
-
-  if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir);
-  else fn = alloc_printf("%s/../fuzzer_stats", in_dir);
-
-  fd = open(fn, O_RDONLY);
-  ck_free(fn);
-
-  if (fd < 0) return;
-
-  i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */
-  close(fd);
-
-  /* The key padding here must match what write_stats_file() emits. */
-  off = strstr(tmp, "exec_timeout      : ");
-  if (!off) return;
-
-  ret = atoi(off + 20);
-  if (ret <= 4) return;
-
-  exec_tmout = ret;
-  timeout_given = 3;
-
-}
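-
-/* As a worked example: if fuzzer_stats contains the line
-   "exec_timeout      : 120", strstr() lands on the key and atoi(off + 20)
-   skips the 20-character "exec_timeout      : " prefix, restoring
-   exec_tmout = 120 for the resumed session. */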
-
-
-/* Update stats file for unattended monitoring. */
-
-static void write_stats_file(double bitmap_cvg, double stability, double eps) {
-
-  static double last_bcvg, last_stab, last_eps;
-
-  u8* fn = alloc_printf("%s/fuzzer_stats", out_dir);
-  s32 fd;
-  FILE* f;
-
-  fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);
-
-  if (fd < 0) PFATAL("Unable to create '%s'", fn);
-
-  ck_free(fn);
-
-  f = fdopen(fd, "w");
-
-  if (!f) PFATAL("fdopen() failed");
-
-  /* Keep last values in case we're called from another context
-     where exec/sec stats and such are not readily available. */
-
-  if (!bitmap_cvg && !stability && !eps) {
-    bitmap_cvg = last_bcvg;
-    stability  = last_stab;
-    eps        = last_eps;
-  } else {
-    last_bcvg = bitmap_cvg;
-    last_stab = stability;
-    last_eps  = eps;
-  }
-
-  fprintf(f, "start_time        : %llu\n"
-             "last_update       : %llu\n"
-             "fuzzer_pid        : %u\n"
-             "cycles_done       : %llu\n"
-             "execs_done        : %llu\n"
-             "execs_per_sec     : %0.02f\n"
-             "paths_total       : %u\n"
-             "paths_favored     : %u\n"
-             "paths_found       : %u\n"
-             "paths_imported    : %u\n"
-             "max_depth         : %u\n"
-             "cur_path          : %u\n" /* Must match find_start_position() */
-             "pending_favs      : %u\n"
-             "pending_total     : %u\n"
-             "variable_paths    : %u\n"
-             "stability         : %0.02f%%\n"
-             "bitmap_cvg        : %0.02f%%\n"
-             "unique_crashes    : %llu\n"
-             "unique_hangs      : %llu\n"
-             "last_path         : %llu\n"
-             "last_crash        : %llu\n"
-             "last_hang         : %llu\n"
-             "execs_since_crash : %llu\n"
-             "exec_timeout      : %u\n"
-             "afl_banner        : %s\n"
-             "afl_version       : " VERSION "\n"
-             "target_mode       : %s%s%s%s%s%s%s%s\n"
-             "command_line      : %s\n",
-             start_time / 1000, get_cur_time() / 1000, getpid(),
-             queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps,
-             queued_paths, queued_favored, queued_discovered, queued_imported,
-             max_depth, current_entry, pending_favored, pending_not_fuzzed,
-             queued_variable, stability, bitmap_cvg, unique_crashes,
-             unique_hangs, last_path_time / 1000, last_crash_time / 1000,
-             last_hang_time / 1000, total_execs - last_crash_execs,
-             exec_tmout, use_banner,
-             unicorn_mode ? "unicorn " : "", qemu_mode ? "qemu " : "", dumb_mode ? "dumb " : "",
-             no_forkserver ? "no_forksrv " : "", crash_mode ? "crash " : "",
-             persistent_mode ? "persistent " : "", deferred_mode ? "deferred " : "",
-             (unicorn_mode || qemu_mode || dumb_mode || no_forkserver || crash_mode ||
-              persistent_mode || deferred_mode) ? "" : "default",
-             orig_cmdline);
-             /* ignore errors */
-
-  fclose(f);
-
-}
-
-
-/* Update the plot file if there is a reason to. */
-
-static void maybe_update_plot_file(double bitmap_cvg, double eps) {
-
-  static u32 prev_qp, prev_pf, prev_pnf, prev_ce, prev_md;
-  static u64 prev_qc, prev_uc, prev_uh;
-
-  if (prev_qp == queued_paths && prev_pf == pending_favored && 
-      prev_pnf == pending_not_fuzzed && prev_ce == current_entry &&
-      prev_qc == queue_cycle && prev_uc == unique_crashes &&
-      prev_uh == unique_hangs && prev_md == max_depth) return;
-
-  prev_qp  = queued_paths;
-  prev_pf  = pending_favored;
-  prev_pnf = pending_not_fuzzed;
-  prev_ce  = current_entry;
-  prev_qc  = queue_cycle;
-  prev_uc  = unique_crashes;
-  prev_uh  = unique_hangs;
-  prev_md  = max_depth;
-
-  /* Fields in the file:
-
-     unix_time, cycles_done, cur_path, paths_total, paths_not_fuzzed,
-     favored_not_fuzzed, bitmap_cvg, unique_crashes, unique_hangs, max_depth,
-     execs_per_sec */
-
-  fprintf(plot_file, 
-          "%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f\n",
-          get_cur_time() / 1000, queue_cycle - 1, current_entry, queued_paths,
-          pending_not_fuzzed, pending_favored, bitmap_cvg, unique_crashes,
-          unique_hangs, max_depth, eps); /* ignore errors */
-
-  fflush(plot_file);
-
-}
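-
-/* An illustrative plot_data line (made-up values) matching the fprintf()
-   format above:
-
-   1564425600, 2, 11, 1207, 1101, 53, 3.21%, 1, 0, 4, 276.76 */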
-
-
-
-/* A helper function for maybe_delete_out_dir(), deleting all prefixed
-   files in a directory. */
-
-static u8 delete_files(u8* path, u8* prefix) {
-
-  DIR* d;
-  struct dirent* d_ent;
-
-  d = opendir(path);
-
-  if (!d) return 0;
-
-  while ((d_ent = readdir(d))) {
-
-    if (d_ent->d_name[0] != '.' && (!prefix ||
-        !strncmp(d_ent->d_name, prefix, strlen(prefix)))) {
-
-      u8* fname = alloc_printf("%s/%s", path, d_ent->d_name);
-      if (unlink(fname)) PFATAL("Unable to delete '%s'", fname);
-      ck_free(fname);
-
-    }
-
-  }
-
-  closedir(d);
-
-  return !!rmdir(path);
-
-}
-
-
-/* Get the number of runnable processes, with some simple smoothing. */
-
-static double get_runnable_processes(void) {
-
-  static double res;
-
-#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)
-
-  /* I don't see any portable sysctl or similar interface that would quickly
-     give us the number of runnable processes; the 1-minute load average can
-     be a semi-decent approximation, though. */
-
-  if (getloadavg(&res, 1) != 1) return 0;
-
-#else
-
-  /* On Linux, /proc/stat is probably the best way; load averages are
-     computed in funny ways and sometimes don't reflect extremely short-lived
-     processes well. */
-
-  FILE* f = fopen("/proc/stat", "r");
-  u8 tmp[1024];
-  u32 val = 0;
-
-  if (!f) return 0;
-
-  while (fgets(tmp, sizeof(tmp), f)) {
-
-    if (!strncmp(tmp, "procs_running ", 14) ||
-        !strncmp(tmp, "procs_blocked ", 14)) val += atoi(tmp + 14);
-
-  }
- 
-  fclose(f);
-
-  if (!res) {
-
-    res = val;
-
-  } else {
-
-    res = res * (1.0 - 1.0 / AVG_SMOOTHING) +
-          ((double)val) * (1.0 / AVG_SMOOTHING);
-
-  }
-
-#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */
-
-  return res;
-
-}
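-
-/* As a worked example of the smoothing (with the stock AVG_SMOOTHING of 16):
-   a previous estimate of 4.0 and a fresh sample of 8 runnable processes give
-   4.0 * (15.0 / 16) + 8 * (1.0 / 16) = 4.25, so a single noisy reading only
-   nudges the estimate. */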
-
-
-/* Delete the temporary directory used for in-place session resume. */
-
-static void nuke_resume_dir(void) {
-
-  u8* fn;
-
-  fn = alloc_printf("%s/_resume/.state/deterministic_done", out_dir);
-  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  fn = alloc_printf("%s/_resume/.state/auto_extras", out_dir);
-  if (delete_files(fn, "auto_")) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  fn = alloc_printf("%s/_resume/.state/redundant_edges", out_dir);
-  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  fn = alloc_printf("%s/_resume/.state/variable_behavior", out_dir);
-  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  fn = alloc_printf("%s/_resume/.state", out_dir);
-  if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  fn = alloc_printf("%s/_resume", out_dir);
-  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  return;
-
-dir_cleanup_failed:
-
-  FATAL("_resume directory cleanup failed");
-
-}
-
-
-/* Delete fuzzer output directory if we recognize it as ours, if the fuzzer
-   is not currently running, and if the last run time isn't too great. */
-
-static void maybe_delete_out_dir(void) {
-
-  FILE* f;
-  u8 *fn = alloc_printf("%s/fuzzer_stats", out_dir);
-
-  /* See if the output directory is locked. If yes, bail out. If not,
-     create a lock that will persist for the lifetime of the process
-     (this requires leaving the descriptor open).*/
-
-  out_dir_fd = open(out_dir, O_RDONLY);
-  if (out_dir_fd < 0) PFATAL("Unable to open '%s'", out_dir);
-
-#ifndef __sun
-
-  if (flock(out_dir_fd, LOCK_EX | LOCK_NB) && errno == EWOULDBLOCK) {
-
-    SAYF("\n" cLRD "[-] " cRST
-         "Looks like the job output directory is being actively used by another\n"
-         "    instance of afl-fuzz. You will need to choose a different %s\n"
-         "    or stop the other process first.\n",
-         sync_id ? "fuzzer ID" : "output location");
-
-    FATAL("Directory '%s' is in use", out_dir);
-
-  }
-
-#endif /* !__sun */
-
-  f = fopen(fn, "r");
-
-  if (f) {
-
-    u64 start_time, last_update;
-
-    if (fscanf(f, "start_time     : %llu\n"
-                  "last_update    : %llu\n", &start_time, &last_update) != 2)
-      FATAL("Malformed data in '%s'", fn);
-
-    fclose(f);
-
-    /* Let's see how much work is at stake. */
-
-    if (!in_place_resume && last_update - start_time > OUTPUT_GRACE * 60) {
-
-      SAYF("\n" cLRD "[-] " cRST
-           "The job output directory already exists and contains the results of more\n"
-           "    than %u minutes worth of fuzzing. To avoid data loss, afl-fuzz will *NOT*\n"
-           "    automatically delete this data for you.\n\n"
-
-           "    If you wish to start a new session, remove or rename the directory manually,\n"
-           "    or specify a different output location for this job. To resume the old\n"
-           "    session, put '-' as the input directory in the command line ('-i -') and\n"
-           "    try again.\n", OUTPUT_GRACE);
-
-       FATAL("At-risk data found in '%s'", out_dir);
-
-    }
-
-  }
-
-  ck_free(fn);
-
-  /* The idea for in-place resume is pretty simple: we temporarily move the old
-     queue/ to a new location that gets deleted once import to the new queue/
-     is finished. If _resume/ already exists, the current queue/ may be
-     incomplete due to an earlier abort, so we want to use the old _resume/
-     dir instead, and we let rename() fail silently. */
-
-  if (in_place_resume) {
-
-    u8* orig_q = alloc_printf("%s/queue", out_dir);
-
-    in_dir = alloc_printf("%s/_resume", out_dir);
-
-    rename(orig_q, in_dir); /* Ignore errors */
-
-    OKF("Output directory exists, will attempt session resume.");
-
-    ck_free(orig_q);
-
-  } else {
-
-    OKF("Output directory exists but deemed OK to reuse.");
-
-  }
-
-  ACTF("Deleting old session data...");
-
-  /* Okay, let's get the ball rolling! First, we need to get rid of the entries
-     in <out_dir>/.synced/.../id:*, if any are present. */
-
-  if (!in_place_resume) {
-
-    fn = alloc_printf("%s/.synced", out_dir);
-    if (delete_files(fn, NULL)) goto dir_cleanup_failed;
-    ck_free(fn);
-
-  }
-
-  /* Next, we need to clean up <out_dir>/queue/.state/ subdirectories: */
-
-  fn = alloc_printf("%s/queue/.state/deterministic_done", out_dir);
-  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  fn = alloc_printf("%s/queue/.state/auto_extras", out_dir);
-  if (delete_files(fn, "auto_")) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  fn = alloc_printf("%s/queue/.state/redundant_edges", out_dir);
-  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  fn = alloc_printf("%s/queue/.state/variable_behavior", out_dir);
-  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  /* Then, get rid of the .state subdirectory itself (should be empty by now)
-     and everything matching <out_dir>/queue/id:*. */
-
-  fn = alloc_printf("%s/queue/.state", out_dir);
-  if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  fn = alloc_printf("%s/queue", out_dir);
-  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  /* All right, let's do <out_dir>/crashes/id:* and <out_dir>/hangs/id:*. */
-
-  if (!in_place_resume) {
-
-    fn = alloc_printf("%s/crashes/README.txt", out_dir);
-    unlink(fn); /* Ignore errors */
-    ck_free(fn);
-
-  }
-
-  fn = alloc_printf("%s/crashes", out_dir);
-
-  /* Make backup of the crashes directory if it's not empty and if we're
-     doing in-place resume. */
-
-  if (in_place_resume && rmdir(fn)) {
-
-    time_t cur_t = time(0);
-    struct tm* t = localtime(&cur_t);
-
-#ifndef SIMPLE_FILES
-
-    u8* nfn = alloc_printf("%s.%04u-%02u-%02u-%02u:%02u:%02u", fn,
-                           t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
-                           t->tm_hour, t->tm_min, t->tm_sec);
-
-#else
-
-    u8* nfn = alloc_printf("%s_%04u%02u%02u%02u%02u%02u", fn,
-                           t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
-                           t->tm_hour, t->tm_min, t->tm_sec);
-
-#endif /* ^!SIMPLE_FILES */
-
-    rename(fn, nfn); /* Ignore errors. */
-    ck_free(nfn);
-
-  }
-
-  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  fn = alloc_printf("%s/hangs", out_dir);
-
-  /* Backup hangs, too. */
-
-  if (in_place_resume && rmdir(fn)) {
-
-    time_t cur_t = time(0);
-    struct tm* t = localtime(&cur_t);
-
-#ifndef SIMPLE_FILES
-
-    u8* nfn = alloc_printf("%s.%04u-%02u-%02u-%02u:%02u:%02u", fn,
-                           t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
-                           t->tm_hour, t->tm_min, t->tm_sec);
-
-#else
-
-    u8* nfn = alloc_printf("%s_%04u%02u%02u%02u%02u%02u", fn,
-                           t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
-                           t->tm_hour, t->tm_min, t->tm_sec);
-
-#endif /* ^!SIMPLE_FILES */
-
-    rename(fn, nfn); /* Ignore errors. */
-    ck_free(nfn);
-
-  }
-
-  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  /* And now, for some finishing touches. */
-
-  if (file_extension) {
-    fn = alloc_printf("%s/.cur_input.%s", out_dir, file_extension);
-  } else {
-    fn = alloc_printf("%s/.cur_input", out_dir);
-  }
-
-  if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  fn = alloc_printf("%s/fuzz_bitmap", out_dir);
-  if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  if (!in_place_resume) {
-    fn = alloc_printf("%s/fuzzer_stats", out_dir);
-    if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
-    ck_free(fn);
-  }
-
-  fn = alloc_printf("%s/plot_data", out_dir);
-  if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  fn = alloc_printf("%s/cmdline", out_dir);
-  if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
-  ck_free(fn);
-
-  OKF("Output dir cleanup successful.");
-
-  /* Wow... is that all? If yes, celebrate! */
-
-  return;
-
-dir_cleanup_failed:
-
-  SAYF("\n" cLRD "[-] " cRST
-       "Whoops, the fuzzer tried to reuse your output directory, but bumped into\n"
-       "    some files that shouldn't be there or that couldn't be removed - so it\n"
-       "    decided to abort! This happened while processing this path:\n\n"
-
-       "    %s\n\n"
-       "    Please examine and manually delete the files, or specify a different\n"
-       "    output location for the tool.\n", fn);
-
-  FATAL("Output directory cleanup failed");
-
-}
-
-
-static void check_term_size(void);
-
-
-/* A spiffy retro stats screen! This is called every stats_update_freq
-   execve() calls, plus in several other circumstances. */
-
-static void show_stats(void) {
-
-  static u64 last_stats_ms, last_plot_ms, last_ms, last_execs;
-  static double avg_exec;
-  double t_byte_ratio, stab_ratio;
-
-  u64 cur_ms;
-  u32 t_bytes, t_bits;
-
-  u32 banner_len, banner_pad;
-  u8  tmp[256];
-
-  cur_ms = get_cur_time();
-
-  /* If not enough time has passed since last UI update, bail out. */
-
-  if (cur_ms - last_ms < 1000 / UI_TARGET_HZ) return;
-
-  /* Check if we're past the 10 minute mark. */
-
-  if (cur_ms - start_time > 10 * 60 * 1000) run_over10m = 1;
-
-  /* Calculate smoothed exec speed stats. */
-
-  if (!last_execs) {
-
-    avg_exec = ((double)total_execs) * 1000 / (cur_ms - start_time);
-
-  } else {
-
-    double cur_avg = ((double)(total_execs - last_execs)) * 1000 /
-                     (cur_ms - last_ms);
-
-    /* If there is a dramatic (5x+) jump in speed, reset the indicator
-       more quickly. */
-
-    if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec)
-      avg_exec = cur_avg;
-
-    avg_exec = avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) +
-               cur_avg * (1.0 / AVG_SMOOTHING);
-
-  }
-
-  last_ms = cur_ms;
-  last_execs = total_execs;
-
-  /* Tell the callers when to contact us (as measured in execs). */
-
-  stats_update_freq = avg_exec / (UI_TARGET_HZ * 10);
-  if (!stats_update_freq) stats_update_freq = 1;
-
-  /* Do some bitmap stats. */
-
-  t_bytes = count_non_255_bytes(virgin_bits);
-  t_byte_ratio = ((double)t_bytes * 100) / MAP_SIZE;
-
-  if (t_bytes)
-    stab_ratio = 100 - ((double)var_byte_count) * 100 / t_bytes;
-  else
-    stab_ratio = 100;
-
-  /* Roughly every minute, update fuzzer stats and save auto tokens. */
-
-  if (cur_ms - last_stats_ms > STATS_UPDATE_SEC * 1000) {
-
-    last_stats_ms = cur_ms;
-    write_stats_file(t_byte_ratio, stab_ratio, avg_exec);
-    save_auto();
-    write_bitmap();
-
-  }
-
-  /* Every now and then, write plot data. */
-
-  if (cur_ms - last_plot_ms > PLOT_UPDATE_SEC * 1000) {
-
-    last_plot_ms = cur_ms;
-    maybe_update_plot_file(t_byte_ratio, avg_exec);
-
-  }
-
-  /* Honor AFL_EXIT_WHEN_DONE and AFL_BENCH_UNTIL_CRASH. */
-
-  if (!dumb_mode && cycles_wo_finds > 100 && !pending_not_fuzzed &&
-      getenv("AFL_EXIT_WHEN_DONE")) stop_soon = 2;
-
-  if (total_crashes && getenv("AFL_BENCH_UNTIL_CRASH")) stop_soon = 2;
-
-  /* If we're not on TTY, bail out. */
-
-  if (not_on_tty) return;
-
-  /* Compute some mildly useful bitmap stats. */
-
-  t_bits = (MAP_SIZE << 3) - count_bits(virgin_bits);
-
-  /* Now, for the visuals... */
-
-  if (clear_screen) {
-
-    SAYF(TERM_CLEAR CURSOR_HIDE);
-    clear_screen = 0;
-
-    check_term_size();
-
-  }
-
-  SAYF(TERM_HOME);
-
-  if (term_too_small) {
-
-    SAYF(cBRI "Your terminal is too small to display the UI.\n"
-         "Please resize terminal window to at least 79x24.\n" cRST);
-
-    return;
-
-  }
-
-  /* Let's start by drawing a centered banner. */
-
-  banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner) + strlen(power_name) + 3 + 5;
-  banner_pad = (79 - banner_len) / 2;
-  memset(tmp, ' ', banner_pad);
-
-#ifdef HAVE_AFFINITY
-  sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN
-          " (%s) " cPIN "[%s]" cBLU " {%d}",  crash_mode ? cPIN "peruvian were-rabbit" :
-          cYEL "american fuzzy lop", use_banner, power_name, cpu_aff);
-#else
-  sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN
-          " (%s) " cPIN "[%s]",  crash_mode ? cPIN "peruvian were-rabbit" :
-          cYEL "american fuzzy lop", use_banner, power_name);
-#endif /* HAVE_AFFINITY */
-
-  SAYF("\n%s\n", tmp);
-
-  /* "Handy" shortcuts for drawing boxes... */
-
-#define bSTG    bSTART cGRA
-#define bH2     bH bH
-#define bH5     bH2 bH2 bH
-#define bH10    bH5 bH5
-#define bH20    bH10 bH10
-#define bH30    bH20 bH10
-#define SP5     "     "
-#define SP10    SP5 SP5
-#define SP20    SP10 SP10
-
-  /* Lord, forgive me this. */
-
-  SAYF(SET_G1 bSTG bLT bH bSTOP cCYA " process timing " bSTG bH30 bH5 bH bHB
-       bH bSTOP cCYA " overall results " bSTG bH2 bH2 bRT "\n");
-
-  if (dumb_mode) {
-
-    strcpy(tmp, cRST);
-
-  } else {
-
-    u64 min_wo_finds = (cur_ms - last_path_time) / 1000 / 60;
-
-    /* First queue cycle: don't stop now! */
-    if (queue_cycle == 1 || min_wo_finds < 15) strcpy(tmp, cMGN); else
-
-    /* Subsequent cycles, but we're still making finds. */
-    if (cycles_wo_finds < 25 || min_wo_finds < 30) strcpy(tmp, cYEL); else
-
-    /* No finds for a long time and no test cases to try. */
-    if (cycles_wo_finds > 100 && !pending_not_fuzzed && min_wo_finds > 120)
-      strcpy(tmp, cLGN);
-
-    /* Default: cautiously OK to stop? */
-    else strcpy(tmp, cLBL);
-
-  }
-
-  SAYF(bV bSTOP "        run time : " cRST "%-33s " bSTG bV bSTOP
-       "  cycles done : %s%-5s " bSTG bV "\n",
-       DTD(cur_ms, start_time), tmp, DI(queue_cycle - 1));
-
-  /* We want to warn people about not seeing new paths after a full cycle,
-     except when resuming fuzzing or running in non-instrumented mode. */
-
-  if (!dumb_mode && (last_path_time || resuming_fuzz || queue_cycle == 1 ||
-      in_bitmap || crash_mode)) {
-
-    SAYF(bV bSTOP "   last new path : " cRST "%-33s ",
-         DTD(cur_ms, last_path_time));
-
-  } else {
-
-    if (dumb_mode)
-
-      SAYF(bV bSTOP "   last new path : " cPIN "n/a" cRST
-           " (non-instrumented mode)       ");
-
-     else
-
-      SAYF(bV bSTOP "   last new path : " cRST "none yet " cLRD
-           "(odd, check syntax!)     ");
-
-  }
-
-  SAYF(bSTG bV bSTOP "  total paths : " cRST "%-5s " bSTG bV "\n",
-       DI(queued_paths));
-
-  /* Highlight crashes in red if found, denote going over the KEEP_UNIQUE_CRASH
-     limit with a '+' appended to the count. */
-
-  sprintf(tmp, "%s%s", DI(unique_crashes),
-          (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
-
-  SAYF(bV bSTOP " last uniq crash : " cRST "%-33s " bSTG bV bSTOP
-       " uniq crashes : %s%-6s" bSTG bV "\n",
-       DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST,
-       tmp);
-
-  sprintf(tmp, "%s%s", DI(unique_hangs),
-         (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
-
-  SAYF(bV bSTOP "  last uniq hang : " cRST "%-33s " bSTG bV bSTOP
-       "   uniq hangs : " cRST "%-6s" bSTG bV "\n",
-       DTD(cur_ms, last_hang_time), tmp);
-
-  SAYF(bVR bH bSTOP cCYA " cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA
-       " map coverage " bSTG bH bHT bH20 bH2 bVL "\n");
-
-  /* This gets funny because we want to print several variable-length variables
-     together, but then cram them into a fixed-width field - so we need to
-     put them in a temporary buffer first. */
-
-  sprintf(tmp, "%s%s%d (%0.02f%%)", DI(current_entry),
-          queue_cur->favored ? "." : "*", queue_cur->fuzz_level,
-          ((double)current_entry * 100) / queued_paths);
-
-  SAYF(bV bSTOP "  now processing : " cRST "%-16s " bSTG bV bSTOP, tmp);
-
-  sprintf(tmp, "%0.02f%% / %0.02f%%", ((double)queue_cur->bitmap_size) *
-          100 / MAP_SIZE, t_byte_ratio);
-
-  SAYF("    map density : %s%-21s" bSTG bV "\n", t_byte_ratio > 70 ? cLRD :
-       ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST), tmp);
-
-  sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths),
-          ((double)cur_skipped_paths * 100) / queued_paths);
-
-  SAYF(bV bSTOP " paths timed out : " cRST "%-16s " bSTG bV, tmp);
-
-  sprintf(tmp, "%0.02f bits/tuple",
-          t_bytes ? (((double)t_bits) / t_bytes) : 0);
-
-  SAYF(bSTOP " count coverage : " cRST "%-21s" bSTG bV "\n", tmp);
-
-  SAYF(bVR bH bSTOP cCYA " stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA
-       " findings in depth " bSTG bH10 bH5 bH2 bH2 bVL "\n");
-
-  sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored),
-          ((double)queued_favored) * 100 / queued_paths);
-
-  /* Yeah... it's still going on... halp? */
-
-  SAYF(bV bSTOP "  now trying : " cRST "%-20s " bSTG bV bSTOP
-       " favored paths : " cRST "%-22s" bSTG bV "\n", stage_name, tmp);
-
-  if (!stage_max) {
-
-    sprintf(tmp, "%s/-", DI(stage_cur));
-
-  } else {
-
-    sprintf(tmp, "%s/%s (%0.02f%%)", DI(stage_cur), DI(stage_max),
-            ((double)stage_cur) * 100 / stage_max);
-
-  }
-
-  SAYF(bV bSTOP " stage execs : " cRST "%-20s " bSTG bV bSTOP, tmp);
-
-  sprintf(tmp, "%s (%0.02f%%)", DI(queued_with_cov),
-          ((double)queued_with_cov) * 100 / queued_paths);
-
-  SAYF("  new edges on : " cRST "%-22s" bSTG bV "\n", tmp);
-
-  sprintf(tmp, "%s (%s%s unique)", DI(total_crashes), DI(unique_crashes),
-          (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
-
-  if (crash_mode) {
-
-    SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP
-         "   new crashes : %s%-22s" bSTG bV "\n", DI(total_execs),
-         unique_crashes ? cLRD : cRST, tmp);
-
-  } else {
-
-    SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP
-         " total crashes : %s%-22s" bSTG bV "\n", DI(total_execs),
-         unique_crashes ? cLRD : cRST, tmp);
-
-  }
-
-  /* Show a warning about slow execution. */
-
-  if (avg_exec < 100) {
-
-    sprintf(tmp, "%s/sec (%s)", DF(avg_exec), avg_exec < 20 ?
-            "zzzz..." : "slow!");
-
-    SAYF(bV bSTOP "  exec speed : " cLRD "%-20s ", tmp);
-
-  } else {
-
-    sprintf(tmp, "%s/sec", DF(avg_exec));
-    SAYF(bV bSTOP "  exec speed : " cRST "%-20s ", tmp);
-
-  }
-
-  sprintf(tmp, "%s (%s%s unique)", DI(total_tmouts), DI(unique_tmouts),
-          (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
-
-  SAYF(bSTG bV bSTOP "  total tmouts : " cRST "%-22s" bSTG bV "\n", tmp);
-
-  /* Aaaalmost there... hold on! */
-
-  SAYF(bVR bH cCYA bSTOP " fuzzing strategy yields " bSTG bH10 bHT bH10
-       bH5 bHB bH bSTOP cCYA " path geometry " bSTG bH5 bH2 bVL "\n");
-
-  if (skip_deterministic) {
-
-    strcpy(tmp, "n/a, n/a, n/a");
-
-  } else {
-
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-            DI(stage_finds[STAGE_FLIP1]), DI(stage_cycles[STAGE_FLIP1]),
-            DI(stage_finds[STAGE_FLIP2]), DI(stage_cycles[STAGE_FLIP2]),
-            DI(stage_finds[STAGE_FLIP4]), DI(stage_cycles[STAGE_FLIP4]));
-
-  }
-
-  SAYF(bV bSTOP "   bit flips : " cRST "%-36s " bSTG bV bSTOP "    levels : "
-       cRST "%-10s" bSTG bV "\n", tmp, DI(max_depth));
-
-  if (!skip_deterministic)
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-            DI(stage_finds[STAGE_FLIP8]), DI(stage_cycles[STAGE_FLIP8]),
-            DI(stage_finds[STAGE_FLIP16]), DI(stage_cycles[STAGE_FLIP16]),
-            DI(stage_finds[STAGE_FLIP32]), DI(stage_cycles[STAGE_FLIP32]));
-
-  SAYF(bV bSTOP "  byte flips : " cRST "%-36s " bSTG bV bSTOP "   pending : "
-       cRST "%-10s" bSTG bV "\n", tmp, DI(pending_not_fuzzed));
-
-  if (!skip_deterministic)
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-            DI(stage_finds[STAGE_ARITH8]), DI(stage_cycles[STAGE_ARITH8]),
-            DI(stage_finds[STAGE_ARITH16]), DI(stage_cycles[STAGE_ARITH16]),
-            DI(stage_finds[STAGE_ARITH32]), DI(stage_cycles[STAGE_ARITH32]));
-
-  SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP "  pend fav : "
-       cRST "%-10s" bSTG bV "\n", tmp, DI(pending_favored));
-
-  if (!skip_deterministic)
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-            DI(stage_finds[STAGE_INTEREST8]), DI(stage_cycles[STAGE_INTEREST8]),
-            DI(stage_finds[STAGE_INTEREST16]), DI(stage_cycles[STAGE_INTEREST16]),
-            DI(stage_finds[STAGE_INTEREST32]), DI(stage_cycles[STAGE_INTEREST32]));
-
-  SAYF(bV bSTOP "  known ints : " cRST "%-36s " bSTG bV bSTOP " own finds : "
-       cRST "%-10s" bSTG bV "\n", tmp, DI(queued_discovered));
-
-  if (!skip_deterministic)
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-            DI(stage_finds[STAGE_EXTRAS_UO]), DI(stage_cycles[STAGE_EXTRAS_UO]),
-            DI(stage_finds[STAGE_EXTRAS_UI]), DI(stage_cycles[STAGE_EXTRAS_UI]),
-            DI(stage_finds[STAGE_EXTRAS_AO]), DI(stage_cycles[STAGE_EXTRAS_AO]));
-
-  SAYF(bV bSTOP "  dictionary : " cRST "%-36s " bSTG bV bSTOP
-       "  imported : " cRST "%-10s" bSTG bV "\n", tmp,
-       sync_id ? DI(queued_imported) : (u8*)"n/a");
-
-  sprintf(tmp, "%s/%s, %s/%s, %s/%s",
-          DI(stage_finds[STAGE_HAVOC]), DI(stage_cycles[STAGE_HAVOC]),
-          DI(stage_finds[STAGE_SPLICE]), DI(stage_cycles[STAGE_SPLICE]),
-          DI(stage_finds[STAGE_PYTHON]), DI(stage_cycles[STAGE_PYTHON]));
-
-  SAYF(bV bSTOP "       havoc : " cRST "%-36s " bSTG bV bSTOP, tmp);
-
-  if (t_bytes) sprintf(tmp, "%0.02f%%", stab_ratio);
-    else strcpy(tmp, "n/a");
-
-  SAYF(" stability : %s%-10s" bSTG bV "\n", (stab_ratio < 85 && var_byte_count > 40)
-       ? cLRD : ((queued_variable && (!persistent_mode || var_byte_count > 20))
-       ? cMGN : cRST), tmp);
-
-  if (!bytes_trim_out) {
-
-    sprintf(tmp, "n/a, ");
-
-  } else {
-
-    sprintf(tmp, "%0.02f%%/%s, ",
-            ((double)(bytes_trim_in - bytes_trim_out)) * 100 / bytes_trim_in,
-            DI(trim_execs));
-
-  }
-
-  if (!blocks_eff_total) {
-
-    u8 tmp2[128];
-
-    sprintf(tmp2, "n/a");
-    strcat(tmp, tmp2);
-
-  } else {
-
-    u8 tmp2[128];
-
-    sprintf(tmp2, "%0.02f%%",
-            ((double)(blocks_eff_total - blocks_eff_select)) * 100 /
-            blocks_eff_total);
-
-    strcat(tmp, tmp2);
-
-  }
-  if (custom_mutator) {
-    sprintf(tmp, "%s/%s", DI(stage_finds[STAGE_CUSTOM_MUTATOR]), DI(stage_cycles[STAGE_CUSTOM_MUTATOR]));
-    SAYF(bV bSTOP " custom mut. : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n"
-         bLB bH30 bH20 bH2 bRB bSTOP cRST RESET_G1, tmp);
-  } else {
-    SAYF(bV bSTOP "        trim : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n"
-       bLB bH30 bH20 bH2 bRB bSTOP cRST RESET_G1, tmp);
-  }
-
-  /* Provide some CPU utilization stats. */
-
-  if (cpu_core_count) {
-
-    double cur_runnable = get_runnable_processes();
-    u32 cur_utilization = cur_runnable * 100 / cpu_core_count;
-
-    u8* cpu_color = cCYA;
-
-    /* If we could still run one or more processes, use green. */
-
-    if (cpu_core_count > 1 && cur_runnable + 1 <= cpu_core_count)
-      cpu_color = cLGN;
-
-    /* If we're clearly oversubscribed, use red. */
-
-    if (!no_cpu_meter_red && cur_utilization >= 150) cpu_color = cLRD;
-
-#ifdef HAVE_AFFINITY
-
-    if (cpu_aff >= 0) {
-
-      SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, 
-           MIN(cpu_aff, 999), cpu_color,
-           MIN(cur_utilization, 999));
-
-    } else {
-
-      SAYF(SP10 cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST,
-           cpu_color, MIN(cur_utilization, 999));
-
-    }
-
-#else
-
-    SAYF(SP10 cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST,
-         cpu_color, MIN(cur_utilization, 999));
-
-#endif /* ^HAVE_AFFINITY */
-
-  } else SAYF("\r");
-
-  /* Hallelujah! */
-
-  fflush(0);
-
-}
-
-
-/* Display quick statistics at the end of processing the input directory,
-   plus a bunch of warnings. Some calibration stuff also ended up here,
-   along with several hardcoded constants. Maybe clean up eventually. */
-
-static void show_init_stats(void) {
-
-  struct queue_entry* q = queue;
-  u32 min_bits = 0, max_bits = 0;
-  u64 min_us = 0, max_us = 0;
-  u64 avg_us = 0;
-  u32 max_len = 0;
-
-  if (total_cal_cycles) avg_us = total_cal_us / total_cal_cycles;
-
-  while (q) {
-
-    if (!min_us || q->exec_us < min_us) min_us = q->exec_us;
-    if (q->exec_us > max_us) max_us = q->exec_us;
-
-    if (!min_bits || q->bitmap_size < min_bits) min_bits = q->bitmap_size;
-    if (q->bitmap_size > max_bits) max_bits = q->bitmap_size;
-
-    if (q->len > max_len) max_len = q->len;
-
-    q = q->next;
-
-  }
-
-  SAYF("\n");
-
-  if (avg_us > ((qemu_mode || unicorn_mode) ? 50000 : 10000))
-    WARNF(cLRD "The target binary is pretty slow! See %s/perf_tips.txt.",
-          doc_path);
-
-  /* Let's keep things moving with slow binaries. */
-
-  if (avg_us > 50000) havoc_div = 10;     /* 0-19 execs/sec   */
-  else if (avg_us > 20000) havoc_div = 5; /* 20-49 execs/sec  */
-  else if (avg_us > 10000) havoc_div = 2; /* 50-100 execs/sec */
-
-  if (!resuming_fuzz) {
-
-    if (max_len > 50 * 1024)
-      WARNF(cLRD "Some test cases are huge (%s) - see %s/perf_tips.txt!",
-            DMS(max_len), doc_path);
-    else if (max_len > 10 * 1024)
-      WARNF("Some test cases are big (%s) - see %s/perf_tips.txt.",
-            DMS(max_len), doc_path);
-
-    if (useless_at_start && !in_bitmap)
-      WARNF(cLRD "Some test cases look useless. Consider using a smaller set.");
-
-    if (queued_paths > 100)
-      WARNF(cLRD "You probably have far too many input files! Consider trimming down.");
-    else if (queued_paths > 20)
-      WARNF("You have lots of input files; try starting small.");
-
-  }
-
-  OKF("Here are some useful stats:\n\n"
-
-      cGRA "    Test case count : " cRST "%u favored, %u variable, %u total\n"
-      cGRA "       Bitmap range : " cRST "%u to %u bits (average: %0.02f bits)\n"
-      cGRA "        Exec timing : " cRST "%s to %s us (average: %s us)\n",
-      queued_favored, queued_variable, queued_paths, min_bits, max_bits, 
-      ((double)total_bitmap_size) / (total_bitmap_entries ? total_bitmap_entries : 1),
-      DI(min_us), DI(max_us), DI(avg_us));
-
-  if (!timeout_given) {
-
-    /* Figure out the appropriate timeout. The basic idea is: 5x average or
-       1x max, rounded up to EXEC_TM_ROUND ms and capped at 1 second.
-
-       If the program is slow, the multiplier is lowered to 2x or 3x, because
-       random scheduler jitter is less likely to have any impact, and because
-       our patience is wearing thin =) */
-
-    if (avg_us > 50000) exec_tmout = avg_us * 2 / 1000;
-    else if (avg_us > 10000) exec_tmout = avg_us * 3 / 1000;
-    else exec_tmout = avg_us * 5 / 1000;
-
-    exec_tmout = MAX(exec_tmout, max_us / 1000);
-    exec_tmout = (exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;
-
-    if (exec_tmout > EXEC_TIMEOUT) exec_tmout = EXEC_TIMEOUT;
-
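-    /* As a worked example (with the stock EXEC_TM_ROUND = 20 and
-       EXEC_TIMEOUT = 1000): avg_us = 5000 and max_us = 30000 give
-       5 * 5000 / 1000 = 25 ms, raised to MAX(25, 30) = 30 ms and rounded up
-       to 40 ms; the 1000 ms cap is not hit. */
-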
-    ACTF("No -t option specified, so I'll use exec timeout of %u ms.", 
-         exec_tmout);
-
-    timeout_given = 1;
-
-  } else if (timeout_given == 3) {
-
-    ACTF("Applying timeout settings from resumed session (%u ms).", exec_tmout);
-
-  }
-
-  /* In dumb mode, re-running every timing-out test case with a generous time
-     limit is very expensive, so let's select a more conservative default. */
-
-  if (dumb_mode && !getenv("AFL_HANG_TMOUT"))
-    hang_tmout = MIN(EXEC_TIMEOUT, exec_tmout * 2 + 100);
-
-  OKF("All set and ready to roll!");
-
-}
-
-
-#ifdef USE_PYTHON
-static u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) {
-
-  static u8 tmp[64];
-  static u8 clean_trace[MAP_SIZE];
-
-  u8  needs_write = 0, fault = 0;
-  u32 trim_exec = 0;
-  u32 orig_len = q->len;
-
-  stage_name = tmp;
-  bytes_trim_in += q->len;
-
-  /* Initialize trimming in the Python module */
-  stage_cur = 0;
-  stage_max = init_trim_py(in_buf, q->len);
-
-  if (not_on_tty && debug)
-    SAYF("[Python Trimming] START: Max %u iterations, %u bytes", stage_max, q->len);
-
-  while(stage_cur < stage_max) {
-    sprintf(tmp, "ptrim %s", DI(trim_exec));
-
-    u32 cksum;
-
-    char* retbuf = NULL;
-    size_t retlen = 0;
-
-    trim_py(&retbuf, &retlen);
-
-    if (retlen > orig_len)
-      FATAL("Trimmed data returned by Python module is larger than original data");
-
-    write_to_testcase(retbuf, retlen);
-
-    fault = run_target(argv, exec_tmout);
-    ++trim_execs;
-
-    if (stop_soon || fault == FAULT_ERROR) goto abort_trimming;
-
-    cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
-
-    if (cksum == q->exec_cksum) {
-
-      q->len = retlen;
-      memcpy(in_buf, retbuf, retlen);
-
-      /* Let's save a clean trace, which will be needed by
-         update_bitmap_score once we're done with the trimming stuff. */
-
-      if (!needs_write) {
-
-        needs_write = 1;
-        memcpy(clean_trace, trace_bits, MAP_SIZE);
-
-      }
-
-      /* Tell the Python module that the trimming was successful */
-      stage_cur = post_trim_py(1);
-
-      if (not_on_tty && debug)
-        SAYF("[Python Trimming] SUCCESS: %u/%u iterations (now at %u bytes)", stage_cur, stage_max, q->len);
-    } else {
-      /* Tell the Python module that the trimming was unsuccessful */
-      stage_cur = post_trim_py(0);
-      if (not_on_tty && debug)
-        SAYF("[Python Trimming] FAILURE: %u/%u iterations", stage_cur, stage_max);
-    }
-
-    /* Since this can be slow, update the screen every now and then. */
-
-    if (!(trim_exec++ % stats_update_freq)) show_stats();
-  }
-
-  if (not_on_tty && debug)
-    SAYF("[Python Trimming] DONE: %u bytes -> %u bytes", orig_len, q->len);
-
-  /* If we have made changes to in_buf, we also need to update the on-disk
-     version of the test case. */
-
-  if (needs_write) {
-
-    s32 fd;
-
-    unlink(q->fname); /* ignore errors */
-
-    fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);
-
-    if (fd < 0) PFATAL("Unable to create '%s'", q->fname);
-
-    ck_write(fd, in_buf, q->len, q->fname);
-    close(fd);
-
-    memcpy(trace_bits, clean_trace, MAP_SIZE);
-    update_bitmap_score(q);
-
-  }
-
-abort_trimming:
-
-  bytes_trim_out += q->len;
-  return fault;
-
-}
-#endif
-
-/* Trim all new test cases to save cycles when doing deterministic checks. The
-   trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of
-   file size, to keep the stage short and sweet. */
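-
-/* As a worked example (with the stock TRIM_MIN_BYTES = 4, TRIM_START_STEPS =
-   16 and TRIM_END_STEPS = 1024): for a 1000-byte input, len_p2 = 1024, so
-   remove_len starts at MAX(1024 / 16, 4) = 64 and, assuming no chunk gets
-   removed along the way, halves through 32, 16 and 8 down to 4 before the
-   stage ends. */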
-
-static u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
-
-#ifdef USE_PYTHON
-  if (py_functions[PY_FUNC_TRIM])
-    return trim_case_python(argv, q, in_buf);
-#endif
-
-  static u8 tmp[64];
-  static u8 clean_trace[MAP_SIZE];
-
-  u8  needs_write = 0, fault = 0;
-  u32 trim_exec = 0;
-  u32 remove_len;
-  u32 len_p2;
-
-  /* Although the trimmer will be less useful when variable behavior is
-     detected, it will still work to some extent, so we don't check for
-     this. */
-
-  if (q->len < 5) return 0;
-
-  stage_name = tmp;
-  bytes_trim_in += q->len;
-
-  /* Select initial chunk len, starting with large steps. */
-
-  len_p2 = next_p2(q->len);
-
-  remove_len = MAX(len_p2 / TRIM_START_STEPS, TRIM_MIN_BYTES);
-
-  /* Continue until the number of steps gets too high or the stepover
-     gets too small. */
-
-  while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, TRIM_MIN_BYTES)) {
-
-    u32 remove_pos = remove_len;
-
-    sprintf(tmp, "trim %s/%s", DI(remove_len), DI(remove_len));
-
-    stage_cur = 0;
-    stage_max = q->len / remove_len;
-
-    while (remove_pos < q->len) {
-
-      u32 trim_avail = MIN(remove_len, q->len - remove_pos);
-      u32 cksum;
-
-      write_with_gap(in_buf, q->len, remove_pos, trim_avail);
-
-      fault = run_target(argv, exec_tmout);
-      ++trim_execs;
-
-      if (stop_soon || fault == FAULT_ERROR) goto abort_trimming;
-
-      /* Note that we don't keep track of crashes or hangs here; maybe TODO? */
-
-      cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
-
-      /* If the deletion had no impact on the trace, make it permanent. This
-         isn't perfect for variable-path inputs, but we're just making a
-         best-effort pass, so it's not a big deal if we end up with false
-         negatives every now and then. */
-
-      if (cksum == q->exec_cksum) {
-
-        u32 move_tail = q->len - remove_pos - trim_avail;
-
-        q->len -= trim_avail;
-        len_p2  = next_p2(q->len);
-
-        memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail, 
-                move_tail);
-
-        /* Let's save a clean trace, which will be needed by
-           update_bitmap_score once we're done with the trimming stuff. */
-
-        if (!needs_write) {
-
-          needs_write = 1;
-          memcpy(clean_trace, trace_bits, MAP_SIZE);
-
-        }
-
-      } else remove_pos += remove_len;
-
-      /* Since this can be slow, update the screen every now and then. */
-
-      if (!(trim_exec++ % stats_update_freq)) show_stats();
-      ++stage_cur;
-
-    }
-
-    remove_len >>= 1;
-
-  }
-
-  /* If we have made changes to in_buf, we also need to update the on-disk
-     version of the test case. */
-
-  if (needs_write) {
-
-    s32 fd;
-
-    unlink(q->fname); /* ignore errors */
-
-    fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);
-
-    if (fd < 0) PFATAL("Unable to create '%s'", q->fname);
-
-    ck_write(fd, in_buf, q->len, q->fname);
-    close(fd);
-
-    memcpy(trace_bits, clean_trace, MAP_SIZE);
-    update_bitmap_score(q);
-
-  }
-
-abort_trimming:
-
-  bytes_trim_out += q->len;
-  return fault;
-
-}
-
-
-/* Write a modified test case, run program, process results. Handle
-   error conditions, returning 1 if it's time to bail out. This is
-   a helper function for fuzz_one(). */
-
-EXP_ST u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) {
-
-  u8 fault;
-
-  if (post_handler) {
-
-    out_buf = post_handler(out_buf, &len);
-    if (!out_buf || !len) return 0;
-
-  }
-
-  write_to_testcase(out_buf, len);
-
-  fault = run_target(argv, exec_tmout);
-
-  if (stop_soon) return 1;
-
-  if (fault == FAULT_TMOUT) {
-
-    if (subseq_tmouts++ > TMOUT_LIMIT) {
-      ++cur_skipped_paths;
-      return 1;
-    }
-
-  } else subseq_tmouts = 0;
-
-  /* Users can hit us with SIGUSR1 to request the current input
-     to be abandoned. */
-
-  if (skip_requested) {
-
-     skip_requested = 0;
-     ++cur_skipped_paths;
-     return 1;
-
-  }
-
-  /* This handles FAULT_ERROR for us: */
-
-  queued_discovered += save_if_interesting(argv, out_buf, len, fault);
-
-  if (!(stage_cur % stats_update_freq) || stage_cur + 1 == stage_max)
-    show_stats();
-
-  return 0;
-
-}
-
-
-/* Helper to choose random block len for block operations in fuzz_one().
-   Doesn't return zero, provided that limit is > 0. */
-
-static u32 choose_block_len(u32 limit) {
-
-  u32 min_value, max_value;
-  u32 rlim = MIN(queue_cycle, 3);
-
-  if (!run_over10m) rlim = 1;
-
-  switch (UR(rlim)) {
-
-    case 0:  min_value = 1;
-             max_value = HAVOC_BLK_SMALL;
-             break;
-
-    case 1:  min_value = HAVOC_BLK_SMALL;
-             max_value = HAVOC_BLK_MEDIUM;
-             break;
-
-    default: 
-
-             if (UR(10)) {
-
-               min_value = HAVOC_BLK_MEDIUM;
-               max_value = HAVOC_BLK_LARGE;
-
-             } else {
-
-               min_value = HAVOC_BLK_LARGE;
-               max_value = HAVOC_BLK_XL;
-
-             }
-
-  }
-
-  if (min_value >= limit) min_value = 1;
-
-  return min_value + UR(MIN(max_value, limit) - min_value + 1);
-
-}
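-
-/* As a worked example: with limit = 100 and UR(rlim) selecting case 1 (the
-   stock HAVOC_BLK_SMALL = 32 and HAVOC_BLK_MEDIUM = 128), the result is
-   32 + UR(MIN(128, 100) - 32 + 1), i.e. a uniform pick from [32, 100]. */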
-
-
-/* Calculate case desirability score to adjust the length of havoc fuzzing.
-   A helper function for fuzz_one(). Maybe some of these constants should
-   go into config.h. */
-
-static u32 calculate_score(struct queue_entry* q) {
-
-  u32 avg_exec_us = total_cal_us / total_cal_cycles;
-  u32 avg_bitmap_size = total_bitmap_size / total_bitmap_entries;
-  u32 perf_score = 100;
-
-  /* Adjust score based on execution speed of this path, compared to the
-     global average. Multiplier ranges from 0.1x to 3x. Fast inputs are
-     less expensive to fuzz, so we're giving them more air time. */
-
-  if (q->exec_us * 0.1 > avg_exec_us) perf_score = 10;
-  else if (q->exec_us * 0.25 > avg_exec_us) perf_score = 25;
-  else if (q->exec_us * 0.5 > avg_exec_us) perf_score = 50;
-  else if (q->exec_us * 0.75 > avg_exec_us) perf_score = 75;
-  else if (q->exec_us * 4 < avg_exec_us) perf_score = 300;
-  else if (q->exec_us * 3 < avg_exec_us) perf_score = 200;
-  else if (q->exec_us * 2 < avg_exec_us) perf_score = 150;
-
-  /* Adjust score based on bitmap size. The working theory is that better
-     coverage translates to better targets. Multiplier from 0.25x to 3x. */
-
-  if (q->bitmap_size * 0.3 > avg_bitmap_size) perf_score *= 3;
-  else if (q->bitmap_size * 0.5 > avg_bitmap_size) perf_score *= 2;
-  else if (q->bitmap_size * 0.75 > avg_bitmap_size) perf_score *= 1.5;
-  else if (q->bitmap_size * 3 < avg_bitmap_size) perf_score *= 0.25;
-  else if (q->bitmap_size * 2 < avg_bitmap_size) perf_score *= 0.5;
-  else if (q->bitmap_size * 1.5 < avg_bitmap_size) perf_score *= 0.75;
-
-  /* Adjust score based on handicap. Handicap is proportional to how late
-     in the game we learned about this path. Latecomers are allowed to run
-     for a bit longer until they catch up with the rest. */
-
-  if (q->handicap >= 4) {
-
-    perf_score *= 4;
-    q->handicap -= 4;
-
-  } else if (q->handicap) {
-
-    perf_score *= 2;
-    --q->handicap;
-
-  }
-
-  /* Final adjustment based on input depth, under the assumption that fuzzing
-     deeper test cases is more likely to reveal stuff that can't be
-     discovered with traditional fuzzers. */
-
-  switch (q->depth) {
-
-    case 0 ... 3:   break;
-    case 4 ... 7:   perf_score *= 2; break;
-    case 8 ... 13:  perf_score *= 3; break;
-    case 14 ... 25: perf_score *= 4; break;
-    default:        perf_score *= 5;
-
-  }
-
-  u64 fuzz = q->n_fuzz;
-  u64 fuzz_total;
-
-  u32 n_paths, fuzz_mu;
-  u32 factor = 1;
-
-  switch (schedule) {
-
-    case EXPLORE:
-      break;
-
-    case EXPLOIT:
-      factor = MAX_FACTOR;
-      break;
-
-    case COE:
-      fuzz_total = 0;
-      n_paths = 0;
-
-      struct queue_entry *queue_it = queue;
-      while (queue_it) {
-        fuzz_total += queue_it->n_fuzz;
-        n_paths ++;
-        queue_it = queue_it->next;
-      }
-
-      fuzz_mu = fuzz_total / n_paths;
-      if (fuzz <= fuzz_mu) {
-        if (q->fuzz_level < 16)
-          factor = ((u32) (1 << q->fuzz_level));
-        else
-          factor = MAX_FACTOR;
-      } else {
-        factor = 0;
-      }
-      break;
-
-    case FAST:
-      if (q->fuzz_level < 16) {
-         factor = ((u32) (1 << q->fuzz_level)) / (fuzz == 0 ? 1 : fuzz);
-      } else
-        factor = MAX_FACTOR / (fuzz == 0 ? 1 : next_p2 (fuzz));
-      break;
-
-    case LIN:
-      factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz);
-      break;
-
-    case QUAD:
-      factor = q->fuzz_level * q->fuzz_level / (fuzz == 0 ? 1 : fuzz);
-      break;
-
-    default:
-      FATAL("Unknown Power Schedule");
-  }
-  if (factor > MAX_FACTOR)
-    factor = MAX_FACTOR;
-
-  perf_score *= factor / POWER_BETA;
-
-  /* MOpt mode: grant extra energy to paths close to the maximum depth. */
-  if (limit_time_sig != 0 && max_depth - q->depth < 3) perf_score *= 2;
-  else if (perf_score < 1)
-    perf_score = 1; /* Lower bound for AFLFast's energy assignment strategies. */
-
-  /* Make sure that we don't go over limit. */
-
-  if (perf_score > havoc_max_mult * 100) perf_score = havoc_max_mult * 100;
-
-  return perf_score;
-
-}
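-
-/* As a worked example of the FAST schedule: an entry at fuzz_level = 5 whose
-   path has been seen n_fuzz = 4 times gets factor = (1 << 5) / 4 = 8, so
-   (with POWER_BETA at its usual value of 1) its perf_score grows eightfold;
-   rarely exercised paths receive more energy. */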
-
-
-/* Helper function to see if a particular change (xor_val = old ^ new) could
-   be a product of deterministic bit flips with the lengths and stepovers
-   attempted by afl-fuzz. This is used to avoid dupes in some of the
-   deterministic fuzzing operations that follow bit flips. We also
-   return 1 if xor_val is zero, which implies that the old and attempted new
-   values are identical and the exec would be a waste of time. */
-
-static u8 could_be_bitflip(u32 xor_val) {
-
-  u32 sh = 0;
-
-  if (!xor_val) return 1;
-
-  /* Shift left until first bit set. */
-
-  while (!(xor_val & 1)) { ++sh; xor_val >>= 1; }
-
-  /* 1-, 2-, and 4-bit patterns are OK anywhere. */
-
-  if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;
-
-  /* 8-, 16-, and 32-bit patterns are OK only if shift factor is
-     divisible by 8, since that's the stepover for these ops. */
-
-  if (sh & 7) return 0;
-
-  if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff)
-    return 1;
-
-  return 0;
-
-}
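-
-/* Illustrative traces: xor_val = 0x001 keeps sh = 0 and hits the 1-bit
-   pattern (walking bit flip). xor_val = 0x180 gives sh = 7 and a residue of
-   3, a hit, since 2-bit flips use a 1-bit stepover. xor_val = 0xff0 gives
-   sh = 4 and a residue of 0xff, a miss, because byte-wide flips only occur
-   at 8-bit-aligned offsets (sh & 7 != 0). */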
-
-
-/* Helper function to see if a particular value is reachable through
-   arithmetic operations. Used for similar purposes. */
-
-static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
-
-  u32 i, ov = 0, nv = 0, diffs = 0;
-
-  if (old_val == new_val) return 1;
-
-  /* See if one-byte adjustments to any byte could produce this result. */
-
-  for (i = 0; i < blen; ++i) {
-
-    u8 a = old_val >> (8 * i),
-       b = new_val >> (8 * i);
-
-    if (a != b) { ++diffs; ov = a; nv = b; }
-
-  }
-
-  /* If only one byte differs and the values are within range, return 1. */
-
-  if (diffs == 1) {
-
-    if ((u8)(ov - nv) <= ARITH_MAX ||
-        (u8)(nv - ov) <= ARITH_MAX) return 1;
-
-  }
-
-  if (blen == 1) return 0;
-
-  /* See if two-byte adjustments to any byte would produce this result. */
-
-  diffs = 0;
-
-  for (i = 0; i < blen / 2; ++i) {
-
-    u16 a = old_val >> (16 * i),
-        b = new_val >> (16 * i);
-
-    if (a != b) { ++diffs; ov = a; nv = b; }
-
-  }
-
-  /* If only one word differs and the values are within range, return 1. */
-
-  if (diffs == 1) {
-
-    if ((u16)(ov - nv) <= ARITH_MAX ||
-        (u16)(nv - ov) <= ARITH_MAX) return 1;
-
-    ov = SWAP16(ov); nv = SWAP16(nv);
-
-    if ((u16)(ov - nv) <= ARITH_MAX ||
-        (u16)(nv - ov) <= ARITH_MAX) return 1;
-
-  }
-
-  /* Finally, let's do the same thing for dwords. */
-
-  if (blen == 4) {
-
-    if ((u32)(old_val - new_val) <= ARITH_MAX ||
-        (u32)(new_val - old_val) <= ARITH_MAX) return 1;
-
-    new_val = SWAP32(new_val);
-    old_val = SWAP32(old_val);
-
-    if ((u32)(old_val - new_val) <= ARITH_MAX ||
-        (u32)(new_val - old_val) <= ARITH_MAX) return 1;
-
-  }
-
-  return 0;
-
-}
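-
-/* As a worked example (with the stock ARITH_MAX = 35): old_val = 0x0100,
-   new_val = 0x00ff, blen = 2. Both bytes differ, so the one-byte pass fails,
-   but the two-byte pass sees 0x0100 - 0x00ff = 1 <= ARITH_MAX and correctly
-   reports the change as reachable via a single 16-bit subtraction. */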
-
-
-/* Last but not least, a similar helper to see if insertion of an 
-   interesting integer is redundant given the insertions done for
-   shorter blen. The last param (check_le) is set if the caller
-   already executed LE insertion for current blen and wants to see
-   if BE variant passed in new_val is unique. */
-
-static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) {
-
-  u32 i, j;
-
-  if (old_val == new_val) return 1;
-
-  /* See if one-byte insertions from interesting_8 over old_val could
-     produce new_val. */
-
-  for (i = 0; i < blen; ++i) {
-
-    for (j = 0; j < sizeof(interesting_8); ++j) {
-
-      u32 tval = (old_val & ~(0xff << (i * 8))) |
-                 (((u8)interesting_8[j]) << (i * 8));
-
-      if (new_val == tval) return 1;
-
-    }
-
-  }
-
-  /* Bail out unless we're also asked to examine two-byte LE insertions
-     as a preparation for BE attempts. */
-
-  if (blen == 2 && !check_le) return 0;
-
-  /* See if two-byte insertions over old_val could give us new_val. */
-
-  for (i = 0; i < blen - 1; ++i) {
-
-    for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
-
-      u32 tval = (old_val & ~(0xffff << (i * 8))) |
-                 (((u16)interesting_16[j]) << (i * 8));
-
-      if (new_val == tval) return 1;
-
-      /* Continue here only if blen > 2. */
-
-      if (blen > 2) {
-
-        tval = (old_val & ~(0xffff << (i * 8))) |
-               (SWAP16(interesting_16[j]) << (i * 8));
-
-        if (new_val == tval) return 1;
-
-      }
-
-    }
-
-  }
-
-  if (blen == 4 && check_le) {
-
-    /* See if four-byte insertions could produce the same result
-       (LE only). */
-
-    for (j = 0; j < sizeof(interesting_32) / 4; ++j)
-      if (new_val == (u32)interesting_32[j]) return 1;
-
-  }
-
-  return 0;
-
-}
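-
-/* As a worked example: old_val = 0x00000000, new_val = 0x0000ff00, blen = 4.
-   Writing the one-byte interesting value -1 (0xff) at byte offset 1 already
-   yields new_val, so the insertion is reported as redundant. */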
-
-
-/* Take the current entry from the queue, fuzz it for a while. This
-   function is a tad too long... returns 0 if fuzzed successfully, 1 if
-   skipped or bailed out. */
-
-static u8 fuzz_one_original(char** argv) {
-
-  s32 len, fd, temp_len, i, j;
-  u8  *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
-  u64 havoc_queued = 0,  orig_hit_cnt, new_hit_cnt;
-  u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
-
-  u8  ret_val = 1, doing_det = 0;
-
-  u8  a_collect[MAX_AUTO_EXTRA];
-  u32 a_len = 0;
-
-#ifdef IGNORE_FINDS
-
-  /* In IGNORE_FINDS mode, skip any entries that weren't in the
-     initial data set. */
-
-  if (queue_cur->depth > 1) return 1;
-
-#else
-
-  if (pending_favored) {
-
-    /* If we have any favored, non-fuzzed new arrivals in the queue,
-       possibly skip to them at the expense of already-fuzzed or non-favored
-       cases. */
-
-    if (((queue_cur->was_fuzzed > 0 || queue_cur->fuzz_level > 0) || !queue_cur->favored) &&
-        UR(100) < SKIP_TO_NEW_PROB) return 1;
-
-  } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
-
-    /* Otherwise, still possibly skip non-favored cases, albeit less often.
-       The odds of skipping stuff are higher for already-fuzzed inputs and
-       lower for never-fuzzed entries. */
-
-    if (queue_cycle > 1 && (queue_cur->fuzz_level == 0 || queue_cur->was_fuzzed)) {
-
-      if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
-
-    } else {
-
-      if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
-
-    }
-
-  }
-
-#endif /* ^IGNORE_FINDS */
-
-  if (not_on_tty) {
-    ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
-         current_entry, queued_paths, unique_crashes);
-    fflush(stdout);
-  }
-
-  /* Map the test case into memory. */
-
-  fd = open(queue_cur->fname, O_RDONLY);
-
-  if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
-
-  len = queue_cur->len;
-
-  orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
-
-  if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s' with len %u", queue_cur->fname, len);
-
-  close(fd);
-
-  /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
-     single byte anyway, so it wouldn't give us any performance or memory usage
-     benefits. */
-
-  out_buf = ck_alloc_nozero(len);
-
-  subseq_tmouts = 0;
-
-  cur_depth = queue_cur->depth;
-
-  /*******************************************
-   * CALIBRATION (only if failed earlier on) *
-   *******************************************/
-
-  if (queue_cur->cal_failed) {
-
-    u8 res = FAULT_TMOUT;
-
-    if (queue_cur->cal_failed < CAL_CHANCES) {
-
-      res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
-
-      if (res == FAULT_ERROR)
-        FATAL("Unable to execute target application");
-
-    }
-
-    if (stop_soon || res != crash_mode) {
-      ++cur_skipped_paths;
-      goto abandon_entry;
-    }
-
-  }
-
-  /************
-   * TRIMMING *
-   ************/
-
-  if (!dumb_mode && !queue_cur->trim_done && !custom_mutator) {
-
-    u8 res = trim_case(argv, queue_cur, in_buf);
-
-    if (res == FAULT_ERROR)
-      FATAL("Unable to execute target application");
-
-    if (stop_soon) {
-      ++cur_skipped_paths;
-      goto abandon_entry;
-    }
-
-    /* Don't retry trimming, even if it failed. */
-
-    queue_cur->trim_done = 1;
-
-    if (len != queue_cur->len) len = queue_cur->len;
-
-  }
-
-  memcpy(out_buf, in_buf, len);
-
-  /*********************
-   * PERFORMANCE SCORE *
-   *********************/
-
-  orig_perf = perf_score = calculate_score(queue_cur);
-
-  if (perf_score == 0) goto abandon_entry;
-
-  if (custom_mutator) {
-
-    stage_short    = "custom";
-    stage_name     = "custom mutator";
-    stage_max      = len << 3;
-    stage_val_type = STAGE_VAL_NONE;
-
-    const u32 max_seed_size = 4096 * 4096;
-    u8* mutated_buf = ck_alloc(max_seed_size);
-
-    orig_hit_cnt = queued_paths + unique_crashes;
-
-    for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-      size_t orig_size = (size_t)len;
-      size_t mutated_size = custom_mutator(out_buf, orig_size, mutated_buf,
-                                           max_seed_size, UR(UINT32_MAX));
-
-      if (mutated_size > 0) {
-
-        out_buf = ck_realloc(out_buf, mutated_size);
-        memcpy(out_buf, mutated_buf, mutated_size);
-
-        if (common_fuzz_stuff(argv, out_buf, (u32)mutated_size))
-          goto abandon_entry;
-
-      }
-
-    }
-
-    ck_free(mutated_buf);
-
-    new_hit_cnt = queued_paths + unique_crashes;
-
-    stage_finds[STAGE_CUSTOM_MUTATOR]  += new_hit_cnt - orig_hit_cnt;
-    stage_cycles[STAGE_CUSTOM_MUTATOR] += stage_max;
-
-    goto abandon_entry;
-
-  }
-
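-  /* For reference, a sketch of the hook an external mutator library is
-     expected to export (inferred from the call above; see
-     AFL_CUSTOM_MUTATOR_LIBRARY in docs/env_variables.txt):
-
-       size_t afl_custom_mutator(u8* data, size_t size, u8* mutated_out,
-                                 size_t max_size, unsigned int seed);
-
-     It returns the number of bytes written to mutated_out, or 0 to skip
-     the current iteration. */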
-
-  /* Skip right away if -d is given, if it has not been chosen sufficiently
-     often to warrant the expensive deterministic stage (fuzz_level), or
-     if it has gone through deterministic testing in earlier, resumed runs
-     (passed_det). */
-
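-  /* (The ternary below is just MIN(queue_cur->depth * 30,
-     havoc_max_mult * 100): deeper entries must earn a higher perf_score
-     before the expensive deterministic stages are spent on them.) */
-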
-  if (skip_deterministic
-     || ((!queue_cur->passed_det)
-        && perf_score < (
-              queue_cur->depth * 30 <= havoc_max_mult * 100
-              ? queue_cur->depth * 30
-              : havoc_max_mult * 100))
-     || queue_cur->passed_det)
-#ifdef USE_PYTHON
-    goto python_stage;
-#else
-    goto havoc_stage;
-#endif
-
-  /* Skip deterministic fuzzing if exec path checksum puts this out of scope
-     for this master instance. */
-
-  if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
-#ifdef USE_PYTHON
-    goto python_stage;
-#else
-    goto havoc_stage;
-#endif
-
-  doing_det = 1;
-
-  /*********************************************
-   * SIMPLE BITFLIP (+dictionary construction) *
-   *********************************************/
-
-#define FLIP_BIT(_ar, _b) do { \
-    u8* _arf = (u8*)(_ar); \
-    u32 _bf = (_b); \
-    _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
-  } while (0)
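-
-/* Example: FLIP_BIT(buf, 10) toggles bit 2 (counting from the MSB) of
-   buf[1], i.e. buf[1] ^= 0x20. The temporaries make the macro safe to use
-   with expression arguments such as FLIP_BIT(out_buf, stage_cur + 1). */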
-
-  /* Single walking bit. */
-
-  stage_short = "flip1";
-  stage_max   = len << 3;
-  stage_name  = "bitflip 1/1";
-
-  stage_val_type = STAGE_VAL_NONE;
-
-  orig_hit_cnt = queued_paths + unique_crashes;
-
-  prev_cksum = queue_cur->exec_cksum;
-
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-    stage_cur_byte = stage_cur >> 3;
-
-    FLIP_BIT(out_buf, stage_cur);
-
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-    FLIP_BIT(out_buf, stage_cur);
-
-    /* While flipping the least significant bit in every byte, pull off an extra
-       trick to detect possible syntax tokens. In essence, the idea is that if
-       you have a binary blob like this:
-
-       xxxxxxxxIHDRxxxxxxxx
-
-       ...and changing the leading and trailing bytes causes variable or no
-       changes in program flow, but touching any character in the "IHDR" string
-       always produces the same, distinctive path, it's highly likely that
-       "IHDR" is an atomically-checked magic value of special significance to
-       the fuzzed format.
-
-       We do this here, rather than as a separate stage, because it's a nice
-       way to keep the operation approximately "free" (i.e., no extra execs).
-       
-       Empirically, performing the check when flipping the least significant bit
-       is advantageous, compared to doing it at the time of more disruptive
-       changes, where the program flow may be affected in more violent ways.
-
-       The caveat is that we won't generate dictionaries in the -d mode or -S
-       mode - but that's probably a fair trade-off.
-
-       This won't work particularly well with paths that exhibit variable
-       behavior, but fails gracefully, so we'll carry out the checks anyway.
-
-      */
-
-    if (!dumb_mode && (stage_cur & 7) == 7) {
-
-      u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
-
-      if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
-
-        /* If at end of file and we are still collecting a string, grab the
-           final character and force output. */
-
-        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-        ++a_len;
-
-        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-          maybe_add_auto(a_collect, a_len);
-
-      } else if (cksum != prev_cksum) {
-
-        /* Otherwise, if the checksum has changed, see if we have something
-           worthwhile queued up, and collect that if the answer is yes. */
-
-        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-          maybe_add_auto(a_collect, a_len);
-
-        a_len = 0;
-        prev_cksum = cksum;
-
-      }
-
-      /* Continue collecting string, but only if the bit flip actually made
-         any difference - we don't want no-op tokens. */
-
-      if (cksum != queue_cur->exec_cksum) {
-
-        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];        
-        ++a_len;
-
-      }
-
-    }
-
-  }
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  stage_finds[STAGE_FLIP1]  += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP1] += stage_max;
-
-  /* Two walking bits. */
-
-  stage_name  = "bitflip 2/1";
-  stage_short = "flip2";
-  stage_max   = (len << 3) - 1;
-
-  orig_hit_cnt = new_hit_cnt;
-
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-    stage_cur_byte = stage_cur >> 3;
-
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
-
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
-
-  }
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  stage_finds[STAGE_FLIP2]  += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP2] += stage_max;
-
-  /* Four walking bits. */
-
-  stage_name  = "bitflip 4/1";
-  stage_short = "flip4";
-  stage_max   = (len << 3) - 3;
-
-  orig_hit_cnt = new_hit_cnt;
-
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-    stage_cur_byte = stage_cur >> 3;
-
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
-    FLIP_BIT(out_buf, stage_cur + 2);
-    FLIP_BIT(out_buf, stage_cur + 3);
-
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
-    FLIP_BIT(out_buf, stage_cur + 2);
-    FLIP_BIT(out_buf, stage_cur + 3);
-
-  }
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  stage_finds[STAGE_FLIP4]  += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP4] += stage_max;
-
-  /* Effector map setup. These macros calculate:
-
-     EFF_APOS      - position of a particular file offset in the map.
-     EFF_ALEN      - length of a map with a particular number of bytes.
-     EFF_SPAN_ALEN - map span for a sequence of bytes.
-
-   */
-
-#define EFF_APOS(_p)          ((_p) >> EFF_MAP_SCALE2)
-#define EFF_REM(_x)           ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
-#define EFF_ALEN(_l)          (EFF_APOS(_l) + !!EFF_REM(_l))
-#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)
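-
-/* Example, with the stock EFF_MAP_SCALE2 of 3 (one map cell per 8 bytes):
-   EFF_APOS(17) == 2 and EFF_REM(17) == 1, so EFF_ALEN(17) == 3; likewise
-   EFF_SPAN_ALEN(6, 4) == EFF_APOS(9) - EFF_APOS(6) + 1 == 2. */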
-
-  /* Initialize effector map for the next step (see comments below). Always
-     flag first and last byte as doing something. */
-
-  eff_map    = ck_alloc(EFF_ALEN(len));
-  eff_map[0] = 1;
-
-  if (EFF_APOS(len - 1) != 0) {
-    eff_map[EFF_APOS(len - 1)] = 1;
-    ++eff_cnt;
-  }
-
-  /* Walking byte. */
-
-  stage_name  = "bitflip 8/8";
-  stage_short = "flip8";
-  stage_max   = len;
-
-  orig_hit_cnt = new_hit_cnt;
-
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-    stage_cur_byte = stage_cur;
-
-    out_buf[stage_cur] ^= 0xFF;
-
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-    /* We also use this stage to pull off a simple trick: we identify
-       bytes that seem to have no effect on the current execution path
-       even when fully flipped - and we skip them during more expensive
-       deterministic stages, such as arithmetics or known ints. */
-
-    if (!eff_map[EFF_APOS(stage_cur)]) {
-
-      u32 cksum;
-
-      /* If in dumb mode or if the file is very short, just flag everything
-         without wasting time on checksums. */
-
-      if (!dumb_mode && len >= EFF_MIN_LEN)
-        cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
-      else
-        cksum = ~queue_cur->exec_cksum;
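-
-      /* (~exec_cksum is guaranteed to differ from exec_cksum, so in that
-         case the byte below is flagged unconditionally.) */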
-
-      if (cksum != queue_cur->exec_cksum) {
-        eff_map[EFF_APOS(stage_cur)] = 1;
-        ++eff_cnt;
-      }
-
-    }
-
-    out_buf[stage_cur] ^= 0xFF;
-
-  }
-
-  /* If the effector map is more than EFF_MAX_PERC dense, just flag the
-     whole thing as worth fuzzing, since we wouldn't be saving much time
-     anyway. */
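-
-  /* (EFF_MAX_PERC defaults to 90 and EFF_MIN_LEN to 128 in config.h.) */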
-
-  if (eff_cnt != EFF_ALEN(len) &&
-      eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
-
-    memset(eff_map, 1, EFF_ALEN(len));
-
-    blocks_eff_select += EFF_ALEN(len);
-
-  } else {
-
-    blocks_eff_select += eff_cnt;
-
-  }
-
-  blocks_eff_total += EFF_ALEN(len);
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  stage_finds[STAGE_FLIP8]  += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP8] += stage_max;
-
-  /* Two walking bytes. */
-
-  if (len < 2) goto skip_bitflip;
-
-  stage_name  = "bitflip 16/8";
-  stage_short = "flip16";
-  stage_cur   = 0;
-  stage_max   = len - 1;
-
-  orig_hit_cnt = new_hit_cnt;
-
-  for (i = 0; i < len - 1; ++i) {
-
-    /* Let's consult the effector map... */
-
-    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-      --stage_max;
-      continue;
-    }
-
-    stage_cur_byte = i;
-
-    *(u16*)(out_buf + i) ^= 0xFFFF;
-
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-    ++stage_cur;
-
-    *(u16*)(out_buf + i) ^= 0xFFFF;
-
-  }
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  stage_finds[STAGE_FLIP16]  += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP16] += stage_max;
-
-  if (len < 4) goto skip_bitflip;
-
-  /* Four walking bytes. */
-
-  stage_name  = "bitflip 32/8";
-  stage_short = "flip32";
-  stage_cur   = 0;
-  stage_max   = len - 3;
-
-  orig_hit_cnt = new_hit_cnt;
-
-  for (i = 0; i < len - 3; ++i) {
-
-    /* Let's consult the effector map... */
-    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-      --stage_max;
-      continue;
-    }
-
-    stage_cur_byte = i;
-
-    *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
-
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-    ++stage_cur;
-
-    *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
-
-  }
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  stage_finds[STAGE_FLIP32]  += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP32] += stage_max;
-
-skip_bitflip:
-
-  if (no_arith) goto skip_arith;
-
-  /**********************
-   * ARITHMETIC INC/DEC *
-   **********************/
-
-  /* 8-bit arithmetics. */
-
-  stage_name  = "arith 8/8";
-  stage_short = "arith8";
-  stage_cur   = 0;
-  stage_max   = 2 * len * ARITH_MAX;
-
-  stage_val_type = STAGE_VAL_LE;
-
-  orig_hit_cnt = new_hit_cnt;
-
-  for (i = 0; i < len; ++i) {
-
-    u8 orig = out_buf[i];
-
-    /* Let's consult the effector map... */
-
-    if (!eff_map[EFF_APOS(i)]) {
-      stage_max -= 2 * ARITH_MAX;
-      continue;
-    }
-
-    stage_cur_byte = i;
-
-    for (j = 1; j <= ARITH_MAX; ++j) {
-
-      u8 r = orig ^ (orig + j);
-
-      /* Do arithmetic operations only if the result couldn't be a product
-         of a bitflip. */
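-
-      /* E.g. with orig == 0x00, j == 3 yields r == 0x03 - two adjacent bits,
-         already covered by "bitflip 2/1", so it is skipped - while j == 5
-         yields r == 0x05, which no bitflip stage produces, so it runs. */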
-
-      if (!could_be_bitflip(r)) {
-
-        stage_cur_val = j;
-        out_buf[i] = orig + j;
-
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
-
-      } else --stage_max;
-
-      r =  orig ^ (orig - j);
-
-      if (!could_be_bitflip(r)) {
-
-        stage_cur_val = -j;
-        out_buf[i] = orig - j;
-
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
-
-      } else --stage_max;
-
-      out_buf[i] = orig;
-
-    }
-
-  }
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  stage_finds[STAGE_ARITH8]  += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_ARITH8] += stage_max;
-
-  /* 16-bit arithmetics, both endians. */
-
-  if (len < 2) goto skip_arith;
-
-  stage_name  = "arith 16/8";
-  stage_short = "arith16";
-  stage_cur   = 0;
-  stage_max   = 4 * (len - 1) * ARITH_MAX;
-
-  orig_hit_cnt = new_hit_cnt;
-
-  for (i = 0; i < len - 1; ++i) {
-
-    u16 orig = *(u16*)(out_buf + i);
-
-    /* Let's consult the effector map... */
-
-    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-      stage_max -= 4 * ARITH_MAX;
-      continue;
-    }
-
-    stage_cur_byte = i;
-
-    for (j = 1; j <= ARITH_MAX; ++j) {
-
-      u16 r1 = orig ^ (orig + j),
-          r2 = orig ^ (orig - j),
-          r3 = orig ^ SWAP16(SWAP16(orig) + j),
-          r4 = orig ^ SWAP16(SWAP16(orig) - j);
-
-      /* Try little endian addition and subtraction first. Do it only
-         if the operation would affect more than one byte (hence the 
-         & 0xff overflow checks) and if it couldn't be a product of
-         a bitflip. */
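-
-      /* E.g. with orig == 0x00ff and j == 1, (orig & 0xff) + j == 0x100, so
-         the carry reaches the high byte and the 16-bit add is attempted;
-         with orig == 0x0010 the add only touches the low byte, which the
-         8-bit pass already covered, so it is skipped. */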
-
-      stage_val_type = STAGE_VAL_LE; 
-
-      if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
-
-        stage_cur_val = j;
-        *(u16*)(out_buf + i) = orig + j;
-
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
- 
-      } else --stage_max;
-
-      if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
-
-        stage_cur_val = -j;
-        *(u16*)(out_buf + i) = orig - j;
-
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
-
-      } else --stage_max;
-
-      /* Big endian comes next. Same deal. */
-
-      stage_val_type = STAGE_VAL_BE;
-
-      if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
-
-        stage_cur_val = j;
-        *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
-
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
-
-      } else --stage_max;
-
-      if ((orig >> 8) < j && !could_be_bitflip(r4)) {
-
-        stage_cur_val = -j;
-        *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
-
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
-
-      } else --stage_max;
-
-      *(u16*)(out_buf + i) = orig;
-
-    }
-
-  }
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  stage_finds[STAGE_ARITH16]  += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_ARITH16] += stage_max;
-
-  /* 32-bit arithmetics, both endians. */
-
-  if (len < 4) goto skip_arith;
-
-  stage_name  = "arith 32/8";
-  stage_short = "arith32";
-  stage_cur   = 0;
-  stage_max   = 4 * (len - 3) * ARITH_MAX;
-
-  orig_hit_cnt = new_hit_cnt;
-
-  for (i = 0; i < len - 3; ++i) {
-
-    u32 orig = *(u32*)(out_buf + i);
-
-    /* Let's consult the effector map... */
-
-    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-      stage_max -= 4 * ARITH_MAX;
-      continue;
-    }
-
-    stage_cur_byte = i;
-
-    for (j = 1; j <= ARITH_MAX; ++j) {
-
-      u32 r1 = orig ^ (orig + j),
-          r2 = orig ^ (orig - j),
-          r3 = orig ^ SWAP32(SWAP32(orig) + j),
-          r4 = orig ^ SWAP32(SWAP32(orig) - j);
-
-      /* Little endian first. Same deal as with 16-bit: we only want to
-         try if the operation would have effect on more than two bytes. */
-
-      stage_val_type = STAGE_VAL_LE;
-
-      if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
-
-        stage_cur_val = j;
-        *(u32*)(out_buf + i) = orig + j;
-
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
-
-      } else --stage_max;
-
-      if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
-
-        stage_cur_val = -j;
-        *(u32*)(out_buf + i) = orig - j;
-
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
-
-      } else --stage_max;
-
-      /* Big endian next. */
-
-      stage_val_type = STAGE_VAL_BE;
-
-      if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
-
-        stage_cur_val = j;
-        *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
-
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
-
-      } else --stage_max;
-
-      if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
-
-        stage_cur_val = -j;
-        *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
-
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
-
-      } else --stage_max;
-
-      *(u32*)(out_buf + i) = orig;
-
-    }
-
-  }
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  stage_finds[STAGE_ARITH32]  += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_ARITH32] += stage_max;
-
-skip_arith:
-
-  /**********************
-   * INTERESTING VALUES *
-   **********************/
-
-  stage_name  = "interest 8/8";
-  stage_short = "int8";
-  stage_cur   = 0;
-  stage_max   = len * sizeof(interesting_8);
-
-  stage_val_type = STAGE_VAL_LE;
-
-  orig_hit_cnt = new_hit_cnt;
-
-  /* Setting 8-bit integers. */
-
-  for (i = 0; i < len; ++i) {
-
-    u8 orig = out_buf[i];
-
-    /* Let's consult the effector map... */
-
-    if (!eff_map[EFF_APOS(i)]) {
-      stage_max -= sizeof(interesting_8);
-      continue;
-    }
-
-    stage_cur_byte = i;
-
-    for (j = 0; j < sizeof(interesting_8); ++j) {
-
-      /* Skip if the value could be a product of bitflips or arithmetics. */
-
-      if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
-          could_be_arith(orig, (u8)interesting_8[j], 1)) {
-        --stage_max;
-        continue;
-      }
-
-      stage_cur_val = interesting_8[j];
-      out_buf[i] = interesting_8[j];
-
-      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-      out_buf[i] = orig;
-      ++stage_cur;
-
-    }
-
-  }
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  stage_finds[STAGE_INTEREST8]  += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_INTEREST8] += stage_max;
-
-  /* Setting 16-bit integers, both endians. */
-
-  if (no_arith || len < 2) goto skip_interest;
-
-  stage_name  = "interest 16/8";
-  stage_short = "int16";
-  stage_cur   = 0;
-  stage_max   = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
-
-  orig_hit_cnt = new_hit_cnt;
-
-  for (i = 0; i < len - 1; ++i) {
-
-    u16 orig = *(u16*)(out_buf + i);
-
-    /* Let's consult the effector map... */
-
-    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-      stage_max -= sizeof(interesting_16);
-      continue;
-    }
-
-    stage_cur_byte = i;
-
-    for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
-
-      stage_cur_val = interesting_16[j];
-
-      /* Skip if this could be a product of a bitflip, arithmetics,
-         or single-byte interesting value insertion. */
-
-      if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) &&
-          !could_be_arith(orig, (u16)interesting_16[j], 2) &&
-          !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
-
-        stage_val_type = STAGE_VAL_LE;
-
-        *(u16*)(out_buf + i) = interesting_16[j];
-
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
-
-      } else --stage_max;
-
-      if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
-          !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
-          !could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
-          !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
-
-        stage_val_type = STAGE_VAL_BE;
-
-        *(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
-
-      } else --stage_max;
-
-    }
-
-    *(u16*)(out_buf + i) = orig;
-
-  }
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  stage_finds[STAGE_INTEREST16]  += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_INTEREST16] += stage_max;
-
-  if (len < 4) goto skip_interest;
-
-  /* Setting 32-bit integers, both endians. */
-
-  stage_name  = "interest 32/8";
-  stage_short = "int32";
-  stage_cur   = 0;
-  stage_max   = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
-
-  orig_hit_cnt = new_hit_cnt;
-
-  for (i = 0; i < len - 3; i++) {
-
-    u32 orig = *(u32*)(out_buf + i);
-
-    /* Let's consult the effector map... */
-
-    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-      stage_max -= sizeof(interesting_32) >> 1;
-      continue;
-    }
-
-    stage_cur_byte = i;
-
-    for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
-
-      stage_cur_val = interesting_32[j];
-
-      /* Skip if this could be a product of a bitflip, arithmetics,
-         or word interesting value insertion. */
-
-      if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) &&
-          !could_be_arith(orig, interesting_32[j], 4) &&
-          !could_be_interest(orig, interesting_32[j], 4, 0)) {
-
-        stage_val_type = STAGE_VAL_LE;
-
-        *(u32*)(out_buf + i) = interesting_32[j];
-
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
-
-      } else --stage_max;
-
-      if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
-          !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
-          !could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
-          !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
-
-        stage_val_type = STAGE_VAL_BE;
-
-        *(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
-
-      } else --stage_max;
-
-    }
-
-    *(u32*)(out_buf + i) = orig;
-
-  }
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  stage_finds[STAGE_INTEREST32]  += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_INTEREST32] += stage_max;
-
-skip_interest:
-
-  /********************
-   * DICTIONARY STUFF *
-   ********************/
-
-  if (!extras_cnt) goto skip_user_extras;
-
-  /* Overwrite with user-supplied extras. */
-
-  stage_name  = "user extras (over)";
-  stage_short = "ext_UO";
-  stage_cur   = 0;
-  stage_max   = extras_cnt * len;
-
-  stage_val_type = STAGE_VAL_NONE;
-
-  orig_hit_cnt = new_hit_cnt;
-
-  for (i = 0; i < len; ++i) {
-
-    u32 last_len = 0;
-
-    stage_cur_byte = i;
-
-    /* Extras are sorted by size, from smallest to largest. This means
-       that we don't have to worry about restoring the buffer in
-       between writes at a particular offset determined by the outer
-       loop. */
-
-    for (j = 0; j < extras_cnt; ++j) {
-
-      /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
-         skip them if there's no room to insert the payload, if the token
-         is redundant, or if its entire span has no bytes set in the effector
-         map. */
-
-      if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
-          extras[j].len > len - i ||
-          !memcmp(extras[j].data, out_buf + i, extras[j].len) ||
-          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
-
-        --stage_max;
-        continue;
-
-      }
-
-      last_len = extras[j].len;
-      memcpy(out_buf + i, extras[j].data, last_len);
-
-      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-      ++stage_cur;
-
-    }
-
-    /* Restore all the clobbered memory. */
-    memcpy(out_buf + i, in_buf + i, last_len);
-
-  }
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  stage_finds[STAGE_EXTRAS_UO]  += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_EXTRAS_UO] += stage_max;
-
-  /* Insertion of user-supplied extras. */
-
-  stage_name  = "user extras (insert)";
-  stage_short = "ext_UI";
-  stage_cur   = 0;
-  stage_max   = extras_cnt * len;
-
-  orig_hit_cnt = new_hit_cnt;
-
-  ex_tmp = ck_alloc(len + MAX_DICT_FILE);
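-
-  /* Note that ex_tmp's head is rebuilt incrementally, one byte per outer
-     iteration (the "copy head" line at the bottom of the loop), so only the
-     token and the tail need to be rewritten for each extra. */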
-
-  for (i = 0; i <= len; ++i) {
-
-    stage_cur_byte = i;
-
-    for (j = 0; j < extras_cnt; ++j) {
-
-      if (len + extras[j].len > MAX_FILE) {
-        --stage_max; 
-        continue;
-      }
-
-      /* Insert token */
-      memcpy(ex_tmp + i, extras[j].data, extras[j].len);
-
-      /* Copy tail */
-      memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
-
-      if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
-        ck_free(ex_tmp);
-        goto abandon_entry;
-      }
-
-      ++stage_cur;
-
-    }
-
-    /* Copy head */
-    ex_tmp[i] = out_buf[i];
-
-  }
-
-  ck_free(ex_tmp);
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  stage_finds[STAGE_EXTRAS_UI]  += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_EXTRAS_UI] += stage_max;
-
-skip_user_extras:
-
-  if (!a_extras_cnt) goto skip_extras;
-
-  stage_name  = "auto extras (over)";
-  stage_short = "ext_AO";
-  stage_cur   = 0;
-  stage_max   = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
-
-  stage_val_type = STAGE_VAL_NONE;
-
-  orig_hit_cnt = new_hit_cnt;
-
-  for (i = 0; i < len; ++i) {
-
-    u32 last_len = 0;
-
-    stage_cur_byte = i;
-
-    for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
-
-      /* See the comment in the earlier code; extras are sorted by size. */
-
-      if (a_extras[j].len > len - i ||
-          !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
-          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) {
-
-        --stage_max;
-        continue;
-
-      }
-
-      last_len = a_extras[j].len;
-      memcpy(out_buf + i, a_extras[j].data, last_len);
-
-      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-      ++stage_cur;
-
-    }
-
-    /* Restore all the clobbered memory. */
-    memcpy(out_buf + i, in_buf + i, last_len);
-
-  }
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  stage_finds[STAGE_EXTRAS_AO]  += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_EXTRAS_AO] += stage_max;
-
-skip_extras:
-
-  /* If we made this to here without jumping to havoc_stage or abandon_entry,
-     we're properly done with deterministic steps and can mark it as such
-     in the .state/ directory. */
-
-  if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
-
-#ifdef USE_PYTHON
-python_stage:
-  /**********************************
-   * EXTERNAL MUTATORS (Python API) *
-   **********************************/
-
-  if (!py_module) goto havoc_stage;
-
-  stage_name  = "python";
-  stage_short = "python";
-  stage_max   = HAVOC_CYCLES * perf_score / havoc_div / 100;
-
-  if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
-
-  orig_hit_cnt = queued_paths + unique_crashes;
-
-  char* retbuf = NULL;
-  size_t retlen = 0;
-
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-    struct queue_entry* target;
-    u32 tid;
-    u8* new_buf;
-
-retry_external_pick:
-    /* Pick a random other queue entry for passing to external API */
-    do { tid = UR(queued_paths); } while (tid == current_entry && queued_paths > 1);
-
-    target = queue;
-
-    while (tid >= 100) { target = target->next_100; tid -= 100; }
-    while (tid--) target = target->next;
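-
-    /* (The queue is a singly linked list; the next_100 shortcut pointers
-       let us hop 100 entries at a time before walking the remainder.) */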
-
-    /* Make sure that the target has a reasonable length. */
-
-    while (target && (target->len < 2 || target == queue_cur) && queued_paths > 1) {
-      target = target->next;
-      ++splicing_with;
-    }
-
-    if (!target) goto retry_external_pick;
-
-    /* Read the additional testcase into a new buffer. */
-    fd = open(target->fname, O_RDONLY);
-    if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
-    new_buf = ck_alloc_nozero(target->len);
-    ck_read(fd, new_buf, target->len, target->fname);
-    close(fd);
-
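-    /* fuzz_py() hands out_buf plus the extra, randomly picked testcase to
-       the external Python mutator, which returns its result through
-       retbuf/retlen - note the plain free() (not ck_free()) on retbuf. */
-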
-    fuzz_py(out_buf, len, new_buf, target->len, &retbuf, &retlen);
-
-    ck_free(new_buf);
-
-    if (retbuf) {
-      if (!retlen) {
-        free(retbuf);
-        goto abandon_entry;
-      }
-
-      if (common_fuzz_stuff(argv, retbuf, retlen)) {
-        free(retbuf);
-        goto abandon_entry;
-      }
-
-      /* Reset retbuf/retlen */
-      free(retbuf);
-      retbuf = NULL;
-      retlen = 0;
-
-      /* If we're finding new stuff, let's run for a bit longer, limits
-         permitting. */
-
-      if (queued_paths != havoc_queued) {
-        if (perf_score <= havoc_max_mult * 100) {
-          stage_max  *= 2;
-          perf_score *= 2;
-        }
-
-        havoc_queued = queued_paths;
-      }
-    }
-  }
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  stage_finds[STAGE_PYTHON]  += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_PYTHON] += stage_max;
-
-  if (python_only) {
-    /* Skip other stages */
-    ret_val = 0;
-    goto abandon_entry;
-  }
-#endif
-
-  /****************
-   * RANDOM HAVOC *
-   ****************/
-
-havoc_stage:
-
-  stage_cur_byte = -1;
-
-  /* The havoc stage mutation code is also invoked when splicing files; if the
-     splice_cycle variable is set, generate different descriptions and such. */
-
-  if (!splice_cycle) {
-
-    stage_name  = "havoc";
-    stage_short = "havoc";
-    stage_max   = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-                  perf_score / havoc_div / 100;
-
-  } else {
-
-    static u8 tmp[32];
-
-    perf_score = orig_perf;
-
-    sprintf(tmp, "splice %u", splice_cycle);
-    stage_name  = tmp;
-    stage_short = "splice";
-    stage_max   = SPLICE_HAVOC * perf_score / havoc_div / 100;
-
-  }
-
-  if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
-
-  temp_len = len;
-
-  orig_hit_cnt = queued_paths + unique_crashes;
-
-  havoc_queued = queued_paths;
-
-  /* We essentially just do several thousand runs (depending on perf_score)
-     where we take the input file and make random stacked tweaks. */
-
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-    u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
-
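-    /* With the stock HAVOC_STACK_POW2 of 7, use_stacking is a power of two
-       between 2 and 128 - that many mutations get stacked per execution. */
-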
-    stage_cur_val = use_stacking;
- 
-    for (i = 0; i < use_stacking; ++i) {
-
-      switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) {
-
-        case 0:
-
-          /* Flip a single bit somewhere. Spooky! */
-
-          FLIP_BIT(out_buf, UR(temp_len << 3));
-          break;
-
-        case 1: 
-
-          /* Set byte to interesting value. */
-
-          out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
-          break;
-
-        case 2:
-
-          /* Set word to interesting value, randomly choosing endian. */
-
-          if (temp_len < 2) break;
-
-          if (UR(2)) {
-
-            *(u16*)(out_buf + UR(temp_len - 1)) =
-              interesting_16[UR(sizeof(interesting_16) >> 1)];
-
-          } else {
-
-            *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16(
-              interesting_16[UR(sizeof(interesting_16) >> 1)]);
-
-          }
-
-          break;
-
-        case 3:
-
-          /* Set dword to interesting value, randomly choosing endian. */
-
-          if (temp_len < 4) break;
-
-          if (UR(2)) {
-  
-            *(u32*)(out_buf + UR(temp_len - 3)) =
-              interesting_32[UR(sizeof(interesting_32) >> 2)];
-
-          } else {
-
-            *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32(
-              interesting_32[UR(sizeof(interesting_32) >> 2)]);
-
-          }
-
-          break;
-
-        case 4:
-
-          /* Randomly subtract from byte. */
-
-          out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
-          break;
-
-        case 5:
-
-          /* Randomly add to byte. */
-
-          out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
-          break;
-
-        case 6:
-
-          /* Randomly subtract from word, random endian. */
-
-          if (temp_len < 2) break;
-
-          if (UR(2)) {
-
-            u32 pos = UR(temp_len - 1);
-
-            *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
-
-          } else {
-
-            u32 pos = UR(temp_len - 1);
-            u16 num = 1 + UR(ARITH_MAX);
-
-            *(u16*)(out_buf + pos) =
-              SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
-
-          }
-
-          break;
-
-        case 7:
-
-          /* Randomly add to word, random endian. */
-
-          if (temp_len < 2) break;
-
-          if (UR(2)) {
-
-            u32 pos = UR(temp_len - 1);
-
-            *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
-
-          } else {
-
-            u32 pos = UR(temp_len - 1);
-            u16 num = 1 + UR(ARITH_MAX);
-
-            *(u16*)(out_buf + pos) =
-              SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
-
-          }
-
-          break;
-
-        case 8:
-
-          /* Randomly subtract from dword, random endian. */
-
-          if (temp_len < 4) break;
-
-          if (UR(2)) {
-
-            u32 pos = UR(temp_len - 3);
-
-            *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
-
-          } else {
-
-            u32 pos = UR(temp_len - 3);
-            u32 num = 1 + UR(ARITH_MAX);
-
-            *(u32*)(out_buf + pos) =
-              SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
-
-          }
-
-          break;
-
-        case 9:
-
-          /* Randomly add to dword, random endian. */
-
-          if (temp_len < 4) break;
-
-          if (UR(2)) {
-
-            u32 pos = UR(temp_len - 3);
-
-            *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
-
-          } else {
-
-            u32 pos = UR(temp_len - 3);
-            u32 num = 1 + UR(ARITH_MAX);
-
-            *(u32*)(out_buf + pos) =
-              SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
-
-          }
-
-          break;
-
-        case 10:
-
-          /* Just set a random byte to a random value. Because,
-             why not. We use XOR with 1-255 to eliminate the
-             possibility of a no-op. */
-
-          out_buf[UR(temp_len)] ^= 1 + UR(255);
-          break;
-
-        case 11 ... 12: {
-
-            /* Delete bytes. We're making this a bit more likely
-               than insertion (the next option) in hopes of keeping
-               files reasonably small. */
-
-            u32 del_from, del_len;
-
-            if (temp_len < 2) break;
-
-            /* Don't delete too much. */
-
-            del_len = choose_block_len(temp_len - 1);
-
-            del_from = UR(temp_len - del_len + 1);
-
-            memmove(out_buf + del_from, out_buf + del_from + del_len,
-                    temp_len - del_from - del_len);
-
-            temp_len -= del_len;
-
-            break;
-
-          }
-
-        case 13:
-
-          if (temp_len + HAVOC_BLK_XL < MAX_FILE) {
-
-            /* Clone bytes (75%) or insert a block of constant bytes (25%). */
-
-            u8  actually_clone = UR(4);
-            u32 clone_from, clone_to, clone_len;
-            u8* new_buf;
-
-            if (actually_clone) {
-
-              clone_len  = choose_block_len(temp_len);
-              clone_from = UR(temp_len - clone_len + 1);
-
-            } else {
-
-              clone_len = choose_block_len(HAVOC_BLK_XL);
-              clone_from = 0;
-
-            }
-
-            clone_to   = UR(temp_len);
-
-            new_buf = ck_alloc_nozero(temp_len + clone_len);
-
-            /* Head */
-
-            memcpy(new_buf, out_buf, clone_to);
-
-            /* Inserted part */
-
-            if (actually_clone)
-              memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
-            else
-              memset(new_buf + clone_to,
-                     UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
-
-            /* Tail */
-            memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
-                   temp_len - clone_to);
-
-            ck_free(out_buf);
-            out_buf = new_buf;
-            temp_len += clone_len;
-
-          }
-
-          break;
-
-        case 14: {
-
-            /* Overwrite bytes with a randomly selected chunk (75%) or fixed
-               bytes (25%). */
-
-            u32 copy_from, copy_to, copy_len;
-
-            if (temp_len < 2) break;
-
-            copy_len  = choose_block_len(temp_len - 1);
-
-            copy_from = UR(temp_len - copy_len + 1);
-            copy_to   = UR(temp_len - copy_len + 1);
-
-            if (UR(4)) {
-
-              if (copy_from != copy_to)
-                memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
-
-            } else memset(out_buf + copy_to,
-                          UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
-
-            break;
-
-          }
-
-        /* Values 15 and 16 can be selected only if there are any extras
-           present in the dictionaries. */
-
-        case 15: {
-
-            /* Overwrite bytes with an extra. */
-
-            if (!extras_cnt || (a_extras_cnt && UR(2))) {
-
-              /* No user-specified extras or odds in our favor. Let's use an
-                 auto-detected one. */
-
-              u32 use_extra = UR(a_extras_cnt);
-              u32 extra_len = a_extras[use_extra].len;
-              u32 insert_at;
-
-              if (extra_len > temp_len) break;
-
-              insert_at = UR(temp_len - extra_len + 1);
-              memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len);
-
-            } else {
-
-              /* No auto extras or odds in our favor. Use the dictionary. */
-
-              u32 use_extra = UR(extras_cnt);
-              u32 extra_len = extras[use_extra].len;
-              u32 insert_at;
-
-              if (extra_len > temp_len) break;
-
-              insert_at = UR(temp_len - extra_len + 1);
-              memcpy(out_buf + insert_at, extras[use_extra].data, extra_len);
-
-            }
-
-            break;
-
-          }
-
-        case 16: {
-
-            u32 use_extra, extra_len, insert_at = UR(temp_len + 1);
-            u8* new_buf;
-
-            /* Insert an extra. Do the same dice-rolling stuff as for the
-               previous case. */
-
-            if (!extras_cnt || (a_extras_cnt && UR(2))) {
-
-              use_extra = UR(a_extras_cnt);
-              extra_len = a_extras[use_extra].len;
-
-              if (temp_len + extra_len >= MAX_FILE) break;
-
-              new_buf = ck_alloc_nozero(temp_len + extra_len);
-
-              /* Head */
-              memcpy(new_buf, out_buf, insert_at);
-
-              /* Inserted part */
-              memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len);
-
-            } else {
-
-              use_extra = UR(extras_cnt);
-              extra_len = extras[use_extra].len;
-
-              if (temp_len + extra_len >= MAX_FILE) break;
-
-              new_buf = ck_alloc_nozero(temp_len + extra_len);
-
-              /* Head */
-              memcpy(new_buf, out_buf, insert_at);
-
-              /* Inserted part */
-              memcpy(new_buf + insert_at, extras[use_extra].data, extra_len);
-
-            }
-
-            /* Tail */
-            memcpy(new_buf + insert_at + extra_len, out_buf + insert_at,
-                   temp_len - insert_at);
-
-            ck_free(out_buf);
-            out_buf   = new_buf;
-            temp_len += extra_len;
-
-            break;
-
-          }
-
-      }
-
-    }
-
-    if (common_fuzz_stuff(argv, out_buf, temp_len))
-      goto abandon_entry;
-
-    /* out_buf might have been mangled a bit, so let's restore it to its
-       original size and shape. */
-
-    if (temp_len < len) out_buf = ck_realloc(out_buf, len);
-    temp_len = len;
-    memcpy(out_buf, in_buf, len);
-
-    /* If we're finding new stuff, let's run for a bit longer, limits
-       permitting. */
-
-    if (queued_paths != havoc_queued) {
-
-      if (perf_score <= havoc_max_mult * 100) {
-        stage_max  *= 2;
-        perf_score *= 2;
-      }
-
-      havoc_queued = queued_paths;
-
-    }
-
-  }
-
-  new_hit_cnt = queued_paths + unique_crashes;
-
-  if (!splice_cycle) {
-    stage_finds[STAGE_HAVOC]  += new_hit_cnt - orig_hit_cnt;
-    stage_cycles[STAGE_HAVOC] += stage_max;
-  } else {
-    stage_finds[STAGE_SPLICE]  += new_hit_cnt - orig_hit_cnt;
-    stage_cycles[STAGE_SPLICE] += stage_max;
-  }
-
-#ifndef IGNORE_FINDS
-
-  /************
-   * SPLICING *
-   ************/
-
-  /* This is a last-resort strategy triggered by a full round with no findings.
-     It takes the current input file, randomly selects another input, and
-     splices them together at some offset, then relies on the havoc
-     code to mutate that blob. */
-
-retry_splicing:
-
-  if (use_splicing && splice_cycle++ < SPLICE_CYCLES &&
-      queued_paths > 1 && queue_cur->len > 1) {
-
-    struct queue_entry* target;
-    u32 tid, split_at;
-    u8* new_buf;
-    s32 f_diff, l_diff;
-
-    /* First of all, if we've modified in_buf for havoc, let's clean that
-       up... */
-
-    if (in_buf != orig_in) {
-      ck_free(in_buf);
-      in_buf = orig_in;
-      len = queue_cur->len;
-    }
-
-    /* Pick a random queue entry and seek to it. Don't splice with yourself. */
-
-    do { tid = UR(queued_paths); } while (tid == current_entry);
-
-    splicing_with = tid;
-    target = queue;
-
-    while (tid >= 100) { target = target->next_100; tid -= 100; }
-    while (tid--) target = target->next;
-
-    /* Make sure that the target has a reasonable length. */
-
-    while (target && (target->len < 2 || target == queue_cur)) {
-      target = target->next;
-      ++splicing_with;
-    }
-
-    if (!target) goto retry_splicing;
-
-    /* Read the testcase into a new buffer. */
-
-    fd = open(target->fname, O_RDONLY);
-
-    if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
-
-    new_buf = ck_alloc_nozero(target->len);
-
-    ck_read(fd, new_buf, target->len, target->fname);
-
-    close(fd);
-
-    /* Find a suitable splicing location, somewhere between the first and
-       the last differing byte. Bail out if the difference is just a single
-       byte or so. */
-
-    locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
-
-    if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
-      ck_free(new_buf);
-      goto retry_splicing;
-    }
-
-    /* Split somewhere between the first and last differing byte. */
-
-    split_at = f_diff + UR(l_diff - f_diff);
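-
-    /* E.g. if the inputs first differ at byte 10 and last differ at byte
-       200, split_at lands in [10, 200): the head comes from in_buf and the
-       tail from the spliced-in testcase. */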
-
-    /* Do the thing. */
-
-    len = target->len;
-    memcpy(new_buf, in_buf, split_at);
-    in_buf = new_buf;
-
-    ck_free(out_buf);
-    out_buf = ck_alloc_nozero(len);
-    memcpy(out_buf, in_buf, len);
-
-#ifdef USE_PYTHON
-    goto python_stage;
-#else
-    goto havoc_stage;
-#endif
-
-  }
-
-#endif /* !IGNORE_FINDS */
-
-  ret_val = 0;
-
-abandon_entry:
-
-  splicing_with = -1;
-
-  /* Update pending_not_fuzzed count if we made it through the calibration
-     cycle and have not seen this entry before. */
-
-  if (!stop_soon && !queue_cur->cal_failed &&
-      (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) {
-    --pending_not_fuzzed;
-    queue_cur->was_fuzzed = 1;
-    if (queue_cur->favored) --pending_favored;
-  }
-
-  ++queue_cur->fuzz_level;
-
-  munmap(orig_in, queue_cur->len);
-
-  if (in_buf != orig_in) ck_free(in_buf);
-  ck_free(out_buf);
-  ck_free(eff_map);
-
-  return ret_val;
-
-#undef FLIP_BIT
-
-}
-
-/* MOpt mode */
-static u8 pilot_fuzzing(char** argv) {
-
-	s32 len, fd, temp_len, i, j;
-	u8  *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
-	u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv;
-	u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
-
-	u8  ret_val = 1, doing_det = 0;
-
-	u8  a_collect[MAX_AUTO_EXTRA];
-	u32 a_len = 0;
-
-#ifdef IGNORE_FINDS
-
-	/* In IGNORE_FINDS mode, skip any entries that weren't in the
-	   initial data set. */
-
-	if (queue_cur->depth > 1) return 1;
-
-#else
-
-	if (pending_favored) {
-
-		/* If we have any favored, non-fuzzed new arrivals in the queue,
-		   possibly skip to them at the expense of already-fuzzed or non-favored
-		   cases. */
-
-		if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
-			UR(100) < SKIP_TO_NEW_PROB) return 1;
-
-	}
-	else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
-
-		/* Otherwise, still possibly skip non-favored cases, albeit less often.
-		   The odds of skipping stuff are higher for already-fuzzed inputs and
-		   lower for never-fuzzed entries. */
-
-		if (queue_cycle > 1 && !queue_cur->was_fuzzed) {
-
-			if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
-
-		}
-		else {
-
-			if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
-
-		}
-
-	}
-
-#endif /* ^IGNORE_FINDS */
-
-	if (not_on_tty) {
-		ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
-			current_entry, queued_paths, unique_crashes);
-		fflush(stdout);
-	}
-
-	/* Map the test case into memory. */
-
-	fd = open(queue_cur->fname, O_RDONLY);
-
-	if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
-
-	len = queue_cur->len;
-
-	orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
-
-	if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname);
-
-	close(fd);
-
-	/* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
-	   single byte anyway, so it wouldn't give us any performance or memory usage
-	   benefits. */
-
-	out_buf = ck_alloc_nozero(len);
-
-	subseq_tmouts = 0;
-
-	cur_depth = queue_cur->depth;
-
-	/*******************************************
-	 * CALIBRATION (only if failed earlier on) *
-	 *******************************************/
-
-	if (queue_cur->cal_failed) {
-
-		u8 res = FAULT_TMOUT;
-
-		if (queue_cur->cal_failed < CAL_CHANCES) {
-
-			res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
-
-			if (res == FAULT_ERROR)
-				FATAL("Unable to execute target application");
-
-		}
-
-		if (stop_soon || res != crash_mode) {
-			++cur_skipped_paths;
-			goto abandon_entry;
-		}
-
-	}
-
-	/************
-	 * TRIMMING *
-	 ************/
-
-	if (!dumb_mode && !queue_cur->trim_done) {
-
-		u8 res = trim_case(argv, queue_cur, in_buf);
-
-		if (res == FAULT_ERROR)
-			FATAL("Unable to execute target application");
-
-		if (stop_soon) {
-			++cur_skipped_paths;
-			goto abandon_entry;
-		}
-
-		/* Don't retry trimming, even if it failed. */
-
-		queue_cur->trim_done = 1;
-
-		if (len != queue_cur->len) len = queue_cur->len;
-
-	}
-
-	memcpy(out_buf, in_buf, len);
-
-	/*********************
-	 * PERFORMANCE SCORE *
-	 *********************/
-
-	orig_perf = perf_score = calculate_score(queue_cur);
-
-	/* Skip right away if -d is given, if we have done deterministic fuzzing on
-	   this entry ourselves (was_fuzzed), or if it has gone through deterministic
-	   testing in earlier, resumed runs (passed_det). */
-
-	if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
-		goto havoc_stage;
-
-	/* Skip deterministic fuzzing if exec path checksum puts this out of scope
-	   for this master instance. */
-
-	if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
-		goto havoc_stage;
-
-
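-	/* MOpt: if no new path or crash has arrived within limit_time_puppet
-	   (set via the -L option), hand this entry over to pacemaker fuzzing,
-	   which skips the deterministic stages below. */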
-	cur_ms_lv = get_cur_time();
-	if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) ||
-		(last_crash_time != 0 && cur_ms_lv - last_crash_time < limit_time_puppet) || last_path_time == 0)))
-	{
-		key_puppet = 1;
-		goto pacemaker_fuzzing;
-	}
-
-	doing_det = 1;
-
-		/*********************************************
-		 * SIMPLE BITFLIP (+dictionary construction) *
-		 *********************************************/
-
-#define FLIP_BIT(_ar, _b) do { \
-    u8* _arf = (u8*)(_ar); \
-    u32 _bf = (_b); \
-    _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
-  } while (0)
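-
-		/* (Re-defined here because fuzz_one_original() #undefs FLIP_BIT on
-		   its way out.) */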
-
-		 /* Single walking bit. */
-
-		stage_short = "flip1";
-		stage_max = len << 3;
-		stage_name = "bitflip 1/1";
-
-
-		stage_val_type = STAGE_VAL_NONE;
-
-		orig_hit_cnt = queued_paths + unique_crashes;
-
-		prev_cksum = queue_cur->exec_cksum;
-
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-			stage_cur_byte = stage_cur >> 3;
-
-			FLIP_BIT(out_buf, stage_cur);
-
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-			FLIP_BIT(out_buf, stage_cur);
-
-			/* While flipping the least significant bit in every byte, pull off an extra
-			   trick to detect possible syntax tokens. In essence, the idea is that if
-			   you have a binary blob like this:
-
-			   xxxxxxxxIHDRxxxxxxxx
-
-			   ...and changing the leading and trailing bytes causes variable or no
-			   changes in program flow, but touching any character in the "IHDR" string
-			   always produces the same, distinctive path, it's highly likely that
-			   "IHDR" is an atomically-checked magic value of special significance to
-			   the fuzzed format.
-
-			   We do this here, rather than as a separate stage, because it's a nice
-			   way to keep the operation approximately "free" (i.e., no extra execs).
-
-			   Empirically, performing the check when flipping the least significant bit
-			   is advantageous, compared to doing it at the time of more disruptive
-			   changes, where the program flow may be affected in more violent ways.
-
-			   The caveat is that we won't generate dictionaries in the -d mode or -S
-			   mode - but that's probably a fair trade-off.
-
-			   This won't work particularly well with paths that exhibit variable
-			   behavior, but fails gracefully, so we'll carry out the checks anyway.
-
-			  */
-
-			if (!dumb_mode && (stage_cur & 7) == 7) {
-
-				u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
-
-				if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
-
-					/* If at end of file and we are still collecting a string, grab the
-					   final character and force output. */
-
-					if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-					++a_len;
-
-					if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-						maybe_add_auto(a_collect, a_len);
-
-				}
-				else if (cksum != prev_cksum) {
-
-					/* Otherwise, if the checksum has changed, see if we have something
-					   worthwhile queued up, and collect that if the answer is yes. */
-
-					if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-						maybe_add_auto(a_collect, a_len);
-
-					a_len = 0;
-					prev_cksum = cksum;
-
-				}
-
-				/* Continue collecting string, but only if the bit flip actually made
-				   any difference - we don't want no-op tokens. */
-
-				if (cksum != queue_cur->exec_cksum) {
-
-					if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-					++a_len;
-
-				}
-
-			}
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP1] += stage_max;
-
-		/* Two walking bits. */
-
-		stage_name = "bitflip 2/1";
-		stage_short = "flip2";
-		stage_max = (len << 3) - 1;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-			stage_cur_byte = stage_cur >> 3;
-
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
-
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP2] += stage_max;
-
-		/* Four walking bits. */
-
-		stage_name = "bitflip 4/1";
-		stage_short = "flip4";
-		stage_max = (len << 3) - 3;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-			stage_cur_byte = stage_cur >> 3;
-
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
-			FLIP_BIT(out_buf, stage_cur + 2);
-			FLIP_BIT(out_buf, stage_cur + 3);
-
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
-			FLIP_BIT(out_buf, stage_cur + 2);
-			FLIP_BIT(out_buf, stage_cur + 3);
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP4] += stage_max;
-
-		/* Effector map setup. These macros calculate:
-
-		   EFF_APOS      - position of a particular file offset in the map.
-		   EFF_ALEN      - length of a map with a particular number of bytes.
-		   EFF_SPAN_ALEN - map span for a sequence of bytes.
-
-		 */
-
-#define EFF_APOS(_p)          ((_p) >> EFF_MAP_SCALE2)
-#define EFF_REM(_x)           ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
-#define EFF_ALEN(_l)          (EFF_APOS(_l) + !!EFF_REM(_l))
-#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)
-
-		 /* Initialize effector map for the next step (see comments below). Always
-			flag first and last byte as doing something. */
-
-		eff_map = ck_alloc(EFF_ALEN(len));
-		eff_map[0] = 1;
-
-		if (EFF_APOS(len - 1) != 0) {
-			eff_map[EFF_APOS(len - 1)] = 1;
-			++eff_cnt;
-		}
-
-		/* Walking byte. */
-
-		stage_name = "bitflip 8/8";
-		stage_short = "flip8";
-		stage_max = len;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-			stage_cur_byte = stage_cur;
-
-			out_buf[stage_cur] ^= 0xFF;
-
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-			/* We also use this stage to pull off a simple trick: we identify
-			   bytes that seem to have no effect on the current execution path
-			   even when fully flipped - and we skip them during more expensive
-			   deterministic stages, such as arithmetics or known ints. */
-
-			if (!eff_map[EFF_APOS(stage_cur)]) {
-
-				u32 cksum;
-
-				/* If in dumb mode or if the file is very short, just flag everything
-				   without wasting time on checksums. */
-
-				if (!dumb_mode && len >= EFF_MIN_LEN)
-					cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
-				else
-					cksum = ~queue_cur->exec_cksum;
-
-				if (cksum != queue_cur->exec_cksum) {
-					eff_map[EFF_APOS(stage_cur)] = 1;
-					++eff_cnt;
-				}
-
-			}
-
-			out_buf[stage_cur] ^= 0xFF;
-
-		}
-
-		/* If the effector map is more than EFF_MAX_PERC dense, just flag the
-		   whole thing as worth fuzzing, since we wouldn't be saving much time
-		   anyway. */
-
-		if (eff_cnt != EFF_ALEN(len) &&
-			eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
-
-			memset(eff_map, 1, EFF_ALEN(len));
-
-			blocks_eff_select += EFF_ALEN(len);
-
-		}
-		else {
-
-			blocks_eff_select += eff_cnt;
-
-		}
-
-		blocks_eff_total += EFF_ALEN(len);
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP8] += stage_max;
-
-		/* Two walking bytes. */
-
-		if (len < 2) goto skip_bitflip;
-
-		stage_name = "bitflip 16/8";
-		stage_short = "flip16";
-		stage_cur = 0;
-		stage_max = len - 1;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len - 1; ++i) {
-
-			/* Let's consult the effector map... */
-
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				--stage_max;
-				continue;
-			}
-
-			stage_cur_byte = i;
-
-			*(u16*)(out_buf + i) ^= 0xFFFF;
-
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-			++stage_cur;
-
-			*(u16*)(out_buf + i) ^= 0xFFFF;
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP16] += stage_max;
-
-		if (len < 4) goto skip_bitflip;
-
-		/* Four walking bytes. */
-
-		stage_name = "bitflip 32/8";
-		stage_short = "flip32";
-		stage_cur = 0;
-		stage_max = len - 3;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len - 3; ++i) {
-
-			/* Let's consult the effector map... */
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				--stage_max;
-				continue;
-			}
-
-			stage_cur_byte = i;
-
-			*(u32*)(out_buf + i) ^= 0xFFFFFFFF;
-
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-			++stage_cur;
-
-			*(u32*)(out_buf + i) ^= 0xFFFFFFFF;
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP32] += stage_max;
-
-	skip_bitflip:
-
-		if (no_arith) goto skip_arith;
-
-		/**********************
-		 * ARITHMETIC INC/DEC *
-		 **********************/
-
-		 /* 8-bit arithmetics. */
-
-		stage_name = "arith 8/8";
-		stage_short = "arith8";
-		stage_cur = 0;
-		stage_max = 2 * len * ARITH_MAX;
-
-		stage_val_type = STAGE_VAL_LE;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len; ++i) {
-
-			u8 orig = out_buf[i];
-
-			/* Let's consult the effector map... */
-
-			if (!eff_map[EFF_APOS(i)]) {
-				stage_max -= 2 * ARITH_MAX;
-				continue;
-			}
-
-			stage_cur_byte = i;
-
-			for (j = 1; j <= ARITH_MAX; ++j) {
-
-				u8 r = orig ^ (orig + j);
-
-				/* Do arithmetic operations only if the result couldn't be a product
-				   of a bitflip. */
-
-				if (!could_be_bitflip(r)) {
-
-					stage_cur_val = j;
-					out_buf[i] = orig + j;
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				r = orig ^ (orig - j);
-
-				if (!could_be_bitflip(r)) {
-
-					stage_cur_val = -j;
-					out_buf[i] = orig - j;
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				out_buf[i] = orig;
-
-			}
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH8] += stage_max;
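
The could_be_bitflip() guard used throughout the arithmetic stages above deduplicates against the walking-flip stages that already ran. A hedged re-implementation of the idea (paraphrasing the helper's behavior, not a verbatim copy):

  #include <stdint.h>

  /* Could XORing xor_val into a value be reproduced by the deterministic
     1-, 2-, 4-bit walking flips or the byte-wide 8/16/32-bit flips? If
     so, the arith/interest stages skip the mutant as redundant.
     Example: 0x01 + 1 = 0x02, xor = 0x03 -> two adjacent bits, i.e. a
     flip2 product, so "add 1" is skipped at that byte.                 */
  static int could_be_bitflip_sketch(uint32_t xor_val) {
    uint32_t sh = 0;
    if (!xor_val) return 1;                          /* no-op           */
    while (!(xor_val & 1)) { ++sh; xor_val >>= 1; }  /* drop trailing 0s */
    if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;
    if (sh & 7) return 0;                /* wider flips are byte-aligned */
    return xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff;
  }
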
-
-		/* 16-bit arithmetics, both endians. */
-
-		if (len < 2) goto skip_arith;
-
-		stage_name = "arith 16/8";
-		stage_short = "arith16";
-		stage_cur = 0;
-		stage_max = 4 * (len - 1) * ARITH_MAX;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len - 1; ++i) {
-
-			u16 orig = *(u16*)(out_buf + i);
-
-			/* Let's consult the effector map... */
-
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				stage_max -= 4 * ARITH_MAX;
-				continue;
-			}
-
-			stage_cur_byte = i;
-
-			for (j = 1; j <= ARITH_MAX; ++j) {
-
-				u16 r1 = orig ^ (orig + j),
-					r2 = orig ^ (orig - j),
-					r3 = orig ^ SWAP16(SWAP16(orig) + j),
-					r4 = orig ^ SWAP16(SWAP16(orig) - j);
-
-				/* Try little endian addition and subtraction first. Do it only
-				   if the operation would affect more than one byte (hence the
-				   & 0xff overflow checks) and if it couldn't be a product of
-				   a bitflip. */
-
-				stage_val_type = STAGE_VAL_LE;
-
-				if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
-
-					stage_cur_val = j;
-					*(u16*)(out_buf + i) = orig + j;
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
-
-					stage_cur_val = -j;
-					*(u16*)(out_buf + i) = orig - j;
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				/* Big endian comes next. Same deal. */
-
-				stage_val_type = STAGE_VAL_BE;
-
-
-				if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
-
-					stage_cur_val = j;
-					*(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				if ((orig >> 8) < j && !could_be_bitflip(r4)) {
-
-					stage_cur_val = -j;
-					*(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				*(u16*)(out_buf + i) = orig;
-
-			}
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH16] += stage_max;
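
The `(orig & 0xff) + j > 0xff` tests above are carry checks: the 16-bit stage only runs additions that actually spill out of the low byte, since anything confined to a single byte was already covered by arith 8/8. A worked instance:

  #include <assert.h>
  #include <stdint.h>

  int main(void) {
    uint16_t orig = 0x12fe;
    unsigned j = 3;
    assert((orig & 0xff) + j > 0xff);        /* carry into the high byte */
    assert((uint16_t)(orig + j) == 0x1301);  /* two bytes really change  */
    return 0;
  }
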
-
-		/* 32-bit arithmetics, both endians. */
-
-		if (len < 4) goto skip_arith;
-
-		stage_name = "arith 32/8";
-		stage_short = "arith32";
-		stage_cur = 0;
-		stage_max = 4 * (len - 3) * ARITH_MAX;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len - 3; ++i) {
-
-			u32 orig = *(u32*)(out_buf + i);
-
-			/* Let's consult the effector map... */
-
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				stage_max -= 4 * ARITH_MAX;
-				continue;
-			}
-
-			stage_cur_byte = i;
-
-			for (j = 1; j <= ARITH_MAX; ++j) {
-
-				u32 r1 = orig ^ (orig + j),
-					r2 = orig ^ (orig - j),
-					r3 = orig ^ SWAP32(SWAP32(orig) + j),
-					r4 = orig ^ SWAP32(SWAP32(orig) - j);
-
-				/* Little endian first. Same deal as with 16-bit: we only want to
-				   try if the operation would have effect on more than two bytes. */
-
-				stage_val_type = STAGE_VAL_LE;
-
-				if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
-
-					stage_cur_val = j;
-					*(u32*)(out_buf + i) = orig + j;
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
-
-					stage_cur_val = -j;
-					*(u32*)(out_buf + i) = orig - j;
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
-
-				} else --stage_max;
-
-				/* Big endian next. */
-
-				stage_val_type = STAGE_VAL_BE;
-
-				if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
-
-					stage_cur_val = j;
-					*(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
-
-					stage_cur_val = -j;
-					*(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				*(u32*)(out_buf + i) = orig;
-
-			}
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH32] += stage_max;
-
-	skip_arith:
-
-		/**********************
-		 * INTERESTING VALUES *
-		 **********************/
-
-		stage_name = "interest 8/8";
-		stage_short = "int8";
-		stage_cur = 0;
-		stage_max = len * sizeof(interesting_8);
-
-		stage_val_type = STAGE_VAL_LE;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		/* Setting 8-bit integers. */
-
-		for (i = 0; i < len; ++i) {
-
-			u8 orig = out_buf[i];
-
-			/* Let's consult the effector map... */
-
-			if (!eff_map[EFF_APOS(i)]) {
-				stage_max -= sizeof(interesting_8);
-				continue;
-			}
-
-			stage_cur_byte = i;
-
-			for (j = 0; j < sizeof(interesting_8); ++j) {
-
-				/* Skip if the value could be a product of bitflips or arithmetics. */
-
-				if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
-					could_be_arith(orig, (u8)interesting_8[j], 1)) {
-					--stage_max;
-					continue;
-				}
-
-				stage_cur_val = interesting_8[j];
-				out_buf[i] = interesting_8[j];
-
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-				out_buf[i] = orig;
-				++stage_cur;
-
-			}
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST8] += stage_max;
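
The deterministic stages form a strict hierarchy: each stage skips any mutant an earlier, cheaper stage could already have produced. Two concrete cases for the 8-bit skip test above, with ARITH_MAX assumed at its stock value of 35:

  #include <assert.h>

  int main(void) {
    /* 0x00 -> 0xff: the xor delta is a full byte flip, which bitflip
       8/8 already tried, so interest 8/8 skips it.                   */
    assert((0x00 ^ 0xff) == 0xff);
    /* 0x00 -> 100: not a 1/2/4/8-bit flip pattern, and farther than
       ARITH_MAX from the original, so interest 8/8 actually runs it. */
    assert(100 - 0 > 35);
    return 0;
  }
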
-
-		/* Setting 16-bit integers, both endians. */
-
-		if (no_arith || len < 2) goto skip_interest;
-
-		stage_name = "interest 16/8";
-		stage_short = "int16";
-		stage_cur = 0;
-		stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len - 1; ++i) {
-
-			u16 orig = *(u16*)(out_buf + i);
-
-			/* Let's consult the effector map... */
-
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				stage_max -= sizeof(interesting_16);
-				continue;
-			}
-
-			stage_cur_byte = i;
-
-			for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
-
-				stage_cur_val = interesting_16[j];
-
-				/* Skip if this could be a product of a bitflip, arithmetics,
-				   or single-byte interesting value insertion. */
-
-				if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) &&
-					!could_be_arith(orig, (u16)interesting_16[j], 2) &&
-					!could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
-
-					stage_val_type = STAGE_VAL_LE;
-
-					*(u16*)(out_buf + i) = interesting_16[j];
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
-					!could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
-					!could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
-					!could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
-
-					stage_val_type = STAGE_VAL_BE;
-
-					*(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-			}
-
-			*(u16*)(out_buf + i) = orig;
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST16] += stage_max;
-
-		if (len < 4) goto skip_interest;
-
-		/* Setting 32-bit integers, both endians. */
-
-		stage_name = "interest 32/8";
-		stage_short = "int32";
-		stage_cur = 0;
-		stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
-
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len - 3; ++i) {
-
-			u32 orig = *(u32*)(out_buf + i);
-
-			/* Let's consult the effector map... */
-
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				stage_max -= sizeof(interesting_32) >> 1;
-				continue;
-			}
-
-			stage_cur_byte = i;
-
-			for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
-
-				stage_cur_val = interesting_32[j];
-
-				/* Skip if this could be a product of a bitflip, arithmetics,
-				   or word interesting value insertion. */
-
-				if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) &&
-					!could_be_arith(orig, interesting_32[j], 4) &&
-					!could_be_interest(orig, interesting_32[j], 4, 0)) {
-
-					stage_val_type = STAGE_VAL_LE;
-
-					*(u32*)(out_buf + i) = interesting_32[j];
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
-					!could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
-					!could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
-					!could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
-
-					stage_val_type = STAGE_VAL_BE;
-
-					*(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-			}
-
-			*(u32*)(out_buf + i) = orig;
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST32] += stage_max;
-
-	skip_interest:
-
-		/********************
-		 * DICTIONARY STUFF *
-		 ********************/
-
-		if (!extras_cnt) goto skip_user_extras;
-
-		/* Overwrite with user-supplied extras. */
-
-		stage_name = "user extras (over)";
-		stage_short = "ext_UO";
-		stage_cur = 0;
-		stage_max = extras_cnt * len;
-
-		stage_val_type = STAGE_VAL_NONE;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len; ++i) {
-
-			u32 last_len = 0;
-
-			stage_cur_byte = i;
-
-			/* Extras are sorted by size, from smallest to largest. This means
-			   that we don't have to worry about restoring the buffer in
-			   between writes at a particular offset determined by the outer
-			   loop. */
-
-			for (j = 0; j < extras_cnt; ++j) {
-
-				/* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
-				   skip them if there's no room to insert the payload, if the token
-				   is redundant, or if its entire span has no bytes set in the effector
-				   map. */
-
-				if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
-					extras[j].len > len - i ||
-					!memcmp(extras[j].data, out_buf + i, extras[j].len) ||
-					!memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
-
-					--stage_max;
-					continue;
-
-				}
-
-				last_len = extras[j].len;
-				memcpy(out_buf + i, extras[j].data, last_len);
-
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-				++stage_cur;
-
-			}
-
-			/* Restore all the clobbered memory. */
-			memcpy(out_buf + i, in_buf + i, last_len);
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_UO] += stage_max;
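
The `UR(extras_cnt) >= MAX_DET_EXTRAS` clause above caps deterministic dictionary work probabilistically instead of truncating the list. A sketch of the effect, with rand() standing in for AFL's UR() and MAX_DET_EXTRAS assumed at its stock value of 200:

  #include <stdlib.h>

  /* With 500 extras and a cap of 200, each token survives the coin
     flip with probability 200/500 at every offset, so about 200 tokens
     are tried per position on average -- the same budget as a
     200-entry dictionary, but no token is ever permanently discarded. */
  static int keep_extra(unsigned extras_cnt, unsigned max_det) {
    if (extras_cnt <= max_det) return 1;
    return (unsigned)rand() % extras_cnt < max_det;
  }
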
-
-		/* Insertion of user-supplied extras. */
-
-		stage_name = "user extras (insert)";
-		stage_short = "ext_UI";
-		stage_cur = 0;
-		stage_max = extras_cnt * len;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		ex_tmp = ck_alloc(len + MAX_DICT_FILE);
-
-		for (i = 0; i <= len; ++i) {
-
-			stage_cur_byte = i;
-
-			for (j = 0; j < extras_cnt; ++j) {
-
-				if (len + extras[j].len > MAX_FILE) {
-					--stage_max;
-					continue;
-				}
-
-				/* Insert token */
-				memcpy(ex_tmp + i, extras[j].data, extras[j].len);
-
-				/* Copy tail */
-				memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
-
-				if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
-					ck_free(ex_tmp);
-					goto abandon_entry;
-				}
-
-				++stage_cur;
-
-			}
-
-			/* Copy head */
-			ex_tmp[i] = out_buf[i];
-
-		}
-
-		ck_free(ex_tmp);
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_UI] += stage_max;
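
The insertion loop above assembles each candidate as head + token + tail, extending the head lazily one byte per outer pass (`ex_tmp[i] = out_buf[i]`). A self-contained sketch of the same assembly:

  #include <string.h>

  /* Build src[0..i) + tok + src[i..n) into dst; returns the new length.
     dst must have room for n + tok_len bytes.                          */
  static size_t insert_at(unsigned char *dst, const unsigned char *src,
                          size_t n, const unsigned char *tok,
                          size_t tok_len, size_t i) {
    memcpy(dst, src, i);                        /* head  */
    memcpy(dst + i, tok, tok_len);              /* token */
    memcpy(dst + i + tok_len, src + i, n - i);  /* tail  */
    return n + tok_len;
  }
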
-
-	skip_user_extras:
-
-		if (!a_extras_cnt) goto skip_extras;
-
-		stage_name = "auto extras (over)";
-		stage_short = "ext_AO";
-		stage_cur = 0;
-		stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
-
-
-		stage_val_type = STAGE_VAL_NONE;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len; ++i) {
-
-			u32 last_len = 0;
-
-			stage_cur_byte = i;
-
-			for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
-
-				/* See the comment in the earlier code; extras are sorted by size. */
-
-				if (a_extras[j].len > len - i ||
-					!memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
-					!memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) {
-
-					--stage_max;
-					continue;
-
-				}
-
-				last_len = a_extras[j].len;
-				memcpy(out_buf + i, a_extras[j].data, last_len);
-
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-				++stage_cur;
-
-			}
-
-			/* Restore all the clobbered memory. */
-			memcpy(out_buf + i, in_buf + i, last_len);
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_AO] += stage_max;
-
-	skip_extras:
-
-		/* If we made it here without jumping to havoc_stage or abandon_entry,
-		   we're properly done with deterministic steps and can mark it as such
-		   in the .state/ directory. */
-
-		if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
-
-		/****************
-		 * RANDOM HAVOC *
-		 ****************/
-
-	havoc_stage:
-	pacemaker_fuzzing:
-
-
-		stage_cur_byte = -1;
-
-		/* The havoc stage mutation code is also invoked when splicing files; if the
-		   splice_cycle variable is set, generate different descriptions and such. */
-
-		if (!splice_cycle) {
-
-			stage_name = "MOpt-havoc";
-			stage_short = "MOpt_havoc";
-			stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-				perf_score / havoc_div / 100;
-
-		}
-		else {
-
-			static u8 tmp[32];
-
-			perf_score = orig_perf;
-
-			sprintf(tmp, "MOpt-splice %u", splice_cycle);
-			stage_name = tmp;
-			stage_short = "MOpt_splice";
-			stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
-
-		}
-
-		s32 temp_len_puppet;
-		cur_ms_lv = get_cur_time();
-
-		{
-
-
-			if (key_puppet == 1)
-			{
-				if (unlikely(orig_hit_cnt_puppet == 0))
-				{
-					orig_hit_cnt_puppet = queued_paths + unique_crashes;
-					last_limit_time_start = get_cur_time();
-					SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low);
-				}
-			}
-
-
-			{
-			havoc_stage_puppet:
-
-				stage_cur_byte = -1;
-
-				/* The havoc stage mutation code is also invoked when splicing files; if the
-				   splice_cycle variable is set, generate different descriptions and such. */
-
-				if (!splice_cycle) {
-
-					stage_name = "MOpt havoc";
-					stage_short = "MOpt_havoc";
-					stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-						perf_score / havoc_div / 100;
-
-				}
-				else {
-					static u8 tmp[32];
-					perf_score = orig_perf;
-					sprintf(tmp, "MOpt splice %u", splice_cycle);
-					stage_name = tmp;
-					stage_short = "MOpt_splice";
-					stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
-				}
-
-				if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
-
-				temp_len = len;
-
-				orig_hit_cnt = queued_paths + unique_crashes;
-
-				havoc_queued = queued_paths;
-
-				for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-					u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
-
-					stage_cur_val = use_stacking;
-
-
-					for (i = 0; i < operator_num; ++i)
-					{
-						stage_cycles_puppet_v3[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i];
-					}
-
-
-					for (i = 0; i < use_stacking; ++i) {
-
-						switch (select_algorithm()) {
-
-						case 0:
-							/* Flip a single bit somewhere. Spooky! */
-							FLIP_BIT(out_buf, UR(temp_len << 3));
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP1] += 1;
-							break;
-
-
-						case 1:
-							if (temp_len < 2) break;
-							temp_len_puppet = UR(temp_len << 3);
-							FLIP_BIT(out_buf, temp_len_puppet);
-							FLIP_BIT(out_buf, temp_len_puppet + 1);
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP2] += 1;
-							break;
-
-						case 2:
-							if (temp_len < 2) break;
-							temp_len_puppet = UR(temp_len << 3);
-							FLIP_BIT(out_buf, temp_len_puppet);
-							FLIP_BIT(out_buf, temp_len_puppet + 1);
-							FLIP_BIT(out_buf, temp_len_puppet + 2);
-							FLIP_BIT(out_buf, temp_len_puppet + 3);
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP4] += 1;
-							break;
-
-						case 3:
-							if (temp_len < 4) break;
-							out_buf[UR(temp_len)] ^= 0xFF;
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP8] += 1;
-							break;
-
-						case 4:
-							if (temp_len < 8) break;
-							*(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF;
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP16] += 1;
-							break;
-
-						case 5:
-							if (temp_len < 8) break;
-							*(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF;
-							stage_cycles_puppet_v2[swarm_now][STAGE_FLIP32] += 1;
-							break;
-
-						case 6:
-							out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
-							out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
-							stage_cycles_puppet_v2[swarm_now][STAGE_ARITH8] += 1;
-							break;
-
-						case 7:
-							/* Randomly subtract from word, random endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 1);
-								*(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
-							}
-							else {
-								u32 pos = UR(temp_len - 1);
-								u16 num = 1 + UR(ARITH_MAX);
-								*(u16*)(out_buf + pos) =
-									SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
-							}
-							/* Randomly add to word, random endian. */
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 1);
-								*(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
-							}
-							else {
-								u32 pos = UR(temp_len - 1);
-								u16 num = 1 + UR(ARITH_MAX);
-								*(u16*)(out_buf + pos) =
-									SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
-							}
-							stage_cycles_puppet_v2[swarm_now][STAGE_ARITH16] += 1;
-							break;
-
-
-						case 8:
-							/* Randomly subtract from dword, random endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 3);
-								*(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
-							}
-							else {
-								u32 pos = UR(temp_len - 3);
-								u32 num = 1 + UR(ARITH_MAX);
-								*(u32*)(out_buf + pos) =
-									SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
-							}
-							/* Randomly add to dword, random endian. */
-							//if (temp_len < 4) break;
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 3);
-								*(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
-							}
-							else {
-								u32 pos = UR(temp_len - 3);
-								u32 num = 1 + UR(ARITH_MAX);
-								*(u32*)(out_buf + pos) =
-									SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
-							}
-							stage_cycles_puppet_v2[swarm_now][STAGE_ARITH32] += 1;
-							break;
-
-
-						case 9:
-							/* Set byte to interesting value. */
-							if (temp_len < 4) break;
-							out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
-							stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST8] += 1;
-							break;
-
-						case 10:
-							/* Set word to interesting value, randomly choosing endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								*(u16*)(out_buf + UR(temp_len - 1)) =
-									interesting_16[UR(sizeof(interesting_16) >> 1)];
-							}
-							else {
-								*(u16*)(out_buf + UR(temp_len - 1)) = SWAP16(
-									interesting_16[UR(sizeof(interesting_16) >> 1)]);
-							}
-							stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST16] += 1;
-							break;
-
-
-						case 11:
-							/* Set dword to interesting value, randomly choosing endian. */
-
-							if (temp_len < 8) break;
-
-							if (UR(2)) {
-								*(u32*)(out_buf + UR(temp_len - 3)) =
-									interesting_32[UR(sizeof(interesting_32) >> 2)];
-							}
-							else {
-								*(u32*)(out_buf + UR(temp_len - 3)) = SWAP32(
-									interesting_32[UR(sizeof(interesting_32) >> 2)]);
-							}
-							stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST32] += 1;
-							break;
-
-
-						case 12:
-
-							/* Just set a random byte to a random value. Because,
-							   why not. We use XOR with 1-255 to eliminate the
-							   possibility of a no-op. */
-
-							out_buf[UR(temp_len)] ^= 1 + UR(255);
-							stage_cycles_puppet_v2[swarm_now][STAGE_RANDOMBYTE] += 1;
-							break;
-
-						case 13: {
-
-							/* Delete bytes. We're making this a bit more likely
-							   than insertion (the next option) in hopes of keeping
-							   files reasonably small. */
-
-							u32 del_from, del_len;
-
-							if (temp_len < 2) break;
-
-							/* Don't delete too much. */
-
-							del_len = choose_block_len(temp_len - 1);
-
-							del_from = UR(temp_len - del_len + 1);
-
-							memmove(out_buf + del_from, out_buf + del_from + del_len,
-								temp_len - del_from - del_len);
-
-							temp_len -= del_len;
-							stage_cycles_puppet_v2[swarm_now][STAGE_DELETEBYTE] += 1;
-							break;
-
-						}
-
-						case 14:
-
-							if (temp_len + HAVOC_BLK_XL < MAX_FILE) {
-
-								/* Clone bytes (75%) or insert a block of constant bytes (25%). */
-
-								u8  actually_clone = UR(4);
-								u32 clone_from, clone_to, clone_len;
-								u8* new_buf;
-
-								if (actually_clone) {
-
-									clone_len = choose_block_len(temp_len);
-									clone_from = UR(temp_len - clone_len + 1);
-
-								}
-								else {
-
-									clone_len = choose_block_len(HAVOC_BLK_XL);
-									clone_from = 0;
-
-								}
-
-								clone_to = UR(temp_len);
-
-								new_buf = ck_alloc_nozero(temp_len + clone_len);
-
-								/* Head */
-
-								memcpy(new_buf, out_buf, clone_to);
-
-								/* Inserted part */
-
-								if (actually_clone)
-									memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
-								else
-									memset(new_buf + clone_to,
-										UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
-
-								/* Tail */
-								memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
-									temp_len - clone_to);
-
-								ck_free(out_buf);
-								out_buf = new_buf;
-								temp_len += clone_len;
-								stage_cycles_puppet_v2[swarm_now][STAGE_Clone75] += 1;
-							}
-
-							break;
-
-						case 15: {
-
-							/* Overwrite bytes with a randomly selected chunk (75%) or fixed
-							   bytes (25%). */
-
-							u32 copy_from, copy_to, copy_len;
-
-							if (temp_len < 2) break;
-
-							copy_len = choose_block_len(temp_len - 1);
-
-							copy_from = UR(temp_len - copy_len + 1);
-							copy_to = UR(temp_len - copy_len + 1);
-
-							if (UR(4)) {
-
-								if (copy_from != copy_to)
-									memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
-
-							}
-							else memset(out_buf + copy_to,
-								UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
-							stage_cycles_puppet_v2[swarm_now][STAGE_OverWrite75] += 1;
-							break;
-
-						}
-
-
-						}
-
-					}
-
-
-					tmp_pilot_time += 1;
-
-					u64 temp_total_found = queued_paths + unique_crashes;
-
-					if (common_fuzz_stuff(argv, out_buf, temp_len))
-						goto abandon_entry_puppet;
-
-					/* out_buf might have been mangled a bit, so let's restore it to its
-					   original size and shape. */
-
-					if (temp_len < len) out_buf = ck_realloc(out_buf, len);
-					temp_len = len;
-					memcpy(out_buf, in_buf, len);
-
-					/* If we're finding new stuff, let's run for a bit longer, limits
-					   permitting. */
-
-					if (queued_paths != havoc_queued) {
-
-						if (perf_score <= havoc_max_mult * 100) {
-							stage_max *= 2;
-							perf_score *= 2;
-						}
-
-						havoc_queued = queued_paths;
-
-					}
-
-					if (unlikely(queued_paths + unique_crashes > temp_total_found))
-					{
-						u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found;
-						total_puppet_find = total_puppet_find + temp_temp_puppet;
-						for (i = 0; i < 16; ++i)
-						{
-							if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet_v3[swarm_now][i])
-								stage_finds_puppet_v2[swarm_now][i] += temp_temp_puppet;
-						}
-					}
-
-				}
-				new_hit_cnt = queued_paths + unique_crashes;
-
-				if (!splice_cycle) {
-          stage_finds[STAGE_HAVOC]  += new_hit_cnt - orig_hit_cnt;
-          stage_cycles[STAGE_HAVOC] += stage_max;
-        } else {
-          stage_finds[STAGE_SPLICE]  += new_hit_cnt - orig_hit_cnt;
-          stage_cycles[STAGE_SPLICE] += stage_max;
-        }
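
A note on the stacking count used above: `1 << (1 + UR(HAVOC_STACK_POW2))` draws the exponent uniformly, so the number of stacked mutations is log-uniform over {2, 4, ..., 128} with the stock HAVOC_STACK_POW2 of 7. A sketch, with rand() standing in for UR():

  #include <stdlib.h>

  /* Returns 2, 4, 8, ..., 2^pow2 with equal probability per power, so
     most havoc rounds stack only a handful of mutations while a few
     stack many.                                                       */
  static unsigned stacking_count(unsigned pow2 /* HAVOC_STACK_POW2 */) {
    return 1u << (1 + (unsigned)rand() % pow2);
  }
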
-
-#ifndef IGNORE_FINDS
-
-				/************
-				 * SPLICING *
-				 ************/
-
-
-			retry_splicing_puppet:
-
-				if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet &&
-					queued_paths > 1 && queue_cur->len > 1) {
-
-					struct queue_entry* target;
-					u32 tid, split_at;
-					u8* new_buf;
-					s32 f_diff, l_diff;
-
-					/* First of all, if we've modified in_buf for havoc, let's clean that
-					   up... */
-
-					if (in_buf != orig_in) {
-						ck_free(in_buf);
-						in_buf = orig_in;
-						len = queue_cur->len;
-					}
-
-					/* Pick a random queue entry and seek to it. Don't splice with yourself. */
-
-					do { tid = UR(queued_paths); } while (tid == current_entry);
-
-					splicing_with = tid;
-					target = queue;
-
-					while (tid >= 100) { target = target->next_100; tid -= 100; }
-					while (tid--) target = target->next;
-
-					/* Make sure that the target has a reasonable length. */
-
-					while (target && (target->len < 2 || target == queue_cur)) {
-						target = target->next;
-						++splicing_with;
-					}
-
-					if (!target) goto retry_splicing_puppet;
-
-					/* Read the testcase into a new buffer. */
-
-					fd = open(target->fname, O_RDONLY);
-
-					if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
-
-					new_buf = ck_alloc_nozero(target->len);
-
-					ck_read(fd, new_buf, target->len, target->fname);
-
-					close(fd);
-
-					/* Find a suitable splicing location, somewhere between the first and
-					   the last differing byte. Bail out if the difference is just a single
-					   byte or so. */
-
-					locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
-
-					if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
-						ck_free(new_buf);
-						goto retry_splicing_puppet;
-					}
-
-					/* Split somewhere between the first and last differing byte. */
-
-					split_at = f_diff + UR(l_diff - f_diff);
-
-					/* Do the thing. */
-
-					len = target->len;
-					memcpy(new_buf, in_buf, split_at);
-					in_buf = new_buf;
-					ck_free(out_buf);
-					out_buf = ck_alloc_nozero(len);
-					memcpy(out_buf, in_buf, len);
-					goto havoc_stage_puppet;
-
-				}
-
-#endif /* !IGNORE_FINDS */
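
To make the splice step concrete: locate_diffs() reports the first and last positions where the two parents disagree, and the crossover point is drawn uniformly from strictly inside that window, guaranteeing the child mixes material from both. A hedged sketch of the same semantics:

  #include <stdlib.h>
  #include <sys/types.h>

  /* First/last differing byte over len bytes; both -1 if identical. */
  static void locate_diffs_sketch(const unsigned char *a,
                                  const unsigned char *b, size_t len,
                                  ssize_t *first, ssize_t *last) {
    ssize_t f = -1, l = -1;
    size_t  i;
    for (i = 0; i < len; i++)
      if (a[i] != b[i]) { if (f < 0) f = (ssize_t)i; l = (ssize_t)i; }
    *first = f; *last = l;
    /* The caller bails unless f >= 0, l >= 2 and f != l, then picks
       split_at = f + UR(l - f), i.e. somewhere in [f, l).           */
  }
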
-
-				ret_val = 0;
-
-			abandon_entry:
-			abandon_entry_puppet:
-
-				if (splice_cycle >= SPLICE_CYCLES_puppet)
-					SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low);
-
-
-				splicing_with = -1;
-
-				/* Update pending_not_fuzzed count if we made it through the calibration
-				   cycle and have not seen this entry before. */
-
-				   // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) {
-				   //   queue_cur->was_fuzzed = 1;
-				   //   --pending_not_fuzzed;
-				   //   if (queue_cur->favored) --pending_favored;
-				   // }
-
-				munmap(orig_in, queue_cur->len);
-
-				if (in_buf != orig_in) ck_free(in_buf);
-				ck_free(out_buf);
-				ck_free(eff_map);
-
-
-				if (key_puppet == 1) {
-				if (unlikely(queued_paths + unique_crashes > ((queued_paths + unique_crashes) * limit_time_bound + orig_hit_cnt_puppet))) {
-						key_puppet = 0;
-						cur_ms_lv = get_cur_time();
-						new_hit_cnt = queued_paths + unique_crashes;
-						orig_hit_cnt_puppet = 0;
-						last_limit_time_start = 0;
-					}
-				}
-
-
-				if (unlikely(tmp_pilot_time > period_pilot)) {
-					total_pacemaker_time += tmp_pilot_time;
-					new_hit_cnt = queued_paths + unique_crashes;
-					swarm_fitness[swarm_now] = (double)(total_puppet_find - temp_puppet_find) / ((double)(tmp_pilot_time) / period_pilot_tmp);
-					tmp_pilot_time = 0;
-					temp_puppet_find = total_puppet_find;
-
-					u64 temp_stage_finds_puppet = 0;
-					for (i = 0; i < operator_num; ++i) {
-						double temp_eff = 0.0;
-
-						if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet[swarm_now][i])
-							temp_eff = (double)(stage_finds_puppet_v2[swarm_now][i] - stage_finds_puppet[swarm_now][i]) /
-							(double)(stage_cycles_puppet_v2[swarm_now][i] - stage_cycles_puppet[swarm_now][i]);
-
-						if (eff_best[swarm_now][i] < temp_eff) {
-							eff_best[swarm_now][i] = temp_eff;
-							L_best[swarm_now][i] = x_now[swarm_now][i];
-						}
-
-						stage_finds_puppet[swarm_now][i] = stage_finds_puppet_v2[swarm_now][i];
-						stage_cycles_puppet[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i];
-						temp_stage_finds_puppet += stage_finds_puppet[swarm_now][i];
-					}
-
-					swarm_now = swarm_now + 1;
-					if (swarm_now == swarm_num) {
-						key_module = 1;
-						for (i = 0; i < operator_num; ++i) {
-							core_operator_cycles_puppet_v2[i] = core_operator_cycles_puppet[i];
-							core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet[i];
-							core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i];
-						}
-
-						double swarm_eff = 0.0;
-						swarm_now = 0;
-						for (i = 0; i < swarm_num; ++i) {
-							if (swarm_fitness[i] > swarm_eff) {
-								swarm_eff = swarm_fitness[i];
-								swarm_now = i;
-							}
-						}
-						if (swarm_now < 0 || swarm_now > swarm_num - 1)
-							PFATAL("swarm_now out of range: %d", swarm_now);
-
-					}
-				}
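
In plain terms, the block above is MOpt's particle-swarm bookkeeping: once a pilot period of period_pilot executions ends, the swarm's fitness is its normalized find rate for that period,

  swarm_fitness[swarm_now] = (total_puppet_find - temp_puppet_find)
                           / ((double)tmp_pilot_time / period_pilot_tmp);

per-operator efficiency (finds per cycle) feeds eff_best/L_best, and once every swarm has had a pilot turn, the fittest swarm becomes swarm_now and control passes to the core module (key_module = 1).
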
-				return ret_val;
-			}
-		}
-
-
-#undef FLIP_BIT
-
-}
-
-
-static u8 core_fuzzing(char** argv) {
-	int i;
-
-	if (swarm_num == 1) {
-		key_module = 2;
-		return 0;
-	}
-
-
-		s32 len, fd, temp_len, j;
-		u8  *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
-		u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv;
-		u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
-
-		u8  ret_val = 1, doing_det = 0;
-
-		u8  a_collect[MAX_AUTO_EXTRA];
-		u32 a_len = 0;
-
-#ifdef IGNORE_FINDS
-
-		/* In IGNORE_FINDS mode, skip any entries that weren't in the
-		   initial data set. */
-
-		if (queue_cur->depth > 1) return 1;
-
-#else
-
-		if (pending_favored) {
-
-			/* If we have any favored, non-fuzzed new arrivals in the queue,
-			   possibly skip to them at the expense of already-fuzzed or non-favored
-			   cases. */
-
-			if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
-				UR(100) < SKIP_TO_NEW_PROB) return 1;
-
-		} else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
-
-			/* Otherwise, still possibly skip non-favored cases, albeit less often.
-			   The odds of skipping stuff are higher for already-fuzzed inputs and
-			   lower for never-fuzzed entries. */
-
-			if (queue_cycle > 1 && !queue_cur->was_fuzzed) {
-
-				if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
-
-			} else {
-
-				if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
-
-			}
-
-		}
-
-#endif /* ^IGNORE_FINDS */
-
-		if (not_on_tty) {
-			ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
-				current_entry, queued_paths, unique_crashes);
-			fflush(stdout);
-		}
-
-		/* Map the test case into memory. */
-
-		fd = open(queue_cur->fname, O_RDONLY);
-
-		if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
-
-		len = queue_cur->len;
-
-		orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
-
-		if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname);
-
-		close(fd);
-
-		/* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
-		   single byte anyway, so it wouldn't give us any performance or memory usage
-		   benefits. */
-
-		out_buf = ck_alloc_nozero(len);
-
-		subseq_tmouts = 0;
-
-		cur_depth = queue_cur->depth;
-
-		/*******************************************
-		 * CALIBRATION (only if failed earlier on) *
-		 *******************************************/
-
-		if (queue_cur->cal_failed) {
-
-			u8 res = FAULT_TMOUT;
-
-			if (queue_cur->cal_failed < CAL_CHANCES) {
-
-				res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
-
-				if (res == FAULT_ERROR)
-					FATAL("Unable to execute target application");
-
-			}
-
-			if (stop_soon || res != crash_mode) {
-				++cur_skipped_paths;
-				goto abandon_entry;
-			}
-
-		}
-
-		/************
-		 * TRIMMING *
-		 ************/
-
-		if (!dumb_mode && !queue_cur->trim_done) {
-
-			u8 res = trim_case(argv, queue_cur, in_buf);
-
-			if (res == FAULT_ERROR)
-				FATAL("Unable to execute target application");
-
-			if (stop_soon) {
-				++cur_skipped_paths;
-				goto abandon_entry;
-			}
-
-			/* Don't retry trimming, even if it failed. */
-
-			queue_cur->trim_done = 1;
-
-			if (len != queue_cur->len) len = queue_cur->len;
-
-		}
-
-		memcpy(out_buf, in_buf, len);
-
-		/*********************
-		 * PERFORMANCE SCORE *
-		 *********************/
-
-		orig_perf = perf_score = calculate_score(queue_cur);
-
-		/* Skip right away if -d is given, if we have done deterministic fuzzing on
-		   this entry ourselves (was_fuzzed), or if it has gone through deterministic
-		   testing in earlier, resumed runs (passed_det). */
-
-		if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
-			goto havoc_stage;
-
-		/* Skip deterministic fuzzing if exec path checksum puts this out of scope
-		   for this master instance. */
-
-		if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
-			goto havoc_stage;
-
-
-		cur_ms_lv = get_cur_time();
-		if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) ||
-			(last_crash_time != 0 && cur_ms_lv - last_crash_time < limit_time_puppet) || last_path_time == 0)))
-		{
-			key_puppet = 1;
-			goto pacemaker_fuzzing;
-		}
-
-		doing_det = 1;
-
-		/*********************************************
-		 * SIMPLE BITFLIP (+dictionary construction) *
-		 *********************************************/
-
-#define FLIP_BIT(_ar, _b) do { \
-    u8* _arf = (u8*)(_ar); \
-    u32 _bf = (_b); \
-    _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
-  } while (0)
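
A worked instance of FLIP_BIT(), with the macro copied locally so the example stands alone:

  #include <assert.h>
  #include <stdint.h>

  #define FLIP_BIT(_ar, _b) do { \
      uint8_t* _arf = (uint8_t*)(_ar); \
      uint32_t _bf = (_b); \
      _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
    } while (0)

  int main(void) {
    uint8_t buf[2] = { 0, 0 };
    FLIP_BIT(buf, 10);   /* bit 10 -> byte 1, mask 128 >> 2 == 0x20 */
    assert(buf[1] == 0x20);
    FLIP_BIT(buf, 10);   /* the XOR is its own inverse              */
    assert(buf[1] == 0x00);
    return 0;
  }
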
-
-		 /* Single walking bit. */
-
-		stage_short = "flip1";
-		stage_max = len << 3;
-		stage_name = "bitflip 1/1";
-
-		stage_val_type = STAGE_VAL_NONE;
-
-		orig_hit_cnt = queued_paths + unique_crashes;
-
-		prev_cksum = queue_cur->exec_cksum;
-
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-			stage_cur_byte = stage_cur >> 3;
-
-			FLIP_BIT(out_buf, stage_cur);
-
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-			FLIP_BIT(out_buf, stage_cur);
-
-			/* While flipping the least significant bit in every byte, pull off an extra
-			   trick to detect possible syntax tokens. In essence, the idea is that if
-			   you have a binary blob like this:
-
-			   xxxxxxxxIHDRxxxxxxxx
-
-			   ...and changing the leading and trailing bytes causes variable or no
-			   changes in program flow, but touching any character in the "IHDR" string
-			   always produces the same, distinctive path, it's highly likely that
-			   "IHDR" is an atomically-checked magic value of special significance to
-			   the fuzzed format.
-
-			   We do this here, rather than as a separate stage, because it's a nice
-			   way to keep the operation approximately "free" (i.e., no extra execs).
-
-			   Empirically, performing the check when flipping the least significant bit
-			   is advantageous, compared to doing it at the time of more disruptive
-			   changes, where the program flow may be affected in more violent ways.
-
-			   The caveat is that we won't generate dictionaries in the -d mode or -S
-			   mode - but that's probably a fair trade-off.
-
-			   This won't work particularly well with paths that exhibit variable
-			   behavior, but fails gracefully, so we'll carry out the checks anyway.
-
-			  */
-
-			if (!dumb_mode && (stage_cur & 7) == 7) {
-
-				u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
-
-				if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
-
-					/* If at end of file and we are still collecting a string, grab the
-					   final character and force output. */
-
-					if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-					++a_len;
-
-					if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-						maybe_add_auto(a_collect, a_len);
-
-				}
-				else if (cksum != prev_cksum) {
-
-					/* Otherwise, if the checksum has changed, see if we have something
-					   worthwhile queued up, and collect that if the answer is yes. */
-
-					if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-						maybe_add_auto(a_collect, a_len);
-
-					a_len = 0;
-					prev_cksum = cksum;
-
-				}
-
-				/* Continue collecting string, but only if the bit flip actually made
-				   any difference - we don't want no-op tokens. */
-
-				if (cksum != queue_cur->exec_cksum) {
-
-					if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-					++a_len;
-
-				}
-
-			}
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP1] += stage_max;
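
To ground the IHDR example from the long comment above: a dictionary candidate is collected while consecutive low-bit flips all shift the path checksum to the same distinctive value, and abandoned when the checksum changes. A hedged sketch of that collection loop (`cksum_of` is a hypothetical stand-in for running the target with byte i's low bit flipped; the maybe_add_auto() flush is elided):

  #include <stddef.h>

  typedef unsigned (*cksum_fn)(size_t byte_idx); /* hypothetical probe */

  static size_t collect_token(const unsigned char *buf, size_t len,
                              cksum_fn cksum_of, unsigned base_cksum,
                              unsigned char *out, size_t out_max) {
    size_t   a_len = 0;
    unsigned prev  = base_cksum;
    size_t   i;
    for (i = 0; i < len; i++) {
      unsigned c = cksum_of(i);
      if (c != prev) { a_len = 0; prev = c; }   /* run broken: restart */
      if (c != base_cksum && a_len < out_max)   /* flip had an effect  */
        out[a_len++] = buf[i];
    }
    return a_len;
  }
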
-
-		/* Two walking bits. */
-
-		stage_name = "bitflip 2/1";
-		stage_short = "flip2";
-		stage_max = (len << 3) - 1;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-			stage_cur_byte = stage_cur >> 3;
-
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
-
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP2] += stage_max;
-
-
-		/* Four walking bits. */
-
-		stage_name = "bitflip 4/1";
-		stage_short = "flip4";
-		stage_max = (len << 3) - 3;
-
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-			stage_cur_byte = stage_cur >> 3;
-
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
-			FLIP_BIT(out_buf, stage_cur + 2);
-			FLIP_BIT(out_buf, stage_cur + 3);
-
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-			FLIP_BIT(out_buf, stage_cur);
-			FLIP_BIT(out_buf, stage_cur + 1);
-			FLIP_BIT(out_buf, stage_cur + 2);
-			FLIP_BIT(out_buf, stage_cur + 3);
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP4] += stage_max;
-
-
-		/* Effector map setup. These macros calculate:
-
-		   EFF_APOS      - position of a particular file offset in the map.
-		   EFF_ALEN      - length of a map with a particular number of bytes.
-		   EFF_SPAN_ALEN - map span for a sequence of bytes.
-
-		 */
-
-#define EFF_APOS(_p)          ((_p) >> EFF_MAP_SCALE2)
-#define EFF_REM(_x)           ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
-#define EFF_ALEN(_l)          (EFF_APOS(_l) + !!EFF_REM(_l))
-#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)
-
-		 /* Initialize effector map for the next step (see comments below). Always
-			flag first and last byte as doing something. */
-
-		eff_map = ck_alloc(EFF_ALEN(len));
-		eff_map[0] = 1;
-
-		if (EFF_APOS(len - 1) != 0) {
-			eff_map[EFF_APOS(len - 1)] = 1;
-			++eff_cnt;
-		}
-
-		/* Walking byte. */
-
-		stage_name = "bitflip 8/8";
-		stage_short = "flip8";
-		stage_max = len;
-
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-			stage_cur_byte = stage_cur;
-
-			out_buf[stage_cur] ^= 0xFF;
-
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-			/* We also use this stage to pull off a simple trick: we identify
-			   bytes that seem to have no effect on the current execution path
-			   even when fully flipped - and we skip them during more expensive
-			   deterministic stages, such as arithmetics or known ints. */
-
-			if (!eff_map[EFF_APOS(stage_cur)]) {
-
-				u32 cksum;
-
-				/* If in dumb mode or if the file is very short, just flag everything
-				   without wasting time on checksums. */
-
-				if (!dumb_mode && len >= EFF_MIN_LEN)
-					cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
-				else
-					cksum = ~queue_cur->exec_cksum;
-
-				if (cksum != queue_cur->exec_cksum) {
-					eff_map[EFF_APOS(stage_cur)] = 1;
-					++eff_cnt;
-				}
-
-			}
-
-			out_buf[stage_cur] ^= 0xFF;
-
-		}
-
-		/* If the effector map is more than EFF_MAX_PERC dense, just flag the
-		   whole thing as worth fuzzing, since we wouldn't be saving much time
-		   anyway. */
-
-		if (eff_cnt != EFF_ALEN(len) &&
-			eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
-
-			memset(eff_map, 1, EFF_ALEN(len));
-
-			blocks_eff_select += EFF_ALEN(len);
-
-		}
-		else {
-
-			blocks_eff_select += eff_cnt;
-
-		}
-
-		blocks_eff_total += EFF_ALEN(len);
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP8] += stage_max;
-
-		/* Two walking bytes. */
-
-		if (len < 2) goto skip_bitflip;
-
-		stage_name = "bitflip 16/8";
-		stage_short = "flip16";
-		stage_cur = 0;
-		stage_max = len - 1;
-
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len - 1; ++i) {
-
-			/* Let's consult the effector map... */
-
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				--stage_max;
-				continue;
-			}
-
-			stage_cur_byte = i;
-
-			*(u16*)(out_buf + i) ^= 0xFFFF;
-
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-			++stage_cur;
-
-			*(u16*)(out_buf + i) ^= 0xFFFF;
-
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP16] += stage_max;
-
-		if (len < 4) goto skip_bitflip;
-
-		/* Four walking bytes. */
-
-		stage_name = "bitflip 32/8";
-		stage_short = "flip32";
-		stage_cur = 0;
-		stage_max = len - 3;
-
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len - 3; ++i) {
-
-			/* Let's consult the effector map... */
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				--stage_max;
-				continue;
-			}
-
-			stage_cur_byte = i;
-
-			*(u32*)(out_buf + i) ^= 0xFFFFFFFF;
-
-			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-			++stage_cur;
-
-			*(u32*)(out_buf + i) ^= 0xFFFFFFFF;
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_FLIP32] += stage_max;
-
-	skip_bitflip:
-
-		if (no_arith) goto skip_arith;
-
-		/**********************
-		 * ARITHMETIC INC/DEC *
-		 **********************/
-
-		 /* 8-bit arithmetics. */
-
-		stage_name = "arith 8/8";
-		stage_short = "arith8";
-		stage_cur = 0;
-		stage_max = 2 * len * ARITH_MAX;
-
-
-		stage_val_type = STAGE_VAL_LE;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len; ++i) {
-
-			u8 orig = out_buf[i];
-
-			/* Let's consult the effector map... */
-
-			if (!eff_map[EFF_APOS(i)]) {
-				stage_max -= 2 * ARITH_MAX;
-				continue;
-			}
-
-			stage_cur_byte = i;
-
-			for (j = 1; j <= ARITH_MAX; ++j) {
-
-				u8 r = orig ^ (orig + j);
-
-				/* Do arithmetic operations only if the result couldn't be a product
-				   of a bitflip. */
-
-				if (!could_be_bitflip(r)) {
-
-					stage_cur_val = j;
-					out_buf[i] = orig + j;
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				r = orig ^ (orig - j);
-
-				if (!could_be_bitflip(r)) {
-
-					stage_cur_val = -j;
-					out_buf[i] = orig - j;
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				out_buf[i] = orig;
-
-			}
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH8] += stage_max;
-
-		/* 16-bit arithmetics, both endians. */
-
-		if (len < 2) goto skip_arith;
-
-		stage_name = "arith 16/8";
-		stage_short = "arith16";
-		stage_cur = 0;
-		stage_max = 4 * (len - 1) * ARITH_MAX;
-
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len - 1; ++i) {
-
-			u16 orig = *(u16*)(out_buf + i);
-
-			/* Let's consult the effector map... */
-
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				stage_max -= 4 * ARITH_MAX;
-				continue;
-			}
-
-			stage_cur_byte = i;
-
-			for (j = 1; j <= ARITH_MAX; ++j) {
-
-				u16 r1 = orig ^ (orig + j),
-					r2 = orig ^ (orig - j),
-					r3 = orig ^ SWAP16(SWAP16(orig) + j),
-					r4 = orig ^ SWAP16(SWAP16(orig) - j);
-
-				/* Try little endian addition and subtraction first. Do it only
-				   if the operation would affect more than one byte (hence the
-				   & 0xff overflow checks) and if it couldn't be a product of
-				   a bitflip. */
-
-				stage_val_type = STAGE_VAL_LE;
-
-				if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
-
-					stage_cur_val = j;
-					*(u16*)(out_buf + i) = orig + j;
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
-
-					stage_cur_val = -j;
-					*(u16*)(out_buf + i) = orig - j;
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				/* Big endian comes next. Same deal. */
-
-				stage_val_type = STAGE_VAL_BE;
-
-
-				if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
-
-					stage_cur_val = j;
-					*(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				if ((orig >> 8) < j && !could_be_bitflip(r4)) {
-
-					stage_cur_val = -j;
-					*(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				*(u16*)(out_buf + i) = orig;
-
-			}
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH16] += stage_max;
-
-		/* 32-bit arithmetics, both endians. */
-
-		if (len < 4) goto skip_arith;
-
-		stage_name = "arith 32/8";
-		stage_short = "arith32";
-		stage_cur = 0;
-		stage_max = 4 * (len - 3) * ARITH_MAX;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len - 3; ++i) {
-
-			u32 orig = *(u32*)(out_buf + i);
-
-			/* Let's consult the effector map... */
-
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				stage_max -= 4 * ARITH_MAX;
-				continue;
-			}
-
-			stage_cur_byte = i;
-
-			for (j = 1; j <= ARITH_MAX; ++j) {
-
-				u32 r1 = orig ^ (orig + j),
-					r2 = orig ^ (orig - j),
-					r3 = orig ^ SWAP32(SWAP32(orig) + j),
-					r4 = orig ^ SWAP32(SWAP32(orig) - j);
-
-				/* Little endian first. Same deal as with 16-bit: we only want to
-				   try if the operation would have effect on more than two bytes. */
-
-				stage_val_type = STAGE_VAL_LE;
-
-				if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
-
-					stage_cur_val = j;
-					*(u32*)(out_buf + i) = orig + j;
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
-
-					stage_cur_val = -j;
-					*(u32*)(out_buf + i) = orig - j;
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				/* Big endian next. */
-
-				stage_val_type = STAGE_VAL_BE;
-
-				if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
-
-					stage_cur_val = j;
-					*(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
-
-					stage_cur_val = -j;
-					*(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				*(u32*)(out_buf + i) = orig;
-
-			}
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_ARITH32] += stage_max;
-
-	skip_arith:
-
-		/**********************
-		 * INTERESTING VALUES *
-		 **********************/
-
-		stage_name = "interest 8/8";
-		stage_short = "int8";
-		stage_cur = 0;
-		stage_max = len * sizeof(interesting_8);
-
-		stage_val_type = STAGE_VAL_LE;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		/* Setting 8-bit integers. */
-
-		for (i = 0; i < len; ++i) {
-
-			u8 orig = out_buf[i];
-
-			/* Let's consult the effector map... */
-
-			if (!eff_map[EFF_APOS(i)]) {
-				stage_max -= sizeof(interesting_8);
-				continue;
-			}
-
-			stage_cur_byte = i;
-
-			for (j = 0; j < sizeof(interesting_8); ++j) {
-
-				/* Skip if the value could be a product of bitflips or arithmetics. */
-
-				if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
-					could_be_arith(orig, (u8)interesting_8[j], 1)) {
-					--stage_max;
-					continue;
-				}
-
-				stage_cur_val = interesting_8[j];
-				out_buf[i] = interesting_8[j];
-
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-				out_buf[i] = orig;
-				++stage_cur;
-
-			}
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST8] += stage_max;
-
-		/* Setting 16-bit integers, both endians. */
-
-		if (no_arith || len < 2) goto skip_interest;
-
-		stage_name = "interest 16/8";
-		stage_short = "int16";
-		stage_cur = 0;
-		stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
-
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len - 1; ++i) {
-
-			u16 orig = *(u16*)(out_buf + i);
-
-			/* Let's consult the effector map... */
-
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				stage_max -= sizeof(interesting_16);
-				continue;
-			}
-
-			stage_cur_byte = i;
-
-			for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
-
-				stage_cur_val = interesting_16[j];
-
-				/* Skip if this could be a product of a bitflip, arithmetics,
-				   or single-byte interesting value insertion. */
-
-				if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) &&
-					!could_be_arith(orig, (u16)interesting_16[j], 2) &&
-					!could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
-
-					stage_val_type = STAGE_VAL_LE;
-
-					*(u16*)(out_buf + i) = interesting_16[j];
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
-					!could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
-					!could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
-					!could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
-
-					stage_val_type = STAGE_VAL_BE;
-
-					*(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-			}
-
-			*(u16*)(out_buf + i) = orig;
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST16] += stage_max;
-
-		if (len < 4) goto skip_interest;
-
-		/* Setting 32-bit integers, both endians. */
-
-		stage_name = "interest 32/8";
-		stage_short = "int32";
-		stage_cur = 0;
-		stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
-
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len - 3; ++i) {
-
-			u32 orig = *(u32*)(out_buf + i);
-
-			/* Let's consult the effector map... */
-
-			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
-				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				stage_max -= sizeof(interesting_32) >> 1;
-				continue;
-			}
-
-			stage_cur_byte = i;
-
-			for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
-
-				stage_cur_val = interesting_32[j];
-
-				/* Skip if this could be a product of a bitflip, arithmetics,
-				   or word interesting value insertion. */
-
-				if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) &&
-					!could_be_arith(orig, interesting_32[j], 4) &&
-					!could_be_interest(orig, interesting_32[j], 4, 0)) {
-
-					stage_val_type = STAGE_VAL_LE;
-
-					*(u32*)(out_buf + i) = interesting_32[j];
-
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-				if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
-					!could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
-					!could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
-					!could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
-
-					stage_val_type = STAGE_VAL_BE;
-
-					*(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
-					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					++stage_cur;
-
-				} else --stage_max;
-
-			}
-
-			*(u32*)(out_buf + i) = orig;
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_INTEREST32] += stage_max;
-
-	skip_interest:
-
-		/********************
-		 * DICTIONARY STUFF *
-		 ********************/
-
-		if (!extras_cnt) goto skip_user_extras;
-
-		/* Overwrite with user-supplied extras. */
-
-		stage_name = "user extras (over)";
-		stage_short = "ext_UO";
-		stage_cur = 0;
-		stage_max = extras_cnt * len;
-
-
-		stage_val_type = STAGE_VAL_NONE;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len; ++i) {
-
-			u32 last_len = 0;
-
-			stage_cur_byte = i;
-
-			/* Extras are sorted by size, from smallest to largest. This means
-			   that we don't have to worry about restoring the buffer in
-			   between writes at a particular offset determined by the outer
-			   loop. */
-
-			for (j = 0; j < extras_cnt; ++j) {
-
-				/* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
-				   skip them if there's no room to insert the payload, if the token
-				   is redundant, or if its entire span has no bytes set in the effector
-				   map. */
-
-				if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
-					extras[j].len > len - i ||
-					!memcmp(extras[j].data, out_buf + i, extras[j].len) ||
-					!memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
-
-					--stage_max;
-					continue;
-
-				}
-
-				last_len = extras[j].len;
-				memcpy(out_buf + i, extras[j].data, last_len);
-
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-				++stage_cur;
-
-			}
-
-			/* Restore all the clobbered memory. */
-			memcpy(out_buf + i, in_buf + i, last_len);
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_UO] += stage_max;
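-
-		/* The UR(extras_cnt) >= MAX_DET_EXTRAS test above is a cheap
-		   probabilistic cap: with extras_cnt == 400 and the default
-		   MAX_DET_EXTRAS of 200, each extra is attempted at any given offset
-		   with probability 200/400, so the expected cost of this stage stays
-		   bounded no matter how large the user dictionary grows. */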
-
-		/* Insertion of user-supplied extras. */
-
-		stage_name = "user extras (insert)";
-		stage_short = "ext_UI";
-		stage_cur = 0;
-		stage_max = extras_cnt * len;
-
-
-
-
-		orig_hit_cnt = new_hit_cnt;
-
-		ex_tmp = ck_alloc(len + MAX_DICT_FILE);
-
-		for (i = 0; i <= len; ++i) {
-
-			stage_cur_byte = i;
-
-			for (j = 0; j < extras_cnt; ++j) {
-
-				if (len + extras[j].len > MAX_FILE) {
-					--stage_max;
-					continue;
-				}
-
-				/* Insert token */
-				memcpy(ex_tmp + i, extras[j].data, extras[j].len);
-
-				/* Copy tail */
-				memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
-
-				if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
-					ck_free(ex_tmp);
-					goto abandon_entry;
-				}
-
-				++stage_cur;
-
-			}
-
-			/* Copy head */
-			ex_tmp[i] = out_buf[i];
-
-		}
-
-		ck_free(ex_tmp);
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_UI] += stage_max;
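-
-		/* A subtle detail of the insertion loop above: ex_tmp is built
-		   incrementally. The trailing ex_tmp[i] = out_buf[i] appends one head
-		   byte per outer iteration, so when the loop reaches offset i, bytes
-		   0..i-1 are already in place and only the token and the tail need
-		   copying - saving an O(i) head copy at every position. */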
-
-	skip_user_extras:
-
-		if (!a_extras_cnt) goto skip_extras;
-
-		stage_name = "auto extras (over)";
-		stage_short = "ext_AO";
-		stage_cur = 0;
-		stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
-
-
-		stage_val_type = STAGE_VAL_NONE;
-
-		orig_hit_cnt = new_hit_cnt;
-
-		for (i = 0; i < len; ++i) {
-
-			u32 last_len = 0;
-
-			stage_cur_byte = i;
-
-			for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
-
-				/* See the comment in the earlier code; extras are sorted by size. */
-
-				if (a_extras[j].len > len - i ||
-					!memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
-					!memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) {
-
-					--stage_max;
-					continue;
-
-				}
-
-				last_len = a_extras[j].len;
-				memcpy(out_buf + i, a_extras[j].data, last_len);
-
-				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-
-				++stage_cur;
-
-			}
-
-			/* Restore all the clobbered memory. */
-			memcpy(out_buf + i, in_buf + i, last_len);
-
-		}
-
-		new_hit_cnt = queued_paths + unique_crashes;
-
-		stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
-		stage_cycles[STAGE_EXTRAS_AO] += stage_max;
-
-	skip_extras:
-
-		/* If we made it this far without jumping to havoc_stage or
-		   abandon_entry, we're properly done with deterministic steps and can
-		   mark it as such in the .state/ directory. */
-
-		if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
-
-		/****************
-		 * RANDOM HAVOC *
-		 ****************/
-
-	havoc_stage:
-	pacemaker_fuzzing:
-
-
-		stage_cur_byte = -1;
-
-		/* The havoc stage mutation code is also invoked when splicing files; if the
-		   splice_cycle variable is set, generate different descriptions and such. */
-
-		if (!splice_cycle) {
-
-			stage_name = "MOpt-havoc";
-			stage_short = "MOpt_havoc";
-			stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-				perf_score / havoc_div / 100;
-
-		} else {
-
-			static u8 tmp[32];
-
-			perf_score = orig_perf;
-
-			sprintf(tmp, "MOpt-core-splice %u", splice_cycle);
-			stage_name = tmp;
-			stage_short = "MOpt_core_splice";
-			stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
-
-		}
-
-		s32 temp_len_puppet;
-		cur_ms_lv = get_cur_time();
-
-		//for (; swarm_now < swarm_num; ++swarm_now)
-		{
-			if (key_puppet == 1) {
-				if (unlikely(orig_hit_cnt_puppet == 0)) {
-					orig_hit_cnt_puppet = queued_paths + unique_crashes;
-					last_limit_time_start = get_cur_time();
-					SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low);
-				}
-			}
-			{
-			havoc_stage_puppet:
-
-				stage_cur_byte = -1;
-
-				/* The havoc stage mutation code is also invoked when splicing files; if the
-				   splice_cycle variable is set, generate different descriptions and such. */
-
-				if (!splice_cycle) {
-					stage_name = "MOpt core avoc";
-					stage_short = "MOpt_core_havoc";
-					stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-						perf_score / havoc_div / 100;
-				} else {
-					static u8 tmp[32];
-					perf_score = orig_perf;
-					sprintf(tmp, "MOpt core splice %u", splice_cycle);
-					stage_name = tmp;
-					stage_short = "MOpt_core_splice";
-					stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
-				}
-
-				if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
-				temp_len = len;
-				orig_hit_cnt = queued_paths + unique_crashes;
-				havoc_queued = queued_paths;
-
-				for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-					u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
-					stage_cur_val = use_stacking;
-
-					for (i = 0; i < operator_num; ++i) {
-						core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet_v2[i];
-					}
-
-					for (i = 0; i < use_stacking; ++i) {
-
-						switch (select_algorithm()) {
-
-						case 0:
-							/* Flip a single bit somewhere. Spooky! */
-							FLIP_BIT(out_buf, UR(temp_len << 3));
-							core_operator_cycles_puppet_v2[STAGE_FLIP1] += 1;
-							break;
-
-
-						case 1:
-							if (temp_len < 2) break;
-							temp_len_puppet = UR(temp_len << 3);
-							FLIP_BIT(out_buf, temp_len_puppet);
-							FLIP_BIT(out_buf, temp_len_puppet + 1);
-							core_operator_cycles_puppet_v2[STAGE_FLIP2] += 1;
-							break;
-
-						case 2:
-							if (temp_len < 2) break;
-							temp_len_puppet = UR(temp_len << 3);
-							FLIP_BIT(out_buf, temp_len_puppet);
-							FLIP_BIT(out_buf, temp_len_puppet + 1);
-							FLIP_BIT(out_buf, temp_len_puppet + 2);
-							FLIP_BIT(out_buf, temp_len_puppet + 3);
-							core_operator_cycles_puppet_v2[STAGE_FLIP4] += 1;
-							break;
-
-						case 3:
-							if (temp_len < 4) break;
-							out_buf[UR(temp_len)] ^= 0xFF;
-							core_operator_cycles_puppet_v2[STAGE_FLIP8] += 1;
-							break;
-
-						case 4:
-							if (temp_len < 8) break;
-							*(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF;
-							core_operator_cycles_puppet_v2[STAGE_FLIP16] += 1;
-							break;
-
-						case 5:
-							if (temp_len < 8) break;
-							*(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF;
-							core_operator_cycles_puppet_v2[STAGE_FLIP32] += 1;
-							break;
-
-						case 6:
-							out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
-							out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
-							core_operator_cycles_puppet_v2[STAGE_ARITH8] += 1;
-							break;
-
-						case 7:
-							/* Randomly subtract from word, random endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 1);
-								*(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
-							} else {
-								u32 pos = UR(temp_len - 1);
-								u16 num = 1 + UR(ARITH_MAX);
-								*(u16*)(out_buf + pos) =
-									SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
-							}
-							/* Randomly add to word, random endian. */
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 1);
-								*(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
-							} else {
-								u32 pos = UR(temp_len - 1);
-								u16 num = 1 + UR(ARITH_MAX);
-								*(u16*)(out_buf + pos) =
-									SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
-							}
-							core_operator_cycles_puppet_v2[STAGE_ARITH16] += 1;
-							break;
-
-
-						case 8:
-							/* Randomly subtract from dword, random endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 3);
-								*(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
-							} else {
-								u32 pos = UR(temp_len - 3);
-								u32 num = 1 + UR(ARITH_MAX);
-								*(u32*)(out_buf + pos) =
-									SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
-							}
-							/* Randomly add to dword, random endian. */
-							if (UR(2)) {
-								u32 pos = UR(temp_len - 3);
-								*(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
-							} else {
-								u32 pos = UR(temp_len - 3);
-								u32 num = 1 + UR(ARITH_MAX);
-								*(u32*)(out_buf + pos) =
-									SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
-							}
-							core_operator_cycles_puppet_v2[STAGE_ARITH32] += 1;
-							break;
-
-
-						case 9:
-							/* Set byte to interesting value. */
-							if (temp_len < 4) break;
-							out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
-							core_operator_cycles_puppet_v2[STAGE_INTEREST8] += 1;
-							break;
-
-						case 10:
-							/* Set word to interesting value, randomly choosing endian. */
-							if (temp_len < 8) break;
-							if (UR(2)) {
-								*(u16*)(out_buf + UR(temp_len - 1)) =
-									interesting_16[UR(sizeof(interesting_16) >> 1)];
-							} else {
-								*(u16*)(out_buf + UR(temp_len - 1)) = SWAP16(
-									interesting_16[UR(sizeof(interesting_16) >> 1)]);
-							}
-							core_operator_cycles_puppet_v2[STAGE_INTEREST16] += 1;
-							break;
-
-
-						case 11:
-							/* Set dword to interesting value, randomly choosing endian. */
-
-							if (temp_len < 8) break;
-
-							if (UR(2)) {
-								*(u32*)(out_buf + UR(temp_len - 3)) =
-									interesting_32[UR(sizeof(interesting_32) >> 2)];
-							} else {
-								*(u32*)(out_buf + UR(temp_len - 3)) = SWAP32(
-									interesting_32[UR(sizeof(interesting_32) >> 2)]);
-							}
-							core_operator_cycles_puppet_v2[STAGE_INTEREST32] += 1;
-							break;
-
-
-						case 12:
-
-							/* Just set a random byte to a random value. Because,
-							   why not. We use XOR with 1-255 to eliminate the
-							   possibility of a no-op. */
-
-							out_buf[UR(temp_len)] ^= 1 + UR(255);
-							core_operator_cycles_puppet_v2[STAGE_RANDOMBYTE] += 1;
-							break;
-
-
-						case 13: {
-
-							/* Delete bytes. We're making this a bit more likely
-							   than insertion (the next option) in hopes of keeping
-							   files reasonably small. */
-
-							u32 del_from, del_len;
-
-							if (temp_len < 2) break;
-
-							/* Don't delete too much. */
-
-							del_len = choose_block_len(temp_len - 1);
-
-							del_from = UR(temp_len - del_len + 1);
-
-							memmove(out_buf + del_from, out_buf + del_from + del_len,
-								temp_len - del_from - del_len);
-
-							temp_len -= del_len;
-							core_operator_cycles_puppet_v2[STAGE_DELETEBYTE] += 1;
-							break;
-
-						}
-
-						case 14:
-
-							if (temp_len + HAVOC_BLK_XL < MAX_FILE) {
-
-								/* Clone bytes (75%) or insert a block of constant bytes (25%). */
-
-								u8  actually_clone = UR(4);
-								u32 clone_from, clone_to, clone_len;
-								u8* new_buf;
-
-								if (actually_clone) {
-
-									clone_len = choose_block_len(temp_len);
-									clone_from = UR(temp_len - clone_len + 1);
-
-								} else {
-
-									clone_len = choose_block_len(HAVOC_BLK_XL);
-									clone_from = 0;
-
-								}
-
-								clone_to = UR(temp_len);
-
-								new_buf = ck_alloc_nozero(temp_len + clone_len);
-
-								/* Head */
-
-								memcpy(new_buf, out_buf, clone_to);
-
-								/* Inserted part */
-
-								if (actually_clone)
-									memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
-								else
-									memset(new_buf + clone_to,
-										UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
-
-								/* Tail */
-								memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
-									temp_len - clone_to);
-
-								ck_free(out_buf);
-								out_buf = new_buf;
-								temp_len += clone_len;
-								core_operator_cycles_puppet_v2[STAGE_Clone75] += 1;
-							}
-
-							break;
-
-						case 15: {
-
-							/* Overwrite bytes with a randomly selected chunk (75%) or fixed
-							   bytes (25%). */
-
-							u32 copy_from, copy_to, copy_len;
-
-							if (temp_len < 2) break;
-
-							copy_len = choose_block_len(temp_len - 1);
-
-							copy_from = UR(temp_len - copy_len + 1);
-							copy_to = UR(temp_len - copy_len + 1);
-
-							if (UR(4)) {
-
-								if (copy_from != copy_to)
-									memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
-
-							}
-							else memset(out_buf + copy_to,
-								UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
-							core_operator_cycles_puppet_v2[STAGE_OverWrite75] += 1;
-							break;
-
-						}
-
-
-						}
-
-					}
-
-					tmp_core_time += 1;
-
-					u64 temp_total_found = queued_paths + unique_crashes;
-
-					if (common_fuzz_stuff(argv, out_buf, temp_len))
-						goto abandon_entry_puppet;
-
-					/* out_buf might have been mangled a bit, so let's restore it to its
-					   original size and shape. */
-
-					if (temp_len < len) out_buf = ck_realloc(out_buf, len);
-					temp_len = len;
-					memcpy(out_buf, in_buf, len);
-
-					/* If we're finding new stuff, let's run for a bit longer, limits
-					   permitting. */
-
-					if (queued_paths != havoc_queued) {
-
-						if (perf_score <= havoc_max_mult * 100) {
-							stage_max *= 2;
-							perf_score *= 2;
-						}
-
-						havoc_queued = queued_paths;
-
-					}
-
-					if (unlikely(queued_paths + unique_crashes > temp_total_found))
-					{
-						u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found;
-						total_puppet_find = total_puppet_find + temp_temp_puppet;
-						for (i = 0; i < 16; ++i)
-						{
-							if (core_operator_cycles_puppet_v2[i] > core_operator_cycles_puppet_v3[i])
-								core_operator_finds_puppet_v2[i] += temp_temp_puppet;
-						}
-					}
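-
-					/* Credit assignment: core_operator_cycles_puppet_v3[] was
-					   snapshotted before the stacked mutations above, so any
-					   operator whose _v2 counter grew during this iteration
-					   took part in producing the new finds and is credited in
-					   core_operator_finds_puppet_v2[]. */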
-
-				}
-
-				new_hit_cnt = queued_paths + unique_crashes;
-
-
-#ifndef IGNORE_FINDS
-
-				/************
-				 * SPLICING *
-				 ************/
-
-
-			retry_splicing_puppet:
-
-
-
-				if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet &&
-					queued_paths > 1 && queue_cur->len > 1) {
-
-					struct queue_entry* target;
-					u32 tid, split_at;
-					u8* new_buf;
-					s32 f_diff, l_diff;
-
-					/* First of all, if we've modified in_buf for havoc, let's clean that
-					   up... */
-
-					if (in_buf != orig_in) {
-						ck_free(in_buf);
-						in_buf = orig_in;
-						len = queue_cur->len;
-					}
-
-					/* Pick a random queue entry and seek to it. Don't splice with yourself. */
-
-					do { tid = UR(queued_paths); } while (tid == current_entry);
-
-					splicing_with = tid;
-					target = queue;
-
-					while (tid >= 100) { target = target->next_100; tid -= 100; }
-					while (tid--) target = target->next;
-
-					/* Make sure that the target has a reasonable length. */
-
-					while (target && (target->len < 2 || target == queue_cur)) {
-						target = target->next;
-						++splicing_with;
-					}
-
-					if (!target) goto retry_splicing_puppet;
-
-					/* Read the testcase into a new buffer. */
-
-					fd = open(target->fname, O_RDONLY);
-
-					if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
-
-					new_buf = ck_alloc_nozero(target->len);
-
-					ck_read(fd, new_buf, target->len, target->fname);
-
-					close(fd);
-
-					/* Find a suitable splicing location, somewhere between the first and
-					   the last differing byte. Bail out if the difference is just a single
-					   byte or so. */
-
-					locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
-
-					if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
-						ck_free(new_buf);
-						goto retry_splicing_puppet;
-					}
-
-					/* Split somewhere between the first and last differing byte. */
-
-					split_at = f_diff + UR(l_diff - f_diff);
-
-					/* Do the thing. */
-
-					len = target->len;
-					memcpy(new_buf, in_buf, split_at);
-					in_buf = new_buf;
-					ck_free(out_buf);
-					out_buf = ck_alloc_nozero(len);
-					memcpy(out_buf, in_buf, len);
-
-					goto havoc_stage_puppet;
-
-				}
-
-#endif /* !IGNORE_FINDS */
-
-				ret_val = 0;
-			abandon_entry:
-			abandon_entry_puppet:
-
-				if (splice_cycle >= SPLICE_CYCLES_puppet)
-					SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low);
-
-
-				splicing_with = -1;
-
-
-				munmap(orig_in, queue_cur->len);
-
-				if (in_buf != orig_in) ck_free(in_buf);
-				ck_free(out_buf);
-				ck_free(eff_map);
-
-
-				if (key_puppet == 1)
-				{
-					if (unlikely(queued_paths + unique_crashes > ((queued_paths + unique_crashes)*limit_time_bound + orig_hit_cnt_puppet)))
-					{
-						key_puppet = 0;
-						cur_ms_lv = get_cur_time();
-						new_hit_cnt = queued_paths + unique_crashes;
-						orig_hit_cnt_puppet = 0;
-						last_limit_time_start = 0;
-					}
-				}
-
-
-				if (unlikely(tmp_core_time > period_core))
-				{
-					total_pacemaker_time += tmp_core_time;
-					tmp_core_time = 0;
-					temp_puppet_find = total_puppet_find;
-					new_hit_cnt = queued_paths + unique_crashes;
-
-					u64 temp_stage_finds_puppet = 0;
-					for (i = 0; i < operator_num; ++i)
-					{
-
-						core_operator_finds_puppet[i] = core_operator_finds_puppet_v2[i];
-						core_operator_cycles_puppet[i] = core_operator_cycles_puppet_v2[i];
-						temp_stage_finds_puppet += core_operator_finds_puppet[i];
-					}
-
-					key_module = 2;
-
-					old_hit_count = new_hit_cnt;
-				}
-				return ret_val;
-			}
-		}
-
-
-#undef FLIP_BIT
-
-}
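-
-
-/* A minimal sketch of the operator selection that drives the switch
-   statements above, assuming probability_now[swarm_now][] holds the
-   cumulative distribution maintained by pso_updating(). The name
-   select_algorithm_sketch and the rounding guard are illustrative only,
-   not the shipped select_algorithm() implementation. */
-
-static int select_algorithm_sketch(void) {
-
-	/* Uniform draw in [0, 1), with the granularity used elsewhere here. */
-	double sele = ((double)(random() % 10000)) * 0.0001;
-	int i;
-
-	/* probability_now[][] is a CDF, so the first bucket that exceeds the
-	   draw identifies the chosen operator. */
-	for (i = 0; i < operator_num; ++i)
-		if (sele < probability_now[swarm_now][i]) return i;
-
-	return operator_num - 1; /* guard against floating-point rounding */
-
-}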
-
-
-void pso_updating(void) {
-
-	g_now += 1;
-	if (g_now > g_max) g_now = 0;
-	w_now = (w_init - w_end) * (g_max - g_now) / g_max + w_end;
-	int tmp_swarm, i, j;
-	u64 temp_operator_finds_puppet = 0;
-	for (i = 0; i < operator_num; ++i)
-	{
-		operator_finds_puppet[i] = core_operator_finds_puppet[i];
-
-		for (j = 0; j < swarm_num; ++j)
-		{
-			operator_finds_puppet[i] = operator_finds_puppet[i] + stage_finds_puppet[j][i];
-		}
-		temp_operator_finds_puppet = temp_operator_finds_puppet + operator_finds_puppet[i];
-	}
-
-	for (i = 0; i < operator_num; ++i)
-	{
-		if (operator_finds_puppet[i])
-			G_best[i] = (double)((double)(operator_finds_puppet[i]) / (double)(temp_operator_finds_puppet));
-	}
-
-	for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm)
-	{
-		double x_temp = 0.0;
-		for (i = 0; i < operator_num; ++i)
-		{
-			probability_now[tmp_swarm][i] = 0.0;
-			v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
-			x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
-			if (x_now[tmp_swarm][i] > v_max)
-				x_now[tmp_swarm][i] = v_max;
-			else if (x_now[tmp_swarm][i] < v_min)
-				x_now[tmp_swarm][i] = v_min;
-			x_temp += x_now[tmp_swarm][i];
-		}
-
-		for (i = 0; i < operator_num; ++i)
-		{
-			x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
-			if (likely(i != 0))
-				probability_now[tmp_swarm][i] = probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
-			else
-				probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
-		}
-		if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || probability_now[tmp_swarm][operator_num - 1] > 1.01) FATAL("ERROR probability");
-	}
-	swarm_now = 0;
-	key_module = 0;
-}
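-
-/* The update above is the standard particle swarm step,
-
-     v <- w*v + c*r1*(L_best - x) + c*r2*(G_best - x)
-     x <- clamp(x + v, v_min, v_max)
-
-   with the random factors folded into the RAND_C macro. x_now[][] is then
-   renormalized to sum to 1 and accumulated into probability_now[][], making
-   the latter a ready-to-sample CDF; the 0.99/1.01 test is a sanity bound on
-   its final entry. */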
-
-
-/* Larger change for the MOpt implementation: the original fuzz_one() was
-   renamed to fuzz_one_original(). All documentation references to fuzz_one()
-   therefore mean fuzz_one_original(). */
-static u8 fuzz_one(char** argv) {
-	int key_val_lv = 0;
-	if (limit_time_sig == 0) {
-		key_val_lv = fuzz_one_original(argv);
-	} else {
-		if (key_module == 0)
-			key_val_lv = pilot_fuzzing(argv);
-		else if (key_module == 1)
-			key_val_lv = core_fuzzing(argv);
-		else if (key_module == 2)
-			pso_updating();
-	}
-
-	return key_val_lv;
-}
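-
-/* Dispatcher summary: with -L given (limit_time_sig set), key_module cycles
-   through 0 (pilot_fuzzing(), per-swarm evaluation), 1 (core_fuzzing(),
-   shown above) and 2 (pso_updating(), which recomputes the operator
-   distribution and resets key_module to 0). Without -L, the vanilla
-   fuzz_one_original() path is used. */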
-
-
-/* Grab interesting test cases from other fuzzers. */
-
-static void sync_fuzzers(char** argv) {
-
-  DIR* sd;
-  struct dirent* sd_ent;
-  u32 sync_cnt = 0;
-
-  sd = opendir(sync_dir);
-  if (!sd) PFATAL("Unable to open '%s'", sync_dir);
-
-  stage_max = stage_cur = 0;
-  cur_depth = 0;
-
-  /* Look at the entries created for every other fuzzer in the sync directory. */
-
-  while ((sd_ent = readdir(sd))) {
-
-    static u8 stage_tmp[128];
-
-    DIR* qd;
-    struct dirent* qd_ent;
-    u8 *qd_path, *qd_synced_path;
-    u32 min_accept = 0, next_min_accept;
-
-    s32 id_fd;
-
-    /* Skip dot files and our own output directory. */
-
-    if (sd_ent->d_name[0] == '.' || !strcmp(sync_id, sd_ent->d_name)) continue;
-
-    /* Skip anything that doesn't have a queue/ subdirectory. */
-
-    qd_path = alloc_printf("%s/%s/queue", sync_dir, sd_ent->d_name);
-
-    if (!(qd = opendir(qd_path))) {
-      ck_free(qd_path);
-      continue;
-    }
-
-    /* Retrieve the ID of the last seen test case. */
-
-    qd_synced_path = alloc_printf("%s/.synced/%s", out_dir, sd_ent->d_name);
-
-    id_fd = open(qd_synced_path, O_RDWR | O_CREAT, 0600);
-
-    if (id_fd < 0) PFATAL("Unable to create '%s'", qd_synced_path);
-
-    if (read(id_fd, &min_accept, sizeof(u32)) > 0) 
-      lseek(id_fd, 0, SEEK_SET);
-
-    next_min_accept = min_accept;
-
-    /* Show stats */    
-
-    sprintf(stage_tmp, "sync %u", ++sync_cnt);
-    stage_name = stage_tmp;
-    stage_cur  = 0;
-    stage_max  = 0;
-
-    /* For every file queued by this fuzzer, parse ID and see if we have looked at
-       it before; exec a test case if not. */
-
-    while ((qd_ent = readdir(qd))) {
-
-      u8* path;
-      s32 fd;
-      struct stat st;
-
-      if (qd_ent->d_name[0] == '.' ||
-          sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 || 
-          syncing_case < min_accept) continue;
-
-      /* OK, sounds like a new one. Let's give it a try. */
-
-      if (syncing_case >= next_min_accept)
-        next_min_accept = syncing_case + 1;
-
-      path = alloc_printf("%s/%s", qd_path, qd_ent->d_name);
-
-      /* Allow this to fail in case the other fuzzer is resuming or so... */
-
-      fd = open(path, O_RDONLY);
-
-      if (fd < 0) {
-         ck_free(path);
-         continue;
-      }
-
-      if (fstat(fd, &st)) PFATAL("fstat() failed");
-
-      /* Ignore zero-sized or oversized files. */
-
-      if (st.st_size && st.st_size <= MAX_FILE) {
-
-        u8  fault;
-        u8* mem = mmap(0, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
-
-        if (mem == MAP_FAILED) PFATAL("Unable to mmap '%s'", path);
-
-        /* See what happens. We rely on save_if_interesting() to catch major
-           errors and save the test case. */
-
-        write_to_testcase(mem, st.st_size);
-
-        fault = run_target(argv, exec_tmout);
-
-        if (stop_soon) return;
-
-        syncing_party = sd_ent->d_name;
-        queued_imported += save_if_interesting(argv, mem, st.st_size, fault);
-        syncing_party = 0;
-
-        munmap(mem, st.st_size);
-
-        if (!(stage_cur++ % stats_update_freq)) show_stats();
-
-      }
-
-      ck_free(path);
-      close(fd);
-
-    }
-
-    ck_write(id_fd, &next_min_accept, sizeof(u32), qd_synced_path);
-
-    close(id_fd);
-    closedir(qd);
-    ck_free(qd_path);
-    ck_free(qd_synced_path);
-    
-  }  
-
-  closedir(sd);
-
-}
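-
-/* For reference, the on-disk layout walked above looks roughly like:
-
-     <sync_dir>/
-       fuzzer01/queue/id:000000,orig:seed
-       fuzzer02/queue/id:000123,...
-     <out_dir>/.synced/fuzzer01   <- last imported case ID, stored as a u32
-
-   (exact file names depend on CASE_PREFIX and related naming settings). */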
-
-
-/* Handle stop signal (Ctrl-C, etc). */
-
-static void handle_stop_sig(int sig) {
-
-  stop_soon = 1; 
-
-  if (child_pid > 0) kill(child_pid, SIGKILL);
-  if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL);
-
-}
-
-
-/* Handle skip request (SIGUSR1). */
-
-static void handle_skipreq(int sig) {
-
-  skip_requested = 1;
-
-}
-
-/* Handle timeout (SIGALRM). */
-
-static void handle_timeout(int sig) {
-
-  if (child_pid > 0) {
-
-    child_timed_out = 1; 
-    kill(child_pid, SIGKILL);
-
-  } else if (child_pid == -1 && forksrv_pid > 0) {
-
-    child_timed_out = 1; 
-    kill(forksrv_pid, SIGKILL);
-
-  }
-
-}
-
-
-/* Do a PATH search and find the target binary to check that it exists and
-   isn't a shell script - a common and painful mistake. We also check for
-   a valid ELF header and for evidence of AFL instrumentation. */
-
-EXP_ST void check_binary(u8* fname) {
-
-  u8* env_path = 0;
-  struct stat st;
-
-  s32 fd;
-  u8* f_data;
-  u32 f_len = 0;
-
-  ACTF("Validating target binary...");
-
-  if (strchr(fname, '/') || !(env_path = getenv("PATH"))) {
-
-    target_path = ck_strdup(fname);
-    if (stat(target_path, &st) || !S_ISREG(st.st_mode) ||
-        !(st.st_mode & 0111) || (f_len = st.st_size) < 4)
-      FATAL("Program '%s' not found or not executable", fname);
-
-  } else {
-
-    while (env_path) {
-
-      u8 *cur_elem, *delim = strchr(env_path, ':');
-
-      if (delim) {
-
-        cur_elem = ck_alloc(delim - env_path + 1);
-        memcpy(cur_elem, env_path, delim - env_path);
-        ++delim;
-
-      } else cur_elem = ck_strdup(env_path);
-
-      env_path = delim;
-
-      if (cur_elem[0])
-        target_path = alloc_printf("%s/%s", cur_elem, fname);
-      else
-        target_path = ck_strdup(fname);
-
-      ck_free(cur_elem);
-
-      if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
-          (st.st_mode & 0111) && (f_len = st.st_size) >= 4) break;
-
-      ck_free(target_path);
-      target_path = 0;
-
-    }
-
-    if (!target_path) FATAL("Program '%s' not found or not executable", fname);
-
-  }
-
-  if (getenv("AFL_SKIP_BIN_CHECK")) return;
-
-  /* Check for blatant user errors. */
-
-  if ((!strncmp(target_path, "/tmp/", 5) && !strchr(target_path + 5, '/')) ||
-      (!strncmp(target_path, "/var/tmp/", 9) && !strchr(target_path + 9, '/')))
-     FATAL("Please don't keep binaries in /tmp or /var/tmp");
-
-  fd = open(target_path, O_RDONLY);
-
-  if (fd < 0) PFATAL("Unable to open '%s'", target_path);
-
-  f_data = mmap(0, f_len, PROT_READ, MAP_PRIVATE, fd, 0);
-
-  if (f_data == MAP_FAILED) PFATAL("Unable to mmap file '%s'", target_path);
-
-  close(fd);
-
-  if (f_data[0] == '#' && f_data[1] == '!') {
-
-    SAYF("\n" cLRD "[-] " cRST
-         "Oops, the target binary looks like a shell script. Some build systems will\n"
-         "    sometimes generate shell stubs for dynamically linked programs; try static\n"
-         "    library mode (./configure --disable-shared) if that's the case.\n\n"
-
-         "    Another possible cause is that you are actually trying to use a shell\n" 
-         "    wrapper around the fuzzed component. Invoking shell can slow down the\n" 
-         "    fuzzing process by a factor of 20x or more; it's best to write the wrapper\n"
-         "    in a compiled language instead.\n");
-
-    FATAL("Program '%s' is a shell script", target_path);
-
-  }
-
-#ifndef __APPLE__
-
-  if (f_data[0] != 0x7f || memcmp(f_data + 1, "ELF", 3))
-    FATAL("Program '%s' is not an ELF binary", target_path);
-
-#else
-
-#if !defined(__arm__) && !defined(__arm64__)
-  if (f_data[0] != 0xCF || f_data[1] != 0xFA || f_data[2] != 0xED)
-    FATAL("Program '%s' is not a 64-bit Mach-O binary", target_path);
-#endif
-
-#endif /* ^!__APPLE__ */
-
-  if (!qemu_mode && !unicorn_mode && !dumb_mode &&
-      !memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) {
-
-    SAYF("\n" cLRD "[-] " cRST
-         "Looks like the target binary is not instrumented! The fuzzer depends on\n"
-         "    compile-time instrumentation to isolate interesting test cases while\n"
-         "    mutating the input data. For more information, and for tips on how to\n"
-         "    instrument binaries, please see %s/README.\n\n"
-
-         "    When source code is not available, you may be able to leverage QEMU\n"
-         "    mode support. Consult the README for tips on how to enable this.\n"
-
-         "    (It is also possible to use afl-fuzz as a traditional, \"dumb\" fuzzer.\n"
-         "    For that, you can use the -n option - but expect much worse results.)\n",
-         doc_path);
-
-    FATAL("No instrumentation detected");
-
-  }
-
-  if ((qemu_mode || unicorn_mode) &&
-      memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) {
-
-    SAYF("\n" cLRD "[-] " cRST
-         "This program appears to be instrumented with afl-gcc, but is being run in\n"
-         "    QEMU or Unicorn mode (-Q or -U). This is probably not what you want -\n"
-         "    this setup will be slow and offer no practical benefits.\n");
-
-    FATAL("Instrumentation found in -Q or -U mode");
-
-  }
-
-  if (memmem(f_data, f_len, "libasan.so", 10) ||
-      memmem(f_data, f_len, "__msan_init", 11)) uses_asan = 1;
-
-  /* Detect persistent & deferred init signatures in the binary. */
-
-  if (memmem(f_data, f_len, PERSIST_SIG, strlen(PERSIST_SIG) + 1)) {
-
-    OKF(cPIN "Persistent mode binary detected.");
-    setenv(PERSIST_ENV_VAR, "1", 1);
-    persistent_mode = 1;
-
-  } else if (getenv("AFL_PERSISTENT")) {
-
-    WARNF("AFL_PERSISTENT is no longer supported and may misbehave!");
-
-  }
-
-  if (memmem(f_data, f_len, DEFER_SIG, strlen(DEFER_SIG) + 1)) {
-
-    OKF(cPIN "Deferred forkserver binary detected.");
-    setenv(DEFER_ENV_VAR, "1", 1);
-    deferred_mode = 1;
-
-  } else if (getenv("AFL_DEFER_FORKSRV")) {
-
-    WARNF("AFL_DEFER_FORKSRV is no longer supported and may misbehave!");
-
-  }
-
-  if (munmap(f_data, f_len)) PFATAL("munmap() failed");
-
-}
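-
-/* The checks above work by scanning the raw binary image for literal strings
-   that the runtime necessarily embeds: SHM_ENV_VAR (the shared memory
-   environment variable name), PERSIST_SIG and DEFER_SIG. Even a stripped but
-   instrumented binary still contains these, which is what makes the simple
-   memmem() probes reliable without parsing the ELF structure. */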
-
-
-/* Trim and possibly create a banner for the run. */
-
-static void fix_up_banner(u8* name) {
-
-  if (!use_banner) {
-
-    if (sync_id) {
-
-      use_banner = sync_id;
-
-    } else {
-
-      u8* trim = strrchr(name, '/');
-      if (!trim) use_banner = name; else use_banner = trim + 1;
-
-    }
-
-  }
-
-  if (strlen(use_banner) > 32) {
-
-    u8* tmp = ck_alloc(36);
-    sprintf(tmp, "%.32s...", use_banner);
-    use_banner = tmp;
-
-  }
-
-}
-
-
-/* Check if we're on TTY. */
-
-static void check_if_tty(void) {
-
-  struct winsize ws;
-
-  if (getenv("AFL_NO_UI")) {
-    OKF("Disabling the UI because AFL_NO_UI is set.");
-    not_on_tty = 1;
-    return;
-  }
-
-  if (ioctl(1, TIOCGWINSZ, &ws)) {
-
-    if (errno == ENOTTY) {
-      OKF("Looks like we're not running on a tty, so I'll be a bit less verbose.");
-      not_on_tty = 1;
-    }
-
-    return;
-  }
-
-}
-
-
-/* Check terminal dimensions after resize. */
-
-static void check_term_size(void) {
-
-  struct winsize ws;
-
-  term_too_small = 0;
-
-  if (ioctl(1, TIOCGWINSZ, &ws)) return;
-
-  if (ws.ws_row == 0 || ws.ws_col == 0) return;
-  if (ws.ws_row < 24 || ws.ws_col < 79) term_too_small = 1;
-
-}
-
-
-
-/* Display usage hints. */
-
-static void usage(u8* argv0) {
-
-  SAYF("\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n"
-
-       "Required parameters:\n"
-       "  -i dir        - input directory with test cases\n"
-       "  -o dir        - output directory for fuzzer findings\n\n"
-
-       "Execution control settings:\n"
-       "  -p schedule   - power schedules recompute a seed's performance score.\n"
-       "                  <explore (default), fast, coe, lin, quad, or exploit>\n"
-       "                  see docs/power_schedules.txt\n"
-       "  -f file       - location read by the fuzzed program (stdin)\n"
-       "  -t msec       - timeout for each run (auto-scaled, 50-%u ms)\n"
-       "  -m megs       - memory limit for child process (%u MB)\n"
-       "  -Q            - use binary-only instrumentation (QEMU mode)\n"
-       "  -U            - use Unicorn-based instrumentation (Unicorn mode)\n\n"
-       "  -L minutes    - use MOpt(imize) mode and set the limit time for entering the\n"
-       "                  pacemaker mode (minutes of no new paths, 0 = immediately).\n"
-       "                  a recommended value is 10-60. see docs/README.MOpt\n\n"
- 
-       "Fuzzing behavior settings:\n"
-       "  -d            - quick & dirty mode (skips deterministic steps)\n"
-       "  -n            - fuzz without instrumentation (dumb mode)\n"
-       "  -x dir        - optional fuzzer dictionary (see README)\n\n"
-
-       "Testing settings:\n"
-       "  -s seed       - use a fixed seed for the RNG\n"
-       "  -V seconds    - fuzz for a maximum total time of seconds then terminate\n"
-       "  -E execs      - fuzz for a maximum number of total executions then terminate\n\n"
-
-       "Other stuff:\n"
-       "  -T text       - text banner to show on the screen\n"
-       "  -M / -S id    - distributed mode (see parallel_fuzzing.txt)\n"
-       "  -B bitmap.txt - mutate a specific test case, use the out/fuzz_bitmap file\n"
-       "  -C            - crash exploration mode (the peruvian rabbit thing)\n"
-       "  -e ext        - File extension for the temporarily generated test case\n\n"
-
-#ifdef USE_PYTHON
-       "Compiled with Python 2.7 module support, see docs/python_mutators.txt\n"
-#endif
-       "For additional tips, please consult %s/README\n\n",
-
-       argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);
-
-  exit(1);
-
-}
-
-
-/* Prepare output directories and fds. */
-
-EXP_ST void setup_dirs_fds(void) {
-
-  u8* tmp;
-  s32 fd;
-
-  ACTF("Setting up output directories...");
-
-  if (sync_id && mkdir(sync_dir, 0700) && errno != EEXIST)
-      PFATAL("Unable to create '%s'", sync_dir);
-
-  if (mkdir(out_dir, 0700)) {
-
-    if (errno != EEXIST) PFATAL("Unable to create '%s'", out_dir);
-
-    maybe_delete_out_dir();
-
-  } else {
-
-    if (in_place_resume)
-      FATAL("Resume attempted but old output directory not found");
-
-    out_dir_fd = open(out_dir, O_RDONLY);
-
-#ifndef __sun
-
-    if (out_dir_fd < 0 || flock(out_dir_fd, LOCK_EX | LOCK_NB))
-      PFATAL("Unable to flock() output directory.");
-
-#endif /* !__sun */
-
-  }
-
-  /* Queue directory for any starting & discovered paths. */
-
-  tmp = alloc_printf("%s/queue", out_dir);
-  if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
-  ck_free(tmp);
-
-  /* Top-level directory for queue metadata used for session
-     resume and related tasks. */
-
-  tmp = alloc_printf("%s/queue/.state/", out_dir);
-  if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
-  ck_free(tmp);
-
-  /* Directory for flagging queue entries that went through
-     deterministic fuzzing in the past. */
-
-  tmp = alloc_printf("%s/queue/.state/deterministic_done/", out_dir);
-  if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
-  ck_free(tmp);
-
-  /* Directory with the auto-selected dictionary entries. */
-
-  tmp = alloc_printf("%s/queue/.state/auto_extras/", out_dir);
-  if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
-  ck_free(tmp);
-
-  /* The set of paths currently deemed redundant. */
-
-  tmp = alloc_printf("%s/queue/.state/redundant_edges/", out_dir);
-  if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
-  ck_free(tmp);
-
-  /* The set of paths showing variable behavior. */
-
-  tmp = alloc_printf("%s/queue/.state/variable_behavior/", out_dir);
-  if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
-  ck_free(tmp);
-
-  /* Sync directory for keeping track of cooperating fuzzers. */
-
-  if (sync_id) {
-
-    tmp = alloc_printf("%s/.synced/", out_dir);
-
-    if (mkdir(tmp, 0700) && (!in_place_resume || errno != EEXIST))
-      PFATAL("Unable to create '%s'", tmp);
-
-    ck_free(tmp);
-
-  }
-
-  /* All recorded crashes. */
-
-  tmp = alloc_printf("%s/crashes", out_dir);
-  if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
-  ck_free(tmp);
-
-  /* All recorded hangs. */
-
-  tmp = alloc_printf("%s/hangs", out_dir);
-  if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
-  ck_free(tmp);
-
-  /* Generally useful file descriptors. */
-
-  dev_null_fd = open("/dev/null", O_RDWR);
-  if (dev_null_fd < 0) PFATAL("Unable to open /dev/null");
-
-#ifndef HAVE_ARC4RANDOM
-  dev_urandom_fd = open("/dev/urandom", O_RDONLY);
-  if (dev_urandom_fd < 0) PFATAL("Unable to open /dev/urandom");
-#endif
-
-  /* Gnuplot output file. */
-
-  tmp = alloc_printf("%s/plot_data", out_dir);
-  fd = open(tmp, O_WRONLY | O_CREAT | O_EXCL, 0600);
-  if (fd < 0) PFATAL("Unable to create '%s'", tmp);
-  ck_free(tmp);
-
-  plot_file = fdopen(fd, "w");
-  if (!plot_file) PFATAL("fdopen() failed");
-
-  fprintf(plot_file, "# unix_time, cycles_done, cur_path, paths_total, "
-                     "pending_total, pending_favs, map_size, unique_crashes, "
-                     "unique_hangs, max_depth, execs_per_sec\n");
-                     /* ignore errors */
-
-}
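-
-/* A typical plot_data row matching the header written above - one line per
-   stats refresh, with illustrative values:
-
-     1563201235, 5, 132, 1470, 298, 37, 3.21%, 0, 1, 12, 1843.21
-
-   The comma-separated format is what afl-plot consumes to draw its graphs. */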
-
-static void setup_cmdline_file(char** argv) {
-  u8* tmp;
-  s32 fd;
-  u32 i = 0;
-
-  FILE* cmdline_file = NULL;
-
-  /* Store the command line to reproduce our findings */
-  tmp = alloc_printf("%s/cmdline", out_dir);
-  fd = open(tmp, O_WRONLY | O_CREAT | O_EXCL, 0600);
-  if (fd < 0) PFATAL("Unable to create '%s'", tmp);
-  ck_free(tmp);
-
-  cmdline_file = fdopen(fd, "w");
-  if (!cmdline_file) PFATAL("fdopen() failed");
-
-  while (argv[i]) {
-    fprintf(cmdline_file, "%s\n", argv[i]);
-    ++i;
-  }
-
-  fclose(cmdline_file);
-}
-
-
-/* Set up the output file for fuzzed data, if not using -f. */
-
-EXP_ST void setup_stdio_file(void) {
-
-  u8* fn;
-  if (file_extension) {
-    fn = alloc_printf("%s/.cur_input.%s", out_dir, file_extension);
-  } else {
-    fn = alloc_printf("%s/.cur_input", out_dir);
-  }
-
-  unlink(fn); /* Ignore errors */
-
-  out_fd = open(fn, O_RDWR | O_CREAT | O_EXCL, 0600);
-
-  if (out_fd < 0) PFATAL("Unable to create '%s'", fn);
-
-  ck_free(fn);
-
-}
-
-
-/* Make sure that core dumps don't go to a program. */
-
-static void check_crash_handling(void) {
-
-#ifdef __APPLE__
-
-  /* Yuck! There appears to be no simple C API to query for the state of 
-     loaded daemons on MacOS X, and I'm a bit hesitant to do something
-     more sophisticated, such as disabling crash reporting via Mach ports,
-     until I get a box to test the code. So, for now, we check for crash
-     reporting the awful way. */
-  
-  if (system("launchctl list 2>/dev/null | grep -q '\\.ReportCrash$'")) return;
-
-  SAYF("\n" cLRD "[-] " cRST
-       "Whoops, your system is configured to forward crash notifications to an\n"
-       "    external crash reporting utility. This will cause issues due to the\n"
-       "    extended delay between the fuzzed binary malfunctioning and this fact\n"
-       "    being relayed to the fuzzer via the standard waitpid() API.\n\n"
-       "    To avoid having crashes misinterpreted as timeouts, please run the\n" 
-       "    following commands:\n\n"
-
-       "    SL=/System/Library; PL=com.apple.ReportCrash\n"
-       "    launchctl unload -w ${SL}/LaunchAgents/${PL}.plist\n"
-       "    sudo launchctl unload -w ${SL}/LaunchDaemons/${PL}.Root.plist\n");
-
-  if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES"))
-    FATAL("Crash reporter detected");
-
-#else
-
-  /* This is Linux specific, but I don't think there's anything equivalent on
-     *BSD, so we can just let it slide for now. */
-
-  s32 fd = open("/proc/sys/kernel/core_pattern", O_RDONLY);
-  u8  fchar;
-
-  if (fd < 0) return;
-
-  ACTF("Checking core_pattern...");
-
-  if (read(fd, &fchar, 1) == 1 && fchar == '|') {
-
-    SAYF("\n" cLRD "[-] " cRST
-         "Hmm, your system is configured to send core dump notifications to an\n"
-         "    external utility. This will cause issues: there will be an extended delay\n"
-         "    between stumbling upon a crash and having this information relayed to the\n"
-         "    fuzzer via the standard waitpid() API.\n\n"
-
-         "    To avoid having crashes misinterpreted as timeouts, please log in as root\n" 
-         "    and temporarily modify /proc/sys/kernel/core_pattern, like so:\n\n"
-
-         "    echo core >/proc/sys/kernel/core_pattern\n");
-
-    if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES"))
-      FATAL("Pipe at the beginning of 'core_pattern'");
-
-  }
- 
-  close(fd);
-
-#endif /* ^__APPLE__ */
-
-}
-
-
-/* Check CPU governor. */
-
-static void check_cpu_governor(void) {
-#ifdef __linux__
-  FILE* f;
-  u8 tmp[128];
-  u64 min = 0, max = 0;
-
-  if (getenv("AFL_SKIP_CPUFREQ")) return;
-
-  if (cpu_aff > 0)
-    snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpu", cpu_aff, "/cpufreq/scaling_governor");
-  else
-    snprintf(tmp, sizeof(tmp), "%s", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor");
-  f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "r");
-  if (!f) {
-    if (cpu_aff > 0)
-      snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpufreq/policy", cpu_aff, "/scaling_governor");
-    else
-      snprintf(tmp, sizeof(tmp), "%s", "/sys/devices/system/cpu/cpufreq/policy0/scaling_governor");
-    f = fopen(tmp, "r");
-  }
-  if (!f) {
-    WARNF("Could not check CPU scaling governor");
-    return;
-  }
-
-  ACTF("Checking CPU scaling governor...");
-
-  if (!fgets(tmp, 128, f)) PFATAL("fgets() failed");
-
-  fclose(f);
-
-  if (!strncmp(tmp, "perf", 4)) return;
-
-  f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq", "r");
-
-  if (f) {
-    if (fscanf(f, "%llu", &min) != 1) min = 0;
-    fclose(f);
-  }
-
-  f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq", "r");
-
-  if (f) {
-    if (fscanf(f, "%llu", &max) != 1) max = 0;
-    fclose(f);
-  }
-
-  if (min == max) return;
-
-  SAYF("\n" cLRD "[-] " cRST
-       "Whoops, your system uses on-demand CPU frequency scaling, adjusted\n"
-       "    between %llu and %llu MHz. Unfortunately, the scaling algorithm in the\n"
-       "    kernel is imperfect and can miss the short-lived processes spawned by\n"
-       "    afl-fuzz. To keep things moving, run these commands as root:\n\n"
-
-       "    cd /sys/devices/system/cpu\n"
-       "    echo performance | tee cpu*/cpufreq/scaling_governor\n\n"
-
-       "    You can later go back to the original state by replacing 'performance' with\n"
-       "    'ondemand'. If you don't want to change the settings, set AFL_SKIP_CPUFREQ\n"
-       "    to make afl-fuzz skip this check - but expect some performance drop.\n",
-       min / 1024, max / 1024);
-
-  FATAL("Suboptimal CPU scaling governor");
-#endif
-}
-
-
-/* Count the number of logical CPU cores. */
-
-static void get_core_count(void) {
-
-  u32 cur_runnable = 0;
-
-#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)
-
-  size_t s = sizeof(cpu_core_count);
-
-  /* On *BSD systems, we can just use a sysctl to get the number of CPUs. */
-
-#ifdef __APPLE__
-
-  if (sysctlbyname("hw.logicalcpu", &cpu_core_count, &s, NULL, 0) < 0)
-    return;
-
-#else
-
-  int s_name[2] = { CTL_HW, HW_NCPU };
-
-  if (sysctl(s_name, 2, &cpu_core_count, &s, NULL, 0) < 0) return;
-
-#endif /* ^__APPLE__ */
-
-#else
-
-#ifdef HAVE_AFFINITY
-
-  cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN);
-
-#else
-
-  FILE* f = fopen("/proc/stat", "r");
-  u8 tmp[1024];
-
-  if (!f) return;
-
-  while (fgets(tmp, sizeof(tmp), f))
-    if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) ++cpu_core_count;
-
-  fclose(f);
-
-#endif /* ^HAVE_AFFINITY */
-
-#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */
-
-  if (cpu_core_count > 0) {
-
-    cur_runnable = (u32)get_runnable_processes();
-
-#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)
-
-    /* Add ourselves, since the 1-minute average doesn't include that yet. */
-
-    ++cur_runnable;
-
-#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */
-
-    OKF("You have %u CPU core%s and %u runnable tasks (utilization: %0.0f%%).",
-        cpu_core_count, cpu_core_count > 1 ? "s" : "",
-        cur_runnable, cur_runnable * 100.0 / cpu_core_count);
-
-    if (cpu_core_count > 1) {
-
-      if (cur_runnable > cpu_core_count * 1.5) {
-
-        WARNF("System under apparent load, performance may be spotty.");
-
-      } else if (cur_runnable + 1 <= cpu_core_count) {
-
-        OKF("Try parallel jobs - see %s/parallel_fuzzing.txt.", doc_path);
-  
-      }
-
-    }
-
-  } else {
-
-    cpu_core_count = 0;
-    WARNF("Unable to figure out the number of CPU cores.");
-
-  }
-
-}
-
-
-/* Validate and fix up out_dir and sync_dir when using -S. */
-
-static void fix_up_sync(void) {
-
-  u8* x = sync_id;
-
-  if (dumb_mode)
-    FATAL("-S / -M and -n are mutually exclusive");
-
-  if (skip_deterministic) {
-
-    if (force_deterministic)
-      FATAL("use -S instead of -M -d");
-    //else
-    //  FATAL("-S already implies -d");
-
-  }
-
-  while (*x) {
-
-    if (!isalnum(*x) && *x != '_' && *x != '-')
-      FATAL("Non-alphanumeric fuzzer ID specified via -S or -M");
-
-    ++x;
-
-  }
-
-  if (strlen(sync_id) > 32) FATAL("Fuzzer ID too long");
-
-  x = alloc_printf("%s/%s", out_dir, sync_id);
-
-  sync_dir = out_dir;
-  out_dir  = x;
-
-  if (!force_deterministic) {
-    skip_deterministic = 1;
-    use_splicing = 1;
-  }
-
-}
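-
-/* Example of the rewrite above: running with -o sync_master -S fuzzer02
-   leaves sync_dir == "sync_master" and out_dir == "sync_master/fuzzer02",
-   so every cooperating instance lives under one parent directory that
-   sync_fuzzers() can scan. */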
-
-
-/* Handle screen resize (SIGWINCH). */
-
-static void handle_resize(int sig) {
-  clear_screen = 1;
-}
-
-
-/* Check ASAN options. */
-
-static void check_asan_opts(void) {
-  u8* x = getenv("ASAN_OPTIONS");
-
-  if (x) {
-
-    if (!strstr(x, "abort_on_error=1"))
-      FATAL("Custom ASAN_OPTIONS set without abort_on_error=1 - please fix!");
-
-    if (!strstr(x, "symbolize=0"))
-      FATAL("Custom ASAN_OPTIONS set without symbolize=0 - please fix!");
-
-  }
-
-  x = getenv("MSAN_OPTIONS");
-
-  if (x) {
-
-    if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR)))
-      FATAL("Custom MSAN_OPTIONS set without exit_code="
-            STRINGIFY(MSAN_ERROR) " - please fix!");
-
-    if (!strstr(x, "symbolize=0"))
-      FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!");
-
-  }
-
-} 
-
-
-/* Set up signal handlers. More complicated than it needs to be, because libc
-   on Solaris doesn't resume interrupted reads(), sets SA_RESETHAND when you
-   call siginterrupt(), and does other stupid things. */
-
-EXP_ST void setup_signal_handlers(void) {
-
-  struct sigaction sa;
-
-  sa.sa_handler   = NULL;
-  sa.sa_flags     = SA_RESTART;
-  sa.sa_sigaction = NULL;
-
-  sigemptyset(&sa.sa_mask);
-
-  /* Various ways of saying "stop". */
-
-  sa.sa_handler = handle_stop_sig;
-  sigaction(SIGHUP, &sa, NULL);
-  sigaction(SIGINT, &sa, NULL);
-  sigaction(SIGTERM, &sa, NULL);
-
-  /* Exec timeout notifications. */
-
-  sa.sa_handler = handle_timeout;
-  sigaction(SIGALRM, &sa, NULL);
-
-  /* Window resize */
-
-  sa.sa_handler = handle_resize;
-  sigaction(SIGWINCH, &sa, NULL);
-
-  /* SIGUSR1: skip entry */
-
-  sa.sa_handler = handle_skipreq;
-  sigaction(SIGUSR1, &sa, NULL);
-
-  /* Things we don't care about. */
-
-  sa.sa_handler = SIG_IGN;
-  sigaction(SIGTSTP, &sa, NULL);
-  sigaction(SIGPIPE, &sa, NULL);
-
-}
-
-
-/* Rewrite argv for QEMU. */
-
-static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
-
-  char** new_argv = ck_alloc(sizeof(char*) * (argc + 4));
-  u8 *tmp, *cp, *rsl, *own_copy;
-
-  memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc);
-
-  new_argv[2] = target_path;
-  new_argv[1] = "--";
-
-  /* Now we need to actually find the QEMU binary to put in argv[0]. */
-
-  tmp = getenv("AFL_PATH");
-
-  if (tmp) {
-
-    cp = alloc_printf("%s/afl-qemu-trace", tmp);
-
-    if (access(cp, X_OK))
-      FATAL("Unable to find '%s'", cp);
-
-    target_path = new_argv[0] = cp;
-    return new_argv;
-
-  }
-
-  own_copy = ck_strdup(own_loc);
-  rsl = strrchr(own_copy, '/');
-
-  if (rsl) {
-
-    *rsl = 0;
-
-    cp = alloc_printf("%s/afl-qemu-trace", own_copy);
-    ck_free(own_copy);
-
-    if (!access(cp, X_OK)) {
-
-      target_path = new_argv[0] = cp;
-      return new_argv;
-
-    }
-
-  } else ck_free(own_copy);
-
-  if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) {
-
-    target_path = new_argv[0] = ck_strdup(BIN_PATH "/afl-qemu-trace");
-    return new_argv;
-
-  }
-
-  SAYF("\n" cLRD "[-] " cRST
-       "Oops, unable to find the 'afl-qemu-trace' binary. The binary must be built\n"
-       "    separately by following the instructions in qemu_mode/README.qemu. If you\n"
-       "    already have the binary installed, you may need to specify AFL_PATH in the\n"
-       "    environment.\n\n"
-
-       "    Of course, even without QEMU, afl-fuzz can still work with binaries that are\n"
-       "    instrumented at compile time with afl-gcc. It is also possible to use it as a\n"
-       "    traditional \"dumb\" fuzzer by specifying '-n' in the command line.\n");
-
-  FATAL("Failed to locate 'afl-qemu-trace'.");
-
-}
-
-/* Make a copy of the current command line. */
-
-static void save_cmdline(u32 argc, char** argv) {
-
-  u32 len = 1, i;
-  u8* buf;
-
-  for (i = 0; i < argc; ++i)
-    len += strlen(argv[i]) + 1;
-  
-  buf = orig_cmdline = ck_alloc(len);
-
-  for (i = 0; i < argc; ++i) {
-
-    u32 l = strlen(argv[i]);
-
-    memcpy(buf, argv[i], l);
-    buf += l;
-
-    if (i != argc - 1) *(buf++) = ' ';
-
-  }
-
-  *buf = 0;
-
-}
-
-int stricmp(char const *a, char const *b) {
-  int d;
-  for (;; ++a, ++b) {
-    d = tolower(*a) - tolower(*b);
-    if (d != 0 || !*a)
-      return d;
-  }
-}
-
-#ifndef AFL_LIB
-
-/* Main entry point */
-
-int main(int argc, char** argv) {
-
-  s32 opt;
-  u64 prev_queued = 0;
-  u32 sync_interval_cnt = 0, seek_to;
-  u8  *extras_dir = 0;
-  u8  mem_limit_given = 0;
-  u8  exit_1 = !!getenv("AFL_BENCH_JUST_ONE");
-  char** use_argv;
-  s64 init_seed;
-
-  struct timeval tv;
-  struct timezone tz;
-
-  SAYF(cCYA "afl-fuzz" VERSION cRST " based on afl by <lcamtuf@google.com> and a big online community\n");
-
-  doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;
-
-  gettimeofday(&tv, &tz);
-  init_seed = tv.tv_sec ^ tv.tv_usec ^ getpid();
-
-  while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:QUe:p:s:V:E:L:")) > 0)
-
-    switch (opt) {
-
-      case 's': {
-        init_seed = strtoul(optarg, 0L, 10);
-        fixed_seed = 1;
-        break;
-      }
-
-      case 'p': /* Power schedule */
-
-        if (!stricmp(optarg, "fast")) {
-          schedule = FAST;
-        } else if (!stricmp(optarg, "coe")) {
-          schedule = COE;
-        } else if (!stricmp(optarg, "exploit")) {
-          schedule = EXPLOIT;
-        } else if (!stricmp(optarg, "lin")) {
-          schedule = LIN;
-        } else if (!stricmp(optarg, "quad")) {
-          schedule = QUAD;
-        } else if (!stricmp(optarg, "explore") || !stricmp(optarg, "default") || !stricmp(optarg, "normal") || !stricmp(optarg, "afl")) {
-          schedule = EXPLORE;
-        } else {
-          FATAL("Unknown -p power schedule");
-        }
-        break;
-
-      case 'e':
-
-        if (file_extension) FATAL("Multiple -e options not supported");
-
-        file_extension = optarg;
-
-        break;
-
-      case 'i': /* input dir */
-
-        if (in_dir) FATAL("Multiple -i options not supported");
-        in_dir = optarg;
-
-        if (!strcmp(in_dir, "-")) in_place_resume = 1;
-
-        break;
-
-      case 'o': /* output dir */
-
-        if (out_dir) FATAL("Multiple -o options not supported");
-        out_dir = optarg;
-        break;
-
-      case 'M': { /* master sync ID */
-
-          u8* c;
-
-          if (sync_id) FATAL("Multiple -S or -M options not supported");
-          sync_id = ck_strdup(optarg);
-
-          if ((c = strchr(sync_id, ':'))) {
-
-            *c = 0;
-
-            if (sscanf(c + 1, "%u/%u", &master_id, &master_max) != 2 ||
-                !master_id || !master_max || master_id > master_max ||
-                master_max > 1000000) FATAL("Bogus master ID passed to -M");
-
-          }
-
-          force_deterministic = 1;
-
-        }
-
-        break;
-
-      case 'S': 
-
-        if (sync_id) FATAL("Multiple -S or -M options not supported");
-        sync_id = ck_strdup(optarg);
-        break;
-
-      case 'f': /* target file */
-
-        if (out_file) FATAL("Multiple -f options not supported");
-        out_file = optarg;
-        break;
-
-      case 'x': /* dictionary */
-
-        if (extras_dir) FATAL("Multiple -x options not supported");
-        extras_dir = optarg;
-        break;
-
-      case 't': { /* timeout */
-
-          u8 suffix = 0;
-
-          if (timeout_given) FATAL("Multiple -t options not supported");
-
-          if (sscanf(optarg, "%u%c", &exec_tmout, &suffix) < 1 ||
-              optarg[0] == '-') FATAL("Bad syntax used for -t");
-
-          if (exec_tmout < 5) FATAL("Dangerously low value of -t");
-
-          if (suffix == '+') timeout_given = 2; else timeout_given = 1;
-
-          break;
-
-      }
-
-      case 'm': { /* mem limit */
-
-          u8 suffix = 'M';
-
-          if (mem_limit_given) FATAL("Multiple -m options not supported");
-          mem_limit_given = 1;
-
-          if (!strcmp(optarg, "none")) {
-
-            mem_limit = 0;
-            break;
-
-          }
-
-          if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
-              optarg[0] == '-') FATAL("Bad syntax used for -m");
-
-          switch (suffix) {
-
-            case 'T': mem_limit *= 1024 * 1024; break;
-            case 'G': mem_limit *= 1024; break;
-            case 'k': mem_limit /= 1024; break;
-            case 'M': break;
-
-            default:  FATAL("Unsupported suffix or bad syntax for -m");
-
-          }
-
-          if (mem_limit < 5) FATAL("Dangerously low value of -m");
-
-          if (sizeof(rlim_t) == 4 && mem_limit > 2000)
-            FATAL("Value of -m out of range on 32-bit systems");
-
-        }
-
-        break;
-
-      case 'd': /* skip deterministic */
-
-        if (skip_deterministic) FATAL("Multiple -d options not supported");
-        skip_deterministic = 1;
-        use_splicing = 1;
-        break;
-
-      case 'B': /* load bitmap */
-
-        /* This is a secret undocumented option! It is useful if you find
-           an interesting test case during a normal fuzzing process, and want
-           to mutate it without rediscovering any of the test cases already
-           found during an earlier run.
-
-           To use this mode, you need to point -B to the fuzz_bitmap produced
-           by an earlier run for the exact same binary... and that's it.
-
-           I only used this once or twice to get variants of a particular
-           file, so I'm not making this an official setting. */
-
-        if (in_bitmap) FATAL("Multiple -B options not supported");
-
-        in_bitmap = optarg;
-        read_bitmap(in_bitmap);
-        break;
-
-      case 'C': /* crash mode */
-
-        if (crash_mode) FATAL("Multiple -C options not supported");
-        crash_mode = FAULT_CRASH;
-        break;
-
-      case 'n': /* dumb mode */
-
-        if (dumb_mode) FATAL("Multiple -n options not supported");
-        if (getenv("AFL_DUMB_FORKSRV")) dumb_mode = 2; else dumb_mode = 1;
-
-        break;
-
-      case 'T': /* banner */
-
-        if (use_banner) FATAL("Multiple -T options not supported");
-        use_banner = optarg;
-        break;
-
-      case 'Q': /* QEMU mode */
-
-        if (qemu_mode) FATAL("Multiple -Q options not supported");
-        qemu_mode = 1;
-
-        if (!mem_limit_given) mem_limit = MEM_LIMIT_QEMU;
-
-        break;
-
-      case 'U': /* Unicorn mode */
-
-        if (unicorn_mode) FATAL("Multiple -U options not supported");
-        unicorn_mode = 1;
-
-        if (!mem_limit_given) mem_limit = MEM_LIMIT_UNICORN;
-
-        break;
-
-      case 'V': {
-           most_time_key = 1;
-           if (sscanf(optarg, "%llu", &most_time) < 1 || optarg[0] == '-')
-             FATAL("Bad syntax used for -V");
-        }
-        break;
-
-      case 'E': {
-           most_execs_key = 1;
-           if (sscanf(optarg, "%llu", &most_execs) < 1 || optarg[0] == '-')
-             FATAL("Bad syntax used for -E");
-        }
-        break;
-
-      case 'L': { /* MOpt mode */
-
-              if (limit_time_sig)  FATAL("Multiple -L options not supported");
-              limit_time_sig = 1;
-              havoc_max_mult = HAVOC_MAX_MULT_MOPT;
-
-			if (sscanf(optarg, "%llu", &limit_time_puppet) < 1 ||
-				optarg[0] == '-') FATAL("Bad syntax used for -L");
-
-			u64 limit_time_puppet2 = limit_time_puppet * 60 * 1000;
-
-			if (limit_time_puppet2 < limit_time_puppet) FATAL("limit_time overflow");
-			limit_time_puppet = limit_time_puppet2;
-
-			SAYF("limit_time_puppet %llu\n",limit_time_puppet);
-			swarm_now = 0;
-
-			if (limit_time_puppet == 0)
-			    key_puppet = 1;
-
-			int i;
-			int tmp_swarm = 0;
-
-			if (g_now > g_max) g_now = 0;
-			w_now = (w_init - w_end) * (g_max - g_now) / g_max + w_end;
-
-			for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) {
-				double total_puppet_temp = 0.0;
-				swarm_fitness[tmp_swarm] = 0.0;
-
-				for (i = 0; i < operator_num; ++i) {
-					stage_finds_puppet[tmp_swarm][i] = 0;
-					probability_now[tmp_swarm][i] = 0.0;
-					x_now[tmp_swarm][i] = ((double)(random() % 7000)*0.0001 + 0.1);
-					total_puppet_temp += x_now[tmp_swarm][i];
-					v_now[tmp_swarm][i] = 0.1;
-					L_best[tmp_swarm][i] = 0.5;
-					G_best[i] = 0.5;
-					eff_best[tmp_swarm][i] = 0.0;
-
-				}
-
-				for (i = 0; i < operator_num; ++i) {
-					stage_cycles_puppet_v2[tmp_swarm][i] = stage_cycles_puppet[tmp_swarm][i];
-					stage_finds_puppet_v2[tmp_swarm][i] = stage_finds_puppet[tmp_swarm][i];
-					x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / total_puppet_temp;
-				}
-
-				double x_temp = 0.0;
-
-				for (i = 0; i < operator_num; ++i) {
-					probability_now[tmp_swarm][i] = 0.0;
-					v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
-
-					x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
-
-					if (x_now[tmp_swarm][i] > v_max)
-						x_now[tmp_swarm][i] = v_max;
-					else if (x_now[tmp_swarm][i] < v_min)
-						x_now[tmp_swarm][i] = v_min;
-
-					x_temp += x_now[tmp_swarm][i];
-				}
-
-				for (i = 0; i < operator_num; ++i) {
-					x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
-					if (likely(i != 0))
-						probability_now[tmp_swarm][i] = probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
-					else
-						probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
-				}
-				if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || probability_now[tmp_swarm][operator_num - 1] > 1.01)
-                                    FATAL("ERROR probability");
-			}
-
-			for (i = 0; i < operator_num; ++i) {
-				core_operator_finds_puppet[i] = 0;
-				core_operator_finds_puppet_v2[i] = 0;
-				core_operator_cycles_puppet[i] = 0;
-				core_operator_cycles_puppet_v2[i] = 0;
-				core_operator_cycles_puppet_v3[i] = 0;
-			}
-
-        }
-        break;
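Note: the -L handler above leaves probability_now[swarm][op] holding a
cumulative distribution over the mutation operators. For illustration, a
selector over that vector could look like the sketch below; select_op() is a
hypothetical name, but MOpt's real selector follows the same CDF-walk idea:

    /* Sketch: map a uniform random draw onto an operator index using the
       cumulative probability_now[][] vector built during -L parsing. */
    static int select_op(void) {

      double sele = (double)(random() % 10000) * 0.0001 *
                    probability_now[swarm_now][operator_num - 1];
      int    i;

      for (i = 0; i < operator_num; ++i)
        if (sele < probability_now[swarm_now][i]) return i;

      return operator_num - 1;          /* numeric safety net */

    }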
-
-      default:
-
-        usage(argv[0]);
-
-    }
-
-  if (optind == argc || !in_dir || !out_dir) usage(argv[0]);
-
-  if (fixed_seed)
-    OKF("Running with fixed seed: %u", (u32)init_seed);
-  srandom((u32)init_seed);
-  setup_signal_handlers();
-  check_asan_opts();
-
-  power_name = power_names[schedule];
-
-  if (sync_id) fix_up_sync();
-
-  if (!strcmp(in_dir, out_dir))
-    FATAL("Input and output directories can't be the same");
-
-  if ((tmp_dir = getenv("AFL_TMPDIR")) != NULL) {
-    char tmpfile[strlen(tmp_dir) + 16];
-    sprintf(tmpfile, "%s/%s", tmp_dir, ".cur_input");
-    if (access(tmpfile, F_OK) != -1) // there is still a race condition here, but well ...
-      FATAL("TMP_DIR already has an existing temporary input file: %s", tmpfile);
-  } else
-    tmp_dir = out_dir;
-
-  if (dumb_mode) {
-
-    if (crash_mode) FATAL("-C and -n are mutually exclusive");
-    if (qemu_mode)  FATAL("-Q and -n are mutually exclusive");
-    if (unicorn_mode) FATAL("-U and -n are mutually exclusive");
-
-  }
-  
-  if (strchr(argv[optind], '/') == NULL) WARNF(cLRD "Target binary called without a prefixed path, make sure you are fuzzing the right binary: " cRST "%s", argv[optind]);
-
-  OKF("afl++ is maintained by Marc \"van Hauser\" Heuse, Heiko \"hexcoder\" Eissfeldt and Andrea Fioraldi");
-  OKF("afl++ is open source, get it at https://github.com/vanhauser-thc/AFLplusplus");
-  OKF("Power schedules from github.com/mboehme/aflfast");
-  OKF("Python Mutator and llvm_mode whitelisting from github.com/choller/afl");
-  OKF("afl-tmin fork server patch from github.com/nccgroup/TriforceAFL");
-  OKF("MOpt Mutator from github.com/puppet-meteor/MOpt-AFL");
-  ACTF("Getting to work...");
-
-  switch (schedule) {
-    case FAST:    OKF ("Using exponential power schedule (FAST)"); break;
-    case COE:     OKF ("Using cut-off exponential power schedule (COE)"); break;
-    case EXPLOIT: OKF ("Using exploitation-based constant power schedule (EXPLOIT)"); break;
-    case LIN:     OKF ("Using linear power schedule (LIN)"); break;
-    case QUAD:    OKF ("Using quadratic power schedule (QUAD)"); break;
-    case EXPLORE: OKF ("Using exploration-based constant power schedule (EXPLORE)"); break;
-    default : FATAL ("Unknown power schedule"); break;
-  }
-
-  if (getenv("AFL_NO_FORKSRV"))    no_forkserver    = 1;
-  if (getenv("AFL_NO_CPU_RED"))    no_cpu_meter_red = 1;
-  if (getenv("AFL_NO_ARITH"))      no_arith         = 1;
-  if (getenv("AFL_SHUFFLE_QUEUE")) shuffle_queue    = 1;
-  if (getenv("AFL_FAST_CAL"))      fast_cal         = 1;
-
-  if (getenv("AFL_HANG_TMOUT")) {
-    hang_tmout = atoi(getenv("AFL_HANG_TMOUT"));
-    if (!hang_tmout) FATAL("Invalid value of AFL_HANG_TMOUT");
-  }
-
-  if (dumb_mode == 2 && no_forkserver)
-    FATAL("AFL_DUMB_FORKSRV and AFL_NO_FORKSRV are mutually exclusive");
-
-  if (getenv("AFL_PRELOAD")) {
-    setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);
-    setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);
-  }
-
-  if (getenv("AFL_LD_PRELOAD"))
-    FATAL("Use AFL_PRELOAD instead of AFL_LD_PRELOAD");
-
-  save_cmdline(argc, argv);
-
-  fix_up_banner(argv[optind]);
-
-  check_if_tty();
-
-  if (getenv("AFL_CAL_FAST")) {
-    /* Use less calibration cycles, for slow applications */
-    cal_cycles = 3;
-    cal_cycles_long = 5;
-  }
-
-  if (getenv("AFL_DEBUG"))
-    debug = 1;
-
-  if (getenv("AFL_PYTHON_ONLY")) {
-    /* This ensures we don't proceed to havoc/splice */
-    python_only = 1;
-
-    /* Ensure we also skip all deterministic steps */
-    skip_deterministic = 1;
-  }
-
-  get_core_count();
-
-#ifdef HAVE_AFFINITY
-  bind_to_free_cpu();
-#endif /* HAVE_AFFINITY */
-
-  check_crash_handling();
-  check_cpu_governor();
-
-  setup_post();
-  setup_custom_mutator();
-  setup_shm(dumb_mode);
-
-  if (!in_bitmap) memset(virgin_bits, 255, MAP_SIZE);
-  memset(virgin_tmout, 255, MAP_SIZE);
-  memset(virgin_crash, 255, MAP_SIZE);
-
-  init_count_class16();
-
-  setup_dirs_fds();
-
-  u8 with_python_support = 0;
-#ifdef USE_PYTHON
-  if (init_py())
-    FATAL("Failed to initialize Python module");
-  with_python_support = 1;
-#endif
-
-  if (getenv("AFL_PYTHON_MODULE") && !with_python_support)
-     FATAL("Your AFL binary was built without Python support");
-
-  setup_cmdline_file(argv + optind);
-
-  read_testcases();
-  load_auto();
-
-  pivot_inputs();
-
-  if (extras_dir) load_extras(extras_dir);
-
-  if (!timeout_given) find_timeout();
-
-  /* If we don't have a file name chosen yet, use a safe default. */
-
-  if (!out_file) {
-    u32 i = optind + 1;
-    while (argv[i]) {
-
-      u8* aa_loc = strstr(argv[i], "@@");
-
-      if (aa_loc && !out_file) {
-        if (file_extension) {
-          out_file = alloc_printf("%s/.cur_input.%s", out_dir, file_extension);
-        } else {
-          out_file = alloc_printf("%s/.cur_input", out_dir);
-        }
-        detect_file_args(argv + optind + 1, out_file);
-	break;
-      }
-
-      ++i;
-
-    }
-  }
-
-  if (!out_file) setup_stdio_file();
-
-  check_binary(argv[optind]);
-
-  start_time = get_cur_time();
-
-  if (qemu_mode)
-    use_argv = get_qemu_argv(argv[0], argv + optind, argc - optind);
-  else
-    use_argv = argv + optind;
-
-  perform_dry_run(use_argv);
-
-  cull_queue();
-
-  show_init_stats();
-
-  seek_to = find_start_position();
-
-  write_stats_file(0, 0, 0);
-  save_auto();
-
-  if (stop_soon) goto stop_fuzzing;
-
-  /* Woop woop woop */
-
-  if (!not_on_tty) {
-    sleep(4);
-    start_time += 4000;
-    if (stop_soon) goto stop_fuzzing;
-  }
-
-  // reset start_time here so the -V time limit is measured from the actual fuzzing start
-  start_time = get_cur_time();
-
-  while (1) {
-
-    u8 skipped_fuzz;
-
-    cull_queue();
-
-    if (!queue_cur) {
-
-      ++queue_cycle;
-      current_entry     = 0;
-      cur_skipped_paths = 0;
-      queue_cur         = queue;
-
-      while (seek_to) {
-        ++current_entry;
-        --seek_to;
-        queue_cur = queue_cur->next;
-      }
-
-      show_stats();
-
-      if (not_on_tty) {
-        ACTF("Entering queue cycle %llu.", queue_cycle);
-        fflush(stdout);
-      }
-
-      /* If we had a full queue cycle with no new finds, try
-         recombination strategies next. */
-
-      if (queued_paths == prev_queued) {
-
-        if (use_splicing) ++cycles_wo_finds; else use_splicing = 1;
-
-      } else cycles_wo_finds = 0;
-
-      prev_queued = queued_paths;
-
-      if (sync_id && queue_cycle == 1 && getenv("AFL_IMPORT_FIRST"))
-        sync_fuzzers(use_argv);
-
-    }
-
-    skipped_fuzz = fuzz_one(use_argv);
-
-    if (!stop_soon && sync_id && !skipped_fuzz) {
-      
-      if (!(sync_interval_cnt++ % SYNC_INTERVAL))
-        sync_fuzzers(use_argv);
-
-    }
-
-    if (!stop_soon && exit_1) stop_soon = 2;
-
-    if (stop_soon) break;
-
-    queue_cur = queue_cur->next;
-    ++current_entry;
-
-    if (most_time_key == 1) {
-      u64 cur_ms_lv = get_cur_time();
-      if (most_time * 1000 < cur_ms_lv  - start_time) {
-        most_time_key = 2;
-        break;
-      }
-    }
-    if (most_execs_key == 1) {
-      if (most_execs <= total_execs) {
-        most_execs_key = 2;
-        break;
-      }
-    }
-  }
-
-  if (queue_cur) show_stats();
-
-  write_bitmap();
-  write_stats_file(0, 0, 0);
-  save_auto();
-
-stop_fuzzing:
-
-  SAYF(CURSOR_SHOW cLRD "\n\n+++ Testing aborted %s +++\n" cRST,
-       stop_soon == 2 ? "programmatically" : "by user");
-
-  if (most_time_key == 2)
-    SAYF(cYEL "[!] " cRST "Time limit was reached\n");
-  if (most_execs_key == 2)
-    SAYF(cYEL "[!] " cRST "Execution limit was reached\n");
-
-  /* Running for more than 30 minutes but still doing first cycle? */
-
-  if (queue_cycle == 1 && get_cur_time() - start_time > 30 * 60 * 1000) {
-
-    SAYF("\n" cYEL "[!] " cRST
-           "Stopped during the first cycle, results may be incomplete.\n"
-           "    (For info on resuming, see %s/README)\n", doc_path);
-
-  }
-
-  fclose(plot_file);
-  destroy_queue();
-  destroy_extras();
-  ck_free(target_path);
-  ck_free(sync_id);
-
-  alloc_report();
-
-#ifdef USE_PYTHON
-  finalize_py();
-#endif
-
-  OKF("We're done here. Have a nice day!\n");
-
-  exit(0);
-
-}
-
-#endif /* !AFL_LIB */
diff --git a/android-ashmem.h b/android-ashmem.h
deleted file mode 100644
index a787c04b..00000000
--- a/android-ashmem.h
+++ /dev/null
@@ -1,81 +0,0 @@
-#ifndef _ANDROID_ASHMEM_H
-#define _ANDROID_ASHMEM_H
-
-#include <fcntl.h>
-#include <linux/shm.h>
-#include <linux/ashmem.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-
-#if __ANDROID_API__ >= 26
-#define shmat bionic_shmat
-#define shmctl bionic_shmctl
-#define shmdt bionic_shmdt
-#define shmget bionic_shmget
-#endif
-#include <sys/shm.h>
-#undef shmat
-#undef shmctl
-#undef shmdt
-#undef shmget
-#include <stdio.h>
-
-#define ASHMEM_DEVICE	"/dev/ashmem"
-
-static inline int shmctl(int __shmid, int __cmd, struct shmid_ds *__buf)
-{
-		int ret = 0;
-		if (__cmd == IPC_RMID) {
-			int length = ioctl(__shmid, ASHMEM_GET_SIZE, NULL);
-			struct ashmem_pin pin = {0, length};
-			ret = ioctl(__shmid, ASHMEM_UNPIN, &pin);
-			close(__shmid);
-		}
-
-		return ret;
-}
-
-static inline int shmget (key_t __key, size_t __size, int __shmflg)
-{
-	int fd,ret;
-	char ourkey[11];
-
-	fd = open(ASHMEM_DEVICE, O_RDWR);
-	if (fd < 0)
-		return fd;
-
-	sprintf(ourkey,"%d",__key);
-	ret = ioctl(fd, ASHMEM_SET_NAME, ourkey);
-	if (ret < 0)
-		goto error;
-
-	ret = ioctl(fd, ASHMEM_SET_SIZE, __size);
-	if (ret < 0)
-		goto error;
-
-	return fd;
-
-error:
-	close(fd);
-	return ret;
-}
-
-static inline void *shmat (int __shmid, const void *__shmaddr, int __shmflg)
-{
-  int size;
-  void *ptr;
-  
-  size = ioctl(__shmid, ASHMEM_GET_SIZE, NULL);
-  if (size < 0) {
-    return NULL;
-  }
-
-  ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, __shmid, 0);
-  if (ptr == MAP_FAILED) {
-    return NULL;
-  }
-  
-  return ptr;
-}
-
-#endif
diff --git a/config.h b/config.h
index 29c33d46..046ab52a 100644..120000
--- a/config.h
+++ b/config.h
@@ -1,363 +1 @@
-/*
-   american fuzzy lop plus plus - vaguely configurable bits
-   ----------------------------------------------
-
-   Written and maintained by Michal Zalewski <lcamtuf@google.com>
-
-   Copyright 2013, 2014, 2015, 2016 Google Inc. All rights reserved.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at:
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- */
-
-#ifndef _HAVE_CONFIG_H
-#define _HAVE_CONFIG_H
-
-#include "types.h"
-
-/* Version string: */
-
-#define VERSION             "++2.53d"  // c = release, d = volatile github dev
-
-/******************************************************
- *                                                    *
- *  Settings that may be of interest to power users:  *
- *                                                    *
- ******************************************************/
-
-/* Comment out to disable terminal colors (note that this makes afl-analyze
-   a lot less nice): */
-
-#define USE_COLOR
-
-/* Comment out to disable fancy ANSI boxes and use poor man's 7-bit UI: */
-
-#define FANCY_BOXES
-
-/* Default timeout for fuzzed code (milliseconds). This is the upper bound,
-   also used for detecting hangs; the actual value is auto-scaled: */
-
-#define EXEC_TIMEOUT        1000
-
-/* Timeout rounding factor when auto-scaling (milliseconds): */
-
-#define EXEC_TM_ROUND       20
-
-/* Default memory limit for child process (MB): */
-
-#ifndef __x86_64__ 
-#  define MEM_LIMIT         25
-#else
-#  define MEM_LIMIT         50
-#endif /* ^!__x86_64__ */
-
-/* Default memory limit when running in QEMU mode (MB): */
-
-#define MEM_LIMIT_QEMU      200
-
-/* Default memory limit when running in Unicorn mode (MB): */
-
-#define MEM_LIMIT_UNICORN   200
-
-/* Number of calibration cycles per every new test case (and for test
-   cases that show variable behavior): */
-
-#define CAL_CYCLES          8
-#define CAL_CYCLES_LONG     40
-
-/* Number of subsequent timeouts before abandoning an input file: */
-
-#define TMOUT_LIMIT         250
-
-/* Maximum number of unique hangs or crashes to record: */
-
-#define KEEP_UNIQUE_HANG    500
-#define KEEP_UNIQUE_CRASH   5000
-
-/* Baseline number of random tweaks during a single 'havoc' stage: */
-
-#define HAVOC_CYCLES        256
-#define HAVOC_CYCLES_INIT   1024
-
-/* Maximum multiplier for the above (should be a power of two, beware
-   of 32-bit int overflows): */
-
-#define HAVOC_MAX_MULT      16
-#define HAVOC_MAX_MULT_MOPT 32
-
-/* Absolute minimum number of havoc cycles (after all adjustments): */
-
-#define HAVOC_MIN           16
-
-/* Power Schedule Divisor */
-#define POWER_BETA          1
-#define MAX_FACTOR          (POWER_BETA * 32)
-
-/* Maximum stacking for havoc-stage tweaks. The actual value is calculated
-   like this: 
-
-   n = random between 1 and HAVOC_STACK_POW2
-   stacking = 2^n
-
-   In other words, the default (n = 7) produces 2, 4, 8, 16, 32, 64, or
-   128 stacked tweaks: */
-
-#define HAVOC_STACK_POW2    7
-
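Note: in the havoc stage this constant is typically consumed as a random
power-of-two stacking count; a one-line sketch of the idiom, assuming the UR()
helper from include/afl-fuzz.h:

    u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));  /* 2, 4, ..., 128 */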
-/* Caps on block sizes for cloning and deletion operations. Each of these
-   ranges has a 33% probability of getting picked, except for the first
-   two cycles where smaller blocks are favored: */
-
-#define HAVOC_BLK_SMALL     32
-#define HAVOC_BLK_MEDIUM    128
-#define HAVOC_BLK_LARGE     1500
-
-/* Extra-large blocks, selected very rarely (<5% of the time): */
-
-#define HAVOC_BLK_XL        32768
-
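Note: a condensed, hypothetical sketch of how these caps can drive block-length
selection follows; it omits the early-cycle bias toward smaller blocks
mentioned above, and assumes UR() plus a MIN() macro as defined in the sources:

    static u32 choose_blk(u32 limit) {

      u32 lo = 1, hi = HAVOC_BLK_SMALL;

      switch (UR(3)) {                  /* roughly 33% per range */
        case 1: lo = HAVOC_BLK_SMALL;  hi = HAVOC_BLK_MEDIUM; break;
        case 2:
          if (UR(20)) { lo = HAVOC_BLK_MEDIUM; hi = HAVOC_BLK_LARGE; }
          else        { lo = HAVOC_BLK_LARGE;  hi = HAVOC_BLK_XL;    } /* rare */
      }

      if (lo >= limit) lo = 1;
      return lo + UR(MIN(hi, limit) - lo + 1);

    }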
-/* Probabilities of skipping non-favored entries in the queue, expressed as
-   percentages: */
-
-#define SKIP_TO_NEW_PROB    99 /* ...when there are new, pending favorites */
-#define SKIP_NFAV_OLD_PROB  95 /* ...no new favs, cur entry already fuzzed */
-#define SKIP_NFAV_NEW_PROB  75 /* ...no new favs, cur entry not fuzzed yet */
-
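Note: these percentages are consumed at the top of fuzz_one(); the decision
logic is approximately the following (field names as in struct queue_entry):

    if (pending_favored) {

      /* New favorites are waiting: almost always skip entries that are
         non-favored or already fuzzed. */
      if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
          UR(100) < SKIP_TO_NEW_PROB) return 1;

    } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {

      if (queue_cycle > 1 && !queue_cur->was_fuzzed) {
        if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;   /* not fuzzed yet  */
      } else {
        if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;   /* already fuzzed  */
      }

    }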
-/* Splicing cycle count: */
-
-#define SPLICE_CYCLES       15
-
-/* Nominal per-splice havoc cycle length: */
-
-#define SPLICE_HAVOC        32
-
-/* Maximum offset for integer addition / subtraction stages: */
-
-#define ARITH_MAX           35
-
-/* Limits for the test case trimmer. The absolute minimum chunk size; and
-   the starting and ending divisors for chopping up the input file: */
-
-#define TRIM_MIN_BYTES      4
-#define TRIM_START_STEPS    16
-#define TRIM_END_STEPS      1024
-
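Note: inside trim_case() these settings bound a power-of-two halving loop,
roughly as sketched below (next_p2() is the helper from include/afl-fuzz.h,
MAX() as in the sources, q the queue entry being trimmed):

    u32 len_p2     = next_p2(q->len);
    u32 remove_len = MAX(len_p2 / TRIM_START_STEPS, TRIM_MIN_BYTES);

    while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, TRIM_MIN_BYTES)) {

      /* ... try removing remove_len-sized chunks at successive positions,
         keeping each cut whose trace checksum stays unchanged ... */

      remove_len >>= 1;

    }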
-/* Maximum size of input file, in bytes (keep under 100MB): */
-
-#define MAX_FILE            (1 * 1024 * 1024)
-
-/* The same, for the test case minimizer: */
-
-#define TMIN_MAX_FILE       (10 * 1024 * 1024)
-
-/* Block normalization steps for afl-tmin: */
-
-#define TMIN_SET_MIN_SIZE   4
-#define TMIN_SET_STEPS      128
-
-/* Maximum dictionary token size (-x), in bytes: */
-
-#define MAX_DICT_FILE       128
-
-/* Length limits for auto-detected dictionary tokens: */
-
-#define MIN_AUTO_EXTRA      3
-#define MAX_AUTO_EXTRA      32
-
-/* Maximum number of user-specified dictionary tokens to use in deterministic
-   steps; past this point, the "extras/user" step will be still carried out,
-   but with proportionally lower odds: */
-
-#define MAX_DET_EXTRAS      200
-
-/* Maximum number of auto-extracted dictionary tokens to actually use in fuzzing
-   (first value), and to keep in memory as candidates. The latter should be much
-   higher than the former. */
-
-#define USE_AUTO_EXTRAS     50
-#define MAX_AUTO_EXTRAS     (USE_AUTO_EXTRAS * 10)
-
-/* Scaling factor for the effector map used to skip some of the more
-   expensive deterministic steps. The actual divisor is set to
-   2^EFF_MAP_SCALE2 bytes: */
-
-#define EFF_MAP_SCALE2      3
-
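Note: byte offsets are mapped into the effector map with shift/mask helpers
along these lines:

    #define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
    #define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
    #define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
    #define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)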
-/* Minimum input file length at which the effector logic kicks in: */
-
-#define EFF_MIN_LEN         128
-
-/* Maximum effector density past which everything is just fuzzed
-   unconditionally (%): */
-
-#define EFF_MAX_PERC        90
-
-/* UI refresh frequency (Hz): */
-
-#define UI_TARGET_HZ        5
-
-/* Fuzzer stats file and plot update intervals (sec): */
-
-#define STATS_UPDATE_SEC    60
-#define PLOT_UPDATE_SEC     5
-
-/* Smoothing divisor for CPU load and exec speed stats (1 - no smoothing). */
-
-#define AVG_SMOOTHING       16
-
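Note: the smoothing is a plain exponential moving average; for a sampled
quantity cur folded into a running avg, the update amounts to:

    avg = avg * (1.0 - 1.0 / AVG_SMOOTHING) + cur * (1.0 / AVG_SMOOTHING);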
-/* Sync interval (every n havoc cycles): */
-
-#define SYNC_INTERVAL       5
-
-/* Output directory reuse grace period (minutes): */
-
-#define OUTPUT_GRACE        25
-
-/* Uncomment to use simple file names (id_NNNNNN): */
-
-// #define SIMPLE_FILES
-
-/* List of interesting values to use in fuzzing. */
-
-#define INTERESTING_8 \
-  -128,          /* Overflow signed 8-bit when decremented  */ \
-  -1,            /*                                         */ \
-   0,            /*                                         */ \
-   1,            /*                                         */ \
-   16,           /* One-off with common buffer size         */ \
-   32,           /* One-off with common buffer size         */ \
-   64,           /* One-off with common buffer size         */ \
-   100,          /* One-off with common buffer size         */ \
-   127           /* Overflow signed 8-bit when incremented  */
-
-#define INTERESTING_16 \
-  -32768,        /* Overflow signed 16-bit when decremented */ \
-  -129,          /* Overflow signed 8-bit                   */ \
-   128,          /* Overflow signed 8-bit                   */ \
-   255,          /* Overflow unsig 8-bit when incremented   */ \
-   256,          /* Overflow unsig 8-bit                    */ \
-   512,          /* One-off with common buffer size         */ \
-   1000,         /* One-off with common buffer size         */ \
-   1024,         /* One-off with common buffer size         */ \
-   4096,         /* One-off with common buffer size         */ \
-   32767         /* Overflow signed 16-bit when incremented */
-
-#define INTERESTING_32 \
-  -2147483648LL, /* Overflow signed 32-bit when decremented */ \
-  -100663046,    /* Large negative number (endian-agnostic) */ \
-  -32769,        /* Overflow signed 16-bit                  */ \
-   32768,        /* Overflow signed 16-bit                  */ \
-   65535,        /* Overflow unsig 16-bit when incremented  */ \
-   65536,        /* Overflow unsig 16 bit                   */ \
-   100663045,    /* Large positive number (endian-agnostic) */ \
-   2147483647    /* Overflow signed 32-bit when incremented */
-
-/***********************************************************
- *                                                         *
- *  Really exotic stuff you probably don't want to touch:  *
- *                                                         *
- ***********************************************************/
-
-/* Call count interval between reseeding the libc PRNG from /dev/urandom: */
-
-#define RESEED_RNG          10000
-
-/* Maximum line length passed from GCC to 'as' and used for parsing
-   configuration files: */
-
-#define MAX_LINE            8192
-
-/* Environment variable used to pass SHM ID to the called program. */
-
-#define SHM_ENV_VAR         "__AFL_SHM_ID"
-
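Note: on the target side, the instrumentation's setup code reads this variable
and attaches the region, in essence (condensed; error handling trimmed):

    char* id_str = getenv(SHM_ENV_VAR);

    if (id_str) {

      u32 shm_id = atoi(id_str);

      __afl_area_ptr = shmat(shm_id, NULL, 0);      /* coverage bitmap */
      if (__afl_area_ptr == (void*)-1) _exit(1);

    }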
-/* Other less interesting, internal-only variables. */
-
-#define CLANG_ENV_VAR       "__AFL_CLANG_MODE"
-#define AS_LOOP_ENV_VAR     "__AFL_AS_LOOPCHECK"
-#define PERSIST_ENV_VAR     "__AFL_PERSISTENT"
-#define DEFER_ENV_VAR       "__AFL_DEFER_FORKSRV"
-
-/* In-code signatures for deferred and persistent mode. */
-
-#define PERSIST_SIG         "##SIG_AFL_PERSISTENT##"
-#define DEFER_SIG           "##SIG_AFL_DEFER_FORKSRV##"
-
-/* Distinctive bitmap signature used to indicate failed execution: */
-
-#define EXEC_FAIL_SIG       0xfee1dead
-
-/* Distinctive exit code used to indicate MSAN trip condition: */
-
-#define MSAN_ERROR          86
-
-/* Designated file descriptors for forkserver commands (the application will
-   use FORKSRV_FD and FORKSRV_FD + 1): */
-
-#define FORKSRV_FD          198
-
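Note: the in-target fork server loop built around these two descriptors looks
approximately like this (condensed from the runtime; error handling omitted):

    static u8 tmp[4];
    s32 child_pid;
    int status;

    if (write(FORKSRV_FD + 1, tmp, 4) != 4) return; /* "hello" to afl-fuzz  */

    while (read(FORKSRV_FD, tmp, 4) == 4) {         /* wait for run request */

      child_pid = fork();

      if (!child_pid) {                             /* child runs the target */

        close(FORKSRV_FD);
        close(FORKSRV_FD + 1);
        return;

      }

      write(FORKSRV_FD + 1, &child_pid, 4);         /* report PID           */
      waitpid(child_pid, &status, 0);
      write(FORKSRV_FD + 1, &status, 4);            /* report exit status   */

    }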
-/* Fork server init timeout multiplier: we'll wait the user-selected
-   timeout plus this much for the fork server to spin up. */
-
-#define FORK_WAIT_MULT      10
-
-/* Calibration timeout adjustments, to be a bit more generous when resuming
-   fuzzing sessions or trying to calibrate already-added internal finds.
-   The first value is a percentage, the other is in milliseconds: */
-
-#define CAL_TMOUT_PERC      125
-#define CAL_TMOUT_ADD       50
-
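Note: during calibration the two values combine as the larger of the padded and
the scaled timeout, roughly:

    u32 use_tmout = MAX(exec_tmout + CAL_TMOUT_ADD,
                        exec_tmout * CAL_TMOUT_PERC / 100);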
-/* Number of chances to calibrate a case before giving up: */
-
-#define CAL_CHANCES         3
-
-/* Map size for the traced binary (2^MAP_SIZE_POW2). Must be greater than
-   2; you probably want to keep it under 18 or so for performance reasons
-   (adjusting AFL_INST_RATIO when compiling is probably a better way to solve
-   problems with complex programs). You need to recompile the target binary
-   after changing this - otherwise, SEGVs may ensue. */
-
-#define MAP_SIZE_POW2       16
-#define MAP_SIZE            (1 << MAP_SIZE_POW2)
-
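Note: for context, each instrumented branch updates this map with the classic
AFL edge hash; conceptually (R stands for a per-branch compile-time random
constant):

    cur_location = R % MAP_SIZE;
    __afl_area_ptr[cur_location ^ prev_location]++;
    prev_location = cur_location >> 1;   /* so A->B differs from B->A */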
-/* Maximum allocator request size (keep well under INT_MAX): */
-
-#define MAX_ALLOC           0x40000000
-
-/* A made-up hashing seed: */
-
-#define HASH_CONST          0xa5b35705
-
-/* Constants for afl-gotcpu to control busy loop timing: */
-
-#define  CTEST_TARGET_MS    5000
-#define  CTEST_CORE_TRG_MS  1000
-#define  CTEST_BUSY_CYCLES  (10 * 1000 * 1000)
-
-/* Enable NeverZero counters in QEMU mode */
-
-#define AFL_QEMU_NOT_ZERO
-
-/* Uncomment this to use inferior block-coverage-based instrumentation. Note
-   that you need to recompile the target binary for this to have any effect: */
-
-// #define COVERAGE_ONLY
-
-/* Uncomment this to ignore hit counts and output just one bit per tuple.
-   As with the previous setting, you will need to recompile the target
-   binary: */
-
-// #define SKIP_COUNTS
-
-/* Uncomment this to use instrumentation data to record newly discovered paths,
-   but do not use them as seeds for fuzzing. This is useful for conveniently
-   measuring coverage that could be attained by a "dumb" fuzzing algorithm: */
-
-// #define IGNORE_FINDS
-
-#endif /* ! _HAVE_CONFIG_H */
+include/config.h
\ No newline at end of file
diff --git a/debug.h b/debug.h
deleted file mode 100644
index c0044280..00000000
--- a/debug.h
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
-   american fuzzy lop - debug / error handling macros
-   --------------------------------------------------
-
-   Written and maintained by Michal Zalewski <lcamtuf@google.com>
-
-   Copyright 2013, 2014, 2015, 2016 Google Inc. All rights reserved.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at:
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- */
-
-#ifndef _HAVE_DEBUG_H
-#define _HAVE_DEBUG_H
-
-#include <errno.h>
-
-#include "types.h"
-#include "config.h"
-
-/*******************
- * Terminal colors *
- *******************/
-
-#ifdef USE_COLOR
-
-#  define cBLK "\x1b[0;30m"
-#  define cRED "\x1b[0;31m"
-#  define cGRN "\x1b[0;32m"
-#  define cBRN "\x1b[0;33m"
-#  define cBLU "\x1b[0;34m"
-#  define cMGN "\x1b[0;35m"
-#  define cCYA "\x1b[0;36m"
-#  define cLGR "\x1b[0;37m"
-#  define cGRA "\x1b[1;90m"
-#  define cLRD "\x1b[1;91m"
-#  define cLGN "\x1b[1;92m"
-#  define cYEL "\x1b[1;93m"
-#  define cLBL "\x1b[1;94m"
-#  define cPIN "\x1b[1;95m"
-#  define cLCY "\x1b[1;96m"
-#  define cBRI "\x1b[1;97m"
-#  define cRST "\x1b[0m"
-
-#  define bgBLK "\x1b[40m"
-#  define bgRED "\x1b[41m"
-#  define bgGRN "\x1b[42m"
-#  define bgBRN "\x1b[43m"
-#  define bgBLU "\x1b[44m"
-#  define bgMGN "\x1b[45m"
-#  define bgCYA "\x1b[46m"
-#  define bgLGR "\x1b[47m"
-#  define bgGRA "\x1b[100m"
-#  define bgLRD "\x1b[101m"
-#  define bgLGN "\x1b[102m"
-#  define bgYEL "\x1b[103m"
-#  define bgLBL "\x1b[104m"
-#  define bgPIN "\x1b[105m"
-#  define bgLCY "\x1b[106m"
-#  define bgBRI "\x1b[107m"
-
-#else
-
-#  define cBLK ""
-#  define cRED ""
-#  define cGRN ""
-#  define cBRN ""
-#  define cBLU ""
-#  define cMGN ""
-#  define cCYA ""
-#  define cLGR ""
-#  define cGRA ""
-#  define cLRD ""
-#  define cLGN ""
-#  define cYEL ""
-#  define cLBL ""
-#  define cPIN ""
-#  define cLCY ""
-#  define cBRI ""
-#  define cRST ""
-
-#  define bgBLK ""
-#  define bgRED ""
-#  define bgGRN ""
-#  define bgBRN ""
-#  define bgBLU ""
-#  define bgMGN ""
-#  define bgCYA ""
-#  define bgLGR ""
-#  define bgGRA ""
-#  define bgLRD ""
-#  define bgLGN ""
-#  define bgYEL ""
-#  define bgLBL ""
-#  define bgPIN ""
-#  define bgLCY ""
-#  define bgBRI ""
-
-#endif /* ^USE_COLOR */
-
-/*************************
- * Box drawing sequences *
- *************************/
-
-#ifdef FANCY_BOXES
-
-#  define SET_G1   "\x1b)0"       /* Set G1 for box drawing    */
-#  define RESET_G1 "\x1b)B"       /* Reset G1 to ASCII         */
-#  define bSTART   "\x0e"         /* Enter G1 drawing mode     */
-#  define bSTOP    "\x0f"         /* Leave G1 drawing mode     */
-#  define bH       "q"            /* Horizontal line           */
-#  define bV       "x"            /* Vertical line             */
-#  define bLT      "l"            /* Left top corner           */
-#  define bRT      "k"            /* Right top corner          */
-#  define bLB      "m"            /* Left bottom corner        */
-#  define bRB      "j"            /* Right bottom corner       */
-#  define bX       "n"            /* Cross                     */
-#  define bVR      "t"            /* Vertical, branch right    */
-#  define bVL      "u"            /* Vertical, branch left     */
-#  define bHT      "v"            /* Horizontal, branch top    */
-#  define bHB      "w"            /* Horizontal, branch bottom */
-
-#else
-
-#  define SET_G1   ""
-#  define RESET_G1 ""
-#  define bSTART   ""
-#  define bSTOP    ""
-#  define bH       "-"
-#  define bV       "|"
-#  define bLT      "+"
-#  define bRT      "+"
-#  define bLB      "+"
-#  define bRB      "+"
-#  define bX       "+"
-#  define bVR      "+"
-#  define bVL      "+"
-#  define bHT      "+"
-#  define bHB      "+"
-
-#endif /* ^FANCY_BOXES */
-
-/***********************
- * Misc terminal codes *
- ***********************/
-
-#define TERM_HOME     "\x1b[H"
-#define TERM_CLEAR    TERM_HOME "\x1b[2J"
-#define cEOL          "\x1b[0K"
-#define CURSOR_HIDE   "\x1b[?25l"
-#define CURSOR_SHOW   "\x1b[?25h"
-
-/************************
- * Debug & error macros *
- ************************/
-
-/* Just print stuff to the appropriate stream. */
-
-#ifdef MESSAGES_TO_STDOUT
-#  define SAYF(x...)    printf(x)
-#else 
-#  define SAYF(x...)    fprintf(stderr, x)
-#endif /* ^MESSAGES_TO_STDOUT */
-
-/* Show a prefixed warning. */
-
-#define WARNF(x...) do { \
-    SAYF(cYEL "[!] " cBRI "WARNING: " cRST x); \
-    SAYF(cRST "\n"); \
-  } while (0)
-
-/* Show a prefixed "doing something" message. */
-
-#define ACTF(x...) do { \
-    SAYF(cLBL "[*] " cRST x); \
-    SAYF(cRST "\n"); \
-  } while (0)
-
-/* Show a prefixed "success" message. */
-
-#define OKF(x...) do { \
-    SAYF(cLGN "[+] " cRST x); \
-    SAYF(cRST "\n"); \
-  } while (0)
-
-/* Show a prefixed fatal error message (not used in afl). */
-
-#define BADF(x...) do { \
-    SAYF(cLRD "\n[-] " cRST x); \
-    SAYF(cRST "\n"); \
-  } while (0)
-
-/* Die with a verbose non-OS fatal error message. */
-
-#define FATAL(x...) do { \
-    SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \
-         cRST x); \
-    SAYF(cLRD "\n         Location : " cRST "%s(), %s:%u\n\n", \
-         __FUNCTION__, __FILE__, __LINE__); \
-    exit(1); \
-  } while (0)
-
-/* Die by calling abort() to provide a core dump. */
-
-#define ABORT(x...) do { \
-    SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \
-         cRST x); \
-    SAYF(cLRD "\n    Stop location : " cRST "%s(), %s:%u\n\n", \
-         __FUNCTION__, __FILE__, __LINE__); \
-    abort(); \
-  } while (0)
-
-/* Die while also including the output of perror(). */
-
-#define PFATAL(x...) do { \
-    fflush(stdout); \
-    SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-]  SYSTEM ERROR : " \
-         cRST x); \
-    SAYF(cLRD "\n    Stop location : " cRST "%s(), %s:%u\n", \
-         __FUNCTION__, __FILE__, __LINE__); \
-    SAYF(cLRD "       OS message : " cRST "%s\n", strerror(errno)); \
-    exit(1); \
-  } while (0)
-
-/* Die with FAULT() or PFAULT() depending on the value of res (used to
-   interpret different failure modes for read(), write(), etc). */
-
-#define RPFATAL(res, x...) do { \
-    if (res < 0) PFATAL(x); else FATAL(x); \
-  } while (0)
-
-/* Error-checking versions of read() and write() that call RPFATAL() as
-   appropriate. */
-
-#define ck_write(fd, buf, len, fn) do { \
-    u32 _len = (len); \
-    s32 _res = write(fd, buf, _len); \
-    if (_res != _len) RPFATAL(_res, "Short write to %s", fn); \
-  } while (0)
-
-#define ck_read(fd, buf, len, fn) do { \
-    u32 _len = (len); \
-    s32 _res = read(fd, buf, _len); \
-    if (_res != _len) RPFATAL(_res, "Short read from %s", fn); \
-  } while (0)
-
-#endif /* ! _HAVE_DEBUG_H */
diff --git a/docs/ChangeLog b/docs/ChangeLog
index 6d4c4792..1cd95650 100644
--- a/docs/ChangeLog
+++ b/docs/ChangeLog
@@ -17,20 +17,29 @@ sending a mail to <afl-users+subscribe@googlegroups.com>.
 Version ++2.53d (dev):
 ----------------------
 
+  - big code refactoring:
+    * all includes are now in include/
+    * all afl sources are now in src/ - see src/README
+    * afl-fuzz was split up into various individual files so that
+      functionality (e.g. forkserver, memory map, etc.) can be included
+      in other programs, and for better readability.
+    * new code indentation everywhere
+  - added AFL_FORCE_UI to show the UI even if the terminal is not detected
   - llvm 9 is now supported (still needs testing)
  - Android is now supported (thanks to JoeyJiao!) - the Makefile still needs to be modified though
   - fix building qemu on some Ubuntus (thanks to floyd!)
   - custom mutator by a loaded library is now supported (thanks to kyakdan!)
+  - merged a PR that adds peak_rss_mb and slowest_exec_ms to the fuzzer_stats report
+  - more support for *BSD (thanks to devnexen!)
+  - fix building on *BSD (thanks to tobias.kortkamp for the patch)
  - fixes for a few features to support map sizes other than 2^16
  - afl-showmap: new option -r shows the real values in the buckets (stock
    afl never did) and prints tuple content summary information
-  - fix building on *BSD (thanks to tobias.kortkamp for the patch)
   - small docu updates
   - NeverZero counters for QEMU
   - NeverZero counters for Unicorn
   - CompareCoverage Unicorn
   - Immediates-only instrumentation for CompareCoverage
-  - ... your patch? :)
 
 
 --------------------------
diff --git a/docs/env_variables.txt b/docs/env_variables.txt
index 821463ae..cea3597b 100644
--- a/docs/env_variables.txt
+++ b/docs/env_variables.txt
@@ -223,6 +223,9 @@ checks or alter some of the more exotic semantics of the tool:
     some basic stats. This behavior is also automatically triggered when the
     output from afl-fuzz is redirected to a file or to a pipe.
 
+  - Setting AFL_FORCE_UI will force painting the UI on the screen even if
+    no valid terminal is detected (useful for virtual consoles).
+
   - If you are Jakub, you may need AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES.
     Others need not apply.
 
diff --git a/afl-as.h b/include/afl-as.h
index 4748eda7..4f8fb640 100644
--- a/afl-as.h
+++ b/include/afl-as.h
@@ -37,7 +37,7 @@
 #include "config.h"
 #include "types.h"
 
-/* 
+/*
    ------------------
    Performances notes
    ------------------
@@ -106,47 +106,47 @@
 
 static const u8* trampoline_fmt_32 =
 
-  "\n"
-  "/* --- AFL TRAMPOLINE (32-BIT) --- */\n"
-  "\n"
-  ".align 4\n"
-  "\n"
-  "leal -16(%%esp), %%esp\n"
-  "movl %%edi,  0(%%esp)\n"
-  "movl %%edx,  4(%%esp)\n"
-  "movl %%ecx,  8(%%esp)\n"
-  "movl %%eax, 12(%%esp)\n"
-  "movl $0x%08x, %%ecx\n"
-  "call __afl_maybe_log\n"
-  "movl 12(%%esp), %%eax\n"
-  "movl  8(%%esp), %%ecx\n"
-  "movl  4(%%esp), %%edx\n"
-  "movl  0(%%esp), %%edi\n"
-  "leal 16(%%esp), %%esp\n"
-  "\n"
-  "/* --- END --- */\n"
-  "\n";
+    "\n"
+    "/* --- AFL TRAMPOLINE (32-BIT) --- */\n"
+    "\n"
+    ".align 4\n"
+    "\n"
+    "leal -16(%%esp), %%esp\n"
+    "movl %%edi,  0(%%esp)\n"
+    "movl %%edx,  4(%%esp)\n"
+    "movl %%ecx,  8(%%esp)\n"
+    "movl %%eax, 12(%%esp)\n"
+    "movl $0x%08x, %%ecx\n"
+    "call __afl_maybe_log\n"
+    "movl 12(%%esp), %%eax\n"
+    "movl  8(%%esp), %%ecx\n"
+    "movl  4(%%esp), %%edx\n"
+    "movl  0(%%esp), %%edi\n"
+    "leal 16(%%esp), %%esp\n"
+    "\n"
+    "/* --- END --- */\n"
+    "\n";
 
 static const u8* trampoline_fmt_64 =
 
-  "\n"
-  "/* --- AFL TRAMPOLINE (64-BIT) --- */\n"
-  "\n"
-  ".align 4\n"
-  "\n"
-  "leaq -(128+24)(%%rsp), %%rsp\n"
-  "movq %%rdx,  0(%%rsp)\n"
-  "movq %%rcx,  8(%%rsp)\n"
-  "movq %%rax, 16(%%rsp)\n"
-  "movq $0x%08x, %%rcx\n"
-  "call __afl_maybe_log\n"
-  "movq 16(%%rsp), %%rax\n"
-  "movq  8(%%rsp), %%rcx\n"
-  "movq  0(%%rsp), %%rdx\n"
-  "leaq (128+24)(%%rsp), %%rsp\n"
-  "\n"
-  "/* --- END --- */\n"
-  "\n";
+    "\n"
+    "/* --- AFL TRAMPOLINE (64-BIT) --- */\n"
+    "\n"
+    ".align 4\n"
+    "\n"
+    "leaq -(128+24)(%%rsp), %%rsp\n"
+    "movq %%rdx,  0(%%rsp)\n"
+    "movq %%rcx,  8(%%rsp)\n"
+    "movq %%rax, 16(%%rsp)\n"
+    "movq $0x%08x, %%rcx\n"
+    "call __afl_maybe_log\n"
+    "movq 16(%%rsp), %%rax\n"
+    "movq  8(%%rsp), %%rcx\n"
+    "movq  0(%%rsp), %%rdx\n"
+    "leaq (128+24)(%%rsp), %%rsp\n"
+    "\n"
+    "/* --- END --- */\n"
+    "\n";
 
 static const u8* main_payload_32 = 
 
@@ -398,9 +398,9 @@ static const u8* main_payload_32 =
    recognize .string. */
 
 #ifdef __APPLE__
-#  define CALL_L64(str)		"call _" str "\n"
+#  define CALL_L64(str) "call _" str "\n"
 #else
-#  define CALL_L64(str)		"call " str "@PLT\n"
+#  define CALL_L64(str) "call " str "@PLT\n"
 #endif /* ^__APPLE__ */
 
 static const u8* main_payload_64 = 
@@ -415,7 +415,7 @@ static const u8* main_payload_64 =
   "\n"
   "__afl_maybe_log:\n"
   "\n"
-#if defined(__OpenBSD__)  || (defined(__FreeBSD__) && (__FreeBSD__ < 9))
+#if defined(__OpenBSD__) || (defined(__FreeBSD__) && (__FreeBSD__ < 9))
   "  .byte 0x9f /* lahf */\n"
 #else
   "  lahf\n"
@@ -448,7 +448,7 @@ static const u8* main_payload_64 =
   "__afl_return:\n"
   "\n"
   "  addb $127, %al\n"
-#if defined(__OpenBSD__)  || (defined(__FreeBSD__) && (__FreeBSD__ < 9))
+#if defined(__OpenBSD__) || (defined(__FreeBSD__) && (__FreeBSD__ < 9))
   "  .byte 0x9e /* sahf */\n"
 #else
   "  sahf\n"
@@ -737,9 +737,9 @@ static const u8* main_payload_64 =
 #ifdef __APPLE__
 
   "  .comm   __afl_area_ptr, 8\n"
-#ifndef COVERAGE_ONLY
+#  ifndef COVERAGE_ONLY
   "  .comm   __afl_prev_loc, 8\n"
-#endif /* !COVERAGE_ONLY */
+#  endif /* !COVERAGE_ONLY */
   "  .comm   __afl_fork_pid, 4\n"
   "  .comm   __afl_temp, 4\n"
   "  .comm   __afl_setup_failure, 1\n"
@@ -747,9 +747,9 @@ static const u8* main_payload_64 =
 #else
 
   "  .lcomm   __afl_area_ptr, 8\n"
-#ifndef COVERAGE_ONLY
+#  ifndef COVERAGE_ONLY
   "  .lcomm   __afl_prev_loc, 8\n"
-#endif /* !COVERAGE_ONLY */
+#  endif /* !COVERAGE_ONLY */
   "  .lcomm   __afl_fork_pid, 4\n"
   "  .lcomm   __afl_temp, 4\n"
   "  .lcomm   __afl_setup_failure, 1\n"
@@ -765,3 +765,4 @@ static const u8* main_payload_64 =
   "\n";
 
 #endif /* !_HAVE_AFL_AS_H */
+
diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h
new file mode 100644
index 00000000..b5c5afaf
--- /dev/null
+++ b/include/afl-fuzz.h
@@ -0,0 +1,639 @@
+/*
+   american fuzzy lop - fuzzer code
+   --------------------------------
+
+   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+   Forkserver design by Jann Horn <jannhorn@googlemail.com>
+
+   Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This is the real deal: the program takes an instrumented binary and
+   attempts a variety of basic fuzzing tricks, paying close attention to
+   how they affect the execution path.
+
+ */
+
+#ifndef _AFL_FUZZ_H
+#define _AFL_FUZZ_H
+
+#define AFL_MAIN
+#define MESSAGES_TO_STDOUT
+
+#ifndef _GNU_SOURCE
+#  define _GNU_SOURCE
+#endif
+#define _FILE_OFFSET_BITS 64
+
+#ifdef __ANDROID__
+#  include "android-ashmem.h"
+#endif
+
+#include "config.h"
+#include "types.h"
+#include "debug.h"
+#include "alloc-inl.h"
+#include "hash.h"
+#include "sharedmem.h"
+#include "forkserver.h"
+#include "common.h"
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <errno.h>
+#include <signal.h>
+#include <dirent.h>
+#include <ctype.h>
+#include <fcntl.h>
+#include <termios.h>
+#include <dlfcn.h>
+#include <sched.h>
+
+#include <sys/wait.h>
+#include <sys/time.h>
+#include <sys/shm.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <sys/file.h>
+
+#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+#  include <sys/sysctl.h>
+#  define HAVE_ARC4RANDOM 1
+#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */
+
+/* For systems that have sched_setaffinity; right now just Linux, but one
+   can hope... */
+
+#ifdef __linux__
+#  define HAVE_AFFINITY 1
+#endif /* __linux__ */
+
+#ifndef SIMPLE_FILES
+#  define CASE_PREFIX "id:"
+#else
+#  define CASE_PREFIX "id_"
+#endif /* ^!SIMPLE_FILES */
+
+struct queue_entry {
+
+  u8* fname;                            /* File name for the test case      */
+  u32 len;                              /* Input length                     */
+
+  u8 cal_failed,                        /* Calibration failed?              */
+      trim_done,                        /* Trimmed?                         */
+      was_fuzzed,                       /* historical, but needed for MOpt  */
+      passed_det,                       /* Deterministic stages passed?     */
+      has_new_cov,                      /* Triggers new coverage?           */
+      var_behavior,                     /* Variable behavior?               */
+      favored,                          /* Currently favored?               */
+      fs_redundant;                     /* Marked as redundant in the fs?   */
+
+  u32 bitmap_size,                      /* Number of bits set in bitmap     */
+      fuzz_level,                       /* Number of fuzzing iterations     */
+      exec_cksum;                       /* Checksum of the execution trace  */
+
+  u64 exec_us,                          /* Execution time (us)              */
+      handicap,                         /* Number of queue cycles behind    */
+      n_fuzz,                           /* Fuzz count; does not overflow    */
+      depth;                            /* Path depth                       */
+
+  u8* trace_mini;                       /* Trace bytes, if kept             */
+  u32 tc_ref;                           /* Trace bytes ref count            */
+
+  struct queue_entry *next,             /* Next element, if any             */
+      *next_100;                        /* 100 elements ahead               */
+
+};
+
+struct extra_data {
+
+  u8* data;                             /* Dictionary token data            */
+  u32 len;                              /* Dictionary token length          */
+  u32 hit_cnt;                          /* Use count in the corpus          */
+
+};
+
+/* Fuzzing stages */
+
+enum {
+
+  /* 00 */ STAGE_FLIP1,
+  /* 01 */ STAGE_FLIP2,
+  /* 02 */ STAGE_FLIP4,
+  /* 03 */ STAGE_FLIP8,
+  /* 04 */ STAGE_FLIP16,
+  /* 05 */ STAGE_FLIP32,
+  /* 06 */ STAGE_ARITH8,
+  /* 07 */ STAGE_ARITH16,
+  /* 08 */ STAGE_ARITH32,
+  /* 09 */ STAGE_INTEREST8,
+  /* 10 */ STAGE_INTEREST16,
+  /* 11 */ STAGE_INTEREST32,
+  /* 12 */ STAGE_EXTRAS_UO,
+  /* 13 */ STAGE_EXTRAS_UI,
+  /* 14 */ STAGE_EXTRAS_AO,
+  /* 15 */ STAGE_HAVOC,
+  /* 16 */ STAGE_SPLICE,
+  /* 17 */ STAGE_PYTHON,
+  /* 18 */ STAGE_CUSTOM_MUTATOR
+
+};
+
+/* Stage value types */
+
+enum {
+
+  /* 00 */ STAGE_VAL_NONE,
+  /* 01 */ STAGE_VAL_LE,
+  /* 02 */ STAGE_VAL_BE
+
+};
+
+/* Execution status fault codes */
+
+enum {
+
+  /* 00 */ FAULT_NONE,
+  /* 01 */ FAULT_TMOUT,
+  /* 02 */ FAULT_CRASH,
+  /* 03 */ FAULT_ERROR,
+  /* 04 */ FAULT_NOINST,
+  /* 05 */ FAULT_NOBITS
+
+};
+
+/* MOpt:
+   Lots of globals, but mostly for the status UI and other things where it
+   really makes no sense to haul them around as function parameters. */
+extern u64 limit_time_puppet, orig_hit_cnt_puppet, last_limit_time_start,
+    tmp_pilot_time, total_pacemaker_time, total_puppet_find, temp_puppet_find,
+    most_time_key, most_time, most_execs_key, most_execs, old_hit_count;
+
+extern s32 SPLICE_CYCLES_puppet, limit_time_sig, key_puppet, key_module;
+
+extern double w_init, w_end, w_now;
+
+extern s32 g_now;
+extern s32 g_max;
+
+#define operator_num 16
+#define swarm_num 5
+#define period_core 500000
+
+extern u64 tmp_core_time;
+extern s32 swarm_now;
+
+extern double x_now[swarm_num][operator_num], L_best[swarm_num][operator_num],
+    eff_best[swarm_num][operator_num], G_best[operator_num],
+    v_now[swarm_num][operator_num], probability_now[swarm_num][operator_num],
+    swarm_fitness[swarm_num];
+
+extern u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per
+                                                           fuzz stage    */
+    stage_finds_puppet_v2[swarm_num][operator_num],
+    stage_cycles_puppet_v2[swarm_num][operator_num],
+    stage_cycles_puppet_v3[swarm_num][operator_num],
+    stage_cycles_puppet[swarm_num][operator_num],
+    operator_finds_puppet[operator_num],
+    core_operator_finds_puppet[operator_num],
+    core_operator_finds_puppet_v2[operator_num],
+    core_operator_cycles_puppet[operator_num],
+    core_operator_cycles_puppet_v2[operator_num],
+    core_operator_cycles_puppet_v3[operator_num];   /* Execs per fuzz stage */
+
+#define RAND_C (rand() % 1000 * 0.001)
+#define v_max 1
+#define v_min 0.05
+#define limit_time_bound 1.1
+#define SPLICE_CYCLES_puppet_up 25
+#define SPLICE_CYCLES_puppet_low 5
+#define STAGE_RANDOMBYTE 12
+#define STAGE_DELETEBYTE 13
+#define STAGE_Clone75 14
+#define STAGE_OverWrite75 15
+#define period_pilot 50000
+
+extern double period_pilot_tmp;
+extern s32    key_lv;
+
+extern u8 *in_dir,                      /* Input directory with test cases  */
+    *out_dir,                           /* Working & output directory       */
+    *tmp_dir,                           /* Temporary directory for input    */
+    *sync_dir,                          /* Synchronization directory        */
+    *sync_id,                           /* Fuzzer ID                        */
+    *power_name,                        /* Power schedule name              */
+    *use_banner,                        /* Display banner                   */
+    *in_bitmap,                         /* Input bitmap                     */
+    *file_extension,                    /* File extension                   */
+    *orig_cmdline,                      /* Original command line            */
+    *doc_path,                          /* Path to documentation dir        */
+    *target_path,                       /* Path to target binary            */
+    *out_file;                          /* File to fuzz, if any             */
+
+extern u32 exec_tmout;                  /* Configurable exec timeout (ms)   */
+extern u32 hang_tmout;                  /* Timeout used for hang det (ms)   */
+
+extern u64 mem_limit;                   /* Memory cap for child (MB)        */
+
+extern u8 cal_cycles,                   /* Calibration cycles defaults      */
+    cal_cycles_long, debug,             /* Debug mode                       */
+    python_only;                        /* Python-only mode                 */
+
+extern u32 stats_update_freq;           /* Stats update frequency (execs)   */
+
+enum {
+
+  /* 00 */ EXPLORE, /* AFL default, Exploration-based constant schedule */
+  /* 01 */ FAST,    /* Exponential schedule             */
+  /* 02 */ COE,     /* Cut-Off Exponential schedule     */
+  /* 03 */ LIN,     /* Linear schedule                  */
+  /* 04 */ QUAD,    /* Quadratic schedule               */
+  /* 05 */ EXPLOIT, /* AFL's exploitation-based const.  */
+
+  POWER_SCHEDULES_NUM
+
+};
+
+extern char* power_names[POWER_SCHEDULES_NUM];
+
+extern u8 schedule;                     /* Power schedule (default: EXPLORE)*/
+extern u8 havoc_max_mult;
+
+extern u8 skip_deterministic,           /* Skip deterministic stages?       */
+    force_deterministic,                /* Force deterministic stages?      */
+    use_splicing,                       /* Recombine input files?           */
+    dumb_mode,                          /* Run in non-instrumented mode?    */
+    score_changed,                      /* Scoring for favorites changed?   */
+    kill_signal,                        /* Signal that killed the child     */
+    resuming_fuzz,                      /* Resuming an older fuzzing job?   */
+    timeout_given,                      /* Specific timeout given?          */
+    not_on_tty,                         /* stdout is not a tty              */
+    term_too_small,                     /* terminal dimensions too small    */
+    no_forkserver,                      /* Disable forkserver?              */
+    crash_mode,                         /* Crash mode! Yeah!                */
+    in_place_resume,                    /* Attempt in-place resume?         */
+    auto_changed,                       /* Auto-generated tokens changed?   */
+    no_cpu_meter_red,                   /* Feng shui on the status screen   */
+    no_arith,                           /* Skip most arithmetic ops         */
+    shuffle_queue,                      /* Shuffle input queue?             */
+    bitmap_changed,                     /* Time to update bitmap?           */
+    qemu_mode,                          /* Running in QEMU mode?            */
+    unicorn_mode,                       /* Running in Unicorn mode?         */
+    skip_requested,                     /* Skip request, via SIGUSR1        */
+    run_over10m,                        /* Run time over 10 minutes?        */
+    persistent_mode,                    /* Running in persistent mode?      */
+    deferred_mode,                      /* Deferred forkserver mode?        */
+    fixed_seed,                         /* do not reseed                    */
+    fast_cal,                           /* Try to calibrate faster?         */
+    uses_asan;                          /* Target uses ASAN?                */
+
+extern s32 out_fd,                      /* Persistent fd for out_file       */
+#ifndef HAVE_ARC4RANDOM
+    dev_urandom_fd,                     /* Persistent fd for /dev/urandom   */
+#endif
+    dev_null_fd,                        /* Persistent fd for /dev/null      */
+    fsrv_ctl_fd,                        /* Fork server control pipe (write) */
+    fsrv_st_fd;                         /* Fork server status pipe (read)   */
+
+extern s32 forksrv_pid,                 /* PID of the fork server           */
+    child_pid,                          /* PID of the fuzzed program        */
+    out_dir_fd;                         /* FD of the lock file              */
+
+extern u8* trace_bits;                  /* SHM with instrumentation bitmap  */
+
+extern u8 virgin_bits[MAP_SIZE],        /* Regions yet untouched by fuzzing */
+    virgin_tmout[MAP_SIZE],             /* Bits we haven't seen in tmouts   */
+    virgin_crash[MAP_SIZE];             /* Bits we haven't seen in crashes  */
+
+extern u8 var_bytes[MAP_SIZE];          /* Bytes that appear to be variable */
+
+extern volatile u8 stop_soon,           /* Ctrl-C pressed?                  */
+    clear_screen,                       /* Window resized?                  */
+    child_timed_out;                    /* Traced process timed out?        */
+
+extern u32 queued_paths,                /* Total number of queued testcases */
+    queued_variable,                    /* Testcases with variable behavior */
+    queued_at_start,                    /* Total number of initial inputs   */
+    queued_discovered,                  /* Items discovered during this run */
+    queued_imported,                    /* Items imported via -S            */
+    queued_favored,                     /* Paths deemed favorable           */
+    queued_with_cov,                    /* Paths with new coverage bytes    */
+    pending_not_fuzzed,                 /* Queued but not done yet          */
+    pending_favored,                    /* Pending favored paths            */
+    cur_skipped_paths,                  /* Abandoned inputs in cur cycle    */
+    cur_depth,                          /* Current path depth               */
+    max_depth,                          /* Max path depth                   */
+    useless_at_start,                   /* Number of useless starting paths */
+    var_byte_count,                     /* Bitmap bytes with var behavior   */
+    current_entry,                      /* Current queue entry ID           */
+    havoc_div;                          /* Cycle count divisor for havoc    */
+
+extern u64 total_crashes,               /* Total number of crashes          */
+    unique_crashes,                     /* Crashes with unique signatures   */
+    total_tmouts,                       /* Total number of timeouts         */
+    unique_tmouts,                      /* Timeouts with unique signatures  */
+    unique_hangs,                       /* Hangs with unique signatures     */
+    total_execs,                        /* Total execve() calls             */
+    slowest_exec_ms,                    /* Slowest testcase non hang in ms  */
+    start_time,                         /* Unix start time (ms)             */
+    last_path_time,                     /* Time for most recent path (ms)   */
+    last_crash_time,                    /* Time for most recent crash (ms)  */
+    last_hang_time,                     /* Time for most recent hang (ms)   */
+    last_crash_execs,                   /* Exec counter at last crash       */
+    queue_cycle,                        /* Queue round counter              */
+    cycles_wo_finds,                    /* Cycles without any new paths     */
+    trim_execs,                         /* Execs done to trim input files   */
+    bytes_trim_in,                      /* Bytes coming into the trimmer    */
+    bytes_trim_out,                     /* Bytes coming outa the trimmer    */
+    blocks_eff_total,                   /* Blocks subject to effector maps  */
+    blocks_eff_select;                  /* Blocks selected as fuzzable      */
+
+extern u32 subseq_tmouts;               /* Number of timeouts in a row      */
+
+extern u8 *stage_name,                  /* Name of the current fuzz stage   */
+    *stage_short,                       /* Short stage name                 */
+    *syncing_party;                     /* Currently syncing with...        */
+
+extern s32 stage_cur, stage_max;        /* Stage progression                */
+extern s32 splicing_with;               /* Splicing with which test case?   */
+
+extern u32 master_id, master_max;       /* Master instance job splitting    */
+
+extern u32 syncing_case;                /* Syncing with case #...           */
+
+extern s32 stage_cur_byte,              /* Byte offset of current stage op  */
+    stage_cur_val;                      /* Value used for stage op          */
+
+extern u8 stage_val_type;               /* Value type (STAGE_VAL_*)         */
+
+extern u64 stage_finds[32],             /* Patterns found per fuzz stage    */
+    stage_cycles[32];                   /* Execs per fuzz stage             */
+
+#ifndef HAVE_ARC4RANDOM
+extern u32 rand_cnt;                    /* Random number counter            */
+#endif
+
+extern u64 total_cal_us,                /* Total calibration time (us)      */
+    total_cal_cycles;                   /* Total calibration cycles         */
+
+extern u64 total_bitmap_size,           /* Total bit count for all bitmaps  */
+    total_bitmap_entries;               /* Number of bitmaps counted        */
+
+extern s32 cpu_core_count;              /* CPU core count                   */
+
+#ifdef HAVE_AFFINITY
+
+extern s32 cpu_aff;                     /* Selected CPU core                */
+
+#endif /* HAVE_AFFINITY */
+
+extern FILE* plot_file;                 /* Gnuplot output file              */
+
+extern struct queue_entry *queue,       /* Fuzzing queue (linked list)      */
+    *queue_cur,                         /* Current offset within the queue  */
+    *queue_top,                         /* Top of the list                  */
+    *q_prev100;                         /* Previous 100 marker              */
+
+extern struct queue_entry*
+    top_rated[MAP_SIZE];                /* Top entries for bitmap bytes     */
+
+extern struct extra_data* extras;       /* Extra tokens to fuzz with        */
+extern u32                extras_cnt;   /* Total number of tokens read      */
+
+extern struct extra_data* a_extras;     /* Automatically selected extras    */
+extern u32                a_extras_cnt; /* Total number of tokens available */
+
+extern u8* (*post_handler)(u8* buf, u32* len);
+
+/* hooks for the custom mutator function */
+extern size_t (*custom_mutator)(u8* data, size_t size, u8* mutated_out,
+                                size_t max_size, unsigned int seed);
+extern size_t (*pre_save_handler)(u8* data, size_t size, u8** new_data);
+
+/* Interesting values, as per config.h */
+
+extern s8  interesting_8[INTERESTING_8_LEN];
+extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN];
+extern s32
+    interesting_32[INTERESTING_8_LEN + INTERESTING_16_LEN + INTERESTING_32_LEN];
+
+/* Python stuff */
+#ifdef USE_PYTHON
+
+#  include <Python.h>
+
+extern PyObject* py_module;
+
+enum {
+
+  /* 00 */ PY_FUNC_INIT,
+  /* 01 */ PY_FUNC_FUZZ,
+  /* 02 */ PY_FUNC_INIT_TRIM,
+  /* 03 */ PY_FUNC_POST_TRIM,
+  /* 04 */ PY_FUNC_TRIM,
+  PY_FUNC_COUNT
+
+};
+
+extern PyObject* py_functions[PY_FUNC_COUNT];
+
+#endif
+
+/**** Prototypes ****/
+
+/* Python */
+#ifdef USE_PYTHON
+int  init_py();
+void finalize_py();
+void fuzz_py(char*, size_t, char*, size_t, char**, size_t*);
+u32  init_trim_py(char*, size_t);
+u32  post_trim_py(char);
+void trim_py(char**, size_t*);
+u8   trim_case_python(char**, struct queue_entry*, u8*);
+#endif
+
+/* Queue */
+
+void mark_as_det_done(struct queue_entry*);
+void mark_as_variable(struct queue_entry*);
+void mark_as_redundant(struct queue_entry*, u8);
+void add_to_queue(u8*, u32, u8);
+void destroy_queue(void);
+void update_bitmap_score(struct queue_entry*);
+void cull_queue(void);
+u32  calculate_score(struct queue_entry*);
+
+/* Bitmap */
+
+void write_bitmap(void);
+void read_bitmap(u8*);
+u8   has_new_bits(u8*);
+u32  count_bits(u8*);
+u32  count_bytes(u8*);
+u32  count_non_255_bytes(u8*);
+#ifdef __x86_64__
+void simplify_trace(u64*);
+void classify_counts(u64*);
+#else
+void simplify_trace(u32*);
+void classify_counts(u32*);
+#endif
+void init_count_class16(void);
+void minimize_bits(u8*, u8*);
+#ifndef SIMPLE_FILES
+u8* describe_op(u8);
+#endif
+u8 save_if_interesting(char**, void*, u32, u8);
+
+/* Misc */
+
+u8* DI(u64);
+u8* DF(double);
+u8* DMS(u64);
+u8* DTD(u64, u64);
+
+/* Extras */
+
+void load_extras_file(u8*, u32*, u32*, u32);
+void load_extras(u8*);
+void maybe_add_auto(u8*, u32);
+void save_auto(void);
+void load_auto(void);
+void destroy_extras(void);
+
+/* Stats */
+
+void write_stats_file(double, double, double);
+void maybe_update_plot_file(double, double);
+void show_stats(void);
+void show_init_stats(void);
+
+/* Run */
+
+u8   run_target(char**, u32);
+void write_to_testcase(void*, u32);
+void write_with_gap(void*, u32, u32, u32);
+u8   calibrate_case(char**, struct queue_entry*, u8*, u32, u8);
+void sync_fuzzers(char**);
+u8   trim_case(char**, struct queue_entry*, u8*);
+u8   common_fuzz_stuff(char**, u8*, u32);
+
+/* Fuzz one */
+
+u8   fuzz_one_original(char**);
+u8   pilot_fuzzing(char**);
+u8   core_fuzzing(char**);
+void pso_updating(void);
+u8   fuzz_one(char**);
+
+/* Init */
+
+#ifdef HAVE_AFFINITY
+void bind_to_free_cpu(void);
+#endif
+void   setup_post(void);
+void   setup_custom_mutator(void);
+void   read_testcases(void);
+void   perform_dry_run(char**);
+void   pivot_inputs(void);
+u32    find_start_position(void);
+void   find_timeout(void);
+double get_runnable_processes(void);
+void   nuke_resume_dir(void);
+void   maybe_delete_out_dir(void);
+void   setup_dirs_fds(void);
+void   setup_cmdline_file(char**);
+void   setup_stdio_file(void);
+void   check_crash_handling(void);
+void   check_cpu_governor(void);
+void   get_core_count(void);
+void   fix_up_sync(void);
+void   check_asan_opts(void);
+void   check_binary(u8*);
+void   fix_up_banner(u8*);
+void   check_if_tty(void);
+void   setup_signal_handlers(void);
+char** get_qemu_argv(u8*, char**, int);
+void   save_cmdline(u32, char**);
+
+/**** Inline routines ****/
+
+/* Generate a random number (from 0 to limit - 1). This may
+   have slight bias. */
+
+static inline u32 UR(u32 limit) {
+
+#ifdef HAVE_ARC4RANDOM
+  if (fixed_seed) { return random() % limit; }
+
+  /* Since the boundary is not necessarily a power of 2, we rely on
+     arc4random_uniform() to keep the result uniform. */
+  return arc4random_uniform(limit);
+#else
+  if (!fixed_seed && unlikely(!rand_cnt--)) {
+
+    u32 seed[2];
+
+    ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom");
+    srandom(seed[0]);
+    rand_cnt = (RESEED_RNG / 2) + (seed[1] % RESEED_RNG);
+
+  }
+
+  return random() % limit;
+#endif
+
+}
+
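+/* As a worked example of the reseed cadence above (assuming the default
+   RESEED_RNG of 10000 from config.h): rand_cnt is set to a value between
+   RESEED_RNG / 2 = 5000 and RESEED_RNG / 2 + (RESEED_RNG - 1) = 14999, so
+   the libc PRNG is refreshed from /dev/urandom every 5k-15k UR() calls. */
+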
+/* Find the first power of two greater than or equal to val (assuming val
+   is under 2^63). */
+
+static u64 next_p2(u64 val) {
+
+  u64 ret = 1;
+  while (val > ret)
+    ret <<= 1;
+  return ret;
+
+}
+
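+/* For instance, next_p2(1) == 1, next_p2(5) == 8 and next_p2(4096) == 4096:
+   ret doubles until it reaches or passes val. */
+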
+/* Get unix time in milliseconds */
+
+static u64 get_cur_time(void) {
+
+  struct timeval  tv;
+  struct timezone tz;
+
+  gettimeofday(&tv, &tz);
+
+  return (tv.tv_sec * 1000ULL) + (tv.tv_usec / 1000);
+
+}
+
+/* Get unix time in microseconds */
+
+static u64 get_cur_time_us(void) {
+
+  struct timeval  tv;
+  struct timezone tz;
+
+  gettimeofday(&tv, &tz);
+
+  return (tv.tv_sec * 1000000ULL) + tv.tv_usec;
+
+}
+
+#endif
+
diff --git a/alloc-inl.h b/include/alloc-inl.h
index 04f56d0d..302d15b6 100644
--- a/alloc-inl.h
+++ b/include/alloc-inl.h
@@ -31,88 +31,117 @@
 
 /* User-facing macro to sprintf() to a dynamically allocated buffer. */
 
-#define alloc_printf(_str...) ({ \
-    u8* _tmp; \
-    s32 _len = snprintf(NULL, 0, _str); \
+#define alloc_printf(_str...)                        \
+  ({                                                 \
+                                                     \
+    u8* _tmp;                                        \
+    s32 _len = snprintf(NULL, 0, _str);              \
     if (_len < 0) FATAL("Whoa, snprintf() fails?!"); \
-    _tmp = ck_alloc(_len + 1); \
-    snprintf((char*)_tmp, _len + 1, _str); \
-    _tmp; \
+    _tmp = ck_alloc(_len + 1);                       \
+    snprintf((char*)_tmp, _len + 1, _str);           \
+    _tmp;                                            \
+                                                     \
   })
 
 /* Macro to enforce allocation limits as a last-resort defense against
    integer overflows. */
 
-#define ALLOC_CHECK_SIZE(_s) do { \
-    if ((_s) > MAX_ALLOC) \
-      ABORT("Bad alloc request: %u bytes", (_s)); \
+#define ALLOC_CHECK_SIZE(_s)                                          \
+  do {                                                                \
+                                                                      \
+    if ((_s) > MAX_ALLOC) ABORT("Bad alloc request: %u bytes", (_s)); \
+                                                                      \
   } while (0)
 
 /* Macro to check malloc() failures and the like. */
 
-#define ALLOC_CHECK_RESULT(_r, _s) do { \
-    if (!(_r)) \
-      ABORT("Out of memory: can't allocate %u bytes", (_s)); \
+#define ALLOC_CHECK_RESULT(_r, _s)                                    \
+  do {                                                                \
+                                                                      \
+    if (!(_r)) ABORT("Out of memory: can't allocate %u bytes", (_s)); \
+                                                                      \
   } while (0)
 
 /* Magic tokens used to mark used / freed chunks. */
 
-#define ALLOC_MAGIC_C1  0xFF00FF00 /* Used head (dword)  */
-#define ALLOC_MAGIC_F   0xFE00FE00 /* Freed head (dword) */
-#define ALLOC_MAGIC_C2  0xF0       /* Used tail (byte)   */
+#define ALLOC_MAGIC_C1 0xFF00FF00 /* Used head (dword)  */
+#define ALLOC_MAGIC_F 0xFE00FE00  /* Freed head (dword) */
+#define ALLOC_MAGIC_C2 0xF0       /* Used tail (byte)   */
 
 /* Positions of guard tokens in relation to the user-visible pointer. */
 
-#define ALLOC_C1(_ptr)  (((u32*)(_ptr))[-2])
-#define ALLOC_S(_ptr)   (((u32*)(_ptr))[-1])
-#define ALLOC_C2(_ptr)  (((u8*)(_ptr))[ALLOC_S(_ptr)])
+#define ALLOC_C1(_ptr) (((u32*)(_ptr))[-2])
+#define ALLOC_S(_ptr) (((u32*)(_ptr))[-1])
+#define ALLOC_C2(_ptr) (((u8*)(_ptr))[ALLOC_S(_ptr)])
 
-#define ALLOC_OFF_HEAD  8
+#define ALLOC_OFF_HEAD 8
 #define ALLOC_OFF_TOTAL (ALLOC_OFF_HEAD + 1)
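+
+/* Taken together, a ck_alloc() chunk is laid out as follows (a sketch;
+   offsets are in bytes relative to the user-visible pointer, s = size):
+
+     ptr[-8..-5]   u32  ALLOC_MAGIC_C1  (head canary)
+     ptr[-4..-1]   u32  s               (ALLOC_S)
+     ptr[0..s-1]        user data
+     ptr[s]        u8   ALLOC_MAGIC_C2  (tail canary)
+
+   hence ALLOC_OFF_HEAD == 8 and ALLOC_OFF_TOTAL == 9. */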
 
 /* Allocator increments for ck_realloc_block(). */
 
-#define ALLOC_BLK_INC    256
+#define ALLOC_BLK_INC 256
 
 /* Sanity-checking macros for pointers. */
 
-#define CHECK_PTR(_p) do { \
-    if (_p) { \
-      if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) {\
-        if (ALLOC_C1(_p) == ALLOC_MAGIC_F) \
-          ABORT("Use after free."); \
-        else ABORT("Corrupted head alloc canary."); \
-      } \
-   } \
+#define CHECK_PTR(_p)                            \
+  do {                                           \
+                                                 \
+    if (_p) {                                    \
+                                                 \
+      if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) {       \
+                                                 \
+        if (ALLOC_C1(_p) == ALLOC_MAGIC_F)       \
+          ABORT("Use after free.");              \
+        else                                     \
+          ABORT("Corrupted head alloc canary."); \
+                                                 \
+      }                                          \
+                                                 \
+    }                                            \
+                                                 \
   } while (0)
 
 /*
 #define CHECK_PTR(_p) do { \
+                           \
+                           \
     if (_p) { \
+              \
+              \
       if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) {\
+                                          \
+                                          \
         if (ALLOC_C1(_p) == ALLOC_MAGIC_F) \
           ABORT("Use after free."); \
         else ABORT("Corrupted head alloc canary."); \
+                                                    \
       } \
+        \
       if (ALLOC_C2(_p) ^ ALLOC_MAGIC_C2) \
         ABORT("Corrupted tail alloc canary."); \
+                                               \
     } \
+      \
+      \
+      \
   } while (0)
 */
 
-#define CHECK_PTR_EXPR(_p) ({ \
-    typeof (_p) _tmp = (_p); \
-    CHECK_PTR(_tmp); \
-    _tmp; \
+#define CHECK_PTR_EXPR(_p)  \
+  ({                        \
+                            \
+    typeof(_p) _tmp = (_p); \
+    CHECK_PTR(_tmp);        \
+    _tmp;                   \
+                            \
   })
 
-
 /* Allocate a buffer, explicitly not zeroing it. Returns NULL for zero-sized
    requests. */
 
 static inline void* DFL_ck_alloc_nozero(u32 size) {
 
-  void* ret;
+  u8* ret;
 
   if (!size) return NULL;
 
@@ -123,14 +152,13 @@ static inline void* DFL_ck_alloc_nozero(u32 size) {
   ret += ALLOC_OFF_HEAD;
 
   ALLOC_C1(ret) = ALLOC_MAGIC_C1;
-  ALLOC_S(ret)  = size;
+  ALLOC_S(ret) = size;
   ALLOC_C2(ret) = ALLOC_MAGIC_C2;
 
-  return ret;
+  return (void*)ret;
 
 }
 
-
 /* Allocate a buffer, returning zeroed memory. */
 
 static inline void* DFL_ck_alloc(u32 size) {
@@ -144,7 +172,6 @@ static inline void* DFL_ck_alloc(u32 size) {
 
 }
 
-
 /* Free memory, checking for double free and corrupted heap. When DEBUG_BUILD
    is set, the old memory will be also clobbered with 0xFF. */
 
@@ -163,19 +190,19 @@ static inline void DFL_ck_free(void* mem) {
 
   ALLOC_C1(mem) = ALLOC_MAGIC_F;
 
-  free(mem - ALLOC_OFF_HEAD);
+  u8* realStart = mem;
+  free(realStart - ALLOC_OFF_HEAD);
 
 }
 
-
 /* Re-allocate a buffer, checking for issues and zeroing any newly-added tail.
    With DEBUG_BUILD, the buffer is always reallocated to a new addresses and the
    old memory is clobbered with 0xFF. */
 
 static inline void* DFL_ck_realloc(void* orig, u32 size) {
 
-  void* ret;
-  u32   old_size = 0;
+  u8* ret;
+  u32 old_size = 0;
 
   if (!size) {
 
@@ -192,8 +219,10 @@ static inline void* DFL_ck_realloc(void* orig, u32 size) {
     ALLOC_C1(orig) = ALLOC_MAGIC_F;
 #endif /* !DEBUG_BUILD */
 
-    old_size  = ALLOC_S(orig);
-    orig     -= ALLOC_OFF_HEAD;
+    old_size = ALLOC_S(orig);
+    u8* origu8 = orig;
+    origu8 -= ALLOC_OFF_HEAD;
+    orig = origu8;
 
     ALLOC_CHECK_SIZE(old_size);
 
@@ -216,10 +245,11 @@ static inline void* DFL_ck_realloc(void* orig, u32 size) {
 
   if (orig) {
 
-    memcpy(ret + ALLOC_OFF_HEAD, orig + ALLOC_OFF_HEAD, MIN(size, old_size));
-    memset(orig + ALLOC_OFF_HEAD, 0xFF, old_size);
+    u8* origu8 = orig;
+    memcpy(ret + ALLOC_OFF_HEAD, origu8 + ALLOC_OFF_HEAD, MIN(size, old_size));
+    memset(origu8 + ALLOC_OFF_HEAD, 0xFF, old_size);
 
-    ALLOC_C1(orig + ALLOC_OFF_HEAD) = ALLOC_MAGIC_F;
+    ALLOC_C1(origu8 + ALLOC_OFF_HEAD) = ALLOC_MAGIC_F;
 
     free(orig);
 
@@ -230,17 +260,15 @@ static inline void* DFL_ck_realloc(void* orig, u32 size) {
   ret += ALLOC_OFF_HEAD;
 
   ALLOC_C1(ret) = ALLOC_MAGIC_C1;
-  ALLOC_S(ret)  = size;
+  ALLOC_S(ret) = size;
   ALLOC_C2(ret) = ALLOC_MAGIC_C2;
 
-  if (size > old_size)
-    memset(ret + old_size, 0, size - old_size);
+  if (size > old_size) memset(ret + old_size, 0, size - old_size);
 
-  return ret;
+  return (void*)ret;
 
 }
 
-
 /* Re-allocate a buffer with ALLOC_BLK_INC increments (used to speed up
    repeated small reallocs without complicating the user code). */
 
@@ -264,13 +292,12 @@ static inline void* DFL_ck_realloc_block(void* orig, u32 size) {
 
 }
 
-
 /* Create a buffer with a copy of a string. Returns NULL for NULL inputs. */
 
 static inline u8* DFL_ck_strdup(u8* str) {
 
-  void* ret;
-  u32   size;
+  u8* ret;
+  u32 size;
 
   if (!str) return NULL;
 
@@ -283,38 +310,36 @@ static inline u8* DFL_ck_strdup(u8* str) {
   ret += ALLOC_OFF_HEAD;
 
   ALLOC_C1(ret) = ALLOC_MAGIC_C1;
-  ALLOC_S(ret)  = size;
+  ALLOC_S(ret) = size;
   ALLOC_C2(ret) = ALLOC_MAGIC_C2;
 
   return memcpy(ret, str, size);
 
 }
 
-
 /* Create a buffer with a copy of a memory block. Returns NULL for zero-sized
    or NULL inputs. */
 
 static inline void* DFL_ck_memdup(void* mem, u32 size) {
 
-  void* ret;
+  u8* ret;
 
   if (!mem || !size) return NULL;
 
   ALLOC_CHECK_SIZE(size);
   ret = malloc(size + ALLOC_OFF_TOTAL);
   ALLOC_CHECK_RESULT(ret, size);
-  
+
   ret += ALLOC_OFF_HEAD;
 
   ALLOC_C1(ret) = ALLOC_MAGIC_C1;
-  ALLOC_S(ret)  = size;
+  ALLOC_S(ret) = size;
   ALLOC_C2(ret) = ALLOC_MAGIC_C2;
 
   return memcpy(ret, mem, size);
 
 }
 
-
 /* Create a buffer with a block of text, appending a NUL terminator at the end.
    Returns NULL for zero-sized or NULL inputs. */
 
@@ -327,11 +352,11 @@ static inline u8* DFL_ck_memdup_str(u8* mem, u32 size) {
   ALLOC_CHECK_SIZE(size);
   ret = malloc(size + ALLOC_OFF_TOTAL + 1);
   ALLOC_CHECK_RESULT(ret, size);
-  
+
   ret += ALLOC_OFF_HEAD;
 
   ALLOC_C1(ret) = ALLOC_MAGIC_C1;
-  ALLOC_S(ret)  = size;
+  ALLOC_S(ret) = size;
   ALLOC_C2(ret) = ALLOC_MAGIC_C2;
 
   memcpy(ret, mem, size);
@@ -341,22 +366,21 @@ static inline u8* DFL_ck_memdup_str(u8* mem, u32 size) {
 
 }
 
-
 #ifndef DEBUG_BUILD
 
 /* In non-debug mode, we just do straightforward aliasing of the above functions
    to user-visible names such as ck_alloc(). */
 
-#define ck_alloc          DFL_ck_alloc
-#define ck_alloc_nozero   DFL_ck_alloc_nozero
-#define ck_realloc        DFL_ck_realloc
-#define ck_realloc_block  DFL_ck_realloc_block
-#define ck_strdup         DFL_ck_strdup
-#define ck_memdup         DFL_ck_memdup
-#define ck_memdup_str     DFL_ck_memdup_str
-#define ck_free           DFL_ck_free
+#  define ck_alloc DFL_ck_alloc
+#  define ck_alloc_nozero DFL_ck_alloc_nozero
+#  define ck_realloc DFL_ck_realloc
+#  define ck_realloc_block DFL_ck_realloc_block
+#  define ck_strdup DFL_ck_strdup
+#  define ck_memdup DFL_ck_memdup
+#  define ck_memdup_str DFL_ck_memdup_str
+#  define ck_free DFL_ck_free
 
-#define alloc_report()
+#  define alloc_report()
 
 #else
 
@@ -365,34 +389,35 @@ static inline u8* DFL_ck_memdup_str(u8* mem, u32 size) {
 
 /* Alloc tracking data structures: */
 
-#define ALLOC_BUCKETS     4096
+#  define ALLOC_BUCKETS 4096
 
 struct TRK_obj {
-  void *ptr;
+
+  void* ptr;
   char *file, *func;
-  u32  line;
+  u32 line;
+
 };
 
-#ifdef AFL_MAIN
+#  ifdef AFL_MAIN
 
 struct TRK_obj* TRK[ALLOC_BUCKETS];
 u32 TRK_cnt[ALLOC_BUCKETS];
 
-#  define alloc_report() TRK_report()
+#    define alloc_report() TRK_report()
 
-#else
+#  else
 
 extern struct TRK_obj* TRK[ALLOC_BUCKETS];
-extern u32 TRK_cnt[ALLOC_BUCKETS];
+extern u32             TRK_cnt[ALLOC_BUCKETS];
 
-#  define alloc_report()
+#    define alloc_report()
 
-#endif /* ^AFL_MAIN */
+#  endif /* ^AFL_MAIN */
 
 /* Bucket-assigning function for a given pointer: */
 
-#define TRKH(_ptr) (((((u32)(_ptr)) >> 16) ^ ((u32)(_ptr))) % ALLOC_BUCKETS)
-
+#  define TRKH(_ptr) (((((u32)(_ptr)) >> 16) ^ ((u32)(_ptr))) % ALLOC_BUCKETS)
 
 /* Add a new entry to the list of allocated objects. */
 
@@ -411,7 +436,7 @@ static inline void TRK_alloc_buf(void* ptr, const char* file, const char* func,
 
     if (!TRK[bucket][i].ptr) {
 
-      TRK[bucket][i].ptr  = ptr;
+      TRK[bucket][i].ptr = ptr;
       TRK[bucket][i].file = (char*)file;
       TRK[bucket][i].func = (char*)func;
       TRK[bucket][i].line = line;
@@ -421,10 +446,10 @@ static inline void TRK_alloc_buf(void* ptr, const char* file, const char* func,
 
   /* No space available - allocate more. */
 
-  TRK[bucket] = DFL_ck_realloc_block(TRK[bucket],
-    (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj));
+  TRK[bucket] = DFL_ck_realloc_block(
+      TRK[bucket], (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj));
 
-  TRK[bucket][i].ptr  = ptr;
+  TRK[bucket][i].ptr = ptr;
   TRK[bucket][i].file = (char*)file;
   TRK[bucket][i].func = (char*)func;
   TRK[bucket][i].line = line;
@@ -433,7 +458,6 @@ static inline void TRK_alloc_buf(void* ptr, const char* file, const char* func,
 
 }
 
-
 /* Remove entry from the list of allocated objects. */
 
 static inline void TRK_free_buf(void* ptr, const char* file, const char* func,
@@ -456,12 +480,11 @@ static inline void TRK_free_buf(void* ptr, const char* file, const char* func,
 
     }
 
-  WARNF("ALLOC: Attempt to free non-allocated memory in %s (%s:%u)",
-        func, file, line);
+  WARNF("ALLOC: Attempt to free non-allocated memory in %s (%s:%u)", func, file,
+        line);
 
 }
 
-
 /* Do a final report on all non-deallocated objects. */
 
 static inline void TRK_report(void) {
@@ -478,7 +501,6 @@ static inline void TRK_report(void) {
 
 }
 
-
 /* Simple wrappers for non-debugging functions: */
 
 static inline void* TRK_ck_alloc(u32 size, const char* file, const char* func,
@@ -490,7 +512,6 @@ static inline void* TRK_ck_alloc(u32 size, const char* file, const char* func,
 
 }
 
-
 static inline void* TRK_ck_realloc(void* orig, u32 size, const char* file,
                                    const char* func, u32 line) {
 
@@ -501,7 +522,6 @@ static inline void* TRK_ck_realloc(void* orig, u32 size, const char* file,
 
 }
 
-
 static inline void* TRK_ck_realloc_block(void* orig, u32 size, const char* file,
                                          const char* func, u32 line) {
 
@@ -512,7 +532,6 @@ static inline void* TRK_ck_realloc_block(void* orig, u32 size, const char* file,
 
 }
 
-
 static inline void* TRK_ck_strdup(u8* str, const char* file, const char* func,
                                   u32 line) {
 
@@ -522,7 +541,6 @@ static inline void* TRK_ck_strdup(u8* str, const char* file, const char* func,
 
 }
 
-
 static inline void* TRK_ck_memdup(void* mem, u32 size, const char* file,
                                   const char* func, u32 line) {
 
@@ -532,7 +550,6 @@ static inline void* TRK_ck_memdup(void* mem, u32 size, const char* file,
 
 }
 
-
 static inline void* TRK_ck_memdup_str(void* mem, u32 size, const char* file,
                                       const char* func, u32 line) {
 
@@ -542,9 +559,8 @@ static inline void* TRK_ck_memdup_str(void* mem, u32 size, const char* file,
 
 }
 
-
-static inline void TRK_ck_free(void* ptr, const char* file,
-                                const char* func, u32 line) {
+static inline void TRK_ck_free(void* ptr, const char* file, const char* func,
+                               u32 line) {
 
   TRK_free_buf(ptr, file, func, line);
   DFL_ck_free(ptr);
@@ -553,30 +569,27 @@ static inline void TRK_ck_free(void* ptr, const char* file,
 
 /* Aliasing user-facing names to tracking functions: */
 
-#define ck_alloc(_p1) \
-  TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__)
+#  define ck_alloc(_p1) TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__)
 
-#define ck_alloc_nozero(_p1) \
-  TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__)
+#  define ck_alloc_nozero(_p1) \
+  TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__)
 
-#define ck_realloc(_p1, _p2) \
+#  define ck_realloc(_p1, _p2) \
   TRK_ck_realloc(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
 
-#define ck_realloc_block(_p1, _p2) \
+#  define ck_realloc_block(_p1, _p2) \
   TRK_ck_realloc_block(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
 
-#define ck_strdup(_p1) \
-  TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__)
+#  define ck_strdup(_p1) TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__)
 
-#define ck_memdup(_p1, _p2) \
+#  define ck_memdup(_p1, _p2) \
   TRK_ck_memdup(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
 
-#define ck_memdup_str(_p1, _p2) \
+#  define ck_memdup_str(_p1, _p2) \
   TRK_ck_memdup_str(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
 
-#define ck_free(_p1) \
-  TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__)
+#  define ck_free(_p1) TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__)
 
 #endif /* ^!DEBUG_BUILD */
 
 #endif /* ! _HAVE_ALLOC_INL_H */
+
diff --git a/include/android-ashmem.h b/include/android-ashmem.h
new file mode 100644
index 00000000..a4b5bf30
--- /dev/null
+++ b/include/android-ashmem.h
@@ -0,0 +1,80 @@
+#ifndef _ANDROID_ASHMEM_H
+#define _ANDROID_ASHMEM_H
+
+#include <fcntl.h>
+#include <linux/shm.h>
+#include <linux/ashmem.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#if __ANDROID_API__ >= 26
+#  define shmat bionic_shmat
+#  define shmctl bionic_shmctl
+#  define shmdt bionic_shmdt
+#  define shmget bionic_shmget
+#endif
+#include <sys/shm.h>
+#undef shmat
+#undef shmctl
+#undef shmdt
+#undef shmget
+#include <stdio.h>
+
+#define ASHMEM_DEVICE "/dev/ashmem"
+
+static inline int shmctl(int __shmid, int __cmd, struct shmid_ds *__buf) {
+
+  int ret = 0;
+  if (__cmd == IPC_RMID) {
+
+    int               length = ioctl(__shmid, ASHMEM_GET_SIZE, NULL);
+    struct ashmem_pin pin = {0, length};
+    ret = ioctl(__shmid, ASHMEM_UNPIN, &pin);
+    close(__shmid);
+
+  }
+
+  return ret;
+
+}
+
+static inline int shmget(key_t __key, size_t __size, int __shmflg) {
+
+  int  fd, ret;
+  char ourkey[12];                      /* fits INT_MIN in decimal plus NUL */
+
+  fd = open(ASHMEM_DEVICE, O_RDWR);
+  if (fd < 0) return fd;
+
+  sprintf(ourkey, "%d", __key);
+  ret = ioctl(fd, ASHMEM_SET_NAME, ourkey);
+  if (ret < 0) goto error;
+
+  ret = ioctl(fd, ASHMEM_SET_SIZE, __size);
+  if (ret < 0) goto error;
+
+  return fd;
+
+error:
+  close(fd);
+  return ret;
+
+}
+
+static inline void *shmat(int __shmid, const void *__shmaddr, int __shmflg) {
+
+  int   size;
+  void *ptr;
+
+  size = ioctl(__shmid, ASHMEM_GET_SIZE, NULL);
+  if (size < 0) { return NULL; }
+
+  ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, __shmid, 0);
+  if (ptr == MAP_FAILED) { return NULL; }
+
+  return ptr;
+
+}
+
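+/* Call sites look exactly like regular SysV shared memory; afl's setup is
+   roughly the following sketch (MAP_SIZE comes from config.h):
+
+     int   shm_id = shmget(IPC_PRIVATE, MAP_SIZE, IPC_CREAT | IPC_EXCL | 0600);
+     void* trace  = shmat(shm_id, NULL, 0);
+
+   except that the returned id is really an ashmem fd under the hood. */
+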
+#endif
+
diff --git a/afl-common.h b/include/common.h
index 161caa39..9845c2af 100644
--- a/afl-common.h
+++ b/include/common.h
@@ -4,3 +4,4 @@
 
 void detect_file_args(char **argv, u8 *prog_in);
 #endif
+
diff --git a/include/config.h b/include/config.h
new file mode 100644
index 00000000..babba3bd
--- /dev/null
+++ b/include/config.h
@@ -0,0 +1,370 @@
+/*
+   american fuzzy lop plus plus - vaguely configurable bits
+   ----------------------------------------------
+
+   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+   Copyright 2013, 2014, 2015, 2016 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ */
+
+#ifndef _HAVE_CONFIG_H
+#define _HAVE_CONFIG_H
+
+#include "types.h"
+
+/* Version string: */
+
+#define VERSION "++2.53d"  // c = release, d = volatile github dev
+
+/******************************************************
+ *                                                    *
+ *  Settings that may be of interest to power users:  *
+ *                                                    *
+ ******************************************************/
+
+/* Comment out to disable terminal colors (note that this makes afl-analyze
+   a lot less nice): */
+
+#define USE_COLOR
+
+/* Comment out to disable fancy ANSI boxes and use poor man's 7-bit UI: */
+
+#define FANCY_BOXES
+
+/* Default timeout for fuzzed code (milliseconds). This is the upper bound,
+   also used for detecting hangs; the actual value is auto-scaled: */
+
+#define EXEC_TIMEOUT 1000
+
+/* Timeout rounding factor when auto-scaling (milliseconds): */
+
+#define EXEC_TM_ROUND 20
+
+/* Default memory limit for child process (MB): */
+
+#ifndef __x86_64__
+#  define MEM_LIMIT 25
+#else
+#  define MEM_LIMIT 50
+#endif /* ^!__x86_64__ */
+
+/* Default memory limit when running in QEMU mode (MB): */
+
+#define MEM_LIMIT_QEMU 200
+
+/* Default memory limit when running in Unicorn mode (MB): */
+
+#define MEM_LIMIT_UNICORN 200
+
+/* Number of calibration cycles per every new test case (and for test
+   cases that show variable behavior): */
+
+#define CAL_CYCLES 8
+#define CAL_CYCLES_LONG 40
+
+/* Number of subsequent timeouts before abandoning an input file: */
+
+#define TMOUT_LIMIT 250
+
+/* Maximum number of unique hangs or crashes to record: */
+
+#define KEEP_UNIQUE_HANG 500
+#define KEEP_UNIQUE_CRASH 5000
+
+/* Baseline number of random tweaks during a single 'havoc' stage: */
+
+#define HAVOC_CYCLES 256
+#define HAVOC_CYCLES_INIT 1024
+
+/* Maximum multiplier for the above (should be a power of two, beware
+   of 32-bit int overflows): */
+
+#define HAVOC_MAX_MULT 16
+#define HAVOC_MAX_MULT_MOPT 32
+
+/* Absolute minimum number of havoc cycles (after all adjustments): */
+
+#define HAVOC_MIN 16
+
+/* Power Schedule Divisor */
+#define POWER_BETA 1
+#define MAX_FACTOR (POWER_BETA * 32)
+
+/* Maximum stacking for havoc-stage tweaks. The actual value is calculated
+   like this:
+
+   n = random between 1 and HAVOC_STACK_POW2
+   stacking = 2^n
+
+   In other words, the default (n = 7) produces 2, 4, 8, 16, 32, 64, or
+   128 stacked tweaks: */
+
+#define HAVOC_STACK_POW2 7
+
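+/* In the havoc stage this works out to roughly the following sketch:
+
+     u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
+
+   UR() returns 0..6 here, so the shift amount is 1..7 and use_stacking is
+   one of 2, 4, 8, 16, 32, 64 or 128. */
+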
+/* Caps on block sizes for cloning and deletion operations. Each of these
+   ranges has a 33% probability of getting picked, except for the first
+   two cycles where smaller blocks are favored: */
+
+#define HAVOC_BLK_SMALL 32
+#define HAVOC_BLK_MEDIUM 128
+#define HAVOC_BLK_LARGE 1500
+
+/* Extra-large blocks, selected very rarely (<5% of the time): */
+
+#define HAVOC_BLK_XL 32768
+
+/* Probabilities of skipping non-favored entries in the queue, expressed as
+   percentages: */
+
+#define SKIP_TO_NEW_PROB 99   /* ...when there are new, pending favorites */
+#define SKIP_NFAV_OLD_PROB 95 /* ...no new favs, cur entry already fuzzed */
+#define SKIP_NFAV_NEW_PROB 75 /* ...no new favs, cur entry not fuzzed yet */
+
+/* Splicing cycle count: */
+
+#define SPLICE_CYCLES 15
+
+/* Nominal per-splice havoc cycle length: */
+
+#define SPLICE_HAVOC 32
+
+/* Maximum offset for integer addition / subtraction stages: */
+
+#define ARITH_MAX 35
+
+/* Limits for the test case trimmer. The absolute minimum chunk size; and
+   the starting and ending divisors for chopping up the input file: */
+
+#define TRIM_MIN_BYTES 4
+#define TRIM_START_STEPS 16
+#define TRIM_END_STEPS 1024
+
+/* Maximum size of input file, in bytes (keep under 100MB): */
+
+#define MAX_FILE (1 * 1024 * 1024)
+
+/* The same, for the test case minimizer: */
+
+#define TMIN_MAX_FILE (10 * 1024 * 1024)
+
+/* Block normalization steps for afl-tmin: */
+
+#define TMIN_SET_MIN_SIZE 4
+#define TMIN_SET_STEPS 128
+
+/* Maximum dictionary token size (-x), in bytes: */
+
+#define MAX_DICT_FILE 128
+
+/* Length limits for auto-detected dictionary tokens: */
+
+#define MIN_AUTO_EXTRA 3
+#define MAX_AUTO_EXTRA 32
+
+/* Maximum number of user-specified dictionary tokens to use in deterministic
+   steps; past this point, the "extras/user" step will be still carried out,
+   but with proportionally lower odds: */
+
+#define MAX_DET_EXTRAS 200
+
+/* Maximum number of auto-extracted dictionary tokens to actually use in fuzzing
+   (first value), and to keep in memory as candidates. The latter should be much
+   higher than the former. */
+
+#define USE_AUTO_EXTRAS 50
+#define MAX_AUTO_EXTRAS (USE_AUTO_EXTRAS * 10)
+
+/* Scaling factor for the effector map used to skip some of the more
+   expensive deterministic steps. The actual divisor is set to
+   2^EFF_MAP_SCALE2 bytes: */
+
+#define EFF_MAP_SCALE2 3
+
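+/* The position-to-entry mapping in afl-fuzz is essentially this sketch:
+
+     #define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
+     #define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
+     #define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
+
+   so with the default scale of 3, one effector map entry covers 8 bytes. */
+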
+/* Minimum input file length at which the effector logic kicks in: */
+
+#define EFF_MIN_LEN 128
+
+/* Maximum effector density past which everything is just fuzzed
+   unconditionally (%): */
+
+#define EFF_MAX_PERC 90
+
+/* UI refresh frequency (Hz): */
+
+#define UI_TARGET_HZ 5
+
+/* Fuzzer stats file and plot update intervals (sec): */
+
+#define STATS_UPDATE_SEC 60
+#define PLOT_UPDATE_SEC 5
+
+/* Smoothing divisor for CPU load and exec speed stats (1 - no smoothing). */
+
+#define AVG_SMOOTHING 16
+
+/* Sync interval (every n havoc cycles): */
+
+#define SYNC_INTERVAL 5
+
+/* Output directory reuse grace period (minutes): */
+
+#define OUTPUT_GRACE 25
+
+/* Uncomment to use simple file names (id_NNNNNN): */
+
+// #define SIMPLE_FILES
+
+/* List of interesting values to use in fuzzing. */
+
+#define INTERESTING_8                                    \
+  -128,    /* Overflow signed 8-bit when decremented  */ \
+      -1,  /*                                         */ \
+      0,   /*                                         */ \
+      1,   /*                                         */ \
+      16,  /* One-off with common buffer size         */ \
+      32,  /* One-off with common buffer size         */ \
+      64,  /* One-off with common buffer size         */ \
+      100, /* One-off with common buffer size         */ \
+      127                        /* Overflow signed 8-bit when incremented  */
+
+#define INTERESTING_8_LEN 9
+
+#define INTERESTING_16                                    \
+  -32768,   /* Overflow signed 16-bit when decremented */ \
+      -129, /* Overflow signed 8-bit                   */ \
+      128,  /* Overflow signed 8-bit                   */ \
+      255,  /* Overflow unsig 8-bit when incremented   */ \
+      256,  /* Overflow unsig 8-bit                    */ \
+      512,  /* One-off with common buffer size         */ \
+      1000, /* One-off with common buffer size         */ \
+      1024, /* One-off with common buffer size         */ \
+      4096, /* One-off with common buffer size         */ \
+      32767                      /* Overflow signed 16-bit when incremented */
+
+#define INTERESTING_16_LEN 10
+
+#define INTERESTING_32                                          \
+  -2147483648LL,  /* Overflow signed 32-bit when decremented */ \
+      -100663046, /* Large negative number (endian-agnostic) */ \
+      -32769,     /* Overflow signed 16-bit                  */ \
+      32768,      /* Overflow signed 16-bit                  */ \
+      65535,      /* Overflow unsig 16-bit when incremented  */ \
+      65536,      /* Overflow unsig 16 bit                   */ \
+      100663045,  /* Large positive number (endian-agnostic) */ \
+      2147483647                 /* Overflow signed 32-bit when incremented */
+
+#define INTERESTING_32_LEN 8
+
+/***********************************************************
+ *                                                         *
+ *  Really exotic stuff you probably don't want to touch:  *
+ *                                                         *
+ ***********************************************************/
+
+/* Call count interval between reseeding the libc PRNG from /dev/urandom: */
+
+#define RESEED_RNG 10000
+
+/* Maximum line length passed from GCC to 'as' and used for parsing
+   configuration files: */
+
+#define MAX_LINE 8192
+
+/* Environment variable used to pass SHM ID to the called program. */
+
+#define SHM_ENV_VAR "__AFL_SHM_ID"
+
+/* Other less interesting, internal-only variables. */
+
+#define CLANG_ENV_VAR "__AFL_CLANG_MODE"
+#define AS_LOOP_ENV_VAR "__AFL_AS_LOOPCHECK"
+#define PERSIST_ENV_VAR "__AFL_PERSISTENT"
+#define DEFER_ENV_VAR "__AFL_DEFER_FORKSRV"
+
+/* In-code signatures for deferred and persistent mode. */
+
+#define PERSIST_SIG "##SIG_AFL_PERSISTENT##"
+#define DEFER_SIG "##SIG_AFL_DEFER_FORKSRV##"
+
+/* Distinctive bitmap signature used to indicate failed execution: */
+
+#define EXEC_FAIL_SIG 0xfee1dead
+
+/* Distinctive exit code used to indicate MSAN trip condition: */
+
+#define MSAN_ERROR 86
+
+/* Designated file descriptors for forkserver commands (the application will
+   use FORKSRV_FD and FORKSRV_FD + 1): */
+
+#define FORKSRV_FD 198
+
+/* Fork server init timeout multiplier: we'll wait the user-selected
+   timeout plus this much for the fork server to spin up. */
+
+#define FORK_WAIT_MULT 10
+
+/* Calibration timeout adjustments, to be a bit more generous when resuming
+   fuzzing sessions or trying to calibrate already-added internal finds.
+   The first value is a percentage, the other is in milliseconds: */
+
+#define CAL_TMOUT_PERC 125
+#define CAL_TMOUT_ADD 50
+
+/* Number of chances to calibrate a case before giving up: */
+
+#define CAL_CHANCES 3
+
+/* Map size for the traced binary (2^MAP_SIZE_POW2). Must be greater than
+   2; you probably want to keep it under 18 or so for performance reasons
+   (adjusting AFL_INST_RATIO when compiling is probably a better way to solve
+   problems with complex programs). You need to recompile the target binary
+   after changing this - otherwise, SEGVs may ensue. */
+
+#define MAP_SIZE_POW2 16
+#define MAP_SIZE (1 << MAP_SIZE_POW2)
+
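+/* With the default MAP_SIZE_POW2 of 16 this yields a 64 kB trace bitmap
+   (1 << 16 == 65536 bytes) shared between afl-fuzz and the target. */
+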
+/* Maximum allocator request size (keep well under INT_MAX): */
+
+#define MAX_ALLOC 0x40000000
+
+/* A made-up hashing seed: */
+
+#define HASH_CONST 0xa5b35705
+
+/* Constants for afl-gotcpu to control busy loop timing: */
+
+#define CTEST_TARGET_MS 5000
+#define CTEST_CORE_TRG_MS 1000
+#define CTEST_BUSY_CYCLES (10 * 1000 * 1000)
+
+/* Enable NeverZero counters in QEMU mode */
+
+#define AFL_QEMU_NOT_ZERO
+
+/* Uncomment this to use inferior block-coverage-based instrumentation. Note
+   that you need to recompile the target binary for this to have any effect: */
+
+// #define COVERAGE_ONLY
+
+/* Uncomment this to ignore hit counts and output just one bit per tuple.
+   As with the previous setting, you will need to recompile the target
+   binary: */
+
+// #define SKIP_COUNTS
+
+/* Uncomment this to use instrumentation data to record newly discovered paths,
+   but do not use them as seeds for fuzzing. This is useful for conveniently
+   measuring coverage that could be attained by a "dumb" fuzzing algorithm: */
+
+// #define IGNORE_FINDS
+
+#endif /* ! _HAVE_CONFIG_H */
+
diff --git a/include/debug.h b/include/debug.h
new file mode 100644
index 00000000..6a59ad7a
--- /dev/null
+++ b/include/debug.h
@@ -0,0 +1,285 @@
+/*
+   american fuzzy lop - debug / error handling macros
+   --------------------------------------------------
+
+   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+   Copyright 2013, 2014, 2015, 2016 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ */
+
+#ifndef _HAVE_DEBUG_H
+#define _HAVE_DEBUG_H
+
+#include <errno.h>
+
+#include "types.h"
+#include "config.h"
+
+/*******************
+ * Terminal colors *
+ *******************/
+
+#ifdef USE_COLOR
+
+#  define cBLK "\x1b[0;30m"
+#  define cRED "\x1b[0;31m"
+#  define cGRN "\x1b[0;32m"
+#  define cBRN "\x1b[0;33m"
+#  define cBLU "\x1b[0;34m"
+#  define cMGN "\x1b[0;35m"
+#  define cCYA "\x1b[0;36m"
+#  define cLGR "\x1b[0;37m"
+#  define cGRA "\x1b[1;90m"
+#  define cLRD "\x1b[1;91m"
+#  define cLGN "\x1b[1;92m"
+#  define cYEL "\x1b[1;93m"
+#  define cLBL "\x1b[1;94m"
+#  define cPIN "\x1b[1;95m"
+#  define cLCY "\x1b[1;96m"
+#  define cBRI "\x1b[1;97m"
+#  define cRST "\x1b[0m"
+
+#  define bgBLK "\x1b[40m"
+#  define bgRED "\x1b[41m"
+#  define bgGRN "\x1b[42m"
+#  define bgBRN "\x1b[43m"
+#  define bgBLU "\x1b[44m"
+#  define bgMGN "\x1b[45m"
+#  define bgCYA "\x1b[46m"
+#  define bgLGR "\x1b[47m"
+#  define bgGRA "\x1b[100m"
+#  define bgLRD "\x1b[101m"
+#  define bgLGN "\x1b[102m"
+#  define bgYEL "\x1b[103m"
+#  define bgLBL "\x1b[104m"
+#  define bgPIN "\x1b[105m"
+#  define bgLCY "\x1b[106m"
+#  define bgBRI "\x1b[107m"
+
+#else
+
+#  define cBLK ""
+#  define cRED ""
+#  define cGRN ""
+#  define cBRN ""
+#  define cBLU ""
+#  define cMGN ""
+#  define cCYA ""
+#  define cLGR ""
+#  define cGRA ""
+#  define cLRD ""
+#  define cLGN ""
+#  define cYEL ""
+#  define cLBL ""
+#  define cPIN ""
+#  define cLCY ""
+#  define cBRI ""
+#  define cRST ""
+
+#  define bgBLK ""
+#  define bgRED ""
+#  define bgGRN ""
+#  define bgBRN ""
+#  define bgBLU ""
+#  define bgMGN ""
+#  define bgCYA ""
+#  define bgLGR ""
+#  define bgGRA ""
+#  define bgLRD ""
+#  define bgLGN ""
+#  define bgYEL ""
+#  define bgLBL ""
+#  define bgPIN ""
+#  define bgLCY ""
+#  define bgBRI ""
+
+#endif /* ^USE_COLOR */
+
+/*************************
+ * Box drawing sequences *
+ *************************/
+
+#ifdef FANCY_BOXES
+
+#  define SET_G1 "\x1b)0"   /* Set G1 for box drawing    */
+#  define RESET_G1 "\x1b)B" /* Reset G1 to ASCII         */
+#  define bSTART "\x0e"     /* Enter G1 drawing mode     */
+#  define bSTOP "\x0f"      /* Leave G1 drawing mode     */
+#  define bH "q"            /* Horizontal line           */
+#  define bV "x"            /* Vertical line             */
+#  define bLT "l"           /* Left top corner           */
+#  define bRT "k"           /* Right top corner          */
+#  define bLB "m"           /* Left bottom corner        */
+#  define bRB "j"           /* Right bottom corner       */
+#  define bX "n"            /* Cross                     */
+#  define bVR "t"           /* Vertical, branch right    */
+#  define bVL "u"           /* Vertical, branch left     */
+#  define bHT "v"           /* Horizontal, branch top    */
+#  define bHB "w"           /* Horizontal, branch bottom */
+
+#else
+
+#  define SET_G1 ""
+#  define RESET_G1 ""
+#  define bSTART ""
+#  define bSTOP ""
+#  define bH "-"
+#  define bV "|"
+#  define bLT "+"
+#  define bRT "+"
+#  define bLB "+"
+#  define bRB "+"
+#  define bX "+"
+#  define bVR "+"
+#  define bVL "+"
+#  define bHT "+"
+#  define bHB "+"
+
+#endif /* ^FANCY_BOXES */
+
+/***********************
+ * Misc terminal codes *
+ ***********************/
+
+#define TERM_HOME "\x1b[H"
+#define TERM_CLEAR TERM_HOME "\x1b[2J"
+#define cEOL "\x1b[0K"
+#define CURSOR_HIDE "\x1b[?25l"
+#define CURSOR_SHOW "\x1b[?25h"
+
+/************************
+ * Debug & error macros *
+ ************************/
+
+/* Just print stuff to the appropriate stream. */
+
+#ifdef MESSAGES_TO_STDOUT
+#  define SAYF(x...) printf(x)
+#else
+#  define SAYF(x...) fprintf(stderr, x)
+#endif /* ^MESSAGES_TO_STDOUT */
+
+/* Show a prefixed warning. */
+
+#define WARNF(x...)                            \
+  do {                                         \
+                                               \
+    SAYF(cYEL "[!] " cBRI "WARNING: " cRST x); \
+    SAYF(cRST "\n");                           \
+                                               \
+  } while (0)
+
+/* Show a prefixed "doing something" message. */
+
+#define ACTF(x...)            \
+  do {                        \
+                              \
+    SAYF(cLBL "[*] " cRST x); \
+    SAYF(cRST "\n");          \
+                              \
+  } while (0)
+
+/* Show a prefixed "success" message. */
+
+#define OKF(x...)             \
+  do {                        \
+                              \
+    SAYF(cLGN "[+] " cRST x); \
+    SAYF(cRST "\n");          \
+                              \
+  } while (0)
+
+/* Show a prefixed fatal error message (not used in afl). */
+
+#define BADF(x...)              \
+  do {                          \
+                                \
+    SAYF(cLRD "\n[-] " cRST x); \
+    SAYF(cRST "\n");            \
+                                \
+  } while (0)
+
+/* Die with a verbose non-OS fatal error message. */
+
+#define FATAL(x...)                                                          \
+  do {                                                                       \
+                                                                             \
+    SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD                                \
+         "\n[-] PROGRAM ABORT : " cRST   x);                                   \
+    SAYF(cLRD "\n         Location : " cRST "%s(), %s:%u\n\n", __FUNCTION__, \
+         __FILE__, __LINE__);                                                \
+    exit(1);                                                                 \
+                                                                             \
+  } while (0)
+
+/* Die by calling abort() to provide a core dump. */
+
+#define ABORT(x...)                                                          \
+  do {                                                                       \
+                                                                             \
+    SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD                                \
+         "\n[-] PROGRAM ABORT : " cRST   x);                                   \
+    SAYF(cLRD "\n    Stop location : " cRST "%s(), %s:%u\n\n", __FUNCTION__, \
+         __FILE__, __LINE__);                                                \
+    abort();                                                                 \
+                                                                             \
+  } while (0)
+
+/* Die while also including the output of perror(). */
+
+#define PFATAL(x...)                                                       \
+  do {                                                                     \
+                                                                           \
+    fflush(stdout);                                                        \
+    SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD                              \
+         "\n[-]  SYSTEM ERROR : " cRST   x);                                 \
+    SAYF(cLRD "\n    Stop location : " cRST "%s(), %s:%u\n", __FUNCTION__, \
+         __FILE__, __LINE__);                                              \
+    SAYF(cLRD "       OS message : " cRST "%s\n", strerror(errno));        \
+    exit(1);                                                               \
+                                                                           \
+  } while (0)
+
+/* Die with FATAL() or PFATAL() depending on the value of res (used to
+   interpret different failure modes for read(), write(), etc). */
+
+#define RPFATAL(res, x...) \
+  do {                     \
+                           \
+    if (res < 0)           \
+      PFATAL(x);           \
+    else                   \
+      FATAL(x);            \
+                           \
+  } while (0)
+
+/* Error-checking versions of read() and write() that call RPFATAL() as
+   appropriate. */
+
+#define ck_write(fd, buf, len, fn)                            \
+  do {                                                        \
+                                                              \
+    u32 _len = (len);                                         \
+    s32 _res = write(fd, buf, _len);                          \
+    if (_res != _len) RPFATAL(_res, "Short write to %s", fn); \
+                                                              \
+  } while (0)
+
+#define ck_read(fd, buf, len, fn)                              \
+  do {                                                         \
+                                                               \
+    u32 _len = (len);                                          \
+    s32 _res = read(fd, buf, _len);                            \
+    if (_res != _len) RPFATAL(_res, "Short read from %s", fn); \
+                                                               \
+  } while (0)
+
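+/* A typical call site (sketch) - the last argument only labels the error
+   message on a short write:
+
+     ck_write(out_fd, mem, len, out_file);
+
+   and ck_read() is used the same way for reads. */
+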
+#endif /* ! _HAVE_DEBUG_H */
+
diff --git a/include/forkserver.h b/include/forkserver.h
new file mode 100644
index 00000000..af5dab72
--- /dev/null
+++ b/include/forkserver.h
@@ -0,0 +1,25 @@
+#ifndef __AFL_FORKSERVER_H
+#define __AFL_FORKSERVER_H
+
+void handle_timeout(int sig);
+void init_forkserver(char **argv);
+
+#ifdef __APPLE__
+#  define MSG_FORK_ON_APPLE                                                  \
+  "    - On MacOS X, the semantics of fork() syscalls are non-standard and " \
+  "may\n"                                                                    \
+  "      break afl-fuzz performance optimizations when running "             \
+  "platform-specific\n"                                                      \
+  "      targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n"
+#else
+#  define MSG_FORK_ON_APPLE ""
+#endif
+
+#ifdef RLIMIT_AS
+#  define MSG_ULIMIT_USAGE "      ( ulimit -Sv $[%llu << 10];"
+#else
+#  define MSG_ULIMIT_USAGE "      ( ulimit -Sd $[%llu << 10];"
+#endif /* ^RLIMIT_AS */
+
+#endif
+
diff --git a/hash.h b/include/hash.h
index f39a8257..5d0512a6 100644
--- a/hash.h
+++ b/include/hash.h
@@ -31,12 +31,12 @@
 
 #ifdef __x86_64__
 
-#define ROL64(_x, _r)  ((((u64)(_x)) << (_r)) | (((u64)(_x)) >> (64 - (_r))))
+#  define ROL64(_x, _r) ((((u64)(_x)) << (_r)) | (((u64)(_x)) >> (64 - (_r))))
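+
+/* e.g. ROL64(0x01, 8) == 0x100: bits rotate left by _r positions, with the
+   top bits wrapping around to the bottom. */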
 
 static inline u32 hash32(const void* key, u32 len, u32 seed) {
 
   const u64* data = (u64*)key;
-  u64 h1 = seed ^ len;
+  u64        h1 = seed ^ len;
 
   len >>= 3;
 
@@ -45,12 +45,12 @@ static inline u32 hash32(const void* key, u32 len, u32 seed) {
     u64 k1 = *data++;
 
     k1 *= 0x87c37b91114253d5ULL;
-    k1  = ROL64(k1, 31);
+    k1 = ROL64(k1, 31);
     k1 *= 0x4cf5ad432745937fULL;
 
     h1 ^= k1;
-    h1  = ROL64(h1, 27);
-    h1  = h1 * 5 + 0x52dce729;
+    h1 = ROL64(h1, 27);
+    h1 = h1 * 5 + 0x52dce729;
 
   }
 
@@ -64,14 +64,14 @@ static inline u32 hash32(const void* key, u32 len, u32 seed) {
 
 }
 
-#else 
+#else
 
-#define ROL32(_x, _r)  ((((u32)(_x)) << (_r)) | (((u32)(_x)) >> (32 - (_r))))
+#  define ROL32(_x, _r) ((((u32)(_x)) << (_r)) | (((u32)(_x)) >> (32 - (_r))))
 
 static inline u32 hash32(const void* key, u32 len, u32 seed) {
 
-  const u32* data  = (u32*)key;
-  u32 h1 = seed ^ len;
+  const u32* data = (u32*)key;
+  u32        h1 = seed ^ len;
 
   len >>= 2;
 
@@ -80,12 +80,12 @@ static inline u32 hash32(const void* key, u32 len, u32 seed) {
     u32 k1 = *data++;
 
     k1 *= 0xcc9e2d51;
-    k1  = ROL32(k1, 15);
+    k1 = ROL32(k1, 15);
     k1 *= 0x1b873593;
 
     h1 ^= k1;
-    h1  = ROL32(h1, 13);
-    h1  = h1 * 5 + 0xe6546b64;
+    h1 = ROL32(h1, 13);
+    h1 = h1 * 5 + 0xe6546b64;
 
   }
 
@@ -102,3 +102,4 @@ static inline u32 hash32(const void* key, u32 len, u32 seed) {
 #endif /* ^__x86_64__ */
 
 #endif /* !_HAVE_HASH_H */
+
diff --git a/sharedmem.h b/include/sharedmem.h
index 53a85fcb..7e13b13b 100644
--- a/sharedmem.h
+++ b/include/sharedmem.h
@@ -1,6 +1,8 @@
-#ifndef __SHAREDMEM_H
-#define __SHAREDMEM_H
+#ifndef __AFL_SHAREDMEM_H
+#define __AFL_SHAREDMEM_H
 
 void setup_shm(unsigned char dumb_mode);
 void remove_shm(void);
+
 #endif
+
diff --git a/include/types.h b/include/types.h
new file mode 100644
index 00000000..60ae64c2
--- /dev/null
+++ b/include/types.h
@@ -0,0 +1,96 @@
+/*
+   american fuzzy lop - type definitions and minor macros
+   ------------------------------------------------------
+
+   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+   Copyright 2013, 2014, 2015 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ */
+
+#ifndef _HAVE_TYPES_H
+#define _HAVE_TYPES_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+typedef uint8_t  u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+
+/*
+
+   Ugh. There is an unintended compiler / glibc #include glitch caused by
+   combining the u64 type and %llu in format strings, necessitating a workaround.
+
+   In essence, the compiler is always looking for 'unsigned long long' for %llu.
+   On 32-bit systems, the u64 type (aliased to uint64_t) is expanded to
+   'unsigned long long' in <bits/types.h>, so everything checks out.
+
+   But on 64-bit systems, it is #ifdef'ed in the same file as 'unsigned long'.
+   Now, it only happens in circumstances where the type happens to have the
+   expected bit width, *but* the compiler does not know that... and complains
+   about 'unsigned long' being unsafe to pass to %llu.
+
+ */
+
+#ifdef __x86_64__
+typedef unsigned long long u64;
+#else
+typedef uint64_t u64;
+#endif /* ^__x86_64__ */
+
+typedef int8_t  s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+
+#ifndef MIN
+#  define MIN(_a, _b) ((_a) > (_b) ? (_b) : (_a))
+#  define MAX(_a, _b) ((_a) > (_b) ? (_a) : (_b))
+#endif /* !MIN */
+
+#define SWAP16(_x)                    \
+  ({                                  \
+                                      \
+    u16 _ret = (_x);                  \
+    (u16)((_ret << 8) | (_ret >> 8)); \
+                                      \
+  })
+
+#define SWAP32(_x)                                                   \
+  ({                                                                 \
+                                                                     \
+    u32 _ret = (_x);                                                 \
+    (u32)((_ret << 24) | (_ret >> 24) | ((_ret << 8) & 0x00FF0000) | \
+          ((_ret >> 8) & 0x0000FF00));                               \
+                                                                     \
+  })
+
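+/* Worked examples: SWAP16(0x1122) yields 0x2211, and SWAP32(0x11223344)
+   yields 0x44332211, i.e. a full byte-order flip of the value. */
+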
+#ifdef AFL_LLVM_PASS
+#  define AFL_R(x) (random() % (x))
+#else
+#  define R(x) (random() % (x))
+#endif /* ^AFL_LLVM_PASS */
+
+#define STRINGIFY_INTERNAL(x) #x
+#define STRINGIFY(x) STRINGIFY_INTERNAL(x)
+
+#define MEM_BARRIER() __asm__ volatile("" ::: "memory")
+
+#if __GNUC__ < 6
+#  define likely(_x) (_x)
+#  define unlikely(_x) (_x)
+#else
+#  define likely(_x) __builtin_expect(!!(_x), 1)
+#  define unlikely(_x) __builtin_expect(!!(_x), 0)
+#endif
+
+#endif /* ! _HAVE_TYPES_H */
+
diff --git a/libdislocator/Makefile b/libdislocator/Makefile
index a4116780..236667ec 100644
--- a/libdislocator/Makefile
+++ b/libdislocator/Makefile
@@ -18,7 +18,7 @@ HELPER_PATH  = $(PREFIX)/lib/afl
 
 VERSION     = $(shell grep '^\#define VERSION ' ../config.h | cut -d '"' -f2)
 
-CFLAGS      ?= -O3 -funroll-loops
+CFLAGS      ?= -O3 -funroll-loops -I ../include/
 CFLAGS      += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign
 
 all: libdislocator.so
diff --git a/libdislocator/libdislocator.so.c b/libdislocator/libdislocator.so.c
index 043480a6..5104fed4 100644
--- a/libdislocator/libdislocator.so.c
+++ b/libdislocator/libdislocator.so.c
@@ -25,8 +25,8 @@
 #include <limits.h>
 #include <sys/mman.h>
 
-#include "../config.h"
-#include "../types.h"
+#include "config.h"
+#include "types.h"
 
 #ifndef PAGE_SIZE
 #  define PAGE_SIZE 4096
@@ -38,23 +38,35 @@
 
 /* Error / message handling: */
 
-#define DEBUGF(_x...) do { \
-    if (alloc_verbose) { \
-      if (++call_depth == 1) { \
+#define DEBUGF(_x...)                 \
+  do {                                \
+                                      \
+    if (alloc_verbose) {              \
+                                      \
+      if (++call_depth == 1) {        \
+                                      \
         fprintf(stderr, "[AFL] " _x); \
-        fprintf(stderr, "\n"); \
-      } \
-      call_depth--; \
-    } \
+        fprintf(stderr, "\n");        \
+                                      \
+      }                               \
+      call_depth--;                   \
+                                      \
+    }                                 \
+                                      \
   } while (0)
 
-#define FATAL(_x...) do { \
-    if (++call_depth == 1) { \
+#define FATAL(_x...)                    \
+  do {                                  \
+                                        \
+    if (++call_depth == 1) {            \
+                                        \
       fprintf(stderr, "*** [AFL] " _x); \
-      fprintf(stderr, " ***\n"); \
-      abort(); \
-    } \
-    call_depth--; \
+      fprintf(stderr, " ***\n");        \
+      abort();                          \
+                                        \
+    }                                   \
+    call_depth--;                       \
+                                        \
   } while (0)
 
 /* Macro to count the number of pages needed to store a buffer: */
@@ -63,7 +75,7 @@
 
 /* Canary & clobber bytes: */
 
-#define ALLOC_CANARY  0xAACCAACC
+#define ALLOC_CANARY 0xAACCAACC
 #define ALLOC_CLOBBER 0xCC
 
 #define PTR_C(_p) (((u32*)(_p))[-1])
@@ -73,14 +85,13 @@
 
 static u32 max_mem = MAX_ALLOC;         /* Max heap usage to permit         */
 static u8  alloc_verbose,               /* Additional debug messages        */
-           hard_fail,                   /* abort() when max_mem exceeded?   */
-           no_calloc_over;              /* abort() on calloc() overflows?   */
+    hard_fail,                          /* abort() when max_mem exceeded?   */
+    no_calloc_over;                     /* abort() on calloc() overflows?   */
 
 static __thread size_t total_mem;       /* Currently allocated mem          */
 
 static __thread u32 call_depth;         /* To avoid recursion via fprintf() */
 
-
 /* This is the main alloc function. It allocates one page more than necessary,
    sets that tailing page to PROT_NONE, and then increments the return address
    so that it is right-aligned to that boundary. Since it always uses mmap(),
@@ -90,14 +101,11 @@ static void* __dislocator_alloc(size_t len) {
 
   void* ret;
 
-
   if (total_mem + len > max_mem || total_mem + len < total_mem) {
 
-    if (hard_fail)
-      FATAL("total allocs exceed %u MB", max_mem / 1024 / 1024);
+    if (hard_fail) FATAL("total allocs exceed %u MB", max_mem / 1024 / 1024);
 
-    DEBUGF("total allocs exceed %u MB, returning NULL",
-           max_mem / 1024 / 1024);
+    DEBUGF("total allocs exceed %u MB, returning NULL", max_mem / 1024 / 1024);
 
     return NULL;
 
@@ -142,7 +150,6 @@ static void* __dislocator_alloc(size_t len) {
 
 }
 
-
 /* The "user-facing" wrapper for calloc(). This just checks for overflows and
    displays debug messages if requested. */
 
@@ -157,8 +164,11 @@ void* calloc(size_t elem_len, size_t elem_cnt) {
   if (elem_cnt && len / elem_cnt != elem_len) {
 
     if (no_calloc_over) {
-      DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len, elem_cnt);
+
+      DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len,
+             elem_cnt);
       return NULL;
+
     }
 
     FATAL("calloc(%zu, %zu) would overflow", elem_len, elem_cnt);
@@ -174,7 +184,6 @@ void* calloc(size_t elem_len, size_t elem_cnt) {
 
 }
 
-
 /* The wrapper for malloc(). Roughly the same, also clobbers the returned
    memory (unlike calloc(), malloc() is not guaranteed to return zeroed
    memory). */
@@ -193,7 +202,6 @@ void* malloc(size_t len) {
 
 }
 
-
 /* The wrapper for free(). This simply marks the entire region as PROT_NONE.
    If the region is already freed, the code will segfault during the attempt to
    read the canary. Not very graceful, but works, right? */
@@ -224,7 +232,6 @@ void free(void* ptr) {
 
 }
 
-
 /* Realloc is pretty straightforward, too. We forcibly reallocate the buffer,
    move data, and then free (aka mprotect()) the original one. */
 
@@ -249,7 +256,6 @@ void* realloc(void* ptr, size_t len) {
 
 }
 
-
 __attribute__((constructor)) void __dislocator_init(void) {
 
   u8* tmp = getenv("AFL_LD_LIMIT_MB");
@@ -266,3 +272,4 @@ __attribute__((constructor)) void __dislocator_init(void) {
   no_calloc_over = !!getenv("AFL_LD_NO_CALLOC_OVER");
 
 }
+
diff --git a/libtokencap/Makefile b/libtokencap/Makefile
index a464f76d..ec4c8f95 100644
--- a/libtokencap/Makefile
+++ b/libtokencap/Makefile
@@ -18,7 +18,7 @@ HELPER_PATH  = $(PREFIX)/lib/afl
 
 VERSION     = $(shell grep '^\#define VERSION ' ../config.h | cut -d '"' -f2)
 
-CFLAGS      ?= -O3 -funroll-loops
+CFLAGS      ?= -O3 -funroll-loops -I ../include/
 CFLAGS      += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign
 
 all: libtokencap.so
diff --git a/libtokencap/libtokencap.so.c b/libtokencap/libtokencap.so.c
index 54072279..fa26447e 100644
--- a/libtokencap/libtokencap.so.c
+++ b/libtokencap/libtokencap.so.c
@@ -30,27 +30,23 @@
 #  error "Sorry, this library is Linux-specific for now!"
 #endif /* !__linux__ */
 
-
 /* Mapping data and such */
 
 #define MAX_MAPPINGS 1024
 
-static struct mapping {
-  void *st, *en;
-} __tokencap_ro[MAX_MAPPINGS];
+static struct mapping { void *st, *en; } __tokencap_ro[MAX_MAPPINGS];
 
 static u32   __tokencap_ro_cnt;
 static u8    __tokencap_ro_loaded;
 static FILE* __tokencap_out_file;
 
-
 /* Identify read-only regions in memory. Only parameters that fall into these
    ranges are worth dumping when passed to strcmp() and so on. Read-write
    regions are far more likely to contain user input instead. */
 
 static void __tokencap_load_mappings(void) {
 
-  u8 buf[MAX_LINE];
+  u8    buf[MAX_LINE];
   FILE* f = fopen("/proc/self/maps", "r");
 
   __tokencap_ro_loaded = 1;
@@ -59,8 +55,8 @@ static void __tokencap_load_mappings(void) {
 
   while (fgets(buf, MAX_LINE, f)) {
 
-    u8 rf, wf;
-    void* st, *en;
+    u8    rf, wf;
+    void *st, *en;
 
     if (sscanf(buf, "%p-%p %c%c", &st, &en, &rf, &wf) != 4) continue;
     if (wf == 'w' || rf != 'r') continue;
@@ -76,7 +72,6 @@ static void __tokencap_load_mappings(void) {
 
 }
 
-
 /* Check an address against the list of read-only mappings. */
 
 static u8 __tokencap_is_ro(const void* ptr) {
@@ -85,20 +80,19 @@ static u8 __tokencap_is_ro(const void* ptr) {
 
   if (!__tokencap_ro_loaded) __tokencap_load_mappings();
 
-  for (i = 0; i < __tokencap_ro_cnt; i++) 
+  for (i = 0; i < __tokencap_ro_cnt; i++)
     if (ptr >= __tokencap_ro[i].st && ptr <= __tokencap_ro[i].en) return 1;
 
   return 0;
 
 }
 
-
 /* Dump an interesting token to output file, quoting and escaping it
    properly. */
 
 static void __tokencap_dump(const u8* ptr, size_t len, u8 is_text) {
 
-  u8 buf[MAX_AUTO_EXTRA * 4 + 1];
+  u8  buf[MAX_AUTO_EXTRA * 4 + 1];
   u32 i;
   u32 pos = 0;
 
@@ -120,9 +114,7 @@ static void __tokencap_dump(const u8* ptr, size_t len, u8 is_text) {
         pos += 4;
         break;
 
-      default:
-
-        buf[pos++] = ptr[i];
+      default: buf[pos++] = ptr[i];
 
     }
 
@@ -130,11 +122,10 @@ static void __tokencap_dump(const u8* ptr, size_t len, u8 is_text) {
 
   buf[pos] = 0;
 
-  fprintf(__tokencap_out_file, "\"%s\"\n", buf);    
+  fprintf(__tokencap_out_file, "\"%s\"\n", buf);
 
 }
 
-
 /* Replacements for strcmp(), memcmp(), and so on. Note that these will be used
    only if the target is compiled with -fno-builtins and linked dynamically. */
 
@@ -151,13 +142,13 @@ int strcmp(const char* str1, const char* str2) {
 
     if (c1 != c2) return (c1 > c2) ? 1 : -1;
     if (!c1) return 0;
-    str1++; str2++;
+    str1++;
+    str2++;
 
   }
 
 }
 
-
 #undef strncmp
 
 int strncmp(const char* str1, const char* str2, size_t len) {
@@ -171,7 +162,8 @@ int strncmp(const char* str1, const char* str2, size_t len) {
 
     if (!c1) return 0;
     if (c1 != c2) return (c1 > c2) ? 1 : -1;
-    str1++; str2++;
+    str1++;
+    str2++;
 
   }
 
@@ -179,7 +171,6 @@ int strncmp(const char* str1, const char* str2, size_t len) {
 
 }
 
-
 #undef strcasecmp
 
 int strcasecmp(const char* str1, const char* str2) {
@@ -193,13 +184,13 @@ int strcasecmp(const char* str1, const char* str2) {
 
     if (c1 != c2) return (c1 > c2) ? 1 : -1;
     if (!c1) return 0;
-    str1++; str2++;
+    str1++;
+    str2++;
 
   }
 
 }
 
-
 #undef strncasecmp
 
 int strncasecmp(const char* str1, const char* str2, size_t len) {
@@ -213,7 +204,8 @@ int strncasecmp(const char* str1, const char* str2, size_t len) {
 
     if (!c1) return 0;
     if (c1 != c2) return (c1 > c2) ? 1 : -1;
-    str1++; str2++;
+    str1++;
+    str2++;
 
   }
 
@@ -221,7 +213,6 @@ int strncasecmp(const char* str1, const char* str2, size_t len) {
 
 }
 
-
 #undef memcmp
 
 int memcmp(const void* mem1, const void* mem2, size_t len) {
@@ -233,7 +224,8 @@ int memcmp(const void* mem1, const void* mem2, size_t len) {
 
     unsigned char c1 = *(const char*)mem1, c2 = *(const char*)mem2;
     if (c1 != c2) return (c1 > c2) ? 1 : -1;
-    mem1++; mem2++;
+    mem1++;
+    mem2++;
 
   }
 
@@ -241,7 +233,6 @@ int memcmp(const void* mem1, const void* mem2, size_t len) {
 
 }
 
-
 #undef strstr
 
 char* strstr(const char* haystack, const char* needle) {
@@ -249,16 +240,17 @@ char* strstr(const char* haystack, const char* needle) {
   if (__tokencap_is_ro(haystack))
     __tokencap_dump(haystack, strlen(haystack), 1);
 
-  if (__tokencap_is_ro(needle))
-    __tokencap_dump(needle, strlen(needle), 1);
+  if (__tokencap_is_ro(needle)) __tokencap_dump(needle, strlen(needle), 1);
 
   do {
+
     const char* n = needle;
     const char* h = haystack;
 
-    while(*n && *h && *n == *h) n++, h++;
+    while (*n && *h && *n == *h)
+      n++, h++;
 
-    if(!*n) return (char*)haystack;
+    if (!*n) return (char*)haystack;
 
   } while (*(haystack++));
 
@@ -266,7 +258,6 @@ char* strstr(const char* haystack, const char* needle) {
 
 }
 
-
 #undef strcasestr
 
 char* strcasestr(const char* haystack, const char* needle) {
@@ -274,25 +265,24 @@ char* strcasestr(const char* haystack, const char* needle) {
   if (__tokencap_is_ro(haystack))
     __tokencap_dump(haystack, strlen(haystack), 1);
 
-  if (__tokencap_is_ro(needle))
-    __tokencap_dump(needle, strlen(needle), 1);
+  if (__tokencap_is_ro(needle)) __tokencap_dump(needle, strlen(needle), 1);
 
   do {
 
     const char* n = needle;
     const char* h = haystack;
 
-    while(*n && *h && tolower(*n) == tolower(*h)) n++, h++;
+    while (*n && *h && tolower(*n) == tolower(*h))
+      n++, h++;
 
-    if(!*n) return (char*)haystack;
+    if (!*n) return (char*)haystack;
 
-  } while(*(haystack++));
+  } while (*(haystack++));
 
   return 0;
 
 }
 
-
 /* Init code to open the output file (or default to stderr). */
 
 __attribute__((constructor)) void __tokencap_init(void) {
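
The mapping scan performed by __tokencap_load_mappings() above can be reproduced in isolation. A minimal sketch, assuming a Linux /proc filesystem (the 512-byte buffer is arbitrary here, where the library uses MAX_LINE):

```c
/* Standalone version of the /proc/self/maps scan: keep only regions
   that are readable but not writable, i.e. the ones likely to hold
   compile-time string constants rather than user input. */
#include <stdio.h>

int main(void) {

  char  buf[512];
  FILE* f = fopen("/proc/self/maps", "r");
  if (!f) return 1;

  while (fgets(buf, sizeof(buf), f)) {

    void *st, *en;
    char  rf, wf;
    if (sscanf(buf, "%p-%p %c%c", &st, &en, &rf, &wf) != 4) continue;
    if (wf == 'w' || rf != 'r') continue;
    printf("ro region: %p-%p\n", st, en);

  }

  fclose(f);
  return 0;

}
```

The interposed strcmp()/strstr() replacements then dump an argument only when __tokencap_is_ro() finds it inside one of these recorded ranges.
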
diff --git a/llvm_mode/LLVMInsTrim.so.cc b/llvm_mode/LLVMInsTrim.so.cc
index 0a15680d..4b5597e2 100644
--- a/llvm_mode/LLVMInsTrim.so.cc
+++ b/llvm_mode/LLVMInsTrim.so.cc
@@ -24,8 +24,8 @@
 #include <string>
 #include <fstream>
 
-#include "../config.h"
-#include "../debug.h"
+#include "config.h"
+#include "debug.h"
 
 #include "MarkNodes.h"
 
@@ -37,268 +37,349 @@ static cl::opt<bool> LoopHeadOpt("loophead", cl::desc("LoopHead"),
                                  cl::init(false));
 
 namespace {
-  struct InsTrim : public ModulePass {
 
-  protected:
-    std::list<std::string> myWhitelist;
+struct InsTrim : public ModulePass {
 
-  private:
-    std::mt19937 generator;
-    int total_instr = 0;
+ protected:
+  std::list<std::string> myWhitelist;
 
-    unsigned int genLabel() {
-      return generator() & (MAP_SIZE - 1);
-    }
+ private:
+  std::mt19937 generator;
+  int          total_instr = 0;
+
+  unsigned int genLabel() {
+
+    return generator() & (MAP_SIZE - 1);
+
+  }
+
+ public:
+  static char ID;
+  InsTrim() : ModulePass(ID), generator(0) {
 
-  public:
-    static char ID;
-    InsTrim() : ModulePass(ID), generator(0) {
-      char* instWhiteListFilename = getenv("AFL_LLVM_WHITELIST");
-      if (instWhiteListFilename) {
-        std::string line;
-        std::ifstream fileStream;
-        fileStream.open(instWhiteListFilename);
-        if (!fileStream)
-          report_fatal_error("Unable to open AFL_LLVM_WHITELIST");
+    char *instWhiteListFilename = getenv("AFL_LLVM_WHITELIST");
+    if (instWhiteListFilename) {
+
+      std::string   line;
+      std::ifstream fileStream;
+      fileStream.open(instWhiteListFilename);
+      if (!fileStream) report_fatal_error("Unable to open AFL_LLVM_WHITELIST");
+      getline(fileStream, line);
+      while (fileStream) {
+
+        myWhitelist.push_back(line);
         getline(fileStream, line);
-        while (fileStream) {
-          myWhitelist.push_back(line);
-          getline(fileStream, line);
-        }
+
       }
-    }
 
-    void getAnalysisUsage(AnalysisUsage &AU) const override {
-      AU.addRequired<DominatorTreeWrapperPass>();
     }
 
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+
+    AU.addRequired<DominatorTreeWrapperPass>();
+
+  }
+
 #if LLVM_VERSION_MAJOR < 4
-    const char *
+  const char *
 #else
-    StringRef
+  StringRef
 #endif
-              getPassName() const override {
-      return "InstTrim Instrumentation";
-    }
+  getPassName() const override {
+
+    return "InstTrim Instrumentation";
+
+  }
+
+  bool runOnModule(Module &M) override {
+
+    char be_quiet = 0;
+
+    if (isatty(2) && !getenv("AFL_QUIET")) {
+
+      SAYF(cCYA "LLVMInsTrim" VERSION cRST " by csienslab\n");
+
+    } else
+
+      be_quiet = 1;
 
-    bool runOnModule(Module &M) override {
-      char be_quiet = 0;
-      
-      if (isatty(2) && !getenv("AFL_QUIET")) {
-        SAYF(cCYA "LLVMInsTrim" VERSION cRST " by csienslab\n");
-      } else be_quiet = 1;
-    
 #if LLVM_VERSION_MAJOR < 9
-      char* neverZero_counters_str;
-      if ((neverZero_counters_str = getenv("AFL_LLVM_NOT_ZERO")) != NULL)
-        OKF("LLVM neverZero activated (by hexcoder)\n");
+    char *neverZero_counters_str;
+    if ((neverZero_counters_str = getenv("AFL_LLVM_NOT_ZERO")) != NULL)
+      OKF("LLVM neverZero activated (by hexcoder)\n");
 #endif
-    
-      if (getenv("AFL_LLVM_INSTRIM_LOOPHEAD") != NULL || getenv("LOOPHEAD") != NULL) {
-        LoopHeadOpt = true;
-      }
 
-      // this is our default
-      MarkSetOpt = true;
-      
-/*    // I don't think this makes sense to port into LLVMInsTrim
-      char* inst_ratio_str = getenv("AFL_INST_RATIO");
-      unsigned int inst_ratio = 100;
-      if (inst_ratio_str) {
-       if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || !inst_ratio || inst_ratio > 100)
-         FATAL("Bad value of AFL_INST_RATIO (must be between 1 and 100)");
-      }
-*/
+    if (getenv("AFL_LLVM_INSTRIM_LOOPHEAD") != NULL ||
+        getenv("LOOPHEAD") != NULL) {
+
+      LoopHeadOpt = true;
+
+    }
+
+    // this is our default
+    MarkSetOpt = true;
+
+    /*    // I dont think this makes sense to port into LLVMInsTrim
+          char* inst_ratio_str = getenv("AFL_INST_RATIO");
+          unsigned int inst_ratio = 100;
+          if (inst_ratio_str) {
+
+           if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || !inst_ratio ||
+       inst_ratio > 100) FATAL("Bad value of AFL_INST_RATIO (must be between 1
+       and 100)");
+
+          }
+
+    */
 
-      LLVMContext &C = M.getContext();
-      IntegerType *Int8Ty  = IntegerType::getInt8Ty(C);
-      IntegerType *Int32Ty = IntegerType::getInt32Ty(C);
+    LLVMContext &C = M.getContext();
+    IntegerType *Int8Ty = IntegerType::getInt8Ty(C);
+    IntegerType *Int32Ty = IntegerType::getInt32Ty(C);
 
-      GlobalVariable *CovMapPtr = new GlobalVariable(
+    GlobalVariable *CovMapPtr = new GlobalVariable(
         M, PointerType::getUnqual(Int8Ty), false, GlobalValue::ExternalLinkage,
         nullptr, "__afl_area_ptr");
 
-      GlobalVariable *OldPrev = new GlobalVariable(
-        M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc",
-        0, GlobalVariable::GeneralDynamicTLSModel, 0, false);
+    GlobalVariable *OldPrev = new GlobalVariable(
+        M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc", 0,
+        GlobalVariable::GeneralDynamicTLSModel, 0, false);
 
-      u64 total_rs = 0;
-      u64 total_hs = 0;
+    u64 total_rs = 0;
+    u64 total_hs = 0;
+
+    for (Function &F : M) {
+
+      if (!F.size()) { continue; }
+
+      if (!myWhitelist.empty()) {
+
+        bool      instrumentBlock = false;
+        DebugLoc  Loc;
+        StringRef instFilename;
+
+        for (auto &BB : F) {
+
+          BasicBlock::iterator IP = BB.getFirstInsertionPt();
+          IRBuilder<>          IRB(&(*IP));
+          if (!Loc) Loc = IP->getDebugLoc();
 
-      for (Function &F : M) {
-        if (!F.size()) {
-          continue;
         }
 
-        if (!myWhitelist.empty()) {
-          bool instrumentBlock = false;
-          DebugLoc Loc;
-          StringRef instFilename;
+        if (Loc) {
+
+          DILocation *cDILoc = dyn_cast<DILocation>(Loc.getAsMDNode());
+
+          unsigned int instLine = cDILoc->getLine();
+          instFilename = cDILoc->getFilename();
+
+          if (instFilename.str().empty()) {
+
+            /* If the original location is empty, try using the inlined location
+             */
+            DILocation *oDILoc = cDILoc->getInlinedAt();
+            if (oDILoc) {
+
+              instFilename = oDILoc->getFilename();
+              instLine = oDILoc->getLine();
+
+            }
 
-          for (auto &BB : F) {
-            BasicBlock::iterator IP = BB.getFirstInsertionPt();
-            IRBuilder<> IRB(&(*IP));
-            if (!Loc)
-              Loc = IP->getDebugLoc();
           }
 
-          if ( Loc ) {
-              DILocation *cDILoc = dyn_cast<DILocation>(Loc.getAsMDNode());
+          /* Continue only if we know where we actually are */
+          if (!instFilename.str().empty()) {
 
-              unsigned int instLine = cDILoc->getLine();
-              instFilename = cDILoc->getFilename();
+            for (std::list<std::string>::iterator it = myWhitelist.begin();
+                 it != myWhitelist.end(); ++it) {
 
-              if (instFilename.str().empty()) {
-                  /* If the original location is empty, try using the inlined location */
-                  DILocation *oDILoc = cDILoc->getInlinedAt();
-                  if (oDILoc) {
-                      instFilename = oDILoc->getFilename();
-                      instLine = oDILoc->getLine();
-                  }
-              }
+              if (instFilename.str().length() >= it->length()) {
+
+                if (instFilename.str().compare(
+                        instFilename.str().length() - it->length(),
+                        it->length(), *it) == 0) {
+
+                  instrumentBlock = true;
+                  break;
+
+                }
 
-              /* Continue only if we know where we actually are */
-              if (!instFilename.str().empty()) {
-                  for (std::list<std::string>::iterator it = myWhitelist.begin(); it != myWhitelist.end(); ++it) {
-                      if (instFilename.str().length() >= it->length()) {
-                          if (instFilename.str().compare(instFilename.str().length() - it->length(), it->length(), *it) == 0) {
-                              instrumentBlock = true;
-                              break;
-                          }
-                      }
-                  }
               }
-          }
 
-          /* Either we couldn't figure out our location or the location is
-           * not whitelisted, so we skip instrumentation. */
-          if (!instrumentBlock) {
-            if (!instFilename.str().empty())
-              SAYF(cYEL "[!] " cBRI "Not in whitelist, skipping %s ...\n", instFilename.str().c_str());
-            else
-              SAYF(cYEL "[!] " cBRI "No filename information found, skipping it");
-            continue;
+            }
+
           }
+
         }
 
-        std::unordered_set<BasicBlock *> MS;
-        if (!MarkSetOpt) {
-          for (auto &BB : F) {
-            MS.insert(&BB);
-          }
-          total_rs += F.size();
+        /* Either we couldn't figure out our location or the location is
+         * not whitelisted, so we skip instrumentation. */
+        if (!instrumentBlock) {
+
+          if (!instFilename.str().empty())
+            SAYF(cYEL "[!] " cBRI "Not in whitelist, skipping %s ...\n",
+                 instFilename.str().c_str());
+          else
+            SAYF(cYEL "[!] " cBRI "No filename information found, skipping it");
+          continue;
+
+        }
+
+      }
+
+      std::unordered_set<BasicBlock *> MS;
+      if (!MarkSetOpt) {
+
+        for (auto &BB : F) {
+
+          MS.insert(&BB);
+
+        }
+
+        total_rs += F.size();
+
+      } else {
+
+        auto Result = markNodes(&F);
+        auto RS = Result.first;
+        auto HS = Result.second;
+
+        MS.insert(RS.begin(), RS.end());
+        if (!LoopHeadOpt) {
+
+          MS.insert(HS.begin(), HS.end());
+          total_rs += MS.size();
+
         } else {
-          auto Result = markNodes(&F);
-          auto RS = Result.first;
-          auto HS = Result.second;
-
-          MS.insert(RS.begin(), RS.end());
-          if (!LoopHeadOpt) {
-            MS.insert(HS.begin(), HS.end());
-            total_rs += MS.size();
-          } else {
-            DenseSet<std::pair<BasicBlock *, BasicBlock *>> EdgeSet;
-            DominatorTreeWrapperPass *DTWP = &getAnalysis<DominatorTreeWrapperPass>(F);
-            auto DT = &DTWP->getDomTree();
-
-            total_rs += RS.size();
-            total_hs += HS.size();
-
-            for (BasicBlock *BB : HS) {
-              bool Inserted = false;
-              for (auto BI = pred_begin(BB), BE = pred_end(BB);
-                   BI != BE; ++BI
-              ) {
-                auto Edge = BasicBlockEdge(*BI, BB);
-                if (Edge.isSingleEdge() && DT->dominates(Edge, BB)) {
-                  EdgeSet.insert({*BI, BB});
-                  Inserted = true;
-                  break;
-                }
-              }
-              if (!Inserted) {
-                MS.insert(BB);
-                total_rs += 1;
-                total_hs -= 1;
+
+          DenseSet<std::pair<BasicBlock *, BasicBlock *>> EdgeSet;
+          DominatorTreeWrapperPass *                      DTWP =
+              &getAnalysis<DominatorTreeWrapperPass>(F);
+          auto DT = &DTWP->getDomTree();
+
+          total_rs += RS.size();
+          total_hs += HS.size();
+
+          for (BasicBlock *BB : HS) {
+
+            bool Inserted = false;
+            for (auto BI = pred_begin(BB), BE = pred_end(BB); BI != BE; ++BI) {
+
+              auto Edge = BasicBlockEdge(*BI, BB);
+              if (Edge.isSingleEdge() && DT->dominates(Edge, BB)) {
+
+                EdgeSet.insert({*BI, BB});
+                Inserted = true;
+                break;
+
               }
+
             }
-            for (auto I = EdgeSet.begin(), E = EdgeSet.end(); I != E; ++I) {
-              auto PredBB = I->first;
-              auto SuccBB = I->second;
-              auto NewBB = SplitBlockPredecessors(SuccBB, {PredBB}, ".split",
-                                                  DT, nullptr,
-#if LLVM_VERSION_MAJOR >= 8
-                                                  nullptr,
-#endif
-                                                  false);
-              MS.insert(NewBB);
+
+            if (!Inserted) {
+
+              MS.insert(BB);
+              total_rs += 1;
+              total_hs -= 1;
+
             }
-          }
 
-          auto *EBB = &F.getEntryBlock();
-          if (succ_begin(EBB) == succ_end(EBB)) {
-            MS.insert(EBB);
-            total_rs += 1;
           }
 
-          for (BasicBlock &BB : F) {
-            if (MS.find(&BB) == MS.end()) {
-              continue;
-            }
-            IRBuilder<> IRB(&*BB.getFirstInsertionPt());
-            IRB.CreateStore(ConstantInt::get(Int32Ty, genLabel()), OldPrev);
+          for (auto I = EdgeSet.begin(), E = EdgeSet.end(); I != E; ++I) {
+
+            auto PredBB = I->first;
+            auto SuccBB = I->second;
+            auto NewBB =
+                SplitBlockPredecessors(SuccBB, {PredBB}, ".split", DT, nullptr,
+#if LLVM_VERSION_MAJOR >= 8
+                                       nullptr,
+#endif
+                                       false);
+            MS.insert(NewBB);
+
           }
+
+        }
+
+        auto *EBB = &F.getEntryBlock();
+        if (succ_begin(EBB) == succ_end(EBB)) {
+
+          MS.insert(EBB);
+          total_rs += 1;
+
         }
 
         for (BasicBlock &BB : F) {
-          auto PI = pred_begin(&BB);
-          auto PE = pred_end(&BB);
-          if (MarkSetOpt && MS.find(&BB) == MS.end()) {
-            continue;
-          }
 
+          if (MS.find(&BB) == MS.end()) { continue; }
           IRBuilder<> IRB(&*BB.getFirstInsertionPt());
-          Value *L = NULL;
-          if (PI == PE) {
-            L = ConstantInt::get(Int32Ty, genLabel());
-          } else {
-            auto *PN = PHINode::Create(Int32Ty, 0, "", &*BB.begin());
-            DenseMap<BasicBlock *, unsigned> PredMap;
-            for (auto PI = pred_begin(&BB), PE = pred_end(&BB);
-                 PI != PE; ++PI
-            ) {
-              BasicBlock *PBB = *PI;
-              auto It = PredMap.insert({PBB, genLabel()});
-              unsigned Label = It.first->second;
-              PN->addIncoming(ConstantInt::get(Int32Ty, Label), PBB);
-            }
-            L = PN;
+          IRB.CreateStore(ConstantInt::get(Int32Ty, genLabel()), OldPrev);
+
+        }
+
+      }
+
+      for (BasicBlock &BB : F) {
+
+        auto PI = pred_begin(&BB);
+        auto PE = pred_end(&BB);
+        if (MarkSetOpt && MS.find(&BB) == MS.end()) { continue; }
+
+        IRBuilder<> IRB(&*BB.getFirstInsertionPt());
+        Value *     L = NULL;
+        if (PI == PE) {
+
+          L = ConstantInt::get(Int32Ty, genLabel());
+
+        } else {
+
+          auto *PN = PHINode::Create(Int32Ty, 0, "", &*BB.begin());
+          DenseMap<BasicBlock *, unsigned> PredMap;
+          for (auto PI = pred_begin(&BB), PE = pred_end(&BB); PI != PE; ++PI) {
+
+            BasicBlock *PBB = *PI;
+            auto        It = PredMap.insert({PBB, genLabel()});
+            unsigned    Label = It.first->second;
+            PN->addIncoming(ConstantInt::get(Int32Ty, Label), PBB);
+
           }
 
-          /* Load prev_loc */
-          LoadInst *PrevLoc = IRB.CreateLoad(OldPrev);
-          PrevLoc->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
-          Value *PrevLocCasted = IRB.CreateZExt(PrevLoc, IRB.getInt32Ty());
+          L = PN;
+
+        }
+
+        /* Load prev_loc */
+        LoadInst *PrevLoc = IRB.CreateLoad(OldPrev);
+        PrevLoc->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
+        Value *PrevLocCasted = IRB.CreateZExt(PrevLoc, IRB.getInt32Ty());
+
+        /* Load SHM pointer */
+        LoadInst *MapPtr = IRB.CreateLoad(CovMapPtr);
+        MapPtr->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
+        Value *MapPtrIdx =
+            IRB.CreateGEP(MapPtr, IRB.CreateXor(PrevLocCasted, L));
 
-          /* Load SHM pointer */
-          LoadInst *MapPtr = IRB.CreateLoad(CovMapPtr);
-          MapPtr->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
-          Value *MapPtrIdx = IRB.CreateGEP(MapPtr, IRB.CreateXor(PrevLocCasted, L));
+        /* Update bitmap */
+        LoadInst *Counter = IRB.CreateLoad(MapPtrIdx);
+        Counter->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
 
-          /* Update bitmap */
-          LoadInst *Counter = IRB.CreateLoad(MapPtrIdx);
-          Counter->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
-          
-          Value *Incr = IRB.CreateAdd(Counter, ConstantInt::get(Int8Ty, 1));
+        Value *Incr = IRB.CreateAdd(Counter, ConstantInt::get(Int8Ty, 1));
 
 #if LLVM_VERSION_MAJOR < 9
-          if (neverZero_counters_str != NULL) // with llvm 9 we make this the default as the bug in llvm is then fixed
+        if (neverZero_counters_str !=
+            NULL)  // with llvm 9 we make this the default as the bug in llvm is
+                   // then fixed
 #else
-          if (1) // with llvm 9 we make this the default as the bug in llvm is then fixed
+        if (1)  // with llvm 9 we make this the default as the bug in llvm is
+                // then fixed
 #endif
-          {
+        {
+
           /* hexcoder: Realize a counter that skips zero during overflow.
-           * Once this counter reaches its maximum value, it next increments to 1
+           * Once this counter reaches its maximum value, it next increments to
+           * 1
            *
            * Instead of
            * Counter + 1 -> Counter
@@ -306,38 +387,52 @@ namespace {
            * Counter + 1 -> {Counter, OverflowFlag}
            * Counter + OverflowFlag -> Counter
            */
-            auto cf = IRB.CreateICmpEQ(Incr, ConstantInt::get(Int8Ty, 0));
-            auto carry = IRB.CreateZExt(cf, Int8Ty);
-            Incr = IRB.CreateAdd(Incr, carry);
-          }
-   
-          IRB.CreateStore(Incr, MapPtrIdx)->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
-   
-          /* Set prev_loc to cur_loc >> 1 */
-          /*
-          StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int32Ty, L >> 1), OldPrev);
-          Store->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
-          */
-
-          total_instr++;
+          auto cf = IRB.CreateICmpEQ(Incr, ConstantInt::get(Int8Ty, 0));
+          auto carry = IRB.CreateZExt(cf, Int8Ty);
+          Incr = IRB.CreateAdd(Incr, carry);
+
         }
+
+        IRB.CreateStore(Incr, MapPtrIdx)
+            ->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
+
+        /* Set prev_loc to cur_loc >> 1 */
+        /*
+        StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int32Ty, L >> 1),
+        OldPrev); Store->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C,
+        None));
+        */
+
+        total_instr++;
+
       }
 
-      OKF("Instrumented %u locations (%llu, %llu) (%s mode)\n"/*", ratio %u%%)."*/,
-          total_instr, total_rs, total_hs,
-          getenv("AFL_HARDEN") ? "hardened" :
-          ((getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) ?
-          "ASAN/MSAN" : "non-hardened")/*, inst_ratio*/);
-      return false;
     }
-  }; // end of struct InsTrim
+
+    OKF("Instrumented %u locations (%llu, %llu) (%s mode)\n" /*", ratio
+                                                                %u%%)."*/
+        ,
+        total_instr, total_rs, total_hs,
+        getenv("AFL_HARDEN")
+            ? "hardened"
+            : ((getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN"))
+                   ? "ASAN/MSAN"
+                   : "non-hardened") /*, inst_ratio*/);
+    return false;
+
+  }
+
+};  // end of struct InsTrim
+
 }  // end of anonymous namespace
 
 char InsTrim::ID = 0;
 
 static void registerAFLPass(const PassManagerBuilder &,
                             legacy::PassManagerBase &PM) {
+
   PM.add(new InsTrim());
+
 }
 
 static RegisterStandardPasses RegisterAFLPass(
@@ -345,3 +440,4 @@ static RegisterStandardPasses RegisterAFLPass(
 
 static RegisterStandardPasses RegisterAFLPass0(
     PassManagerBuilder::EP_EnabledOnOptLevel0, registerAFLPass);
+
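
Beyond the reindentation, the hunk above preserves the "neverZero" trick documented in hexcoder's comment: after the 8-bit increment, the overflow carry is added back in, so a hit counter wraps from 255 to 1 instead of to 0 and an executed edge can never look unvisited. A plain-C sketch of what the CreateICmpEQ/CreateZExt/CreateAdd sequence computes (the function name is ours):

```c
/* Skip-zero hit counter: increment, then add the overflow carry back
   in, so the counter goes 0xff -> 1 rather than 0xff -> 0. */
#include <stdint.h>
#include <stdio.h>

static uint8_t never_zero_inc(uint8_t counter) {

  uint8_t incr = counter + 1;   /* wraps to 0 on overflow    */
  uint8_t carry = (incr == 0);  /* 1 exactly when it wrapped */
  return incr + carry;

}

int main(void) {

  printf("%u\n", never_zero_inc(0xfe)); /* 255 */
  printf("%u\n", never_zero_inc(0xff)); /* 1   */
  return 0;

}
```

The counter being incremented lives at __afl_area_ptr[prev_loc ^ cur_loc], which is exactly the CreateGEP(MapPtr, CreateXor(PrevLocCasted, L)) index built a few lines earlier in the pass.
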
diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile
index 7f0c8c5d..88e9d579 100644
--- a/llvm_mode/Makefile
+++ b/llvm_mode/Makefile
@@ -49,7 +49,7 @@ ifeq "$(LLVM_MAJOR)" "9"
 endif
 
 CFLAGS      ?= -O3 -funroll-loops
-CFLAGS      += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign \
+CFLAGS      += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign -I ../include/ \
                -DAFL_PATH=\"$(HELPER_PATH)\" -DBIN_PATH=\"$(BIN_PATH)\" \
                -DVERSION=\"$(VERSION)\"
 ifdef AFL_TRACE_PC
@@ -57,7 +57,7 @@ ifdef AFL_TRACE_PC
 endif
 
 CXXFLAGS    ?= -O3 -funroll-loops
-CXXFLAGS    += -Wall -D_FORTIFY_SOURCE=2 -g \
+CXXFLAGS    += -Wall -D_FORTIFY_SOURCE=2 -g -I ../include/ \
                -DVERSION=\"$(VERSION)\" -Wno-variadic-macros
 
 CLANG_CFL    = `$(LLVM_CONFIG) --cxxflags` -Wl,-znodelete -fno-rtti -fpic $(CXXFLAGS)
diff --git a/llvm_mode/MarkNodes.cc b/llvm_mode/MarkNodes.cc
index 348dc264..2aeeda8d 100644
--- a/llvm_mode/MarkNodes.cc
+++ b/llvm_mode/MarkNodes.cc
@@ -19,207 +19,267 @@
 
 using namespace llvm;
 
-DenseMap<BasicBlock *, uint32_t> LMap;
-std::vector<BasicBlock *> Blocks;
-std::set<uint32_t> Marked , Markabove;
-std::vector< std::vector<uint32_t> > Succs , Preds;
+DenseMap<BasicBlock *, uint32_t>    LMap;
+std::vector<BasicBlock *>           Blocks;
+std::set<uint32_t>                  Marked, Markabove;
+std::vector<std::vector<uint32_t> > Succs, Preds;
+
+void reset() {
 
-void reset(){
   LMap.clear();
   Blocks.clear();
   Marked.clear();
   Markabove.clear();
+
 }
 
 uint32_t start_point;
 
 void labelEachBlock(Function *F) {
+
   // Fake single endpoint;
   LMap[NULL] = Blocks.size();
   Blocks.push_back(NULL);
- 
+
   // Assign the unique LabelID to each block;
   for (auto I = F->begin(), E = F->end(); I != E; ++I) {
+
     BasicBlock *BB = &*I;
     LMap[BB] = Blocks.size();
     Blocks.push_back(BB);
+
   }
-  
+
   start_point = LMap[&F->getEntryBlock()];
+
 }
 
 void buildCFG(Function *F) {
-  Succs.resize( Blocks.size() );
-  Preds.resize( Blocks.size() );
-  for( size_t i = 0 ; i < Succs.size() ; i ++ ){
-    Succs[ i ].clear();
-    Preds[ i ].clear();
+
+  Succs.resize(Blocks.size());
+  Preds.resize(Blocks.size());
+  for (size_t i = 0; i < Succs.size(); i++) {
+
+    Succs[i].clear();
+    Preds[i].clear();
+
   }
 
-  //uint32_t FakeID = 0;
+  // uint32_t FakeID = 0;
   for (auto S = F->begin(), E = F->end(); S != E; ++S) {
+
     BasicBlock *BB = &*S;
-    uint32_t MyID = LMap[BB];
-    //if (succ_begin(BB) == succ_end(BB)) {
-      //Succs[MyID].push_back(FakeID);
-      //Marked.insert(MyID);
+    uint32_t    MyID = LMap[BB];
+    // if (succ_begin(BB) == succ_end(BB)) {
+
+    // Succs[MyID].push_back(FakeID);
+    // Marked.insert(MyID);
     //}
     for (auto I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
+
       Succs[MyID].push_back(LMap[*I]);
+
     }
+
   }
+
 }
 
-std::vector< std::vector<uint32_t> > tSuccs;
-std::vector<bool> tag , indfs;
+std::vector<std::vector<uint32_t> > tSuccs;
+std::vector<bool>                   tag, indfs;
 
 void DFStree(size_t now_id) {
-  if(tag[now_id]) return;
-  tag[now_id]=true;
-  indfs[now_id]=true;
-  for (auto succ: tSuccs[now_id]) {
-    if(tag[succ] and indfs[succ]) {
+
+  if (tag[now_id]) return;
+  tag[now_id] = true;
+  indfs[now_id] = true;
+  for (auto succ : tSuccs[now_id]) {
+
+    if (tag[succ] and indfs[succ]) {
+
       Marked.insert(succ);
       Markabove.insert(succ);
       continue;
+
     }
+
     Succs[now_id].push_back(succ);
     Preds[succ].push_back(now_id);
     DFStree(succ);
+
   }
-  indfs[now_id]=false;
+
+  indfs[now_id] = false;
+
 }
+
 void turnCFGintoDAG(Function *F) {
+
   tSuccs = Succs;
   tag.resize(Blocks.size());
   indfs.resize(Blocks.size());
-  for (size_t i = 0; i < Blocks.size(); ++ i) {
+  for (size_t i = 0; i < Blocks.size(); ++i) {
+
     Succs[i].clear();
-    tag[i]=false;
-    indfs[i]=false;
+    tag[i] = false;
+    indfs[i] = false;
+
   }
+
   DFStree(start_point);
-  for (size_t i = 0; i < Blocks.size(); ++ i) 
-    if( Succs[i].empty() ){
+  for (size_t i = 0; i < Blocks.size(); ++i)
+    if (Succs[i].empty()) {
+
       Succs[i].push_back(0);
       Preds[0].push_back(i);
+
     }
+
 }
 
 uint32_t timeStamp;
-namespace DominatorTree{
-  std::vector< std::vector<uint32_t> > cov;
-  std::vector<uint32_t> dfn, nfd, par, sdom, idom, mom, mn;
+namespace DominatorTree {
+
+std::vector<std::vector<uint32_t> > cov;
+std::vector<uint32_t>               dfn, nfd, par, sdom, idom, mom, mn;
+
+bool Compare(uint32_t u, uint32_t v) {
+
+  return dfn[u] < dfn[v];
+
+}
+
+uint32_t eval(uint32_t u) {
+
+  if (mom[u] == u) return u;
+  uint32_t res = eval(mom[u]);
+  if (Compare(sdom[mn[mom[u]]], sdom[mn[u]])) { mn[u] = mn[mom[u]]; }
+  return mom[u] = res;
+
+}
+
+void DFS(uint32_t now) {
+
+  timeStamp += 1;
+  dfn[now] = timeStamp;
+  nfd[timeStamp - 1] = now;
+  for (auto succ : Succs[now]) {
+
+    if (dfn[succ] == 0) {
+
+      par[succ] = now;
+      DFS(succ);
 
-  bool Compare(uint32_t u, uint32_t v) {
-    return dfn[u] < dfn[v];
-  }
-  uint32_t eval(uint32_t u) {
-    if( mom[u] == u ) return u;
-    uint32_t res = eval( mom[u] );
-    if(Compare(sdom[mn[mom[u]]] , sdom[mn[u]])) {
-      mn[u] = mn[mom[u]];
     }
-    return mom[u] = res;
+
   }
 
-  void DFS(uint32_t now) {
-    timeStamp += 1;
-    dfn[now] = timeStamp;
-    nfd[timeStamp - 1] = now;
-    for( auto succ : Succs[now] ) {
-      if( dfn[succ] == 0 ) {
-        par[succ] = now;
-        DFS(succ);
-      }
-    }
+}
+
+void DominatorTree(Function *F) {
+
+  if (Blocks.empty()) return;
+  uint32_t s = start_point;
+
+  // Initialization
+  mn.resize(Blocks.size());
+  cov.resize(Blocks.size());
+  dfn.resize(Blocks.size());
+  nfd.resize(Blocks.size());
+  par.resize(Blocks.size());
+  mom.resize(Blocks.size());
+  sdom.resize(Blocks.size());
+  idom.resize(Blocks.size());
+
+  for (uint32_t i = 0; i < Blocks.size(); i++) {
+
+    dfn[i] = 0;
+    nfd[i] = Blocks.size();
+    cov[i].clear();
+    idom[i] = mom[i] = mn[i] = sdom[i] = i;
+
   }
 
-  void DominatorTree(Function *F) {
-    if( Blocks.empty() ) return;
-    uint32_t s = start_point;
-
-    // Initialization
-    mn.resize(Blocks.size());
-    cov.resize(Blocks.size());
-    dfn.resize(Blocks.size());
-    nfd.resize(Blocks.size());
-    par.resize(Blocks.size());
-    mom.resize(Blocks.size());
-    sdom.resize(Blocks.size());
-    idom.resize(Blocks.size());
-
-    for( uint32_t i = 0 ; i < Blocks.size() ; i ++ ) {
-      dfn[i] = 0;
-      nfd[i] = Blocks.size();
-      cov[i].clear();
-      idom[i] = mom[i] = mn[i] = sdom[i] = i;
-    }
+  timeStamp = 0;
+  DFS(s);
 
-    timeStamp = 0;
-    DFS(s);
+  for (uint32_t i = Blocks.size() - 1; i >= 1u; i--) {
+
+    uint32_t now = nfd[i];
+    if (now == Blocks.size()) { continue; }
+    for (uint32_t pre : Preds[now]) {
+
+      if (dfn[pre]) {
+
+        eval(pre);
+        if (Compare(sdom[mn[pre]], sdom[now])) { sdom[now] = sdom[mn[pre]]; }
 
-    for( uint32_t i = Blocks.size() - 1 ; i >= 1u ; i -- ) {
-      uint32_t now = nfd[i];
-      if( now == Blocks.size() ) {
-        continue;
-      }
-      for( uint32_t pre : Preds[ now ] ) {
-        if( dfn[ pre ] ) {
-          eval(pre);
-          if( Compare(sdom[mn[pre]], sdom[now]) ) {
-            sdom[now] = sdom[mn[pre]];
-          }
-        }
-      }
-      cov[sdom[now]].push_back(now);
-      mom[now] = par[now];
-      for( uint32_t x : cov[par[now]] ) {
-        eval(x);
-        if( Compare(sdom[mn[x]], par[now]) ) {
-          idom[x] = mn[x];
-        } else {
-          idom[x] = par[now];
-        }
       }
+
     }
 
-    for( uint32_t i = 1 ; i < Blocks.size() ; i += 1 ) {
-      uint32_t now = nfd[i];
-      if( now == Blocks.size() ) {
-        continue;
+    cov[sdom[now]].push_back(now);
+    mom[now] = par[now];
+    for (uint32_t x : cov[par[now]]) {
+
+      eval(x);
+      if (Compare(sdom[mn[x]], par[now])) {
+
+        idom[x] = mn[x];
+
+      } else {
+
+        idom[x] = par[now];
+
       }
-      if(idom[now] != sdom[now])
-        idom[now] = idom[idom[now]];
+
     }
+
   }
-} // End of DominatorTree
 
-std::vector<uint32_t> Visited, InStack;
-std::vector<uint32_t> TopoOrder, InDeg;
-std::vector< std::vector<uint32_t> > t_Succ , t_Pred;
+  for (uint32_t i = 1; i < Blocks.size(); i += 1) {
+
+    uint32_t now = nfd[i];
+    if (now == Blocks.size()) { continue; }
+    if (idom[now] != sdom[now]) idom[now] = idom[idom[now]];
+
+  }
+
+}
+
+}  // namespace DominatorTree
+
+std::vector<uint32_t>               Visited, InStack;
+std::vector<uint32_t>               TopoOrder, InDeg;
+std::vector<std::vector<uint32_t> > t_Succ, t_Pred;
 
 void Go(uint32_t now, uint32_t tt) {
-  if( now == tt ) return;
+
+  if (now == tt) return;
   Visited[now] = InStack[now] = timeStamp;
 
-  for(uint32_t nxt : Succs[now]) {
-    if(Visited[nxt] == timeStamp and InStack[nxt] == timeStamp) {
+  for (uint32_t nxt : Succs[now]) {
+
+    if (Visited[nxt] == timeStamp and InStack[nxt] == timeStamp) {
+
       Marked.insert(nxt);
+
     }
+
     t_Succ[now].push_back(nxt);
     t_Pred[nxt].push_back(now);
     InDeg[nxt] += 1;
-    if(Visited[nxt] == timeStamp) {
-      continue;
-    }
+    if (Visited[nxt] == timeStamp) { continue; }
     Go(nxt, tt);
+
   }
 
   InStack[now] = 0;
+
 }
 
 void TopologicalSort(uint32_t ss, uint32_t tt) {
+
   timeStamp += 1;
 
   Go(ss, tt);
@@ -227,76 +287,111 @@ void TopologicalSort(uint32_t ss, uint32_t tt) {
   TopoOrder.clear();
   std::queue<uint32_t> wait;
   wait.push(ss);
-  while( not wait.empty() ) {
-    uint32_t now = wait.front(); wait.pop();
+  while (not wait.empty()) {
+
+    uint32_t now = wait.front();
+    wait.pop();
     TopoOrder.push_back(now);
-    for(uint32_t nxt : t_Succ[now]) {
+    for (uint32_t nxt : t_Succ[now]) {
+
       InDeg[nxt] -= 1;
-      if(InDeg[nxt] == 0u) {
-        wait.push(nxt);
-      }
+      if (InDeg[nxt] == 0u) { wait.push(nxt); }
+
     }
+
   }
+
 }
 
-std::vector< std::set<uint32_t> > NextMarked;
-bool Indistinguish(uint32_t node1, uint32_t node2) {
-  if(NextMarked[node1].size() > NextMarked[node2].size()){
+std::vector<std::set<uint32_t> > NextMarked;
+bool                             Indistinguish(uint32_t node1, uint32_t node2) {
+
+  if (NextMarked[node1].size() > NextMarked[node2].size()) {
+
     uint32_t _swap = node1;
     node1 = node2;
     node2 = _swap;
+
   }
-  for(uint32_t x : NextMarked[node1]) {
-    if( NextMarked[node2].find(x) != NextMarked[node2].end() ) {
-      return true;
-    }
+
+  for (uint32_t x : NextMarked[node1]) {
+
+    if (NextMarked[node2].find(x) != NextMarked[node2].end()) { return true; }
+
   }
+
   return false;
+
 }
 
 void MakeUniq(uint32_t now) {
+
   bool StopFlag = false;
   if (Marked.find(now) == Marked.end()) {
-    for(uint32_t pred1 : t_Pred[now]) {
-      for(uint32_t pred2 : t_Pred[now]) {
-        if(pred1 == pred2) continue;
-        if(Indistinguish(pred1, pred2)) {
+
+    for (uint32_t pred1 : t_Pred[now]) {
+
+      for (uint32_t pred2 : t_Pred[now]) {
+
+        if (pred1 == pred2) continue;
+        if (Indistinguish(pred1, pred2)) {
+
           Marked.insert(now);
           StopFlag = true;
           break;
+
         }
+
       }
-      if (StopFlag) {
-        break;
-      }
+
+      if (StopFlag) { break; }
+
     }
+
   }
-  if(Marked.find(now) != Marked.end()) {
+
+  if (Marked.find(now) != Marked.end()) {
+
     NextMarked[now].insert(now);
+
   } else {
-    for(uint32_t pred : t_Pred[now]) {
-      for(uint32_t x : NextMarked[pred]) {
+
+    for (uint32_t pred : t_Pred[now]) {
+
+      for (uint32_t x : NextMarked[pred]) {
+
         NextMarked[now].insert(x);
+
       }
+
     }
+
   }
+
 }
 
 void MarkSubGraph(uint32_t ss, uint32_t tt) {
+
   TopologicalSort(ss, tt);
-  if(TopoOrder.empty()) return;
+  if (TopoOrder.empty()) return;
+
+  for (uint32_t i : TopoOrder) {
 
-  for(uint32_t i : TopoOrder) {
     NextMarked[i].clear();
+
   }
 
   NextMarked[TopoOrder[0]].insert(TopoOrder[0]);
-  for(uint32_t i = 1 ; i < TopoOrder.size() ; i += 1) {
+  for (uint32_t i = 1; i < TopoOrder.size(); i += 1) {
+
     MakeUniq(TopoOrder[i]);
+
   }
+
 }
 
 void MarkVertice(Function *F) {
+
   uint32_t s = start_point;
 
   InDeg.resize(Blocks.size());
@@ -306,26 +401,32 @@ void MarkVertice(Function *F) {
   t_Pred.resize(Blocks.size());
   NextMarked.resize(Blocks.size());
 
-  for( uint32_t i = 0 ; i < Blocks.size() ; i += 1 ) {
+  for (uint32_t i = 0; i < Blocks.size(); i += 1) {
+
     Visited[i] = InStack[i] = InDeg[i] = 0;
     t_Succ[i].clear();
     t_Pred[i].clear();
+
   }
+
   timeStamp = 0;
   uint32_t t = 0;
-  //MarkSubGraph(s, t);
-  //return;
+  // MarkSubGraph(s, t);
+  // return;
+
+  while (s != t) {
 
-  while( s != t ) {
     MarkSubGraph(DominatorTree::idom[t], t);
     t = DominatorTree::idom[t];
+
   }
 
 }
 
 // return {marked nodes}
-std::pair<std::vector<BasicBlock *>,
-          std::vector<BasicBlock *> >markNodes(Function *F) {
+std::pair<std::vector<BasicBlock *>, std::vector<BasicBlock *> > markNodes(
+    Function *F) {
+
   assert(F->size() > 0 && "Function can not be empty");
 
   reset();
@@ -335,21 +436,30 @@ std::pair<std::vector<BasicBlock *>,
   DominatorTree::DominatorTree(F);
   MarkVertice(F);
 
-  std::vector<BasicBlock *> Result , ResultAbove;
-  for( uint32_t x : Markabove ) {
-    auto it = Marked.find( x );
-    if( it != Marked.end() )
-      Marked.erase( it );
-    if( x )
-      ResultAbove.push_back(Blocks[x]);
+  std::vector<BasicBlock *> Result, ResultAbove;
+  for (uint32_t x : Markabove) {
+
+    auto it = Marked.find(x);
+    if (it != Marked.end()) Marked.erase(it);
+    if (x) ResultAbove.push_back(Blocks[x]);
+
   }
-  for( uint32_t x : Marked ) {
+
+  for (uint32_t x : Marked) {
+
     if (x == 0) {
+
       continue;
+
     } else {
+
       Result.push_back(Blocks[x]);
+
     }
+
   }
 
-  return { Result , ResultAbove };
+  return {Result, ResultAbove};
+
 }
+
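
TopologicalSort() above is a queue-based (Kahn-style) traversal over the t_Succ/InDeg arrays that Go() fills in. A self-contained sketch of the same idea, on a hypothetical four-node DAG:

```c
/* Kahn-style topological sort as in TopologicalSort(): repeatedly pop
   a zero-in-degree node, emit it, and decrement its successors'
   in-degrees. The graph below is made up for illustration. */
#include <stdio.h>

#define N 4

int main(void) {

  int succ[N][N] = {{1, 2, -1}, {3, -1}, {3, -1}, {-1}}; /* -1 ends a row */
  int indeg[N] = {0, 1, 1, 2};
  int queue[N], head = 0, tail = 0;

  for (int v = 0; v < N; v++)
    if (indeg[v] == 0) queue[tail++] = v;

  while (head < tail) {

    int now = queue[head++];
    printf("%d ", now); /* emitted in topological order */
    for (int k = 0; succ[now][k] != -1; k++)
      if (--indeg[succ[now][k]] == 0) queue[tail++] = succ[now][k];

  }

  printf("\n"); /* prints: 0 1 2 3 */
  return 0;

}
```

MarkSubGraph() then walks this order with MakeUniq(), inserting into Marked the blocks whose predecessor sets would otherwise be indistinguishable.
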
diff --git a/llvm_mode/MarkNodes.h b/llvm_mode/MarkNodes.h
index e3bf3ce5..23316652 100644
--- a/llvm_mode/MarkNodes.h
+++ b/llvm_mode/MarkNodes.h
@@ -1,11 +1,12 @@
 #ifndef __MARK_NODES__
-#define __MARK_NODES__
+#  define __MARK_NODES__
 
-#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/Function.h"
-#include<vector>
+#  include "llvm/IR/BasicBlock.h"
+#  include "llvm/IR/Function.h"
+#  include <vector>
 
-std::pair<std::vector<llvm::BasicBlock *>,
-          std::vector<llvm::BasicBlock *>> markNodes(llvm::Function *F);
+std::pair<std::vector<llvm::BasicBlock *>, std::vector<llvm::BasicBlock *>>
+markNodes(llvm::Function *F);
 
 #endif
+
diff --git a/llvm_mode/afl-clang-fast.c b/llvm_mode/afl-clang-fast.c
index a4bb7539..ed320716 100644
--- a/llvm_mode/afl-clang-fast.c
+++ b/llvm_mode/afl-clang-fast.c
@@ -23,10 +23,10 @@
 
 #define AFL_MAIN
 
-#include "../config.h"
-#include "../types.h"
-#include "../debug.h"
-#include "../alloc-inl.h"
+#include "config.h"
+#include "types.h"
+#include "debug.h"
+#include "alloc-inl.h"
 
 #include <stdio.h>
 #include <unistd.h>
@@ -34,16 +34,15 @@
 #include <string.h>
 #include <assert.h>
 
-static u8*  obj_path;               /* Path to runtime libraries         */
-static u8** cc_params;              /* Parameters passed to the real CC  */
-static u32  cc_par_cnt = 1;         /* Param count, including argv0      */
-
+static u8*  obj_path;                  /* Path to runtime libraries         */
+static u8** cc_params;                 /* Parameters passed to the real CC  */
+static u32  cc_par_cnt = 1;            /* Param count, including argv0      */
 
 /* Try to find the runtime libraries. If that fails, abort. */
 
 static void find_obj(u8* argv0) {
 
-  u8 *afl_path = getenv("AFL_PATH");
+  u8* afl_path = getenv("AFL_PATH");
   u8 *slash, *tmp;
 
   if (afl_path) {
@@ -51,9 +50,11 @@ static void find_obj(u8* argv0) {
     tmp = alloc_printf("%s/afl-llvm-rt.o", afl_path);
 
     if (!access(tmp, R_OK)) {
+
       obj_path = afl_path;
       ck_free(tmp);
       return;
+
     }
 
     ck_free(tmp);
@@ -64,7 +65,7 @@ static void find_obj(u8* argv0) {
 
   if (slash) {
 
-    u8 *dir;
+    u8* dir;
 
     *slash = 0;
     dir = ck_strdup(argv0);
@@ -73,9 +74,11 @@ static void find_obj(u8* argv0) {
     tmp = alloc_printf("%s/afl-llvm-rt.o", dir);
 
     if (!access(tmp, R_OK)) {
+
       obj_path = dir;
       ck_free(tmp);
       return;
+
     }
 
     ck_free(tmp);
@@ -84,33 +87,43 @@ static void find_obj(u8* argv0) {
   }
 
   if (!access(AFL_PATH "/afl-llvm-rt.o", R_OK)) {
+
     obj_path = AFL_PATH;
     return;
+
   }
 
-  FATAL("Unable to find 'afl-llvm-rt.o' or 'afl-llvm-pass.so.cc'. Please set AFL_PATH");
- 
-}
+  FATAL(
+      "Unable to find 'afl-llvm-rt.o' or 'afl-llvm-pass.so.cc'. Please set "
+      "AFL_PATH");
 
+}
 
 /* Copy argv to cc_params, making the necessary edits. */
 
 static void edit_params(u32 argc, char** argv) {
 
-  u8 fortify_set = 0, asan_set = 0, x_set = 0, maybe_linking = 1, bit_mode = 0;
-  u8 *name;
+  u8  fortify_set = 0, asan_set = 0, x_set = 0, maybe_linking = 1, bit_mode = 0;
+  u8* name;
 
   cc_params = ck_alloc((argc + 128) * sizeof(u8*));
 
   name = strrchr(argv[0], '/');
-  if (!name) name = argv[0]; else name++;
+  if (!name)
+    name = argv[0];
+  else
+    name++;
 
   if (!strcmp(name, "afl-clang-fast++")) {
+
     u8* alt_cxx = getenv("AFL_CXX");
     cc_params[0] = alt_cxx ? alt_cxx : (u8*)"clang++";
+
   } else {
+
     u8* alt_cc = getenv("AFL_CC");
     cc_params[0] = alt_cc ? alt_cc : (u8*)"clang";
+
   }
 
   /* There are three ways to compile with afl-clang-fast. In the traditional
@@ -118,36 +131,50 @@ static void edit_params(u32 argc, char** argv) {
      much faster but has less coverage. Finally there is the experimental
      'trace-pc-guard' mode, we use native LLVM instrumentation callbacks
      instead. For trace-pc-guard see:
-     http://clang.llvm.org/docs/SanitizerCoverage.html#tracing-pcs-with-guards */
+     http://clang.llvm.org/docs/SanitizerCoverage.html#tracing-pcs-with-guards
+   */
 
   // laf
-  if (getenv("LAF_SPLIT_SWITCHES")||getenv("AFL_LLVM_LAF_SPLIT_SWITCHES")) {
+  if (getenv("LAF_SPLIT_SWITCHES") || getenv("AFL_LLVM_LAF_SPLIT_SWITCHES")) {
+
     cc_params[cc_par_cnt++] = "-Xclang";
     cc_params[cc_par_cnt++] = "-load";
     cc_params[cc_par_cnt++] = "-Xclang";
-    cc_params[cc_par_cnt++] = alloc_printf("%s/split-switches-pass.so", obj_path);
+    cc_params[cc_par_cnt++] =
+        alloc_printf("%s/split-switches-pass.so", obj_path);
+
   }
 
-  if (getenv("LAF_TRANSFORM_COMPARES")||getenv("AFL_LLVM_LAF_TRANSFORM_COMPARES")) {
+  if (getenv("LAF_TRANSFORM_COMPARES") ||
+      getenv("AFL_LLVM_LAF_TRANSFORM_COMPARES")) {
+
     cc_params[cc_par_cnt++] = "-Xclang";
     cc_params[cc_par_cnt++] = "-load";
     cc_params[cc_par_cnt++] = "-Xclang";
-    cc_params[cc_par_cnt++] = alloc_printf("%s/compare-transform-pass.so", obj_path);
+    cc_params[cc_par_cnt++] =
+        alloc_printf("%s/compare-transform-pass.so", obj_path);
+
   }
 
-  if (getenv("LAF_SPLIT_COMPARES")||getenv("AFL_LLVM_LAF_SPLIT_COMPARES")) {
+  if (getenv("LAF_SPLIT_COMPARES") || getenv("AFL_LLVM_LAF_SPLIT_COMPARES")) {
+
     cc_params[cc_par_cnt++] = "-Xclang";
     cc_params[cc_par_cnt++] = "-load";
     cc_params[cc_par_cnt++] = "-Xclang";
-    cc_params[cc_par_cnt++] = alloc_printf("%s/split-compares-pass.so", obj_path);
+    cc_params[cc_par_cnt++] =
+        alloc_printf("%s/split-compares-pass.so", obj_path);
+
   }
+
   // /laf
 
 #ifdef USE_TRACE_PC
-  cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-pc-guard"; // edge coverage by default
-  //cc_params[cc_par_cnt++] = "-mllvm";
-  //cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-cmp,trace-div,trace-gep";
-  //cc_params[cc_par_cnt++] = "-sanitizer-coverage-block-threshold=0";
+  cc_params[cc_par_cnt++] =
+      "-fsanitize-coverage=trace-pc-guard";  // edge coverage by default
+  // cc_params[cc_par_cnt++] = "-mllvm";
+  // cc_params[cc_par_cnt++] =
+  // "-fsanitize-coverage=trace-cmp,trace-div,trace-gep";
+  // cc_params[cc_par_cnt++] = "-sanitizer-coverage-block-threshold=0";
 #else
   cc_params[cc_par_cnt++] = "-Xclang";
   cc_params[cc_par_cnt++] = "-load";
@@ -165,6 +192,7 @@ static void edit_params(u32 argc, char** argv) {
   if (argc == 1 && !strcmp(argv[1], "-v")) maybe_linking = 0;
 
   while (--argc) {
+
     u8* cur = *(++argv);
 
     if (!strcmp(cur, "-m32")) bit_mode = 32;
@@ -175,15 +203,15 @@ static void edit_params(u32 argc, char** argv) {
     if (!strcmp(cur, "-c") || !strcmp(cur, "-S") || !strcmp(cur, "-E"))
       maybe_linking = 0;
 
-    if (!strcmp(cur, "-fsanitize=address") ||
-        !strcmp(cur, "-fsanitize=memory")) asan_set = 1;
+    if (!strcmp(cur, "-fsanitize=address") || !strcmp(cur, "-fsanitize=memory"))
+      asan_set = 1;
 
     if (strstr(cur, "FORTIFY_SOURCE")) fortify_set = 1;
 
     if (!strcmp(cur, "-shared")) maybe_linking = 0;
 
-    if (!strcmp(cur, "-Wl,-z,defs") ||
-        !strcmp(cur, "-Wl,--no-undefined")) continue;
+    if (!strcmp(cur, "-Wl,-z,defs") || !strcmp(cur, "-Wl,--no-undefined"))
+      continue;
 
     cc_params[cc_par_cnt++] = cur;
 
@@ -193,8 +221,7 @@ static void edit_params(u32 argc, char** argv) {
 
     cc_params[cc_par_cnt++] = "-fstack-protector-all";
 
-    if (!fortify_set)
-      cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2";
+    if (!fortify_set) cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2";
 
   }
 
@@ -202,8 +229,7 @@ static void edit_params(u32 argc, char** argv) {
 
     if (getenv("AFL_USE_ASAN")) {
 
-      if (getenv("AFL_USE_MSAN"))
-        FATAL("ASAN and MSAN are mutually exclusive");
+      if (getenv("AFL_USE_MSAN")) FATAL("ASAN and MSAN are mutually exclusive");
 
       if (getenv("AFL_HARDEN"))
         FATAL("ASAN and AFL_HARDEN are mutually exclusive");
@@ -213,8 +239,7 @@ static void edit_params(u32 argc, char** argv) {
 
     } else if (getenv("AFL_USE_MSAN")) {
 
-      if (getenv("AFL_USE_ASAN"))
-        FATAL("ASAN and MSAN are mutually exclusive");
+      if (getenv("AFL_USE_ASAN")) FATAL("ASAN and MSAN are mutually exclusive");
 
       if (getenv("AFL_HARDEN"))
         FATAL("MSAN and AFL_HARDEN are mutually exclusive");
@@ -279,35 +304,41 @@ static void edit_params(u32 argc, char** argv) {
 
    */
 
-  cc_params[cc_par_cnt++] = "-D__AFL_LOOP(_A)="
-    "({ static volatile char *_B __attribute__((used)); "
-    " _B = (char*)\"" PERSIST_SIG "\"; "
+  cc_params[cc_par_cnt++] =
+      "-D__AFL_LOOP(_A)="
+      "({ static volatile char *_B __attribute__((used)); "
+      " _B = (char*)\"" PERSIST_SIG
+      "\"; "
 #ifdef __APPLE__
-    "__attribute__((visibility(\"default\"))) "
-    "int _L(unsigned int) __asm__(\"___afl_persistent_loop\"); "
+      "__attribute__((visibility(\"default\"))) "
+      "int _L(unsigned int) __asm__(\"___afl_persistent_loop\"); "
 #else
-    "__attribute__((visibility(\"default\"))) "
-    "int _L(unsigned int) __asm__(\"__afl_persistent_loop\"); "
+      "__attribute__((visibility(\"default\"))) "
+      "int _L(unsigned int) __asm__(\"__afl_persistent_loop\"); "
 #endif /* ^__APPLE__ */
-    "_L(_A); })";
+      "_L(_A); })";
 
-  cc_params[cc_par_cnt++] = "-D__AFL_INIT()="
-    "do { static volatile char *_A __attribute__((used)); "
-    " _A = (char*)\"" DEFER_SIG "\"; "
+  cc_params[cc_par_cnt++] =
+      "-D__AFL_INIT()="
+      "do { static volatile char *_A __attribute__((used)); "
+      " _A = (char*)\"" DEFER_SIG
+      "\"; "
 #ifdef __APPLE__
-    "__attribute__((visibility(\"default\"))) "
-    "void _I(void) __asm__(\"___afl_manual_init\"); "
+      "__attribute__((visibility(\"default\"))) "
+      "void _I(void) __asm__(\"___afl_manual_init\"); "
 #else
-    "__attribute__((visibility(\"default\"))) "
-    "void _I(void) __asm__(\"__afl_manual_init\"); "
+      "__attribute__((visibility(\"default\"))) "
+      "void _I(void) __asm__(\"__afl_manual_init\"); "
 #endif /* ^__APPLE__ */
-    "_I(); } while (0)";
+      "_I(); } while (0)";
 
   if (maybe_linking) {
 
     if (x_set) {
+
       cc_params[cc_par_cnt++] = "-x";
       cc_params[cc_par_cnt++] = "none";
+
     }
 
     switch (bit_mode) {
@@ -340,7 +371,6 @@ static void edit_params(u32 argc, char** argv) {
 
 }
 
-
 /* Main entry point */
 
 int main(int argc, char** argv) {
@@ -348,46 +378,53 @@ int main(int argc, char** argv) {
   if (isatty(2) && !getenv("AFL_QUIET")) {
 
 #ifdef USE_TRACE_PC
-    SAYF(cCYA "afl-clang-fast" VERSION  cRST " [tpcg] by <lszekeres@google.com>\n");
+    SAYF(cCYA "afl-clang-fast" VERSION cRST
+              " [tpcg] by <lszekeres@google.com>\n");
 #else
-    SAYF(cCYA "afl-clang-fast" VERSION  cRST " by <lszekeres@google.com>\n");
+    SAYF(cCYA "afl-clang-fast" VERSION cRST " by <lszekeres@google.com>\n");
 #endif /* ^USE_TRACE_PC */
 
   }
 
   if (argc < 2) {
 
-    SAYF("\n"
-         "This is a helper application for afl-fuzz. It serves as a drop-in replacement\n"
-         "for clang, letting you recompile third-party code with the required runtime\n"
-         "instrumentation. A common use pattern would be one of the following:\n\n"
+    SAYF(
+        "\n"
+        "This is a helper application for afl-fuzz. It serves as a drop-in "
+        "replacement\n"
+        "for clang, letting you recompile third-party code with the required "
+        "runtime\n"
+        "instrumentation. A common use pattern would be one of the "
+        "following:\n\n"
 
-         "  CC=%s/afl-clang-fast ./configure\n"
-         "  CXX=%s/afl-clang-fast++ ./configure\n\n"
+        "  CC=%s/afl-clang-fast ./configure\n"
+        "  CXX=%s/afl-clang-fast++ ./configure\n\n"
 
-         "In contrast to the traditional afl-clang tool, this version is implemented as\n"
-         "an LLVM pass and tends to offer improved performance with slow programs.\n\n"
+        "In contrast to the traditional afl-clang tool, this version is "
+        "implemented as\n"
+        "an LLVM pass and tends to offer improved performance with slow "
+        "programs.\n\n"
 
-         "You can specify custom next-stage toolchain via AFL_CC and AFL_CXX. Setting\n"
-         "AFL_HARDEN enables hardening optimizations in the compiled code.\n\n",
-         BIN_PATH, BIN_PATH);
+        "You can specify custom next-stage toolchain via AFL_CC and AFL_CXX. "
+        "Setting\n"
+        "AFL_HARDEN enables hardening optimizations in the compiled code.\n\n",
+        BIN_PATH, BIN_PATH);
 
     exit(1);
 
   }
 
-
   find_obj(argv[0]);
 
   edit_params(argc, argv);
 
-/*
-  int i = 0;
-  printf("EXEC:");
-  while (cc_params[i] != NULL)
-    printf(" %s", cc_params[i++]);
-  printf("\n");
-*/
+  /*
+    int i = 0;
+    printf("EXEC:");
+    while (cc_params[i] != NULL)
+      printf(" %s", cc_params[i++]);
+    printf("\n");
+  */
 
   execvp(cc_params[0], (char**)cc_params);
 
@@ -396,3 +433,4 @@ int main(int argc, char** argv) {
   return 0;
 
 }
+
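
The string-pasted -D__AFL_LOOP(_A) definition reflowed above is what persistent mode hangs on: it plants the PERSIST_SIG signature and routes the loop bound into __afl_persistent_loop(). From the target's point of view it is just a loop condition. A hypothetical harness using it (this only compiles under afl-clang-fast, since the macro comes from the injected -D option):

```c
/* Sketch of a persistent-mode harness; __AFL_LOOP() is supplied by
   afl-clang-fast via the -D definition in the hunk above, so a stock
   compiler will not accept this file. */
#include <stdio.h>
#include <unistd.h>

int main(void) {

  static char buf[4096];

  while (__AFL_LOOP(1000)) { /* up to 1000 inputs per forked child */

    ssize_t len = read(0, buf, sizeof(buf) - 1);
    if (len <= 0) continue;
    buf[len] = 0;
    /* hand buf to the code under test here */

  }

  return 0;

}
```
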
diff --git a/llvm_mode/afl-llvm-pass.so.cc b/llvm_mode/afl-llvm-pass.so.cc
index bdad835f..5d531a87 100644
--- a/llvm_mode/afl-llvm-pass.so.cc
+++ b/llvm_mode/afl-llvm-pass.so.cc
@@ -24,8 +24,8 @@
 
 #define AFL_LLVM_PASS
 
-#include "../config.h"
-#include "../debug.h"
+#include "config.h"
+#include "debug.h"
 
 #include <stdio.h>
 #include <stdlib.h>
@@ -48,50 +48,52 @@ using namespace llvm;
 
 namespace {
 
-  class AFLCoverage : public ModulePass {
-
-    public:
-
-      static char ID;
-      AFLCoverage() : ModulePass(ID) {
-        char* instWhiteListFilename = getenv("AFL_LLVM_WHITELIST");
-        if (instWhiteListFilename) {
-          std::string line;
-          std::ifstream fileStream;
-          fileStream.open(instWhiteListFilename);
-          if (!fileStream)
-            report_fatal_error("Unable to open AFL_LLVM_WHITELIST");
-          getline(fileStream, line);
-          while (fileStream) {
-            myWhitelist.push_back(line);
-            getline(fileStream, line);
-          }
-        }
+class AFLCoverage : public ModulePass {
+
+ public:
+  static char ID;
+  AFLCoverage() : ModulePass(ID) {
+
+    char *instWhiteListFilename = getenv("AFL_LLVM_WHITELIST");
+    if (instWhiteListFilename) {
+
+      std::string   line;
+      std::ifstream fileStream;
+      fileStream.open(instWhiteListFilename);
+      if (!fileStream) report_fatal_error("Unable to open AFL_LLVM_WHITELIST");
+      getline(fileStream, line);
+      while (fileStream) {
+
+        myWhitelist.push_back(line);
+        getline(fileStream, line);
+
       }
 
-      bool runOnModule(Module &M) override;
+    }
 
-      // StringRef getPassName() const override {
-      //  return "American Fuzzy Lop Instrumentation";
-      // }
+  }
 
-    protected:
+  bool runOnModule(Module &M) override;
 
-      std::list<std::string> myWhitelist;
+  // StringRef getPassName() const override {
 
-  };
+  //  return "American Fuzzy Lop Instrumentation";
+  // }
 
-}
+ protected:
+  std::list<std::string> myWhitelist;
 
+};
 
-char AFLCoverage::ID = 0;
+}  // namespace
 
+char AFLCoverage::ID = 0;
 
 bool AFLCoverage::runOnModule(Module &M) {
 
   LLVMContext &C = M.getContext();
 
-  IntegerType *Int8Ty  = IntegerType::getInt8Ty(C);
+  IntegerType *Int8Ty = IntegerType::getInt8Ty(C);
   IntegerType *Int32Ty = IntegerType::getInt32Ty(C);
   unsigned int cur_loc = 0;
 
@@ -103,11 +105,13 @@ bool AFLCoverage::runOnModule(Module &M) {
 
     SAYF(cCYA "afl-llvm-pass" VERSION cRST " by <lszekeres@google.com>\n");
 
-  } else be_quiet = 1;
+  } else
+
+    be_quiet = 1;
 
   /* Decide instrumentation ratio */
 
-  char* inst_ratio_str = getenv("AFL_INST_RATIO");
+  char *       inst_ratio_str = getenv("AFL_INST_RATIO");
   unsigned int inst_ratio = 100;
 
   if (inst_ratio_str) {
@@ -119,7 +123,7 @@ bool AFLCoverage::runOnModule(Module &M) {
   }
 
 #if LLVM_VERSION_MAJOR < 9
-  char* neverZero_counters_str = getenv("AFL_LLVM_NOT_ZERO");
+  char *neverZero_counters_str = getenv("AFL_LLVM_NOT_ZERO");
 #endif
 
   /* Get globals for the SHM region and the previous location. Note that
@@ -134,8 +138,8 @@ bool AFLCoverage::runOnModule(Module &M) {
       M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc");
 #else
   GlobalVariable *AFLPrevLoc = new GlobalVariable(
-      M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc",
-      0, GlobalVariable::GeneralDynamicTLSModel, 0, false);
+      M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc", 0,
+      GlobalVariable::GeneralDynamicTLSModel, 0, false);
 #endif
 
   /* Instrument all the things! */
@@ -146,58 +150,77 @@ bool AFLCoverage::runOnModule(Module &M) {
     for (auto &BB : F) {
 
       BasicBlock::iterator IP = BB.getFirstInsertionPt();
-      IRBuilder<> IRB(&(*IP));
-      
+      IRBuilder<>          IRB(&(*IP));
+
       if (!myWhitelist.empty()) {
-          bool instrumentBlock = false;
-
-          /* Get the current location using debug information.
-           * For now, just instrument the block if we are not able
-           * to determine our location. */
-          DebugLoc Loc = IP->getDebugLoc();
-          if ( Loc ) {
-              DILocation *cDILoc = dyn_cast<DILocation>(Loc.getAsMDNode());
-
-              unsigned int instLine = cDILoc->getLine();
-              StringRef instFilename = cDILoc->getFilename();
-
-              if (instFilename.str().empty()) {
-                  /* If the original location is empty, try using the inlined location */
-                  DILocation *oDILoc = cDILoc->getInlinedAt();
-                  if (oDILoc) {
-                      instFilename = oDILoc->getFilename();
-                      instLine = oDILoc->getLine();
-                  }
-              }
 
-              /* Continue only if we know where we actually are */
-              if (!instFilename.str().empty()) {
-                  for (std::list<std::string>::iterator it = myWhitelist.begin(); it != myWhitelist.end(); ++it) {
-                      /* We don't check for filename equality here because
-                       * filenames might actually be full paths. Instead we
-                       * check that the actual filename ends in the filename
-                       * specified in the list. */
-                      if (instFilename.str().length() >= it->length()) {
-                          if (instFilename.str().compare(instFilename.str().length() - it->length(), it->length(), *it) == 0) {
-                              instrumentBlock = true;
-                              break;
-                          }
-                      }
-                  }
+        bool instrumentBlock = false;
+
+        /* Get the current location using debug information.
+         * For now, just instrument the block if we are not able
+         * to determine our location. */
+        DebugLoc Loc = IP->getDebugLoc();
+        if (Loc) {
+
+          DILocation *cDILoc = dyn_cast<DILocation>(Loc.getAsMDNode());
+
+          unsigned int instLine = cDILoc->getLine();
+          StringRef    instFilename = cDILoc->getFilename();
+
+          if (instFilename.str().empty()) {
+
+            /* If the original location is empty, try using the inlined location
+             */
+            DILocation *oDILoc = cDILoc->getInlinedAt();
+            if (oDILoc) {
+
+              instFilename = oDILoc->getFilename();
+              instLine = oDILoc->getLine();
+
+            }
+
+          }
+
+          /* Continue only if we know where we actually are */
+          if (!instFilename.str().empty()) {
+
+            for (std::list<std::string>::iterator it = myWhitelist.begin();
+                 it != myWhitelist.end(); ++it) {
+
+              /* We don't check for filename equality here because
+               * filenames might actually be full paths. Instead we
+               * check that the actual filename ends in the filename
+               * specified in the list. */
+              if (instFilename.str().length() >= it->length()) {
+
+                if (instFilename.str().compare(
+                        instFilename.str().length() - it->length(),
+                        it->length(), *it) == 0) {
+
+                  instrumentBlock = true;
+                  break;
+
+                }
+
               }
+
+            }
+
           }
 
-          /* Either we couldn't figure out our location or the location is
-           * not whitelisted, so we skip instrumentation. */
-          if (!instrumentBlock) continue;
-      }
+        }
+
+        /* Either we couldn't figure out our location or the location is
+         * not whitelisted, so we skip instrumentation. */
+        if (!instrumentBlock) continue;
 
+      }
 
       if (AFL_R(100) >= inst_ratio) continue;
 
       /* Make up cur_loc */
 
-       //cur_loc++;
+      // cur_loc++;
       cur_loc = AFL_R(MAP_SIZE);
 
       // only instrument if this basic block is the destination of a previous
@@ -205,24 +228,27 @@ bool AFLCoverage::runOnModule(Module &M) {
       // this gets rid of ~5-10% of instrumentations that are unnecessary
       // result: a little more speed and less map pollution
       int more_than_one = -1;
-      //fprintf(stderr, "BB %u: ", cur_loc);
+      // fprintf(stderr, "BB %u: ", cur_loc);
       for (BasicBlock *Pred : predecessors(&BB)) {
+
         int count = 0;
-        if (more_than_one == -1)
-          more_than_one = 0;
-        //fprintf(stderr, " %p=>", Pred);
+        if (more_than_one == -1) more_than_one = 0;
+        // fprintf(stderr, " %p=>", Pred);
         for (BasicBlock *Succ : successors(Pred)) {
-          //if (count > 0)
+
+          // if (count > 0)
           //  fprintf(stderr, "|");
           if (Succ != NULL) count++;
-          //fprintf(stderr, "%p", Succ);
+          // fprintf(stderr, "%p", Succ);
+
         }
-        if (count > 1)
-          more_than_one = 1;
+
+        if (count > 1) more_than_one = 1;
+
       }
-      //fprintf(stderr, " == %d\n", more_than_one);
-      if (more_than_one != 1)
-        continue;
+
+      // fprintf(stderr, " == %d\n", more_than_one);
+      if (more_than_one != 1) continue;
 
       ConstantInt *CurLoc = ConstantInt::get(Int32Ty, cur_loc);
 
@@ -236,7 +262,8 @@ bool AFLCoverage::runOnModule(Module &M) {
 
       LoadInst *MapPtr = IRB.CreateLoad(AFLMapPtr);
       MapPtr->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
-      Value *MapPtrIdx = IRB.CreateGEP(MapPtr, IRB.CreateXor(PrevLocCasted, CurLoc));
+      Value *MapPtrIdx =
+          IRB.CreateGEP(MapPtr, IRB.CreateXor(PrevLocCasted, CurLoc));
 
       /* Update bitmap */
 
@@ -246,7 +273,9 @@ bool AFLCoverage::runOnModule(Module &M) {
       Value *Incr = IRB.CreateAdd(Counter, ConstantInt::get(Int8Ty, 1));
 
 #if LLVM_VERSION_MAJOR < 9
-      if (neverZero_counters_str != NULL) { // with llvm 9 we make this the default as the bug in llvm is then fixed
+      if (neverZero_counters_str !=
+          NULL) {  // with llvm 9 we make this the default as the bug in llvm is
+                   // then fixed
 #endif
         /* hexcoder: Realize a counter that skips zero during overflow.
          * Once this counter reaches its maximum value, it next increments to 1
@@ -257,48 +286,67 @@ bool AFLCoverage::runOnModule(Module &M) {
          * Counter + 1 -> {Counter, OverflowFlag}
          * Counter + OverflowFlag -> Counter
          */
-/*       // we keep the old solutions just in case
-         // Solution #1
-         if (neverZero_counters_str[0] == '1') {
-           CallInst *AddOv = IRB.CreateBinaryIntrinsic(Intrinsic::uadd_with_overflow, Counter, ConstantInt::get(Int8Ty, 1));
-           AddOv->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
-           Value *SumWithOverflowBit = AddOv;
-           Incr = IRB.CreateAdd(IRB.CreateExtractValue(SumWithOverflowBit, 0),  // sum 
-                                IRB.CreateZExt( // convert from one bit type to 8 bits type 
-                                               IRB.CreateExtractValue(SumWithOverflowBit, 1), // overflow
-                                               Int8Ty));
-          // Solution #2
-          } else if (neverZero_counters_str[0] == '2') {
-             auto cf = IRB.CreateICmpEQ(Counter, ConstantInt::get(Int8Ty, 255));
-             Value *HowMuch = IRB.CreateAdd(ConstantInt::get(Int8Ty, 1), cf);
-             Incr = IRB.CreateAdd(Counter, HowMuch);
-          // Solution #3
-          } else if (neverZero_counters_str[0] == '3') {
-*/
-          // this is the solution we choose because llvm9 should do the right thing here
-            auto cf = IRB.CreateICmpEQ(Incr, ConstantInt::get(Int8Ty, 0));
-            auto carry = IRB.CreateZExt(cf, Int8Ty);
-            Incr = IRB.CreateAdd(Incr, carry);
+        /*       // we keep the old solutions just in case
+                 // Solution #1
+                 if (neverZero_counters_str[0] == '1') {
+
+                   CallInst *AddOv =
+           IRB.CreateBinaryIntrinsic(Intrinsic::uadd_with_overflow, Counter,
+           ConstantInt::get(Int8Ty, 1));
+                   AddOv->setMetadata(M.getMDKindID("nosanitize"),
+           MDNode::get(C, None)); Value *SumWithOverflowBit = AddOv; Incr =
+           IRB.CreateAdd(IRB.CreateExtractValue(SumWithOverflowBit, 0),  // sum
+                                        IRB.CreateZExt( // convert from one bit
+           type to 8 bits type IRB.CreateExtractValue(SumWithOverflowBit, 1), //
+           overflow Int8Ty));
+                  // Solution #2
+
+                  } else if (neverZero_counters_str[0] == '2') {
+
+                     auto cf = IRB.CreateICmpEQ(Counter,
+           ConstantInt::get(Int8Ty, 255)); Value *HowMuch =
+           IRB.CreateAdd(ConstantInt::get(Int8Ty, 1), cf); Incr =
+           IRB.CreateAdd(Counter, HowMuch);
+                  // Solution #3
+
+                  } else if (neverZero_counters_str[0] == '3') {
+
+        */
+        // this is the solution we choose because llvm9 should do the right
+        // thing here
+        auto cf = IRB.CreateICmpEQ(Incr, ConstantInt::get(Int8Ty, 0));
+        auto carry = IRB.CreateZExt(cf, Int8Ty);
+        Incr = IRB.CreateAdd(Incr, carry);
 /*
          // Solution #4
+
          } else if (neverZero_counters_str[0] == '4') {
+
             auto cf = IRB.CreateICmpULT(Incr, ConstantInt::get(Int8Ty, 1));
             auto carry = IRB.CreateZExt(cf, Int8Ty);
             Incr = IRB.CreateAdd(Incr, carry);
+
          } else {
-            fprintf(stderr, "Error: unknown value for AFL_NZERO_COUNTS: %s (valid is 1-4)\n", neverZero_counters_str);
-            exit(-1);
+
+            fprintf(stderr, "Error: unknown value for AFL_NZERO_COUNTS: %s
+   (valid is 1-4)\n", neverZero_counters_str); exit(-1);
+
          }
+
 */
 #if LLVM_VERSION_MAJOR < 9
+
       }
+
 #endif
 
-      IRB.CreateStore(Incr, MapPtrIdx)->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
+      IRB.CreateStore(Incr, MapPtrIdx)
+          ->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
 
       /* Set prev_loc to cur_loc >> 1 */
 
-      StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int32Ty, cur_loc >> 1), AFLPrevLoc);
+      StoreInst *Store =
+          IRB.CreateStore(ConstantInt::get(Int32Ty, cur_loc >> 1), AFLPrevLoc);
       Store->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
 
       inst_blocks++;
@@ -309,11 +357,16 @@ bool AFLCoverage::runOnModule(Module &M) {
 
   if (!be_quiet) {
 
-    if (!inst_blocks) WARNF("No instrumentation targets found.");
-    else OKF("Instrumented %u locations (%s mode, ratio %u%%).",
-             inst_blocks, getenv("AFL_HARDEN") ? "hardened" :
-             ((getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) ?
-              "ASAN/MSAN" : "non-hardened"), inst_ratio);
+    if (!inst_blocks)
+      WARNF("No instrumentation targets found.");
+    else
+      OKF("Instrumented %u locations (%s mode, ratio %u%%).", inst_blocks,
+          getenv("AFL_HARDEN")
+              ? "hardened"
+              : ((getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN"))
+                     ? "ASAN/MSAN"
+                     : "non-hardened"),
+          inst_ratio);
 
   }
 
@@ -321,7 +374,6 @@ bool AFLCoverage::runOnModule(Module &M) {
 
 }
 
-
 static void registerAFLPass(const PassManagerBuilder &,
                             legacy::PassManagerBase &PM) {
 
@@ -329,9 +381,9 @@ static void registerAFLPass(const PassManagerBuilder &,
 
 }
 
-
 static RegisterStandardPasses RegisterAFLPass(
     PassManagerBuilder::EP_OptimizerLast, registerAFLPass);
 
 static RegisterStandardPasses RegisterAFLPass0(
     PassManagerBuilder::EP_EnabledOnOptLevel0, registerAFLPass);
+
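[Aside, not part of the patch: the NeverZero counter update this pass now emits by default (the code kept after the commented-out solutions #1 and #2) can be sketched in plain C. The helper name below is illustrative; the pass builds the equivalent IR through IRBuilder rather than calling any function.

  #include <stdint.h>

  /* Mirrors the emitted IR: Incr = Counter + 1; cf = (Incr == 0);
     carry = zext(cf); Incr = Incr + carry. A counter at 255 wraps
     to 1 instead of 0, so a recorded hit can never be erased. */
  static inline void never_zero_bump(uint8_t *map, uint32_t idx) {

    uint8_t incr  = (uint8_t)(map[idx] + 1);
    uint8_t carry = (uint8_t)(incr == 0);  /* 1 only on overflow */
    map[idx]      = (uint8_t)(incr + carry);

  }

The extra add costs one compare per edge hit but removes the classic AFL blind spot where a counter overflowing to zero looks like an unvisited edge.]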
diff --git a/llvm_mode/afl-llvm-rt.o.c b/llvm_mode/afl-llvm-rt.o.c
index 67208454..bc38f1ec 100644
--- a/llvm_mode/afl-llvm-rt.o.c
+++ b/llvm_mode/afl-llvm-rt.o.c
@@ -20,10 +20,10 @@
 */
 
 #ifdef __ANDROID__
-  #include "android-ashmem.h"
+#  include "android-ashmem.h"
 #endif
-#include "../config.h"
-#include "../types.h"
+#include "config.h"
+#include "types.h"
 
 #include <stdio.h>
 #include <stdlib.h>
@@ -50,10 +50,9 @@
 #include <sys/mman.h>
 #include <fcntl.h>
 
-
 /* Globals needed by the injected instrumentation. The __afl_area_initial region
-   is used for instrumentation output before __afl_map_shm() has a chance to run.
-   It will end up as .comm, so it shouldn't be too wasteful. */
+   is used for instrumentation output before __afl_map_shm() has a chance to
+   run. It will end up as .comm, so it shouldn't be too wasteful. */
 
 u8  __afl_area_initial[MAP_SIZE];
 u8* __afl_area_ptr = __afl_area_initial;
@@ -64,43 +63,46 @@ u32 __afl_prev_loc;
 __thread u32 __afl_prev_loc;
 #endif
 
-
 /* Running in persistent mode? */
 
 static u8 is_persistent;
 
-
 /* SHM setup. */
 
 static void __afl_map_shm(void) {
 
-  u8 *id_str = getenv(SHM_ENV_VAR);
+  u8* id_str = getenv(SHM_ENV_VAR);
 
   /* If we're running under AFL, attach to the appropriate region, replacing the
      early-stage __afl_area_initial region that is needed to allow some really
      hacky .init code to work correctly in projects such as OpenSSL. */
 
   if (id_str) {
+
 #ifdef USEMMAP
-    const char *shm_file_path = id_str;
-    int shm_fd = -1;
-    unsigned char *shm_base = NULL;
+    const char*    shm_file_path = id_str;
+    int            shm_fd = -1;
+    unsigned char* shm_base = NULL;
 
     /* create the shared memory segment as if it was a file */
     shm_fd = shm_open(shm_file_path, O_RDWR, 0600);
     if (shm_fd == -1) {
+
       printf("shm_open() failed\n");
       exit(1);
+
     }
 
     /* map the shared memory segment to the address space of the process */
     shm_base = mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
     if (shm_base == MAP_FAILED) {
+
       close(shm_fd);
       shm_fd = -1;
 
       printf("mmap() failed\n");
       exit(2);
+
     }
 
     __afl_area_ptr = shm_base;
@@ -112,7 +114,7 @@ static void __afl_map_shm(void) {
 
     /* Whooooops. */
 
-    if (__afl_area_ptr == (void *)-1) _exit(1);
+    if (__afl_area_ptr == (void*)-1) _exit(1);
 
     /* Write something into the bitmap so that even with low AFL_INST_RATIO,
        our parent doesn't give up on us. */
@@ -123,16 +125,15 @@ static void __afl_map_shm(void) {
 
 }
 
-
 /* Fork server logic. */
 
 static void __afl_start_forkserver(void) {
 
   static u8 tmp[4];
-  s32 child_pid;
+  s32       child_pid;
+
+  u8 child_stopped = 0;
 
-  u8  child_stopped = 0;
-  
   void (*old_sigchld_handler)(int) = signal(SIGCHLD, SIG_DFL);
 
   /* Phone home and tell the parent that we're OK. If parent isn't there,
@@ -154,8 +155,10 @@ static void __afl_start_forkserver(void) {
        process. */
 
     if (child_stopped && was_killed) {
+
       child_stopped = 0;
       if (waitpid(child_pid, &status, 0) < 0) _exit(1);
+
     }
 
     if (!child_stopped) {
@@ -168,12 +171,13 @@ static void __afl_start_forkserver(void) {
       /* In child process: close fds, resume execution. */
 
       if (!child_pid) {
+
         signal(SIGCHLD, old_sigchld_handler);
 
         close(FORKSRV_FD);
         close(FORKSRV_FD + 1);
         return;
-  
+
       }
 
     } else {
@@ -207,7 +211,6 @@ static void __afl_start_forkserver(void) {
 
 }
 
-
 /* A simplified persistent mode handler, used as explained in README.llvm. */
 
 int __afl_persistent_loop(unsigned int max_cnt) {
@@ -227,9 +230,10 @@ int __afl_persistent_loop(unsigned int max_cnt) {
       memset(__afl_area_ptr, 0, MAP_SIZE);
       __afl_area_ptr[0] = 1;
       __afl_prev_loc = 0;
+
     }
 
-    cycle_cnt  = max_cnt;
+    cycle_cnt = max_cnt;
     first_pass = 0;
     return 1;
 
@@ -262,7 +266,6 @@ int __afl_persistent_loop(unsigned int max_cnt) {
 
 }
 
-
 /* This one can be called from user code when deferred forkserver mode
     is enabled. */
 
@@ -280,7 +283,6 @@ void __afl_manual_init(void) {
 
 }
 
-
 /* Proper initialization routine. */
 
 __attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) {
@@ -293,7 +295,6 @@ __attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) {
 
 }
 
-
 /* The following stuff deals with supporting -fsanitize-coverage=trace-pc-guard.
    It remains non-operational in the traditional, plugin-backed LLVM mode.
    For more info about 'trace-pc-guard', see README.llvm.
@@ -302,9 +303,10 @@ __attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) {
    edge (as opposed to every basic block). */
 
 void __sanitizer_cov_trace_pc_guard(uint32_t* guard) {
+
   __afl_area_ptr[*guard]++;
-}
 
+}
 
 /* Init callback. Populates instrumentation IDs. Note that we're using
    ID of 0 as a special value to indicate non-instrumented bits. That may
@@ -321,8 +323,10 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t* start, uint32_t* stop) {
   if (x) inst_ratio = atoi(x);
 
   if (!inst_ratio || inst_ratio > 100) {
+
     fprintf(stderr, "[-] ERROR: Invalid AFL_INST_RATIO (must be 1-100).\n");
     abort();
+
   }
 
   /* Make sure that the first element in the range is always set - we use that
@@ -333,11 +337,14 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t* start, uint32_t* stop) {
 
   while (start < stop) {
 
-    if (R(100) < inst_ratio) *start = R(MAP_SIZE - 1) + 1;
-    else *start = 0;
+    if (R(100) < inst_ratio)
+      *start = R(MAP_SIZE - 1) + 1;
+    else
+      *start = 0;
 
     start++;
 
   }
 
 }
+
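[Aside, not part of the patch: the persistent-mode handler above is meant to be driven from a target harness, normally through the __AFL_LOOP() macro described in README.llvm. A minimal harness sketch follows; process_input() is a hypothetical stand-in for the code under test.

  #include <unistd.h>

  int  __afl_persistent_loop(unsigned int max_cnt);           /* afl-llvm-rt */
  void process_input(const unsigned char *buf, ssize_t len);  /* hypothetical */

  int main(void) {

    static unsigned char buf[4096];

    /* Each iteration is one fuzzed execution; after max_cnt iterations
       the process exits and the fork server spawns a fresh copy. */
    while (__afl_persistent_loop(1000)) {

      ssize_t len = read(0, buf, sizeof(buf));
      if (len > 0) process_input(buf, len);

    }

    return 0;

  }
]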
diff --git a/llvm_mode/compare-transform-pass.so.cc b/llvm_mode/compare-transform-pass.so.cc
index e7886db1..e1b6e671 100644
--- a/llvm_mode/compare-transform-pass.so.cc
+++ b/llvm_mode/compare-transform-pass.so.cc
@@ -36,202 +36,236 @@ using namespace llvm;
 
 namespace {
 
-  class CompareTransform : public ModulePass {
+class CompareTransform : public ModulePass {
 
-    public:
-      static char ID;
-      CompareTransform() : ModulePass(ID) {
-      } 
+ public:
+  static char ID;
+  CompareTransform() : ModulePass(ID) {
 
-      bool runOnModule(Module &M) override;
+  }
+
+  bool runOnModule(Module &M) override;
 
 #if LLVM_VERSION_MAJOR < 4
-      const char * getPassName() const override {
+  const char *getPassName() const override {
+
 #else
-      StringRef getPassName() const override {
+  StringRef getPassName() const override {
+
 #endif
-        return "transforms compare functions";
-      }
-    private:
-      bool transformCmps(Module &M, const bool processStrcmp, const bool processMemcmp
-        ,const bool processStrncmp, const bool processStrcasecmp, const bool processStrncasecmp);
-  };
-}
+    return "transforms compare functions";
 
+  }
+
+ private:
+  bool transformCmps(Module &M, const bool processStrcmp,
+                     const bool processMemcmp, const bool processStrncmp,
+                     const bool processStrcasecmp,
+                     const bool processStrncasecmp);
+
+};
+
+}  // namespace
 
 char CompareTransform::ID = 0;
 
-bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const bool processMemcmp
-  , const bool processStrncmp, const bool processStrcasecmp, const bool processStrncasecmp) {
+bool CompareTransform::transformCmps(Module &M, const bool processStrcmp,
+                                     const bool processMemcmp,
+                                     const bool processStrncmp,
+                                     const bool processStrcasecmp,
+                                     const bool processStrncasecmp) {
 
-  std::vector<CallInst*> calls;
-  LLVMContext &C = M.getContext();
-  IntegerType *Int8Ty = IntegerType::getInt8Ty(C);
-  IntegerType *Int32Ty = IntegerType::getInt32Ty(C);
-  IntegerType *Int64Ty = IntegerType::getInt64Ty(C);
+  std::vector<CallInst *> calls;
+  LLVMContext &           C = M.getContext();
+  IntegerType *           Int8Ty = IntegerType::getInt8Ty(C);
+  IntegerType *           Int32Ty = IntegerType::getInt32Ty(C);
+  IntegerType *           Int64Ty = IntegerType::getInt64Ty(C);
 
 #if LLVM_VERSION_MAJOR < 9
-  Constant* 
+  Constant *
 #else
   FunctionCallee
 #endif
-               c = M.getOrInsertFunction("tolower",
-                                         Int32Ty,
-                                         Int32Ty
+      c = M.getOrInsertFunction("tolower", Int32Ty, Int32Ty
 #if LLVM_VERSION_MAJOR < 5
-					 , nullptr
+                                ,
+                                nullptr
 #endif
-					 );
+      );
 #if LLVM_VERSION_MAJOR < 9
-  Function* tolowerFn = cast<Function>(c);
+  Function *tolowerFn = cast<Function>(c);
 #else
   FunctionCallee tolowerFn = c;
 #endif
 
-  /* iterate over all functions, bbs and instruction and add suitable calls to strcmp/memcmp/strncmp/strcasecmp/strncasecmp */
+  /* iterate over all functions, bbs and instruction and add suitable calls to
+   * strcmp/memcmp/strncmp/strcasecmp/strncasecmp */
   for (auto &F : M) {
+
     for (auto &BB : F) {
-      for(auto &IN: BB) {
-        CallInst* callInst = nullptr;
+
+      for (auto &IN : BB) {
+
+        CallInst *callInst = nullptr;
 
         if ((callInst = dyn_cast<CallInst>(&IN))) {
 
-          bool isStrcmp      = processStrcmp;
-          bool isMemcmp      = processMemcmp;
-          bool isStrncmp     = processStrncmp;
-          bool isStrcasecmp  = processStrcasecmp;
+          bool isStrcmp = processStrcmp;
+          bool isMemcmp = processMemcmp;
+          bool isStrncmp = processStrncmp;
+          bool isStrcasecmp = processStrcasecmp;
           bool isStrncasecmp = processStrncasecmp;
 
           Function *Callee = callInst->getCalledFunction();
-          if (!Callee)
-            continue;
-          if (callInst->getCallingConv() != llvm::CallingConv::C)
-            continue;
+          if (!Callee) continue;
+          if (callInst->getCallingConv() != llvm::CallingConv::C) continue;
           StringRef FuncName = Callee->getName();
-          isStrcmp      &= !FuncName.compare(StringRef("strcmp"));
-          isMemcmp      &= !FuncName.compare(StringRef("memcmp"));
-          isStrncmp     &= !FuncName.compare(StringRef("strncmp"));
-          isStrcasecmp  &= !FuncName.compare(StringRef("strcasecmp"));
+          isStrcmp &= !FuncName.compare(StringRef("strcmp"));
+          isMemcmp &= !FuncName.compare(StringRef("memcmp"));
+          isStrncmp &= !FuncName.compare(StringRef("strncmp"));
+          isStrcasecmp &= !FuncName.compare(StringRef("strcasecmp"));
           isStrncasecmp &= !FuncName.compare(StringRef("strncasecmp"));
 
-          if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp && !isStrncasecmp)
+          if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp &&
+              !isStrncasecmp)
             continue;
 
-          /* Verify the strcmp/memcmp/strncmp/strcasecmp/strncasecmp function prototype */
+          /* Verify the strcmp/memcmp/strncmp/strcasecmp/strncasecmp function
+           * prototype */
           FunctionType *FT = Callee->getFunctionType();
 
-
-          isStrcmp      &= FT->getNumParams() == 2 &&
-                      FT->getReturnType()->isIntegerTy(32) &&
-                      FT->getParamType(0) == FT->getParamType(1) &&
-                      FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext());
-          isStrcasecmp  &= FT->getNumParams() == 2 &&
-                      FT->getReturnType()->isIntegerTy(32) &&
-                      FT->getParamType(0) == FT->getParamType(1) &&
-                      FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext());
-          isMemcmp      &= FT->getNumParams() == 3 &&
+          isStrcmp &=
+              FT->getNumParams() == 2 && FT->getReturnType()->isIntegerTy(32) &&
+              FT->getParamType(0) == FT->getParamType(1) &&
+              FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext());
+          isStrcasecmp &=
+              FT->getNumParams() == 2 && FT->getReturnType()->isIntegerTy(32) &&
+              FT->getParamType(0) == FT->getParamType(1) &&
+              FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext());
+          isMemcmp &= FT->getNumParams() == 3 &&
                       FT->getReturnType()->isIntegerTy(32) &&
                       FT->getParamType(0)->isPointerTy() &&
                       FT->getParamType(1)->isPointerTy() &&
                       FT->getParamType(2)->isIntegerTy();
-          isStrncmp     &= FT->getNumParams() == 3 &&
-                      FT->getReturnType()->isIntegerTy(32) &&
-                      FT->getParamType(0) == FT->getParamType(1) &&
-                      FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext()) &&
-                      FT->getParamType(2)->isIntegerTy();
+          isStrncmp &= FT->getNumParams() == 3 &&
+                       FT->getReturnType()->isIntegerTy(32) &&
+                       FT->getParamType(0) == FT->getParamType(1) &&
+                       FT->getParamType(0) ==
+                           IntegerType::getInt8PtrTy(M.getContext()) &&
+                       FT->getParamType(2)->isIntegerTy();
           isStrncasecmp &= FT->getNumParams() == 3 &&
-                      FT->getReturnType()->isIntegerTy(32) &&
-                      FT->getParamType(0) == FT->getParamType(1) &&
-                      FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext()) &&
-                      FT->getParamType(2)->isIntegerTy();
-
-          if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp && !isStrncasecmp)
+                           FT->getReturnType()->isIntegerTy(32) &&
+                           FT->getParamType(0) == FT->getParamType(1) &&
+                           FT->getParamType(0) ==
+                               IntegerType::getInt8PtrTy(M.getContext()) &&
+                           FT->getParamType(2)->isIntegerTy();
+
+          if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp &&
+              !isStrncasecmp)
             continue;
 
           /* is a str{n,}{case,}cmp/memcmp, check if we have
            * str{case,}cmp(x, "const") or str{case,}cmp("const", x)
            * strn{case,}cmp(x, "const", ..) or strn{case,}cmp("const", x, ..)
            * memcmp(x, "const", ..) or memcmp("const", x, ..) */
-          Value *Str1P = callInst->getArgOperand(0), *Str2P = callInst->getArgOperand(1);
+          Value *Str1P = callInst->getArgOperand(0),
+                *Str2P = callInst->getArgOperand(1);
           StringRef Str1, Str2;
-          bool HasStr1 = getConstantStringInfo(Str1P, Str1);
-          bool HasStr2 = getConstantStringInfo(Str2P, Str2);
+          bool      HasStr1 = getConstantStringInfo(Str1P, Str1);
+          bool      HasStr2 = getConstantStringInfo(Str2P, Str2);
 
           /* handle cases of one string is const, one string is variable */
-          if (!(HasStr1 ^ HasStr2))
-            continue;
+          if (!(HasStr1 ^ HasStr2)) continue;
 
           if (isMemcmp || isStrncmp || isStrncasecmp) {
+
             /* check if third operand is a constant integer
              * strlen("constStr") and sizeof() are treated as constant */
-            Value *op2 = callInst->getArgOperand(2);
-            ConstantInt* ilen = dyn_cast<ConstantInt>(op2);
-            if (!ilen)
-              continue;
-            /* final precaution: if size of compare is larger than constant string skip it*/
-            uint64_t literalLength = HasStr1 ? GetStringLength(Str1P) : GetStringLength(Str2P);
-            if (literalLength < ilen->getZExtValue())
-              continue;
+            Value *      op2 = callInst->getArgOperand(2);
+            ConstantInt *ilen = dyn_cast<ConstantInt>(op2);
+            if (!ilen) continue;
+            /* final precaution: if size of compare is larger than constant
+             * string skip it*/
+            uint64_t literalLength =
+                HasStr1 ? GetStringLength(Str1P) : GetStringLength(Str2P);
+            if (literalLength < ilen->getZExtValue()) continue;
+
           }
 
           calls.push_back(callInst);
+
         }
+
       }
+
     }
+
   }
 
-  if (!calls.size())
-    return false;
-  errs() << "Replacing " << calls.size() << " calls to strcmp/memcmp/strncmp/strcasecmp/strncasecmp\n";
+  if (!calls.size()) return false;
+  errs() << "Replacing " << calls.size()
+         << " calls to strcmp/memcmp/strncmp/strcasecmp/strncasecmp\n";
 
-  for (auto &callInst: calls) {
+  for (auto &callInst : calls) {
 
-    Value *Str1P = callInst->getArgOperand(0), *Str2P = callInst->getArgOperand(1);
-    StringRef Str1, Str2, ConstStr;
+    Value *Str1P = callInst->getArgOperand(0),
+          *Str2P = callInst->getArgOperand(1);
+    StringRef   Str1, Str2, ConstStr;
     std::string TmpConstStr;
-    Value *VarStr;
-    bool HasStr1 = getConstantStringInfo(Str1P, Str1);
+    Value *     VarStr;
+    bool        HasStr1 = getConstantStringInfo(Str1P, Str1);
     getConstantStringInfo(Str2P, Str2);
     uint64_t constLen, sizedLen;
-    bool isMemcmp          = !callInst->getCalledFunction()->getName().compare(StringRef("memcmp"));
-    bool isSizedcmp        = isMemcmp
-		          || !callInst->getCalledFunction()->getName().compare(StringRef("strncmp"))
-		          || !callInst->getCalledFunction()->getName().compare(StringRef("strncasecmp"));
-    bool isCaseInsensitive = !callInst->getCalledFunction()->getName().compare(StringRef("strcasecmp"))
-		          || !callInst->getCalledFunction()->getName().compare(StringRef("strncasecmp"));
+    bool     isMemcmp =
+        !callInst->getCalledFunction()->getName().compare(StringRef("memcmp"));
+    bool isSizedcmp = isMemcmp ||
+                      !callInst->getCalledFunction()->getName().compare(
+                          StringRef("strncmp")) ||
+                      !callInst->getCalledFunction()->getName().compare(
+                          StringRef("strncasecmp"));
+    bool isCaseInsensitive = !callInst->getCalledFunction()->getName().compare(
+                                 StringRef("strcasecmp")) ||
+                             !callInst->getCalledFunction()->getName().compare(
+                                 StringRef("strncasecmp"));
 
     if (isSizedcmp) {
-      Value *op2 = callInst->getArgOperand(2);
-      ConstantInt* ilen = dyn_cast<ConstantInt>(op2);
+
+      Value *      op2 = callInst->getArgOperand(2);
+      ConstantInt *ilen = dyn_cast<ConstantInt>(op2);
       sizedLen = ilen->getZExtValue();
+
     }
 
     if (HasStr1) {
+
       TmpConstStr = Str1.str();
       VarStr = Str2P;
       constLen = isMemcmp ? sizedLen : GetStringLength(Str1P);
-    }
-    else {
+
+    } else {
+
       TmpConstStr = Str2.str();
       VarStr = Str1P;
       constLen = isMemcmp ? sizedLen : GetStringLength(Str2P);
+
     }
 
     /* properly handle zero terminated C strings by adding the terminating 0 to
      * the StringRef (in comparison to std::string a StringRef has built-in
      * runtime bounds checking, which makes debugging easier) */
-    TmpConstStr.append("\0", 1); ConstStr = StringRef(TmpConstStr);
+    TmpConstStr.append("\0", 1);
+    ConstStr = StringRef(TmpConstStr);
 
-    if (isSizedcmp && constLen > sizedLen) {
-      constLen = sizedLen;
-    }
+    if (isSizedcmp && constLen > sizedLen) { constLen = sizedLen; }
 
-    errs() << callInst->getCalledFunction()->getName() << ": len " << constLen << ": " << ConstStr << "\n";
+    errs() << callInst->getCalledFunction()->getName() << ": len " << constLen
+           << ": " << ConstStr << "\n";
 
     /* split before the call instruction */
     BasicBlock *bb = callInst->getParent();
     BasicBlock *end_bb = bb->splitBasicBlock(BasicBlock::iterator(callInst));
-    BasicBlock *next_bb =  BasicBlock::Create(C, "cmp_added", end_bb->getParent(), end_bb);
+    BasicBlock *next_bb =
+        BasicBlock::Create(C, "cmp_added", end_bb->getParent(), end_bb);
     BranchInst::Create(end_bb, next_bb);
     PHINode *PN = PHINode::Create(Int32Ty, constLen + 1, "cmp_phi");
 
@@ -249,71 +283,81 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const
 
       char c = isCaseInsensitive ? tolower(ConstStr[i]) : ConstStr[i];
 
-
       BasicBlock::iterator IP = next_bb->getFirstInsertionPt();
-      IRBuilder<> IRB(&*IP);
+      IRBuilder<>          IRB(&*IP);
 
-      Value* v = ConstantInt::get(Int64Ty, i);
-      Value *ele  = IRB.CreateInBoundsGEP(VarStr, v, "empty");
+      Value *v = ConstantInt::get(Int64Ty, i);
+      Value *ele = IRB.CreateInBoundsGEP(VarStr, v, "empty");
       Value *load = IRB.CreateLoad(ele);
       if (isCaseInsensitive) {
+
         // load >= 'A' && load <= 'Z' ? load | 0x020 : load
         std::vector<Value *> args;
         args.push_back(load);
         load = IRB.CreateCall(tolowerFn, args, "tmp");
         load = IRB.CreateTrunc(load, Int8Ty);
+
       }
+
       Value *isub;
       if (HasStr1)
         isub = IRB.CreateSub(ConstantInt::get(Int8Ty, c), load);
       else
         isub = IRB.CreateSub(load, ConstantInt::get(Int8Ty, c));
 
-      Value *sext = IRB.CreateSExt(isub, Int32Ty); 
+      Value *sext = IRB.CreateSExt(isub, Int32Ty);
       PN->addIncoming(sext, cur_bb);
 
-
       if (i < constLen - 1) {
-        next_bb =  BasicBlock::Create(C, "cmp_added", end_bb->getParent(), end_bb);
+
+        next_bb =
+            BasicBlock::Create(C, "cmp_added", end_bb->getParent(), end_bb);
         BranchInst::Create(end_bb, next_bb);
 
         Value *icmp = IRB.CreateICmpEQ(isub, ConstantInt::get(Int8Ty, 0));
         IRB.CreateCondBr(icmp, next_bb, end_bb);
         cur_bb->getTerminator()->eraseFromParent();
+
       } else {
-        //IRB.CreateBr(end_bb);
+
+        // IRB.CreateBr(end_bb);
+
       }
 
-      //add offset to varstr
-      //create load
-      //create signed isub
-      //create icmp
-      //create jcc
-      //create next_bb
+      // add offset to varstr
+      // create load
+      // create signed isub
+      // create icmp
+      // create jcc
+      // create next_bb
+
     }
 
     /* since the call is the first instruction of the bb it is safe to
      * replace it with a phi instruction */
     BasicBlock::iterator ii(callInst);
     ReplaceInstWithInst(callInst->getParent()->getInstList(), ii, PN);
-  }
 
+  }
 
   return true;
+
 }
 
 bool CompareTransform::runOnModule(Module &M) {
 
   if (getenv("AFL_QUIET") == NULL)
-    llvm::errs() << "Running compare-transform-pass by laf.intel@gmail.com, extended by heiko@hexco.de\n";
+    llvm::errs() << "Running compare-transform-pass by laf.intel@gmail.com, "
+                    "extended by heiko@hexco.de\n";
   transformCmps(M, true, true, true, true, true);
   verifyModule(M);
 
   return true;
+
 }
 
 static void registerCompTransPass(const PassManagerBuilder &,
-                            legacy::PassManagerBase &PM) {
+                                  legacy::PassManagerBase &PM) {
 
   auto p = new CompareTransform();
   PM.add(p);
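[Aside, not part of the patch: the effect of the transformation above on a call with one constant operand can be written out by hand. For strcmp(x, "foo") the pass emits one single-byte compare per constant character, each in its own basic block, so coverage feedback can observe partial matches. The C sketch below is illustrative only; the i8 subtraction and sign extension mirror the CreateSub/CreateSExt calls above, and the appended terminating NUL is compared as well.

  #include <stdint.h>

  static int strcmp_x_foo(const char *x) {

    int d;
    if ((d = (int8_t)(x[0] - 'f')) != 0) return d;
    if ((d = (int8_t)(x[1] - 'o')) != 0) return d;
    if ((d = (int8_t)(x[2] - 'o')) != 0) return d;
    return (int8_t)(x[3] - '\0');  /* the appended terminating 0 */

  }
]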
diff --git a/llvm_mode/split-compares-pass.so.cc b/llvm_mode/split-compares-pass.so.cc
index a74b60fa..1e9d6542 100644
--- a/llvm_mode/split-compares-pass.so.cc
+++ b/llvm_mode/split-compares-pass.so.cc
@@ -27,117 +27,126 @@
 using namespace llvm;
 
 namespace {
-  class SplitComparesTransform : public ModulePass {
-    public:
-      static char ID;
-      SplitComparesTransform() : ModulePass(ID) {}
 
-      bool runOnModule(Module &M) override;
+class SplitComparesTransform : public ModulePass {
+
+ public:
+  static char ID;
+  SplitComparesTransform() : ModulePass(ID) {
+
+  }
+
+  bool runOnModule(Module &M) override;
 #if LLVM_VERSION_MAJOR >= 4
-      StringRef getPassName() const override {
+  StringRef getPassName() const override {
+
 #else
-      const char * getPassName() const override {
+  const char *getPassName() const override {
+
 #endif
-        return "simplifies and splits ICMP instructions";
-      }
-    private:
-      bool splitCompares(Module &M, unsigned bitw);
-      bool simplifyCompares(Module &M);
-      bool simplifySignedness(Module &M);
+    return "simplifies and splits ICMP instructions";
 
-  };
-}
+  }
+
+ private:
+  bool splitCompares(Module &M, unsigned bitw);
+  bool simplifyCompares(Module &M);
+  bool simplifySignedness(Module &M);
+
+};
+
+}  // namespace
 
 char SplitComparesTransform::ID = 0;
 
-/* This function splits ICMP instructions with xGE or xLE predicates into two 
+/* This function splits ICMP instructions with xGE or xLE predicates into two
  * ICMP instructions with predicate xGT or xLT and EQ */
 bool SplitComparesTransform::simplifyCompares(Module &M) {
-  LLVMContext &C = M.getContext();
-  std::vector<Instruction*> icomps;
-  IntegerType *Int1Ty = IntegerType::getInt1Ty(C);
+
+  LLVMContext &              C = M.getContext();
+  std::vector<Instruction *> icomps;
+  IntegerType *              Int1Ty = IntegerType::getInt1Ty(C);
 
   /* iterate over all functions, bbs and instruction and add
    * all integer comparisons with >= and <= predicates to the icomps vector */
   for (auto &F : M) {
+
     for (auto &BB : F) {
-      for (auto &IN: BB) {
-        CmpInst* selectcmpInst = nullptr;
+
+      for (auto &IN : BB) {
+
+        CmpInst *selectcmpInst = nullptr;
 
         if ((selectcmpInst = dyn_cast<CmpInst>(&IN))) {
 
           if (selectcmpInst->getPredicate() != CmpInst::ICMP_UGE &&
               selectcmpInst->getPredicate() != CmpInst::ICMP_SGE &&
               selectcmpInst->getPredicate() != CmpInst::ICMP_ULE &&
-              selectcmpInst->getPredicate() != CmpInst::ICMP_SLE ) {
+              selectcmpInst->getPredicate() != CmpInst::ICMP_SLE) {
+
             continue;
+
           }
 
           auto op0 = selectcmpInst->getOperand(0);
           auto op1 = selectcmpInst->getOperand(1);
 
-          IntegerType* intTyOp0 = dyn_cast<IntegerType>(op0->getType());
-          IntegerType* intTyOp1 = dyn_cast<IntegerType>(op1->getType());
+          IntegerType *intTyOp0 = dyn_cast<IntegerType>(op0->getType());
+          IntegerType *intTyOp1 = dyn_cast<IntegerType>(op1->getType());
 
           /* this is probably not needed but we do it anyway */
-          if (!intTyOp0 || !intTyOp1) {
-            continue;
-          }
+          if (!intTyOp0 || !intTyOp1) { continue; }
 
           icomps.push_back(selectcmpInst);
+
         }
+
       }
+
     }
-  }
 
-  if (!icomps.size()) {
-    return false;
   }
 
+  if (!icomps.size()) { return false; }
+
+  for (auto &IcmpInst : icomps) {
 
-  for (auto &IcmpInst: icomps) {
-    BasicBlock* bb = IcmpInst->getParent();
+    BasicBlock *bb = IcmpInst->getParent();
 
     auto op0 = IcmpInst->getOperand(0);
     auto op1 = IcmpInst->getOperand(1);
 
     /* find out what the new predicate is going to be */
-    auto pred = dyn_cast<CmpInst>(IcmpInst)->getPredicate();
+    auto               pred = dyn_cast<CmpInst>(IcmpInst)->getPredicate();
     CmpInst::Predicate new_pred;
-    switch(pred) {
-      case CmpInst::ICMP_UGE:
-        new_pred = CmpInst::ICMP_UGT;
-        break;
-      case CmpInst::ICMP_SGE:
-        new_pred = CmpInst::ICMP_SGT;
-        break;
-      case CmpInst::ICMP_ULE:
-        new_pred = CmpInst::ICMP_ULT;
-        break;
-      case CmpInst::ICMP_SLE:
-        new_pred = CmpInst::ICMP_SLT;
-        break;
-      default: // keep the compiler happy
+    switch (pred) {
+
+      case CmpInst::ICMP_UGE: new_pred = CmpInst::ICMP_UGT; break;
+      case CmpInst::ICMP_SGE: new_pred = CmpInst::ICMP_SGT; break;
+      case CmpInst::ICMP_ULE: new_pred = CmpInst::ICMP_ULT; break;
+      case CmpInst::ICMP_SLE: new_pred = CmpInst::ICMP_SLT; break;
+      default:  // keep the compiler happy
         continue;
+
     }
 
     /* split before the icmp instruction */
-    BasicBlock* end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst));
+    BasicBlock *end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst));
 
     /* the old bb now contains a unconditional jump to the new one (end_bb)
      * we need to delete it later */
 
     /* create the ICMP instruction with new_pred and add it to the old basic
      * block bb it is now at the position where the old IcmpInst was */
-    Instruction* icmp_np;
+    Instruction *icmp_np;
     icmp_np = CmpInst::Create(Instruction::ICmp, new_pred, op0, op1);
     bb->getInstList().insert(bb->getTerminator()->getIterator(), icmp_np);
 
     /* create a new basic block which holds the new EQ icmp */
     Instruction *icmp_eq;
     /* insert middle_bb before end_bb */
-    BasicBlock* middle_bb =  BasicBlock::Create(C, "injected",
-      end_bb->getParent(), end_bb);
+    BasicBlock *middle_bb =
+        BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb);
     icmp_eq = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, op0, op1);
     middle_bb->getInstList().push_back(icmp_eq);
     /* add an unconditional branch to the end of middle_bb with destination
@@ -150,7 +159,6 @@ bool SplitComparesTransform::simplifyCompares(Module &M) {
     BranchInst::Create(end_bb, middle_bb, icmp_np, bb);
     term->eraseFromParent();
 
-
     /* replace the old IcmpInst (which is the first inst in end_bb) with a PHI
      * inst to wire up the loose ends */
     PHINode *PN = PHINode::Create(Int1Ty, 2, "");
@@ -162,118 +170,139 @@ bool SplitComparesTransform::simplifyCompares(Module &M) {
     /* replace the old IcmpInst with our new and shiny PHI inst */
     BasicBlock::iterator ii(IcmpInst);
     ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN);
+
   }
 
   return true;
+
 }
 
 /* this function transforms signed compares to equivalent unsigned compares */
 bool SplitComparesTransform::simplifySignedness(Module &M) {
-  LLVMContext &C = M.getContext();
-  std::vector<Instruction*> icomps;
-  IntegerType *Int1Ty = IntegerType::getInt1Ty(C);
+
+  LLVMContext &              C = M.getContext();
+  std::vector<Instruction *> icomps;
+  IntegerType *              Int1Ty = IntegerType::getInt1Ty(C);
 
   /* iterate over all functions, bbs and instruction and add
    * all signed compares to icomps vector */
   for (auto &F : M) {
+
     for (auto &BB : F) {
-      for(auto &IN: BB) {
-        CmpInst* selectcmpInst = nullptr;
+
+      for (auto &IN : BB) {
+
+        CmpInst *selectcmpInst = nullptr;
 
         if ((selectcmpInst = dyn_cast<CmpInst>(&IN))) {
 
           if (selectcmpInst->getPredicate() != CmpInst::ICMP_SGT &&
-             selectcmpInst->getPredicate() != CmpInst::ICMP_SLT
-             ) {
+              selectcmpInst->getPredicate() != CmpInst::ICMP_SLT) {
+
             continue;
+
           }
 
           auto op0 = selectcmpInst->getOperand(0);
           auto op1 = selectcmpInst->getOperand(1);
 
-          IntegerType* intTyOp0 = dyn_cast<IntegerType>(op0->getType());
-          IntegerType* intTyOp1 = dyn_cast<IntegerType>(op1->getType());
+          IntegerType *intTyOp0 = dyn_cast<IntegerType>(op0->getType());
+          IntegerType *intTyOp1 = dyn_cast<IntegerType>(op1->getType());
 
           /* see above */
-          if (!intTyOp0 || !intTyOp1) {
-            continue;
-          }
+          if (!intTyOp0 || !intTyOp1) { continue; }
 
          /* i think this is not possible but too lazy to look it up */
-          if (intTyOp0->getBitWidth() != intTyOp1->getBitWidth()) {
-            continue;
-          }
+          if (intTyOp0->getBitWidth() != intTyOp1->getBitWidth()) { continue; }
 
           icomps.push_back(selectcmpInst);
+
         }
+
       }
+
     }
-  }
 
-  if (!icomps.size()) {
-    return false;
   }
 
-  for (auto &IcmpInst: icomps) {
-    BasicBlock* bb = IcmpInst->getParent();
+  if (!icomps.size()) { return false; }
+
+  for (auto &IcmpInst : icomps) {
+
+    BasicBlock *bb = IcmpInst->getParent();
 
     auto op0 = IcmpInst->getOperand(0);
     auto op1 = IcmpInst->getOperand(1);
 
-    IntegerType* intTyOp0 = dyn_cast<IntegerType>(op0->getType());
-    unsigned bitw = intTyOp0->getBitWidth();
+    IntegerType *intTyOp0 = dyn_cast<IntegerType>(op0->getType());
+    unsigned     bitw = intTyOp0->getBitWidth();
     IntegerType *IntType = IntegerType::get(C, bitw);
 
-
     /* get the new predicate */
-    auto pred = dyn_cast<CmpInst>(IcmpInst)->getPredicate();
+    auto               pred = dyn_cast<CmpInst>(IcmpInst)->getPredicate();
     CmpInst::Predicate new_pred;
     if (pred == CmpInst::ICMP_SGT) {
+
       new_pred = CmpInst::ICMP_UGT;
+
     } else {
+
       new_pred = CmpInst::ICMP_ULT;
+
     }
 
-    BasicBlock* end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst));
+    BasicBlock *end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst));
 
     /* create a 1 bit compare for the sign bit. to do this shift and trunc
      * the original operands so only the first bit remains.*/
     Instruction *s_op0, *t_op0, *s_op1, *t_op1, *icmp_sign_bit;
 
-    s_op0 = BinaryOperator::Create(Instruction::LShr, op0, ConstantInt::get(IntType, bitw - 1));
+    s_op0 = BinaryOperator::Create(Instruction::LShr, op0,
+                                   ConstantInt::get(IntType, bitw - 1));
     bb->getInstList().insert(bb->getTerminator()->getIterator(), s_op0);
     t_op0 = new TruncInst(s_op0, Int1Ty);
     bb->getInstList().insert(bb->getTerminator()->getIterator(), t_op0);
 
-    s_op1 = BinaryOperator::Create(Instruction::LShr, op1, ConstantInt::get(IntType, bitw - 1));
+    s_op1 = BinaryOperator::Create(Instruction::LShr, op1,
+                                   ConstantInt::get(IntType, bitw - 1));
     bb->getInstList().insert(bb->getTerminator()->getIterator(), s_op1);
     t_op1 = new TruncInst(s_op1, Int1Ty);
     bb->getInstList().insert(bb->getTerminator()->getIterator(), t_op1);
 
     /* compare of the sign bits */
-    icmp_sign_bit = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, t_op0, t_op1);
+    icmp_sign_bit =
+        CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, t_op0, t_op1);
     bb->getInstList().insert(bb->getTerminator()->getIterator(), icmp_sign_bit);
 
     /* create a new basic block which is executed if the signedness bit is
-     * different */ 
+     * different */
     Instruction *icmp_inv_sig_cmp;
-    BasicBlock* sign_bb = BasicBlock::Create(C, "sign", end_bb->getParent(), end_bb);
+    BasicBlock * sign_bb =
+        BasicBlock::Create(C, "sign", end_bb->getParent(), end_bb);
     if (pred == CmpInst::ICMP_SGT) {
+
+      /* if we check for > and op0 is positive while op1 is negative, the
+       * final result is true. if op0 is negative and op1 positive, the cmp
+       * must result in false
        */
-      icmp_inv_sig_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT, t_op0, t_op1);
+      icmp_inv_sig_cmp =
+          CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT, t_op0, t_op1);
+
     } else {
+
       /* just the inverse of the above statement */
-      icmp_inv_sig_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT, t_op0, t_op1);
+      icmp_inv_sig_cmp =
+          CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT, t_op0, t_op1);
+
     }
+
     sign_bb->getInstList().push_back(icmp_inv_sig_cmp);
     BranchInst::Create(end_bb, sign_bb);
 
     /* create a new bb which is executed if signedness is equal */
     Instruction *icmp_usign_cmp;
-    BasicBlock* middle_bb =  BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb);
+    BasicBlock * middle_bb =
+        BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb);
     /* we can do a normal unsigned compare now */
     icmp_usign_cmp = CmpInst::Create(Instruction::ICmp, new_pred, op0, op1);
     middle_bb->getInstList().push_back(icmp_usign_cmp);
@@ -285,7 +314,6 @@ bool SplitComparesTransform::simplifySignedness(Module &M) {
     BranchInst::Create(middle_bb, sign_bb, icmp_sign_bit, bb);
     term->eraseFromParent();
 
-
     PHINode *PN = PHINode::Create(Int1Ty, 2, "");
 
     PN->addIncoming(icmp_usign_cmp, middle_bb);
@@ -293,91 +321,100 @@ bool SplitComparesTransform::simplifySignedness(Module &M) {
 
     BasicBlock::iterator ii(IcmpInst);
     ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN);
+
   }
 
   return true;
+
 }
 
 /* splits icmps of size bitw into two nested icmps with bitw/2 size each */
 bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
+
   LLVMContext &C = M.getContext();
 
   IntegerType *Int1Ty = IntegerType::getInt1Ty(C);
   IntegerType *OldIntType = IntegerType::get(C, bitw);
   IntegerType *NewIntType = IntegerType::get(C, bitw / 2);
 
-  std::vector<Instruction*> icomps;
+  std::vector<Instruction *> icomps;
 
-  if (bitw % 2) {
-    return false;
-  }
+  if (bitw % 2) { return false; }
 
   /* not supported yet */
-  if (bitw > 64) {
-    return false;
-  }
+  if (bitw > 64) { return false; }
 
-  /* get all EQ, NE, UGT, and ULT icmps of width bitw. if the other two 
+  /* get all EQ, NE, UGT, and ULT icmps of width bitw. if the other two
   * functions were executed only these four predicates should exist */
   for (auto &F : M) {
+
     for (auto &BB : F) {
-      for(auto &IN: BB) {
-        CmpInst* selectcmpInst = nullptr;
+
+      for (auto &IN : BB) {
+
+        CmpInst *selectcmpInst = nullptr;
 
         if ((selectcmpInst = dyn_cast<CmpInst>(&IN))) {
 
-          if(selectcmpInst->getPredicate() != CmpInst::ICMP_EQ &&
-             selectcmpInst->getPredicate() != CmpInst::ICMP_NE &&
-             selectcmpInst->getPredicate() != CmpInst::ICMP_UGT &&
-             selectcmpInst->getPredicate() != CmpInst::ICMP_ULT
-             ) {
+          if (selectcmpInst->getPredicate() != CmpInst::ICMP_EQ &&
+              selectcmpInst->getPredicate() != CmpInst::ICMP_NE &&
+              selectcmpInst->getPredicate() != CmpInst::ICMP_UGT &&
+              selectcmpInst->getPredicate() != CmpInst::ICMP_ULT) {
+
             continue;
+
           }
 
           auto op0 = selectcmpInst->getOperand(0);
           auto op1 = selectcmpInst->getOperand(1);
 
-          IntegerType* intTyOp0 = dyn_cast<IntegerType>(op0->getType());
-          IntegerType* intTyOp1 = dyn_cast<IntegerType>(op1->getType());
+          IntegerType *intTyOp0 = dyn_cast<IntegerType>(op0->getType());
+          IntegerType *intTyOp1 = dyn_cast<IntegerType>(op1->getType());
 
-          if (!intTyOp0 || !intTyOp1) {
-            continue;
-          }
+          if (!intTyOp0 || !intTyOp1) { continue; }
 
           /* check if the bitwidths are the one we are looking for */
-          if (intTyOp0->getBitWidth() != bitw || intTyOp1->getBitWidth() != bitw) {
+          if (intTyOp0->getBitWidth() != bitw ||
+              intTyOp1->getBitWidth() != bitw) {
+
             continue;
+
           }
 
           icomps.push_back(selectcmpInst);
+
         }
+
       }
+
     }
-  }
 
-  if (!icomps.size()) {
-    return false;
   }
 
-  for (auto &IcmpInst: icomps) {
-    BasicBlock* bb = IcmpInst->getParent();
+  if (!icomps.size()) { return false; }
+
+  for (auto &IcmpInst : icomps) {
+
+    BasicBlock *bb = IcmpInst->getParent();
 
     auto op0 = IcmpInst->getOperand(0);
     auto op1 = IcmpInst->getOperand(1);
 
     auto pred = dyn_cast<CmpInst>(IcmpInst)->getPredicate();
 
-    BasicBlock* end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst));
+    BasicBlock *end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst));
 
     /* create the comparison of the top halves of the original operands */
     Instruction *s_op0, *op0_high, *s_op1, *op1_high, *icmp_high;
 
-    s_op0 = BinaryOperator::Create(Instruction::LShr, op0, ConstantInt::get(OldIntType, bitw / 2));
+    s_op0 = BinaryOperator::Create(Instruction::LShr, op0,
+                                   ConstantInt::get(OldIntType, bitw / 2));
     bb->getInstList().insert(bb->getTerminator()->getIterator(), s_op0);
     op0_high = new TruncInst(s_op0, NewIntType);
     bb->getInstList().insert(bb->getTerminator()->getIterator(), op0_high);
 
-    s_op1 = BinaryOperator::Create(Instruction::LShr, op1, ConstantInt::get(OldIntType, bitw / 2));
+    s_op1 = BinaryOperator::Create(Instruction::LShr, op1,
+                                   ConstantInt::get(OldIntType, bitw / 2));
     bb->getInstList().insert(bb->getTerminator()->getIterator(), s_op1);
     op1_high = new TruncInst(s_op1, NewIntType);
     bb->getInstList().insert(bb->getTerminator()->getIterator(), op1_high);
@@ -387,11 +424,13 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
 
    /* now we have to distinguish between == != and > < */
     if (pred == CmpInst::ICMP_EQ || pred == CmpInst::ICMP_NE) {
+
       /* transformation for == and != icmps */
 
       /* create a compare for the lower half of the original operands */
       Instruction *op0_low, *op1_low, *icmp_low;
-      BasicBlock* cmp_low_bb = BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb);
+      BasicBlock * cmp_low_bb =
+          BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb);
 
       op0_low = new TruncInst(op0, NewIntType);
       cmp_low_bb->getInstList().push_back(op0_low);
@@ -407,21 +446,30 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
        * the comparison */
       auto term = bb->getTerminator();
       if (pred == CmpInst::ICMP_EQ) {
+
         BranchInst::Create(cmp_low_bb, end_bb, icmp_high, bb);
+
       } else {
+
         /* CmpInst::ICMP_NE */
         BranchInst::Create(end_bb, cmp_low_bb, icmp_high, bb);
+
       }
+
       term->eraseFromParent();
 
       /* create the PHI and connect the edges accordingly */
       PHINode *PN = PHINode::Create(Int1Ty, 2, "");
       PN->addIncoming(icmp_low, cmp_low_bb);
       if (pred == CmpInst::ICMP_EQ) {
+
         PN->addIncoming(ConstantInt::get(Int1Ty, 0), bb);
+
       } else {
+
         /* CmpInst::ICMP_NE */
         PN->addIncoming(ConstantInt::get(Int1Ty, 1), bb);
+
       }
 
       /* replace the old icmp with the new PHI */
@@ -429,19 +477,28 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
       ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN);
 
     } else {
+
       /* CmpInst::ICMP_UGT and CmpInst::ICMP_ULT */
       /* transformations for < and > */
 
-      /* create a basic block which checks for the inverse predicate. 
+      /* create a basic block which checks for the inverse predicate.
   * if this is true we can go to the end, if not we have to go to the
        * bb which checks the lower half of the operands */
       Instruction *icmp_inv_cmp, *op0_low, *op1_low, *icmp_low;
-      BasicBlock* inv_cmp_bb = BasicBlock::Create(C, "inv_cmp", end_bb->getParent(), end_bb);
+      BasicBlock * inv_cmp_bb =
+          BasicBlock::Create(C, "inv_cmp", end_bb->getParent(), end_bb);
       if (pred == CmpInst::ICMP_UGT) {
-        icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT, op0_high, op1_high);
+
+        icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT,
+                                       op0_high, op1_high);
+
       } else {
-        icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT, op0_high, op1_high);
+
+        icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT,
+                                       op0_high, op1_high);
+
       }
+
       inv_cmp_bb->getInstList().push_back(icmp_inv_cmp);
 
       auto term = bb->getTerminator();
@@ -449,7 +506,8 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
       BranchInst::Create(end_bb, inv_cmp_bb, icmp_high, bb);
 
       /* create a bb which handles the cmp of the lower halves */
-      BasicBlock* cmp_low_bb = BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb);
+      BasicBlock *cmp_low_bb =
+          BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb);
       op0_low = new TruncInst(op0, NewIntType);
       cmp_low_bb->getInstList().push_back(op0_low);
       op1_low = new TruncInst(op1, NewIntType);
@@ -468,57 +526,64 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
 
       BasicBlock::iterator ii(IcmpInst);
       ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN);
+
     }
+
   }
-  return  true;
+
+  return true;
+
 }
 
 bool SplitComparesTransform::runOnModule(Module &M) {
+
   int bitw = 64;
 
-  char* bitw_env = getenv("LAF_SPLIT_COMPARES_BITW");
-  if (!bitw_env)
-    bitw_env = getenv("AFL_LLVM_LAF_SPLIT_COMPARES_BITW");
-  if (bitw_env) {
-    bitw = atoi(bitw_env);
-  }
+  char *bitw_env = getenv("LAF_SPLIT_COMPARES_BITW");
+  if (!bitw_env) bitw_env = getenv("AFL_LLVM_LAF_SPLIT_COMPARES_BITW");
+  if (bitw_env) { bitw = atoi(bitw_env); }
 
   simplifyCompares(M);
 
   simplifySignedness(M);
 
   if (getenv("AFL_QUIET") == NULL)
-    errs() << "Split-compare-pass by laf.intel@gmail.com\n"; 
+    errs() << "Split-compare-pass by laf.intel@gmail.com\n";
 
   switch (bitw) {
+
     case 64:
-      errs() << "Running split-compare-pass " << 64 << "\n"; 
+      errs() << "Running split-compare-pass " << 64 << "\n";
       splitCompares(M, 64);
 
-      [[clang::fallthrough]]; /*FALLTHRU*/ /* FALLTHROUGH */
+      [[clang::fallthrough]]; /*FALLTHRU*/                   /* FALLTHROUGH */
     case 32:
-      errs() << "Running split-compare-pass " << 32 << "\n"; 
+      errs() << "Running split-compare-pass " << 32 << "\n";
       splitCompares(M, 32);
 
-      [[clang::fallthrough]]; /*FALLTHRU*/ /* FALLTHROUGH */
+      [[clang::fallthrough]]; /*FALLTHRU*/                   /* FALLTHROUGH */
     case 16:
-      errs() << "Running split-compare-pass " << 16 << "\n"; 
+      errs() << "Running split-compare-pass " << 16 << "\n";
       splitCompares(M, 16);
       break;
 
     default:
-      errs() << "NOT Running split-compare-pass \n"; 
+      errs() << "NOT Running split-compare-pass \n";
       return false;
       break;
+
   }
 
   verifyModule(M);
   return true;
+
 }
 
 static void registerSplitComparesPass(const PassManagerBuilder &,
-                         legacy::PassManagerBase &PM) {
+                                      legacy::PassManagerBase &PM) {
+
   PM.add(new SplitComparesTransform());
+
 }
 
 static RegisterStandardPasses RegisterSplitComparesPass(
@@ -526,3 +591,4 @@ static RegisterStandardPasses RegisterSplitComparesPass(
 
 static RegisterStandardPasses RegisterSplitComparesTransPass0(
     PassManagerBuilder::EP_EnabledOnOptLevel0, registerSplitComparesPass);
+
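To see what the reformatted splitCompares() emits: a comparison wider than 8 bits is rewritten into a compare of the high halves plus, on a tie, a compare of the low halves, joined by a PHI node, so instrumentation observes progress per half instead of one opaque branch. A minimal C sketch of the effect on an unsigned 64-bit less-than (illustrative names, not code from this patch):

#include <stdint.h>

/* sketch: source-level shape of the split 64-bit ULT; the pass emits this
   as basic blocks (bb -> inv_cmp_bb -> cmp_low_bb) joined by a PHI */
static int lt_u64_split(uint64_t a, uint64_t b) {

  uint32_t a_hi = (uint32_t)(a >> 32), b_hi = (uint32_t)(b >> 32);

  if (a_hi < b_hi) return 1;        /* icmp_high taken: result is known */
  if (a_hi > b_hi) return 0;        /* inv_cmp_bb: inverse predicate hit */
  return (uint32_t)a < (uint32_t)b; /* cmp_low_bb: decide on the low half */

}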
diff --git a/llvm_mode/split-switches-pass.so.cc b/llvm_mode/split-switches-pass.so.cc
index 1ace3185..2743a71a 100644
--- a/llvm_mode/split-switches-pass.so.cc
+++ b/llvm_mode/split-switches-pass.so.cc
@@ -36,54 +36,65 @@ using namespace llvm;
 
 namespace {
 
-  class SplitSwitchesTransform : public ModulePass {
+class SplitSwitchesTransform : public ModulePass {
 
-    public:
-      static char ID;
-      SplitSwitchesTransform() : ModulePass(ID) {
-      } 
+ public:
+  static char ID;
+  SplitSwitchesTransform() : ModulePass(ID) {
 
-      bool runOnModule(Module &M) override;
+  }
+
+  bool runOnModule(Module &M) override;
 
 #if LLVM_VERSION_MAJOR >= 4
-      StringRef getPassName() const override {
+  StringRef getPassName() const override {
+
 #else
-      const char * getPassName() const override {
+  const char *getPassName() const override {
+
 #endif
-        return "splits switch constructs";
-      }
-      struct CaseExpr {
-        ConstantInt* Val;
-        BasicBlock* BB;
-
-        CaseExpr(ConstantInt *val = nullptr, BasicBlock *bb = nullptr) :
-          Val(val), BB(bb) { }
-      };
-
-    typedef std::vector<CaseExpr> CaseVector;
-
-    private:
-      bool splitSwitches(Module &M);
-      bool transformCmps(Module &M, const bool processStrcmp, const bool processMemcmp);
-      BasicBlock* switchConvert(CaseVector Cases, std::vector<bool> bytesChecked,
-                                BasicBlock* OrigBlock, BasicBlock* NewDefault,
-                                Value* Val, unsigned level);
+    return "splits switch constructs";
+
+  }
+
+  struct CaseExpr {
+
+    ConstantInt *Val;
+    BasicBlock * BB;
+
+    CaseExpr(ConstantInt *val = nullptr, BasicBlock *bb = nullptr)
+        : Val(val), BB(bb) {
+
+    }
+
   };
 
-}
+  typedef std::vector<CaseExpr> CaseVector;
 
-char SplitSwitchesTransform::ID = 0;
+ private:
+  bool        splitSwitches(Module &M);
+  bool        transformCmps(Module &M, const bool processStrcmp,
+                            const bool processMemcmp);
+  BasicBlock *switchConvert(CaseVector Cases, std::vector<bool> bytesChecked,
+                            BasicBlock *OrigBlock, BasicBlock *NewDefault,
+                            Value *Val, unsigned level);
+
+};
 
+}  // namespace
+
+char SplitSwitchesTransform::ID = 0;
 
 /* switchConvert - Transform simple list of Cases into list of CaseRange's */
-BasicBlock* SplitSwitchesTransform::switchConvert(CaseVector Cases, std::vector<bool> bytesChecked, 
-                                            BasicBlock* OrigBlock, BasicBlock* NewDefault,
-                                            Value* Val, unsigned level) {
-
-  unsigned ValTypeBitWidth = Cases[0].Val->getBitWidth();
-  IntegerType *ValType  = IntegerType::get(OrigBlock->getContext(), ValTypeBitWidth);
-  IntegerType *ByteType = IntegerType::get(OrigBlock->getContext(), 8);
-  unsigned BytesInValue = bytesChecked.size();
+BasicBlock *SplitSwitchesTransform::switchConvert(
+    CaseVector Cases, std::vector<bool> bytesChecked, BasicBlock *OrigBlock,
+    BasicBlock *NewDefault, Value *Val, unsigned level) {
+
+  unsigned     ValTypeBitWidth = Cases[0].Val->getBitWidth();
+  IntegerType *ValType =
+      IntegerType::get(OrigBlock->getContext(), ValTypeBitWidth);
+  IntegerType *        ByteType = IntegerType::get(OrigBlock->getContext(), 8);
+  unsigned             BytesInValue = bytesChecked.size();
   std::vector<uint8_t> setSizes;
   std::vector<std::set<uint8_t>> byteSets(BytesInValue, std::set<uint8_t>());
 
@@ -91,43 +102,54 @@ BasicBlock* SplitSwitchesTransform::switchConvert(CaseVector Cases, std::vector<
 
   /* for each of the possible cases we iterate over all bytes of the values
    * build a set of possible values at each byte position in byteSets */
-  for (CaseExpr& Case: Cases) {
+  for (CaseExpr &Case : Cases) {
+
     for (unsigned i = 0; i < BytesInValue; i++) {
 
-      uint8_t byte = (Case.Val->getZExtValue() >> (i*8)) & 0xFF;
+      uint8_t byte = (Case.Val->getZExtValue() >> (i * 8)) & 0xFF;
       byteSets[i].insert(byte);
+
     }
+
   }
 
   /* find the index of the first byte position that was not yet checked. then
    * save the number of possible values at that byte position */
   unsigned smallestIndex = 0;
   unsigned smallestSize = 257;
-  for(unsigned i = 0; i < byteSets.size(); i++) {
-    if (bytesChecked[i])
-      continue;
+  for (unsigned i = 0; i < byteSets.size(); i++) {
+
+    if (bytesChecked[i]) continue;
     if (byteSets[i].size() < smallestSize) {
+
       smallestIndex = i;
       smallestSize = byteSets[i].size();
+
     }
+
   }
+
   assert(bytesChecked[smallestIndex] == false);
 
   /* there are only smallestSize different bytes at index smallestIndex */
- 
+
   Instruction *Shift, *Trunc;
-  Function* F = OrigBlock->getParent();
-  BasicBlock* NewNode = BasicBlock::Create(Val->getContext(), "NodeBlock", F);
-  Shift = BinaryOperator::Create(Instruction::LShr, Val, ConstantInt::get(ValType, smallestIndex * 8));
+  Function *   F = OrigBlock->getParent();
+  BasicBlock * NewNode = BasicBlock::Create(Val->getContext(), "NodeBlock", F);
+  Shift = BinaryOperator::Create(Instruction::LShr, Val,
+                                 ConstantInt::get(ValType, smallestIndex * 8));
   NewNode->getInstList().push_back(Shift);
 
   if (ValTypeBitWidth > 8) {
+
     Trunc = new TruncInst(Shift, ByteType);
     NewNode->getInstList().push_back(Trunc);
-  }
-  else {
+
+  } else {
+
     /* not necessary to trunc */
     Trunc = Shift;
+
   }
 
   /* this is a trivial case, we can directly check for the byte,
@@ -135,118 +157,155 @@ BasicBlock* SplitSwitchesTransform::switchConvert(CaseVector Cases, std::vector<
    * mark the byte as checked. if this was the last byte to check
    * we can finally execute the block belonging to this case */
 
-
   if (smallestSize == 1) {
+
     uint8_t byte = *(byteSets[smallestIndex].begin());
 
-    /* insert instructions to check whether the value we are switching on is equal to byte */
-    ICmpInst* Comp = new ICmpInst(ICmpInst::ICMP_EQ, Trunc, ConstantInt::get(ByteType, byte), "byteMatch");
+    /* insert instructions to check whether the value we are switching on is
+     * equal to byte */
+    ICmpInst *Comp =
+        new ICmpInst(ICmpInst::ICMP_EQ, Trunc, ConstantInt::get(ByteType, byte),
+                     "byteMatch");
     NewNode->getInstList().push_back(Comp);
 
     bytesChecked[smallestIndex] = true;
-    if (std::all_of(bytesChecked.begin(), bytesChecked.end(), [](bool b){return b;} )) {
+    if (std::all_of(bytesChecked.begin(), bytesChecked.end(),
+                    [](bool b) { return b; })) {
+
       assert(Cases.size() == 1);
       BranchInst::Create(Cases[0].BB, NewDefault, Comp, NewNode);
 
       /* we have to update the phi nodes! */
-      for (BasicBlock::iterator I = Cases[0].BB->begin(); I != Cases[0].BB->end(); ++I) {
-        if (!isa<PHINode>(&*I)) {
-          continue;
-        }
+      for (BasicBlock::iterator I = Cases[0].BB->begin();
+           I != Cases[0].BB->end(); ++I) {
+
+        if (!isa<PHINode>(&*I)) { continue; }
         PHINode *PN = cast<PHINode>(I);
 
         /* Only update the first occurrence. */
         unsigned Idx = 0, E = PN->getNumIncomingValues();
         for (; Idx != E; ++Idx) {
+
           if (PN->getIncomingBlock(Idx) == OrigBlock) {
+
             PN->setIncomingBlock(Idx, NewNode);
             break;
+
           }
+
         }
+
       }
-    }
-    else {
-      BasicBlock* BB = switchConvert(Cases, bytesChecked, OrigBlock, NewDefault, Val, level + 1);
+
+    } else {
+
+      BasicBlock *BB = switchConvert(Cases, bytesChecked, OrigBlock, NewDefault,
+                                     Val, level + 1);
       BranchInst::Create(BB, NewDefault, Comp, NewNode);
+
     }
+
   }
+
   /* there is no byte which we can directly check on, split the tree */
   else {
 
     std::vector<uint8_t> byteVector;
-    std::copy(byteSets[smallestIndex].begin(), byteSets[smallestIndex].end(), std::back_inserter(byteVector));
+    std::copy(byteSets[smallestIndex].begin(), byteSets[smallestIndex].end(),
+              std::back_inserter(byteVector));
     std::sort(byteVector.begin(), byteVector.end());
     uint8_t pivot = byteVector[byteVector.size() / 2];
 
-    /* we already chose to divide the cases based on the value of byte at index smallestIndex
-     * the pivot value determines the threshold for the decicion; if a case value
-     * is smaller at this byte index move it to the LHS vector, otherwise to the RHS vector */
+    /* we already chose to divide the cases based on the value of the byte at
+     * index smallestIndex; the pivot value determines the threshold for the
+     * decision: if a case value is smaller at this byte index, move it to the
+     * LHS vector, otherwise to the RHS vector */
 
     CaseVector LHSCases, RHSCases;
 
-    for (CaseExpr& Case: Cases) {
-      uint8_t byte = (Case.Val->getZExtValue() >> (smallestIndex*8)) & 0xFF;
+    for (CaseExpr &Case : Cases) {
+
+      uint8_t byte = (Case.Val->getZExtValue() >> (smallestIndex * 8)) & 0xFF;
 
       if (byte < pivot) {
+
         LHSCases.push_back(Case);
-      }
-      else {
+
+      } else {
+
         RHSCases.push_back(Case);
+
       }
+
     }
-    BasicBlock *LBB, *RBB;
-    LBB = switchConvert(LHSCases, bytesChecked, OrigBlock, NewDefault, Val, level + 1);
-    RBB = switchConvert(RHSCases, bytesChecked, OrigBlock, NewDefault, Val, level + 1);
 
-    /* insert instructions to check whether the value we are switching on is equal to byte */
-    ICmpInst* Comp = new ICmpInst(ICmpInst::ICMP_ULT, Trunc, ConstantInt::get(ByteType, pivot), "byteMatch");
+    BasicBlock *LBB, *RBB;
+    LBB = switchConvert(LHSCases, bytesChecked, OrigBlock, NewDefault, Val,
+                        level + 1);
+    RBB = switchConvert(RHSCases, bytesChecked, OrigBlock, NewDefault, Val,
+                        level + 1);
+
+    /* insert instructions to check whether the value we are switching on is
+     * equal to byte */
+    ICmpInst *Comp =
+        new ICmpInst(ICmpInst::ICMP_ULT, Trunc,
+                     ConstantInt::get(ByteType, pivot), "byteMatch");
     NewNode->getInstList().push_back(Comp);
     BranchInst::Create(LBB, RBB, Comp, NewNode);
 
   }
 
   return NewNode;
+
 }
 
 bool SplitSwitchesTransform::splitSwitches(Module &M) {
 
-  std::vector<SwitchInst*> switches;
+  std::vector<SwitchInst *> switches;
 
   /* iterate over all functions, bbs and instruction and add
    * all switches to switches vector for later processing */
   for (auto &F : M) {
+
     for (auto &BB : F) {
-      SwitchInst* switchInst = nullptr;
+
+      SwitchInst *switchInst = nullptr;
 
       if ((switchInst = dyn_cast<SwitchInst>(BB.getTerminator()))) {
-        if (switchInst->getNumCases() < 1)
-            continue;
-          switches.push_back(switchInst);
+
+        if (switchInst->getNumCases() < 1) continue;
+        switches.push_back(switchInst);
+
       }
+
     }
+
   }
 
-  if (!switches.size())
-    return false;
-  errs() << "Rewriting " << switches.size() << " switch statements " << "\n";
+  if (!switches.size()) return false;
+  errs() << "Rewriting " << switches.size() << " switch statements "
+         << "\n";
 
-  for (auto &SI: switches) {
+  for (auto &SI : switches) {
 
     BasicBlock *CurBlock = SI->getParent();
     BasicBlock *OrigBlock = CurBlock;
-    Function *F = CurBlock->getParent();
+    Function *  F = CurBlock->getParent();
     /* this is the value we are switching on */
-    Value *Val = SI->getCondition();
-    BasicBlock* Default = SI->getDefaultDest();
-    unsigned bitw = Val->getType()->getIntegerBitWidth();
+    Value *     Val = SI->getCondition();
+    BasicBlock *Default = SI->getDefaultDest();
+    unsigned    bitw = Val->getType()->getIntegerBitWidth();
 
     errs() << "switch: " << SI->getNumCases() << " cases " << bitw << " bit\n";
 
-    /* If there is only the default destination or the condition checks 8 bit or less, don't bother with the code below. */
+    /* If there is only the default destination or the condition checks 8 bit or
+     * less, don't bother with the code below. */
     if (!SI->getNumCases() || bitw <= 8) {
-      if (getenv("AFL_QUIET") == NULL)
-        errs() << "skip trivial switch..\n";
+
+      if (getenv("AFL_QUIET") == NULL) errs() << "skip trivial switch..\n";
       continue;
+
     }
 
     /* Create a new, empty default block so that the new hierarchy of
@@ -258,10 +317,10 @@ bool SplitSwitchesTransform::splitSwitches(Module &M) {
     NewDefault->insertInto(F, Default);
     BranchInst::Create(Default, NewDefault);
 
-
     /* Prepare cases vector. */
     CaseVector Cases;
-    for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i)
+    for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e;
+         ++i)
 #if LLVM_VERSION_MAJOR < 5
       Cases.push_back(CaseExpr(i.getCaseValue(), i.getCaseSuccessor()));
 #else
@@ -269,8 +328,10 @@ bool SplitSwitchesTransform::splitSwitches(Module &M) {
 #endif
     /* bugfix thanks to pbst
      * round up bytesChecked (in case getBitWidth() % 8 != 0) */
-    std::vector<bool> bytesChecked((7 + Cases[0].Val->getBitWidth()) / 8, false);
-    BasicBlock* SwitchBlock = switchConvert(Cases, bytesChecked, OrigBlock, NewDefault, Val, 0);
+    std::vector<bool> bytesChecked((7 + Cases[0].Val->getBitWidth()) / 8,
+                                   false);
+    BasicBlock *      SwitchBlock =
+        switchConvert(Cases, bytesChecked, OrigBlock, NewDefault, Val, 0);
 
     /* Branch to our shiny new if-then stuff... */
     BranchInst::Create(SwitchBlock, OrigBlock);
@@ -278,41 +339,47 @@ bool SplitSwitchesTransform::splitSwitches(Module &M) {
     /* We are now done with the switch instruction, delete it. */
     CurBlock->getInstList().erase(SI);
 
+    /* we have to update the phi nodes! */
+    for (BasicBlock::iterator I = Default->begin(); I != Default->end(); ++I) {
+
+      if (!isa<PHINode>(&*I)) { continue; }
+      PHINode *PN = cast<PHINode>(I);
+
+      /* Only update the first occurrence. */
+      unsigned Idx = 0, E = PN->getNumIncomingValues();
+      for (; Idx != E; ++Idx) {
+
+        if (PN->getIncomingBlock(Idx) == OrigBlock) {
+
+          PN->setIncomingBlock(Idx, NewDefault);
+          break;
+
+        }
+
+      }
+
+    }
+
+  }
+
+  verifyModule(M);
+  return true;
 
-   /* we have to update the phi nodes! */
-   for (BasicBlock::iterator I = Default->begin(); I != Default->end(); ++I) {
-     if (!isa<PHINode>(&*I)) {
-      continue;
-     }
-     PHINode *PN = cast<PHINode>(I);
-
-     /* Only update the first occurrence. */
-     unsigned Idx = 0, E = PN->getNumIncomingValues();
-     for (; Idx != E; ++Idx) {
-       if (PN->getIncomingBlock(Idx) == OrigBlock) {
-         PN->setIncomingBlock(Idx, NewDefault);
-         break;
-       }
-     }
-   }
- }
-
- verifyModule(M);
- return true;
 }
 
 bool SplitSwitchesTransform::runOnModule(Module &M) {
 
   if (getenv("AFL_QUIET") == NULL)
-    llvm::errs() << "Running split-switches-pass by laf.intel@gmail.com\n"; 
+    llvm::errs() << "Running split-switches-pass by laf.intel@gmail.com\n";
   splitSwitches(M);
   verifyModule(M);
 
   return true;
+
 }
 
 static void registerSplitSwitchesTransPass(const PassManagerBuilder &,
-                            legacy::PassManagerBase &PM) {
+                                           legacy::PassManagerBase &PM) {
 
   auto p = new SplitSwitchesTransform();
   PM.add(p);
@@ -324,3 +391,4 @@ static RegisterStandardPasses RegisterSplitSwitchesTransPass(
 
 static RegisterStandardPasses RegisterSplitSwitchesTransPass0(
     PassManagerBuilder::EP_EnabledOnOptLevel0, registerSplitSwitchesTransPass);
+
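The reformatted switchConvert() above is easier to follow with its end product in mind: a switch over a multi-byte value becomes a tree that tests one byte per node, picking the byte position with the fewest distinct case values and splitting the cases around a pivot byte. A rough C sketch for a 16-bit switch over the two cases 0xCAFE and 0xC0DE (illustrative only, not code from this patch):

#include <stdint.h>

/* sketch: byte-wise lowering of
     switch (v) { case 0xCAFE: ...; case 0xC0DE: ...; default: ...; }
   each node checks a single byte, so the fuzzer earns coverage per byte */
static int classify(uint16_t v) {

  uint8_t b0 = (uint8_t)v;                        /* Shift + Trunc */

  if (b0 < 0xFE) {                                /* pivot split, ICMP_ULT */

    if (b0 == 0xDE && (v >> 8) == 0xC0) return 2; /* all bytes of 0xC0DE match */

  } else {

    if (b0 == 0xFE && (v >> 8) == 0xCA) return 1; /* all bytes of 0xCAFE match */

  }

  return 0;                                       /* NewDefault */

}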
diff --git a/qemu_mode/libcompcov/Makefile b/qemu_mode/libcompcov/Makefile
index c984588b..a1f4e31f 100644
--- a/qemu_mode/libcompcov/Makefile
+++ b/qemu_mode/libcompcov/Makefile
@@ -18,7 +18,7 @@ HELPER_PATH  = $(PREFIX)/lib/afl
 
 VERSION     = $(shell grep '^\#define VERSION ' ../config.h | cut -d '"' -f2)
 
-CFLAGS      ?= -O3 -funroll-loops
+CFLAGS      ?= -O3 -funroll-loops -I ../../include/
 CFLAGS      += -Wall -Wno-unused-result -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign
 LDFLAGS     += -ldl
 
diff --git a/qemu_mode/libcompcov/compcovtest.cc b/qemu_mode/libcompcov/compcovtest.cc
index fd1fda00..171e4526 100644
--- a/qemu_mode/libcompcov/compcovtest.cc
+++ b/qemu_mode/libcompcov/compcovtest.cc
@@ -3,13 +3,13 @@
 // Author: Mateusz Jurczyk (mjurczyk@google.com)
 //
 // Copyright 2019 Google LLC
-// 
+//
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
-// 
+//
 // https://www.apache.org/licenses/LICENSE-2.0
-// 
+//
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -17,7 +17,8 @@
 // limitations under the License.
 //
 
-// solution: echo -ne 'The quick brown fox jumps over the lazy dog\xbe\xba\xfe\xca\xbe\xba\xfe\xca\xde\xc0\xad\xde\xef\xbe' | ./compcovtest
+// solution: echo -ne 'The quick brown fox jumps over the lazy
+// dog\xbe\xba\xfe\xca\xbe\xba\xfe\xca\xde\xc0\xad\xde\xef\xbe' | ./compcovtest
 
 #include <cstdint>
 #include <cstdio>
@@ -25,39 +26,40 @@
 #include <cstring>
 
 int main() {
-  char buffer[44] = { /* zero padding */ };
+
+  char buffer[44] = {/* zero padding */};
   fread(buffer, 1, sizeof(buffer) - 1, stdin);
 
   if (memcmp(&buffer[0], "The quick brown fox ", 20) != 0 ||
       strncmp(&buffer[20], "jumps over ", 11) != 0 ||
       strcmp(&buffer[31], "the lazy dog") != 0) {
+
     return 1;
+
   }
 
   uint64_t x = 0;
   fread(&x, sizeof(x), 1, stdin);
-  if (x != 0xCAFEBABECAFEBABE) {
-    return 2;
-  }
+  if (x != 0xCAFEBABECAFEBABE) { return 2; }
 
   uint32_t y = 0;
   fread(&y, sizeof(y), 1, stdin);
-  if (y != 0xDEADC0DE) {
-    return 3;
-  }
+  if (y != 0xDEADC0DE) { return 3; }
 
   uint16_t z = 0;
   fread(&z, sizeof(z), 1, stdin);
 
   switch (z) {
-    case 0xBEEF:
-      break;
+
+    case 0xBEEF: break;
 
-    default:
-      return 4;
+    default: return 4;
+
   }
 
   printf("Puzzle solved, congrats!\n");
   abort();
   return 0;
+
 }
+
diff --git a/qemu_mode/libcompcov/libcompcov.so.c b/qemu_mode/libcompcov/libcompcov.so.c
index 0ccda927..e758c034 100644
--- a/qemu_mode/libcompcov/libcompcov.so.c
+++ b/qemu_mode/libcompcov/libcompcov.so.c
@@ -27,8 +27,8 @@
 #include <sys/types.h>
 #include <sys/shm.h>
 
-#include "../../types.h"
-#include "../../config.h"
+#include "types.h"
+#include "config.h"
 
 #include "pmparser.h"
 
@@ -40,10 +40,9 @@
 
 #define MAX_CMP_LENGTH 32
 
-static void *__compcov_code_start,
-            *__compcov_code_end;
+static void *__compcov_code_start, *__compcov_code_end;
 
-static u8 *__compcov_afl_map;
+static u8* __compcov_afl_map;
 
 static u32 __compcov_level;
 
@@ -55,15 +54,11 @@ static int (*__libc_memcmp)(const void*, const void*, size_t);
 
 static int debug_fd = -1;
 
-
 #define MAX_MAPPINGS 1024
 
-static struct mapping {
-  void *st, *en;
-} __compcov_ro[MAX_MAPPINGS];
-
-static u32   __compcov_ro_cnt;
+static struct mapping { void *st, *en; } __compcov_ro[MAX_MAPPINGS];
 
+static u32 __compcov_ro_cnt;
 
 /* Check an address against the list of read-only mappings. */
 
@@ -71,42 +66,42 @@ static u8 __compcov_is_ro(const void* ptr) {
 
   u32 i;
 
-  for (i = 0; i < __compcov_ro_cnt; i++) 
+  for (i = 0; i < __compcov_ro_cnt; i++)
     if (ptr >= __compcov_ro[i].st && ptr <= __compcov_ro[i].en) return 1;
 
   return 0;
+
 }
 
+static size_t __strlen2(const char* s1, const char* s2, size_t max_length) {
 
-static size_t __strlen2(const char *s1, const char *s2, size_t max_length) {
   // from https://github.com/googleprojectzero/CompareCoverage
-  
+
   size_t len = 0;
-  for (; len < max_length && s1[len] != '\0' && s2[len] != '\0'; len++) { }
+  for (; len < max_length && s1[len] != '\0' && s2[len] != '\0'; len++) {}
   return len;
+
 }
 
 /* Identify the binary boundaries in the memory mapping */
 
 static void __compcov_load(void) {
-  
+
   __libc_strcmp = dlsym(RTLD_NEXT, "strcmp");
   __libc_strncmp = dlsym(RTLD_NEXT, "strncmp");
   __libc_strcasecmp = dlsym(RTLD_NEXT, "strcasecmp");
   __libc_strncasecmp = dlsym(RTLD_NEXT, "strncasecmp");
   __libc_memcmp = dlsym(RTLD_NEXT, "memcmp");
 
-  if (getenv("AFL_QEMU_COMPCOV")) {
-
-    __compcov_level = 1;
-  }
+  if (getenv("AFL_QEMU_COMPCOV")) { __compcov_level = 1; }
   if (getenv("AFL_COMPCOV_LEVEL")) {
 
     __compcov_level = atoi(getenv("AFL_COMPCOV_LEVEL"));
+
   }
-  
-  char *id_str = getenv(SHM_ENV_VAR);
-  int shm_id;
+
+  char* id_str = getenv(SHM_ENV_VAR);
+  int   shm_id;
 
   if (id_str) {
 
@@ -114,61 +109,72 @@ static void __compcov_load(void) {
     __compcov_afl_map = shmat(shm_id, NULL, 0);
 
     if (__compcov_afl_map == (void*)-1) exit(1);
+
   } else {
-  
+
     __compcov_afl_map = calloc(1, MAP_SIZE);
+
   }
 
   if (getenv("AFL_INST_LIBS")) {
-  
+
     __compcov_code_start = (void*)0;
     __compcov_code_end = (void*)-1;
     return;
+
   }
 
   char* bin_name = getenv("AFL_COMPCOV_BINNAME");
 
   procmaps_iterator* maps = pmparser_parse(-1);
-  procmaps_struct* maps_tmp = NULL;
+  procmaps_struct*   maps_tmp = NULL;
 
   while ((maps_tmp = pmparser_next(maps)) != NULL) {
-  
+
     /* If AFL_COMPCOV_BINNAME is not set pick the first executable segment */
     if (!bin_name || strstr(maps_tmp->pathname, bin_name) != NULL) {
-    
+
       if (maps_tmp->is_x) {
-        if (!__compcov_code_start)
-            __compcov_code_start = maps_tmp->addr_start;
-        if (!__compcov_code_end)
-            __compcov_code_end = maps_tmp->addr_end;
+
+        if (!__compcov_code_start) __compcov_code_start = maps_tmp->addr_start;
+        if (!__compcov_code_end) __compcov_code_end = maps_tmp->addr_end;
+
       }
+
     }
-    
+
     if ((maps_tmp->is_w && !maps_tmp->is_r) || __compcov_ro_cnt == MAX_MAPPINGS)
       continue;
-    
+
     __compcov_ro[__compcov_ro_cnt].st = maps_tmp->addr_start;
     __compcov_ro[__compcov_ro_cnt].en = maps_tmp->addr_end;
+
   }
 
   pmparser_free(maps);
-}
 
+}
 
 static void __compcov_trace(u64 cur_loc, const u8* v0, const u8* v1, size_t n) {
 
   size_t i;
-  
+
   if (debug_fd != 1) {
+
     char debugbuf[4096];
-    snprintf(debugbuf, sizeof(debugbuf), "0x%llx %s %s %lu\n", cur_loc, v0 == NULL ? "(null)" : (char*)v0, v1 == NULL ? "(null)" : (char*)v1, n);
+    snprintf(debugbuf, sizeof(debugbuf), "0x%llx %s %s %lu\n", cur_loc,
+             v0 == NULL ? "(null)" : (char*)v0,
+             v1 == NULL ? "(null)" : (char*)v1, n);
     write(debug_fd, debugbuf, strlen(debugbuf));
+
   }
-  
+
   for (i = 0; i < n && v0[i] == v1[i]; ++i) {
-  
-    __compcov_afl_map[cur_loc +i]++;
+
+    __compcov_afl_map[cur_loc + i]++;
+
   }
+
 }
 
 /* Check whether an address is within the instrumented code bounds. */
@@ -176,8 +182,8 @@ static void __compcov_trace(u64 cur_loc, const u8* v0, const u8* v1, size_t n) {
 static u8 __compcov_is_in_bound(const void* ptr) {
 
   return ptr >= __compcov_code_start && ptr < __compcov_code_end;
-}
 
+}
 
 /* Replacements for strcmp(), memcmp(), and so on. Note that these will be used
    only if the target is compiled with -fno-builtins and linked dynamically. */
@@ -187,127 +193,145 @@ static u8 __compcov_is_in_bound(const void* ptr) {
 int strcmp(const char* str1, const char* str2) {
 
   void* retaddr = __builtin_return_address(0);
-  
-  if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 &&
-      !__compcov_is_ro(str1) && !__compcov_is_ro(str2))) {
 
-    size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1);
-    
+  if (__compcov_is_in_bound(retaddr) &&
+      !(__compcov_level < 2 && !__compcov_is_ro(str1) &&
+        !__compcov_is_ro(str2))) {
+
+    size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH + 1);
+
     if (n <= MAX_CMP_LENGTH) {
-    
+
       u64 cur_loc = (u64)retaddr;
-      cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+      cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
       cur_loc &= MAP_SIZE - 1;
-      
+
       __compcov_trace(cur_loc, str1, str2, n);
+
     }
+
   }
 
   return __libc_strcmp(str1, str2);
-}
 
+}
 
 #undef strncmp
 
 int strncmp(const char* str1, const char* str2, size_t len) {
 
   void* retaddr = __builtin_return_address(0);
-  
-  if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 &&
-      !__compcov_is_ro(str1) && !__compcov_is_ro(str2))) {
 
-    size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1);
+  if (__compcov_is_in_bound(retaddr) &&
+      !(__compcov_level < 2 && !__compcov_is_ro(str1) &&
+        !__compcov_is_ro(str2))) {
+
+    size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH + 1);
     n = MIN(n, len);
-    
+
     if (n <= MAX_CMP_LENGTH) {
-    
+
       u64 cur_loc = (u64)retaddr;
-      cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+      cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
       cur_loc &= MAP_SIZE - 1;
-      
+
       __compcov_trace(cur_loc, str1, str2, n);
+
     }
+
   }
-  
+
   return __libc_strncmp(str1, str2, len);
-}
 
+}
 
 #undef strcasecmp
 
 int strcasecmp(const char* str1, const char* str2) {
 
   void* retaddr = __builtin_return_address(0);
-  
-  if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 &&
-      !__compcov_is_ro(str1) && !__compcov_is_ro(str2))) {
+
+  if (__compcov_is_in_bound(retaddr) &&
+      !(__compcov_level < 2 && !__compcov_is_ro(str1) &&
+        !__compcov_is_ro(str2))) {
+
     /* Fallback to strcmp, maybe improve in future */
 
-    size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1);
-    
+    size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH + 1);
+
     if (n <= MAX_CMP_LENGTH) {
-    
+
       u64 cur_loc = (u64)retaddr;
-      cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+      cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
       cur_loc &= MAP_SIZE - 1;
-      
+
       __compcov_trace(cur_loc, str1, str2, n);
+
     }
+
   }
 
   return __libc_strcasecmp(str1, str2);
-}
 
+}
 
 #undef strncasecmp
 
 int strncasecmp(const char* str1, const char* str2, size_t len) {
 
   void* retaddr = __builtin_return_address(0);
-  
-  if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 &&
-      !__compcov_is_ro(str1) && !__compcov_is_ro(str2))) {
+
+  if (__compcov_is_in_bound(retaddr) &&
+      !(__compcov_level < 2 && !__compcov_is_ro(str1) &&
+        !__compcov_is_ro(str2))) {
+
     /* Fallback to strncmp, maybe improve in future */
 
-    size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1);
+    size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH + 1);
     n = MIN(n, len);
-    
+
     if (n <= MAX_CMP_LENGTH) {
-    
+
       u64 cur_loc = (u64)retaddr;
-      cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+      cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
       cur_loc &= MAP_SIZE - 1;
-      
+
       __compcov_trace(cur_loc, str1, str2, n);
+
     }
+
   }
 
   return __libc_strncasecmp(str1, str2, len);
-}
 
+}
 
 #undef memcmp
 
 int memcmp(const void* mem1, const void* mem2, size_t len) {
 
   void* retaddr = __builtin_return_address(0);
-  
-  if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 &&
-      !__compcov_is_ro(mem1) && !__compcov_is_ro(mem2))) {
+
+  if (__compcov_is_in_bound(retaddr) &&
+      !(__compcov_level < 2 && !__compcov_is_ro(mem1) &&
+        !__compcov_is_ro(mem2))) {
 
     size_t n = len;
-    
+
     if (n <= MAX_CMP_LENGTH) {
-    
+
       u64 cur_loc = (u64)retaddr;
-      cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+      cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
       cur_loc &= MAP_SIZE - 1;
-      
+
       __compcov_trace(cur_loc, mem1, mem2, n);
+
     }
+
   }
 
   return __libc_memcmp(mem1, mem2, len);
+
 }
 
 /* Init code to open the debug file (if requested) and init the library. */
@@ -315,9 +339,10 @@ int memcmp(const void* mem1, const void* mem2, size_t len) {
 __attribute__((constructor)) void __compcov_init(void) {
 
   if (getenv("AFL_QEMU_COMPCOV_DEBUG") != NULL)
-    debug_fd = open("compcov.debug", O_WRONLY | O_CREAT | O_TRUNC | O_SYNC, 0644);
+    debug_fd =
+        open("compcov.debug", O_WRONLY | O_CREAT | O_TRUNC | O_SYNC, 0644);
 
   __compcov_load();
-}
 
+}
 
diff --git a/qemu_mode/libcompcov/pmparser.h b/qemu_mode/libcompcov/pmparser.h
index 34d0cd50..91dfd032 100644
--- a/qemu_mode/libcompcov/pmparser.h
+++ b/qemu_mode/libcompcov/pmparser.h
@@ -13,54 +13,60 @@ implied warranty.
  */
 
 #ifndef H_PMPARSER
-#define H_PMPARSER
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <string.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <linux/limits.h>
-
-//maximum line length in a procmaps file
-#define PROCMAPS_LINE_MAX_LENGTH  (PATH_MAX + 100) 
+#  define H_PMPARSER
+#  include <stdio.h>
+#  include <stdlib.h>
+#  include <unistd.h>
+#  include <string.h>
+#  include <sys/types.h>
+#  include <sys/stat.h>
+#  include <fcntl.h>
+#  include <errno.h>
+#  include <linux/limits.h>
+
+// maximum line length in a procmaps file
+#  define PROCMAPS_LINE_MAX_LENGTH (PATH_MAX + 100)
 /**
  * procmaps_struct
  * @desc holds all the information about an area in the process's VM
  */
-typedef struct procmaps_struct{
-	void* addr_start; 	//< start address of the area
-	void* addr_end; 	//< end address
-	unsigned long length; //< size of the range
-
-	char perm[5];		//< permissions rwxp
-	short is_r;			//< rewrote of perm with short flags
-	short is_w;
-	short is_x;
-	short is_p;
-
-	long offset;	//< offset
-	char dev[12];	//< dev major:minor
-	int inode;		//< inode of the file that backs the area
-
-	char pathname[600];		//< the path of the file that backs the area
-	//chained list
-	struct procmaps_struct* next;		//<handler of the chinaed list
+typedef struct procmaps_struct {
+
+  void*         addr_start;  //< start address of the area
+  void*         addr_end;    //< end address
+  unsigned long length;      //< size of the range
+
+  char  perm[5];  //< permissions rwxp
+  short is_r;     //< perm rewritten as short flags
+  short is_w;
+  short is_x;
+  short is_p;
+
+  long offset;   //< offset
+  char dev[12];  //< dev major:minor
+  int  inode;    //< inode of the file that backs the area
+
+  char pathname[600];  //< the path of the file that backs the area
+  // chained list
+  struct procmaps_struct* next;  //< handler of the chained list
+
 } procmaps_struct;
 
 /**
  * procmaps_iterator
  * @desc holds iterating information
  */
-typedef struct procmaps_iterator{
-	procmaps_struct* head;
-	procmaps_struct* current;
+typedef struct procmaps_iterator {
+
+  procmaps_struct* head;
+  procmaps_struct* current;
+
 } procmaps_iterator;
+
 /**
  * pmparser_parse
- * @param pid the process id whose memory map to be parser. the current process if pid<0
+ * @param pid the process id whose memory map is to be parsed; the current
+ * process if pid<0
  * @return an iterator over all the nodes
  */
 procmaps_iterator* pmparser_parse(int pid);
@@ -83,198 +89,238 @@ void pmparser_free(procmaps_iterator* p_procmaps_it);
  * _pmparser_split_line
  * @description internal usage
  */
-void _pmparser_split_line(char*buf,char*addr1,char*addr2,char*perm, char* offset, char* device,char*inode,char* pathname);
+void _pmparser_split_line(char* buf, char* addr1, char* addr2, char* perm,
+                          char* offset, char* device, char* inode,
+                          char* pathname);
 
 /**
  * pmparser_print
  * @param map the head of the list
  * @order the order of the area to print, -1 to print everything
  */
-void pmparser_print(procmaps_struct* map,int order);
-
+void pmparser_print(procmaps_struct* map, int order);
 
 /**
  * global variables
  */
-//procmaps_struct* g_last_head=NULL;
-//procmaps_struct* g_current=NULL;
-
-
-procmaps_iterator* pmparser_parse(int pid){
-	procmaps_iterator* maps_it = malloc(sizeof(procmaps_iterator));
-	char maps_path[500];
-	if(pid>=0 ){
-		sprintf(maps_path,"/proc/%d/maps",pid);
-	}else{
-		sprintf(maps_path,"/proc/self/maps");
-	}
-	FILE* file=fopen(maps_path,"r");
-	if(!file){
-		fprintf(stderr,"pmparser : cannot open the memory maps, %s\n",strerror(errno));
-		return NULL;
-	}
-	int ind=0;char buf[PROCMAPS_LINE_MAX_LENGTH];
-	//int c;
-	procmaps_struct* list_maps=NULL;
-	procmaps_struct* tmp;
-	procmaps_struct* current_node=list_maps;
-	char addr1[20],addr2[20], perm[8], offset[20], dev[10],inode[30],pathname[PATH_MAX];
-	while( !feof(file) ){
-		fgets(buf,PROCMAPS_LINE_MAX_LENGTH,file);
-		//allocate a node
-		tmp=(procmaps_struct*)malloc(sizeof(procmaps_struct));
-		//fill the node
-		_pmparser_split_line(buf,addr1,addr2,perm,offset, dev,inode,pathname);
-		//printf("#%s",buf);
-		//printf("%s-%s %s %s %s %s\t%s\n",addr1,addr2,perm,offset,dev,inode,pathname);
-		//addr_start & addr_end
-		//unsigned long l_addr_start;
-		sscanf(addr1,"%lx",(long unsigned *)&tmp->addr_start );
-		sscanf(addr2,"%lx",(long unsigned *)&tmp->addr_end );
-		//size
-		tmp->length=(unsigned long)(tmp->addr_end-tmp->addr_start);
-		//perm
-		strcpy(tmp->perm,perm);
-		tmp->is_r=(perm[0]=='r');
-		tmp->is_w=(perm[1]=='w');
-		tmp->is_x=(perm[2]=='x');
-		tmp->is_p=(perm[3]=='p');
-
-		//offset
-		sscanf(offset,"%lx",&tmp->offset );
-		//device
-		strcpy(tmp->dev,dev);
-		//inode
-		tmp->inode=atoi(inode);
-		//pathname
-		strcpy(tmp->pathname,pathname);
-		tmp->next=NULL;
-		//attach the node
-		if(ind==0){
-			list_maps=tmp;
-			list_maps->next=NULL;
-			current_node=list_maps;
-		}
-		current_node->next=tmp;
-		current_node=tmp;
-		ind++;
-		//printf("%s",buf);
-	}
-
-	//close file
-	fclose(file);
-
-
-	//g_last_head=list_maps;
-	maps_it->head = list_maps;
-	maps_it->current =  list_maps;
-	return maps_it;
+// procmaps_struct* g_last_head=NULL;
+// procmaps_struct* g_current=NULL;
+
+procmaps_iterator* pmparser_parse(int pid) {
+
+  procmaps_iterator* maps_it = malloc(sizeof(procmaps_iterator));
+  char               maps_path[500];
+  if (pid >= 0) {
+
+    sprintf(maps_path, "/proc/%d/maps", pid);
+
+  } else {
+
+    sprintf(maps_path, "/proc/self/maps");
+
+  }
+
+  FILE* file = fopen(maps_path, "r");
+  if (!file) {
+
+    fprintf(stderr, "pmparser : cannot open the memory maps, %s\n",
+            strerror(errno));
+    return NULL;
+
+  }
+
+  int  ind = 0;
+  char buf[PROCMAPS_LINE_MAX_LENGTH];
+  // int c;
+  procmaps_struct* list_maps = NULL;
+  procmaps_struct* tmp;
+  procmaps_struct* current_node = list_maps;
+  char addr1[20], addr2[20], perm[8], offset[20], dev[10], inode[30],
+      pathname[PATH_MAX];
+  while (!feof(file)) {
+
+    fgets(buf, PROCMAPS_LINE_MAX_LENGTH, file);
+    // allocate a node
+    tmp = (procmaps_struct*)malloc(sizeof(procmaps_struct));
+    // fill the node
+    _pmparser_split_line(buf, addr1, addr2, perm, offset, dev, inode, pathname);
+    // printf("#%s",buf);
+    // printf("%s-%s %s %s %s
+    // %s\t%s\n",addr1,addr2,perm,offset,dev,inode,pathname); addr_start &
+    // addr_end unsigned long l_addr_start;
+    sscanf(addr1, "%lx", (long unsigned*)&tmp->addr_start);
+    sscanf(addr2, "%lx", (long unsigned*)&tmp->addr_end);
+    // size
+    tmp->length = (unsigned long)(tmp->addr_end - tmp->addr_start);
+    // perm
+    strcpy(tmp->perm, perm);
+    tmp->is_r = (perm[0] == 'r');
+    tmp->is_w = (perm[1] == 'w');
+    tmp->is_x = (perm[2] == 'x');
+    tmp->is_p = (perm[3] == 'p');
+
+    // offset
+    sscanf(offset, "%lx", &tmp->offset);
+    // device
+    strcpy(tmp->dev, dev);
+    // inode
+    tmp->inode = atoi(inode);
+    // pathname
+    strcpy(tmp->pathname, pathname);
+    tmp->next = NULL;
+    // attach the node
+    if (ind == 0) {
+
+      list_maps = tmp;
+      list_maps->next = NULL;
+      current_node = list_maps;
+
+    }
+
+    current_node->next = tmp;
+    current_node = tmp;
+    ind++;
+    // printf("%s",buf);
+
+  }
+
+  // close file
+  fclose(file);
+
+  // g_last_head=list_maps;
+  maps_it->head = list_maps;
+  maps_it->current = list_maps;
+  return maps_it;
+
 }
 
+procmaps_struct* pmparser_next(procmaps_iterator* p_procmaps_it) {
 
-procmaps_struct* pmparser_next(procmaps_iterator* p_procmaps_it){
-	if(p_procmaps_it->current == NULL)
-		return NULL;
-	procmaps_struct* p_current = p_procmaps_it->current;
-	p_procmaps_it->current = p_procmaps_it->current->next;
-	return p_current;
-	/*
-	if(g_current==NULL){
-		g_current=g_last_head;
-	}else
-		g_current=g_current->next;
-
-	return g_current;
-	*/
-}
+  if (p_procmaps_it->current == NULL) return NULL;
+  procmaps_struct* p_current = p_procmaps_it->current;
+  p_procmaps_it->current = p_procmaps_it->current->next;
+  return p_current;
+  /*
+  if(g_current==NULL){
+
+          g_current=g_last_head;
 
+  }else
 
+          g_current=g_current->next;
 
-void pmparser_free(procmaps_iterator* p_procmaps_it){
-	procmaps_struct* maps_list = p_procmaps_it->head;
-	if(maps_list==NULL) return ;
-	procmaps_struct* act=maps_list;
-	procmaps_struct* nxt=act->next;
-	while(act!=NULL){
-		free(act);
-		act=nxt;
-		if(nxt!=NULL)
-			nxt=nxt->next;
-	}
+  return g_current;
+  */
 
 }
 
+void pmparser_free(procmaps_iterator* p_procmaps_it) {
+
+  procmaps_struct* maps_list = p_procmaps_it->head;
+  if (maps_list == NULL) return;
+  procmaps_struct* act = maps_list;
+  procmaps_struct* nxt = act->next;
+  while (act != NULL) {
 
-void _pmparser_split_line(
-		char*buf,char*addr1,char*addr2,
-		char*perm,char* offset,char* device,char*inode,
-		char* pathname){
-	//
-	int orig=0;
-	int i=0;
-	//addr1
-	while(buf[i]!='-'){
-		addr1[i-orig]=buf[i];
-		i++;
-	}
-	addr1[i]='\0';
-	i++;
-	//addr2
-	orig=i;
-	while(buf[i]!='\t' && buf[i]!=' '){
-		addr2[i-orig]=buf[i];
-		i++;
-	}
-	addr2[i-orig]='\0';
-
-	//perm
-	while(buf[i]=='\t' || buf[i]==' ')
-		i++;
-	orig=i;
-	while(buf[i]!='\t' && buf[i]!=' '){
-		perm[i-orig]=buf[i];
-		i++;
-	}
-	perm[i-orig]='\0';
-	//offset
-	while(buf[i]=='\t' || buf[i]==' ')
-		i++;
-	orig=i;
-	while(buf[i]!='\t' && buf[i]!=' '){
-		offset[i-orig]=buf[i];
-		i++;
-	}
-	offset[i-orig]='\0';
-	//dev
-	while(buf[i]=='\t' || buf[i]==' ')
-		i++;
-	orig=i;
-	while(buf[i]!='\t' && buf[i]!=' '){
-		device[i-orig]=buf[i];
-		i++;
-	}
-	device[i-orig]='\0';
-	//inode
-	while(buf[i]=='\t' || buf[i]==' ')
-		i++;
-	orig=i;
-	while(buf[i]!='\t' && buf[i]!=' '){
-		inode[i-orig]=buf[i];
-		i++;
-	}
-	inode[i-orig]='\0';
-	//pathname
-	pathname[0]='\0';
-	while(buf[i]=='\t' || buf[i]==' ')
-		i++;
-	orig=i;
-	while(buf[i]!='\t' && buf[i]!=' ' && buf[i]!='\n'){
-		pathname[i-orig]=buf[i];
-		i++;
-	}
-	pathname[i-orig]='\0';
+    free(act);
+    act = nxt;
+    if (nxt != NULL) nxt = nxt->next;
+
+  }
 
 }
 
+void _pmparser_split_line(char* buf, char* addr1, char* addr2, char* perm,
+                          char* offset, char* device, char* inode,
+                          char* pathname) {
+
+  //
+  int orig = 0;
+  int i = 0;
+  // addr1
+  while (buf[i] != '-') {
+
+    addr1[i - orig] = buf[i];
+    i++;
+
+  }
+
+  addr1[i] = '\0';
+  i++;
+  // addr2
+  orig = i;
+  while (buf[i] != '\t' && buf[i] != ' ') {
+
+    addr2[i - orig] = buf[i];
+    i++;
+
+  }
+
+  addr2[i - orig] = '\0';
+
+  // perm
+  while (buf[i] == '\t' || buf[i] == ' ')
+    i++;
+  orig = i;
+  while (buf[i] != '\t' && buf[i] != ' ') {
+
+    perm[i - orig] = buf[i];
+    i++;
+
+  }
+
+  perm[i - orig] = '\0';
+  // offset
+  while (buf[i] == '\t' || buf[i] == ' ')
+    i++;
+  orig = i;
+  while (buf[i] != '\t' && buf[i] != ' ') {
+
+    offset[i - orig] = buf[i];
+    i++;
+
+  }
+
+  offset[i - orig] = '\0';
+  // dev
+  while (buf[i] == '\t' || buf[i] == ' ')
+    i++;
+  orig = i;
+  while (buf[i] != '\t' && buf[i] != ' ') {
+
+    device[i - orig] = buf[i];
+    i++;
+
+  }
+
+  device[i - orig] = '\0';
+  // inode
+  while (buf[i] == '\t' || buf[i] == ' ')
+    i++;
+  orig = i;
+  while (buf[i] != '\t' && buf[i] != ' ') {
+
+    inode[i - orig] = buf[i];
+    i++;
+
+  }
+
+  inode[i - orig] = '\0';
+  // pathname
+  pathname[0] = '\0';
+  while (buf[i] == '\t' || buf[i] == ' ')
+    i++;
+  orig = i;
+  while (buf[i] != '\t' && buf[i] != ' ' && buf[i] != '\n') {
+
+    pathname[i - orig] = buf[i];
+    i++;
+
+  }
+
+  pathname[i - orig] = '\0';
+
+}
 
 #endif
+
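A short usage sketch for the iterator API declared above (pid < 0 means the current process); error handling is deliberately minimal:

#include "pmparser.h"

/* print every executable mapping of the current process */
static void dump_exec_ranges(void) {

  procmaps_iterator* it = pmparser_parse(-1);
  if (!it) return;

  procmaps_struct* m;
  while ((m = pmparser_next(it)) != NULL)
    if (m->is_x) printf("%p-%p %s\n", m->addr_start, m->addr_end, m->pathname);

  pmparser_free(it);

}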
diff --git a/qemu_mode/patches/afl-qemu-common.h b/qemu_mode/patches/afl-qemu-common.h
index c475cb58..c87bacb6 100644
--- a/qemu_mode/patches/afl-qemu-common.h
+++ b/qemu_mode/patches/afl-qemu-common.h
@@ -33,19 +33,17 @@
 
 #include "../../config.h"
 
-/* NeverZero */ 
+/* NeverZero */
 
 #if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO)
-#  define INC_AFL_AREA(loc) \
-    asm volatile ( \
-      "incb (%0, %1, 1)\n" \
-      "adcb $0, (%0, %1, 1)\n" \
-      : /* no out */ \
-      : "r" (afl_area_ptr), "r" (loc) \
-      : "memory", "eax" \
-    )
+#  define INC_AFL_AREA(loc)         \
+  asm volatile(                     \
+      "incb (%0, %1, 1)\n"          \
+      "adcb $0, (%0, %1, 1)\n"      \
+      : /* no out */                \
+      : "r"(afl_area_ptr), "r"(loc) \
+      : "memory", "eax")
 #else
-#  define INC_AFL_AREA(loc) \
-  afl_area_ptr[loc]++
+#  define INC_AFL_AREA(loc) afl_area_ptr[loc]++
 #endif
 
diff --git a/qemu_mode/patches/afl-qemu-cpu-inl.h b/qemu_mode/patches/afl-qemu-cpu-inl.h
index 4ad31b60..2a1331cb 100644
--- a/qemu_mode/patches/afl-qemu-cpu-inl.h
+++ b/qemu_mode/patches/afl-qemu-cpu-inl.h
@@ -42,11 +42,16 @@
    _start and does the usual forkserver stuff, not very different from
    regular instrumentation injected via afl-as.h. */
 
-#define AFL_QEMU_CPU_SNIPPET2 do { \
-    if(itb->pc == afl_entry_point) { \
-      afl_setup(); \
-      afl_forkserver(cpu); \
-    } \
+#define AFL_QEMU_CPU_SNIPPET2         \
+  do {                                \
+                                      \
+    if (itb->pc == afl_entry_point) { \
+                                      \
+      afl_setup();                    \
+      afl_forkserver(cpu);            \
+                                      \
+    }                                 \
+                                      \
   } while (0)
 
 /* We use one additional file descriptor to relay "needs translation"
@@ -56,60 +61,71 @@
 
 /* This is equivalent to afl-as.h: */
 
-static unsigned char dummy[MAP_SIZE]; /* costs MAP_SIZE but saves a few instructions */
-unsigned char *afl_area_ptr = dummy; /* Exported for afl_gen_trace */
+static unsigned char
+               dummy[MAP_SIZE]; /* costs MAP_SIZE but saves a few instructions */
+unsigned char *afl_area_ptr = dummy;          /* Exported for afl_gen_trace */
 
 /* Exported variables populated by the code patched into elfload.c: */
 
-abi_ulong afl_entry_point, /* ELF entry point (_start) */
-          afl_start_code,  /* .text start pointer      */
-          afl_end_code;    /* .text end pointer        */
+abi_ulong afl_entry_point,                      /* ELF entry point (_start) */
+    afl_start_code,                             /* .text start pointer      */
+    afl_end_code;                               /* .text end pointer        */
 
 u8 afl_compcov_level;
 
 /* Set in the child process in forkserver mode: */
 
-static int forkserver_installed = 0;
+static int           forkserver_installed = 0;
 static unsigned char afl_fork_child;
-unsigned int afl_forksrv_pid;
+unsigned int         afl_forksrv_pid;
 
 /* Instrumentation ratio: */
 
-unsigned int afl_inst_rms = MAP_SIZE; /* Exported for afl_gen_trace */
+unsigned int afl_inst_rms = MAP_SIZE;         /* Exported for afl_gen_trace */
 
 /* Function declarations. */
 
 static void afl_setup(void);
-static void afl_forkserver(CPUState*);
+static void afl_forkserver(CPUState *);
 
-static void afl_wait_tsl(CPUState*, int);
-static void afl_request_tsl(target_ulong, target_ulong, uint32_t, uint32_t, TranslationBlock*, int);
+static void afl_wait_tsl(CPUState *, int);
+static void afl_request_tsl(target_ulong, target_ulong, uint32_t, uint32_t,
+                            TranslationBlock *, int);
 
 /* Data structures passed around by the translate handlers: */
 
 struct afl_tb {
+
   target_ulong pc;
   target_ulong cs_base;
-  uint32_t flags;
-  uint32_t cf_mask;
+  uint32_t     flags;
+  uint32_t     cf_mask;
+
 };
 
 struct afl_tsl {
+
   struct afl_tb tb;
-  char is_chain;
+  char          is_chain;
+
 };
 
 struct afl_chain {
+
   struct afl_tb last_tb;
-  uint32_t cf_mask;
-  int tb_exit;
+  uint32_t      cf_mask;
+  int           tb_exit;
+
 };
 
 /* Some forward decls: */
 
-TranslationBlock *tb_htable_lookup(CPUState*, target_ulong, target_ulong, uint32_t, uint32_t);
-static inline TranslationBlock *tb_find(CPUState*, TranslationBlock*, int, uint32_t);
-static inline void tb_add_jump(TranslationBlock *tb, int n, TranslationBlock *tb_next);
+TranslationBlock *tb_htable_lookup(CPUState *, target_ulong, target_ulong,
+                                   uint32_t, uint32_t);
+static inline TranslationBlock *tb_find(CPUState *, TranslationBlock *, int,
+                                        uint32_t);
+static inline void              tb_add_jump(TranslationBlock *tb, int n,
+                                            TranslationBlock *tb_next);
 
 /*************************
  * ACTUAL IMPLEMENTATION *
@@ -119,8 +135,7 @@ static inline void tb_add_jump(TranslationBlock *tb, int n, TranslationBlock *tb
 
 static void afl_setup(void) {
 
-  char *id_str = getenv(SHM_ENV_VAR),
-       *inst_r = getenv("AFL_INST_RATIO");
+  char *id_str = getenv(SHM_ENV_VAR), *inst_r = getenv("AFL_INST_RATIO");
 
   int shm_id;
 
@@ -142,7 +157,7 @@ static void afl_setup(void) {
     shm_id = atoi(id_str);
     afl_area_ptr = shmat(shm_id, NULL, 0);
 
-    if (afl_area_ptr == (void*)-1) exit(1);
+    if (afl_area_ptr == (void *)-1) exit(1);
 
     /* With AFL_INST_RATIO set to a low value, we want to touch the bitmap
        so that the parent doesn't give up on us. */
@@ -154,18 +169,16 @@ static void afl_setup(void) {
   if (getenv("AFL_INST_LIBS")) {
 
     afl_start_code = 0;
-    afl_end_code   = (abi_ulong)-1;
+    afl_end_code = (abi_ulong)-1;
 
   }
-  
-  /* Maintain for compatibility */
-  if (getenv("AFL_QEMU_COMPCOV")) {
 
-    afl_compcov_level = 1;
-  }
+  /* Maintain for compatibility */
+  if (getenv("AFL_QEMU_COMPCOV")) { afl_compcov_level = 1; }
   if (getenv("AFL_COMPCOV_LEVEL")) {
 
     afl_compcov_level = atoi(getenv("AFL_COMPCOV_LEVEL"));
+
   }
 
   /* pthread_atfork() seems somewhat broken in util/rcu.c, and I'm
@@ -176,17 +189,15 @@ static void afl_setup(void) {
 
 }
 
-
 /* Fork server logic, invoked once we hit _start. */
 
 static void afl_forkserver(CPUState *cpu) {
 
   static unsigned char tmp[4];
 
-  if (forkserver_installed == 1)
-    return;
+  if (forkserver_installed == 1) return;
   forkserver_installed = 1;
-  //if (!afl_area_ptr) return; // not necessary because of fixed dummy buffer
+  // if (!afl_area_ptr) return; // not necessary because of fixed dummy buffer
 
   /* Tell the parent that we're alive. If the parent doesn't want
      to talk, assume that we're not running in forkserver mode. */
@@ -200,7 +211,7 @@ static void afl_forkserver(CPUState *cpu) {
   while (1) {
 
     pid_t child_pid;
-    int status, t_fd[2];
+    int   status, t_fd[2];
 
     /* Whoops, parent dead? */
 
@@ -246,59 +257,60 @@ static void afl_forkserver(CPUState *cpu) {
 
 }
 
-
 /* This code is invoked whenever QEMU decides that it doesn't have a
    translation of a particular block and needs to compute it, or when it
    decides to chain two TBs together. When this happens, we tell the parent to
    mirror the operation, so that the next fork() has a cached copy. */
 
-static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags, uint32_t cf_mask,
-                            TranslationBlock *last_tb, int tb_exit) {
+static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags,
+                            uint32_t cf_mask, TranslationBlock *last_tb,
+                            int tb_exit) {
 
-  struct afl_tsl t;
+  struct afl_tsl   t;
   struct afl_chain c;
 
   if (!afl_fork_child) return;
 
-  t.tb.pc      = pc;
+  t.tb.pc = pc;
   t.tb.cs_base = cb;
-  t.tb.flags   = flags;
+  t.tb.flags = flags;
   t.tb.cf_mask = cf_mask;
-  t.is_chain   = (last_tb != NULL);
+  t.is_chain = (last_tb != NULL);
 
   if (write(TSL_FD, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl))
     return;
 
   if (t.is_chain) {
-    c.last_tb.pc      = last_tb->pc;
+
+    c.last_tb.pc = last_tb->pc;
     c.last_tb.cs_base = last_tb->cs_base;
-    c.last_tb.flags   = last_tb->flags;
-    c.cf_mask         = cf_mask;
-    c.tb_exit         = tb_exit;
+    c.last_tb.flags = last_tb->flags;
+    c.cf_mask = cf_mask;
+    c.tb_exit = tb_exit;
 
     if (write(TSL_FD, &c, sizeof(struct afl_chain)) != sizeof(struct afl_chain))
       return;
+
   }
 
 }
 
-
 /* Check if an address is valid in the current mapping */
 
 static inline int is_valid_addr(target_ulong addr) {
 
-    int l, flags;
-    target_ulong page;
-    void * p;
-    
-    page = addr & TARGET_PAGE_MASK;
-    l = (page + TARGET_PAGE_SIZE) - addr;
-    
-    flags = page_get_flags(page);
-    if (!(flags & PAGE_VALID) || !(flags & PAGE_READ))
-        return 0;
-    
-    return 1;
+  int          l, flags;
+  target_ulong page;
+  void *       p;
+
+  page = addr & TARGET_PAGE_MASK;
+  l = (page + TARGET_PAGE_SIZE) - addr;
+
+  flags = page_get_flags(page);
+  if (!(flags & PAGE_VALID) || !(flags & PAGE_READ)) return 0;
+
+  return 1;
+
 }
 
 /* This is the other side of the same channel. Since timeouts are handled by
@@ -306,8 +318,8 @@ static inline int is_valid_addr(target_ulong addr) {
 
 static void afl_wait_tsl(CPUState *cpu, int fd) {
 
-  struct afl_tsl t;
-  struct afl_chain c;
+  struct afl_tsl    t;
+  struct afl_chain  c;
   TranslationBlock *tb, *last_tb;
 
   while (1) {
@@ -316,30 +328,33 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
 
     /* Broken pipe means it's time to return to the fork server routine. */
 
-    if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl))
-      break;
+    if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) break;
 
     tb = tb_htable_lookup(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask);
 
-    if(!tb) {
-      
+    if (!tb) {
+
       /* The child may request to translate a block of memory that is not
          mapped in the parent (e.g. jitted code or dlopened code).
          This causes a SIGSEGV in gen_intermediate_code() and associated
          subroutines. We simply avoid caching of such blocks. */
 
       if (is_valid_addr(t.tb.pc)) {
-    
+
         mmap_lock();
         tb = tb_gen_code(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask);
         mmap_unlock();
+
       } else {
-      
-        invalid_pc = 1; 
+
+        invalid_pc = 1;
+
       }
+
     }
 
     if (t.is_chain) {
+
       if (read(fd, &c, sizeof(struct afl_chain)) != sizeof(struct afl_chain))
         break;
 
@@ -347,10 +362,10 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
 
         last_tb = tb_htable_lookup(cpu, c.last_tb.pc, c.last_tb.cs_base,
                                    c.last_tb.flags, c.cf_mask);
-        if (last_tb) {
-          tb_add_jump(last_tb, c.tb_exit, tb);
-        }
+        if (last_tb) { tb_add_jump(last_tb, c.tb_exit, tb); }
+
       }
+
     }
 
   }
@@ -358,3 +373,4 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
   close(fd);
 
 }
+
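The TSL channel above is easiest to read as a tiny fire-and-forget protocol: the forked child writes one fixed-size record per block it had to translate (optionally followed by a chain record), and the parent replays them so the next fork() starts with a warm translation cache. A self-contained sketch of the pattern (names and payload are illustrative, not QEMU's):

#include <stdint.h>
#include <unistd.h>

struct tsl_req {

  uint64_t pc, cs_base;
  uint32_t flags, cf_mask;

};

/* child side: report and move on; a short write means the parent is gone */
static void tsl_request(int fd, const struct tsl_req* r) {

  if (write(fd, r, sizeof *r) != sizeof *r) return;

}

/* parent side: replay requests until the pipe closes */
static void tsl_drain(int fd) {

  struct tsl_req r;
  while (read(fd, &r, sizeof r) == sizeof r) {

    /* QEMU would call tb_htable_lookup()/tb_gen_code() for r.pc here */

  }

}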
diff --git a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
index 09ecb9d2..3d3c1b6b 100644
--- a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
+++ b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
@@ -37,9 +37,9 @@
 
 /* Declared in afl-qemu-cpu-inl.h */
 extern unsigned char *afl_area_ptr;
-extern unsigned int afl_inst_rms;
-extern abi_ulong afl_start_code, afl_end_code;
-extern u8 afl_compcov_level;
+extern unsigned int   afl_inst_rms;
+extern abi_ulong      afl_start_code, afl_end_code;
+extern u8             afl_compcov_level;
 
 void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
                                   TCGv_i64 arg1, TCGv_i64 arg2);
@@ -47,81 +47,93 @@ void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
 static void afl_compcov_log_16(target_ulong cur_loc, target_ulong arg1,
                                target_ulong arg2) {
 
-  if ((arg1 & 0xff) == (arg2 & 0xff)) {
-    INC_AFL_AREA(cur_loc);
-  }
+  if ((arg1 & 0xff) == (arg2 & 0xff)) { INC_AFL_AREA(cur_loc); }
+
 }
 
 static void afl_compcov_log_32(target_ulong cur_loc, target_ulong arg1,
                                target_ulong arg2) {
 
   if ((arg1 & 0xff) == (arg2 & 0xff)) {
+
     INC_AFL_AREA(cur_loc);
     if ((arg1 & 0xffff) == (arg2 & 0xffff)) {
-      INC_AFL_AREA(cur_loc +1);
-      if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) {
-        INC_AFL_AREA(cur_loc +2);
-      }
+
+      INC_AFL_AREA(cur_loc + 1);
+      if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { INC_AFL_AREA(cur_loc + 2); }
+
     }
+
   }
+
 }
 
 static void afl_compcov_log_64(target_ulong cur_loc, target_ulong arg1,
                                target_ulong arg2) {
 
   if ((arg1 & 0xff) == (arg2 & 0xff)) {
+
     INC_AFL_AREA(cur_loc);
     if ((arg1 & 0xffff) == (arg2 & 0xffff)) {
-      INC_AFL_AREA(cur_loc +1);
+
+      INC_AFL_AREA(cur_loc + 1);
       if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) {
-        INC_AFL_AREA(cur_loc +2);
+
+        INC_AFL_AREA(cur_loc + 2);
         if ((arg1 & 0xffffffff) == (arg2 & 0xffffffff)) {
-          INC_AFL_AREA(cur_loc +3);
+
+          INC_AFL_AREA(cur_loc + 3);
           if ((arg1 & 0xffffffffff) == (arg2 & 0xffffffffff)) {
-            INC_AFL_AREA(cur_loc +4);
+
+            INC_AFL_AREA(cur_loc + 4);
             if ((arg1 & 0xffffffffffff) == (arg2 & 0xffffffffffff)) {
-              INC_AFL_AREA(cur_loc +5);
+
+              INC_AFL_AREA(cur_loc + 5);
               if ((arg1 & 0xffffffffffffff) == (arg2 & 0xffffffffffffff)) {
-                INC_AFL_AREA(cur_loc +6);
+
+                INC_AFL_AREA(cur_loc + 6);
+
               }
+
             }
+
           }
+
         }
+
       }
+
     }
+
   }
-}
 
+}
 
 static void afl_gen_compcov(target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2,
                             TCGMemOp ot, int is_imm) {
 
   void *func;
-  
+
   if (!afl_compcov_level || cur_loc > afl_end_code || cur_loc < afl_start_code)
     return;
-  
-  if (!is_imm && afl_compcov_level < 2)
-    return;
+
+  if (!is_imm && afl_compcov_level < 2) return;
 
   switch (ot) {
-    case MO_64:
-      func = &afl_compcov_log_64;
-      break;
-    case MO_32: 
-      func = &afl_compcov_log_32;
-      break;
-    case MO_16:
-      func = &afl_compcov_log_16;
-      break;
-    default:
-      return;
+
+    case MO_64: func = &afl_compcov_log_64; break;
+    case MO_32: func = &afl_compcov_log_32; break;
+    case MO_16: func = &afl_compcov_log_16; break;
+    default: return;
+
   }
-  
-  cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+
+  cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
   cur_loc &= MAP_SIZE - 7;
-  
+
   if (cur_loc >= afl_inst_rms) return;
-  
+
   tcg_gen_afl_compcov_log_call(func, cur_loc, arg1, arg2);
+
 }
+
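
The unrolled handlers above all encode one rule: walking from the lowest
byte upward, every additional byte on which the two comparison operands
agree bumps one more counter in the shared map, and the first mismatching
byte ends the walk. A generic sketch of that rule, for reference only --
INC_AFL_AREA and target_ulong come from the patched headers, while the
helper name and loop form here are illustrative:

  static void afl_compcov_log_n(target_ulong cur_loc, target_ulong arg1,
                                target_ulong arg2, int size) {

    int i;

    for (i = 1; i < size; i++) {

      /* Low i bytes of each operand; i never exceeds 7, so the shift
         stays well-defined. */
      uint64_t mask = (1ULL << (i * 8)) - 1;

      if ((arg1 & mask) != (arg2 & mask)) break;
      INC_AFL_AREA(cur_loc + i - 1);

    }

  }

Note that afl_gen_compcov() masks cur_loc with MAP_SIZE - 7 rather than
MAP_SIZE - 1, precisely so that the deepest increment (cur_loc + 6 in the
64-bit case) cannot run past the end of the map.
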
diff --git a/qemu_mode/patches/afl-qemu-tcg-inl.h b/qemu_mode/patches/afl-qemu-tcg-inl.h
index a9c53b8c..d53a1ccf 100644
--- a/qemu_mode/patches/afl-qemu-tcg-inl.h
+++ b/qemu_mode/patches/afl-qemu-tcg-inl.h
@@ -31,275 +31,343 @@
 
  */
 
-void afl_maybe_log(void* cur_loc);
+void afl_maybe_log(void *cur_loc);
 
 /* Note: we convert the 64 bit args to 32 bit and do some alignment
    and endian swap. Maybe it would be better to do the alignment
    and endian swap in tcg_reg_alloc_call(). */
-void tcg_gen_afl_maybe_log_call(target_ulong cur_loc)
-{
-    int real_args, pi;
-    unsigned sizemask, flags;
-    TCGOp *op;
-
-    TCGTemp *arg = tcgv_i64_temp( tcg_const_tl(cur_loc) );
-
-    flags = 0;
-    sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1);
-
-#if defined(__sparc__) && !defined(__arch64__) \
-    && !defined(CONFIG_TCG_INTERPRETER)
-    /* We have 64-bit values in one register, but need to pass as two
-       separate parameters.  Split them.  */
-    int orig_sizemask = sizemask;
-    TCGv_i64 retl, reth;
-    TCGTemp *split_args[MAX_OPC_PARAM];
-
-    retl = NULL;
-    reth = NULL;
-    if (sizemask != 0) {
-        real_args = 0;
-        int is_64bit = sizemask & (1 << 2);
-        if (is_64bit) {
-            TCGv_i64 orig = temp_tcgv_i64(arg);
-            TCGv_i32 h = tcg_temp_new_i32();
-            TCGv_i32 l = tcg_temp_new_i32();
-            tcg_gen_extr_i64_i32(l, h, orig);
-            split_args[real_args++] = tcgv_i32_temp(h);
-            split_args[real_args++] = tcgv_i32_temp(l);
-        } else {
-            split_args[real_args++] = arg;
-        }
-        nargs = real_args;
-        args = split_args;
-        sizemask = 0;
+void tcg_gen_afl_maybe_log_call(target_ulong cur_loc) {
+
+  int      real_args, pi;
+  unsigned sizemask, flags;
+  TCGOp *  op;
+
+  TCGTemp *arg = tcgv_i64_temp(tcg_const_tl(cur_loc));
+
+  flags = 0;
+  sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1);
+
+#if defined(__sparc__) && !defined(__arch64__) && \
+    !defined(CONFIG_TCG_INTERPRETER)
+  /* We have 64-bit values in one register, but need to pass as two
+     separate parameters.  Split them.  */
+  int      orig_sizemask = sizemask;
+  TCGv_i64 retl, reth;
+  TCGTemp *split_args[MAX_OPC_PARAM];
+
+  retl = NULL;
+  reth = NULL;
+  if (sizemask != 0) {
+
+    real_args = 0;
+    int is_64bit = sizemask & (1 << 2);
+    if (is_64bit) {
+
+      TCGv_i64 orig = temp_tcgv_i64(arg);
+      TCGv_i32 h = tcg_temp_new_i32();
+      TCGv_i32 l = tcg_temp_new_i32();
+      tcg_gen_extr_i64_i32(l, h, orig);
+      split_args[real_args++] = tcgv_i32_temp(h);
+      split_args[real_args++] = tcgv_i32_temp(l);
+
+    } else {
+
+      split_args[real_args++] = arg;
+
     }
+
+    nargs = real_args;
+    args = split_args;
+    sizemask = 0;
+
+  }
+
 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
-    int is_64bit = sizemask & (1 << 2);
-    int is_signed = sizemask & (2 << 2);
-    if (!is_64bit) {
-        TCGv_i64 temp = tcg_temp_new_i64();
-        TCGv_i64 orig = temp_tcgv_i64(arg);
-        if (is_signed) {
-            tcg_gen_ext32s_i64(temp, orig);
-        } else {
-            tcg_gen_ext32u_i64(temp, orig);
-        }
-        arg = tcgv_i64_temp(temp);
+  int is_64bit = sizemask & (1 << 2);
+  int is_signed = sizemask & (2 << 2);
+  if (!is_64bit) {
+
+    TCGv_i64 temp = tcg_temp_new_i64();
+    TCGv_i64 orig = temp_tcgv_i64(arg);
+    if (is_signed) {
+
+      tcg_gen_ext32s_i64(temp, orig);
+
+    } else {
+
+      tcg_gen_ext32u_i64(temp, orig);
+
     }
+
+    arg = tcgv_i64_temp(temp);
+
+  }
+
 #endif /* TCG_TARGET_EXTEND_ARGS */
 
-    op = tcg_emit_op(INDEX_op_call);
+  op = tcg_emit_op(INDEX_op_call);
 
-    pi = 0;
+  pi = 0;
 
-    TCGOP_CALLO(op) = 0;
+  TCGOP_CALLO(op) = 0;
+
+  real_args = 0;
+  int is_64bit = sizemask & (1 << 2);
+  if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
 
-    real_args = 0;
-    int is_64bit = sizemask & (1 << 2);
-    if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
-        /* some targets want aligned 64 bit args */
-        if (real_args & 1) {
-            op->args[pi++] = TCG_CALL_DUMMY_ARG;
-            real_args++;
-        }
+    /* some targets want aligned 64 bit args */
+    if (real_args & 1) {
+
+      op->args[pi++] = TCG_CALL_DUMMY_ARG;
+      real_args++;
+
+    }
+
 #endif
-       /* If stack grows up, then we will be placing successive
-          arguments at lower addresses, which means we need to
-          reverse the order compared to how we would normally
-          treat either big or little-endian.  For those arguments
-          that will wind up in registers, this still works for
-          HPPA (the only current STACK_GROWSUP target) since the
-          argument registers are *also* allocated in decreasing
-          order.  If another such target is added, this logic may
-          have to get more complicated to differentiate between
-          stack arguments and register arguments.  */
+    /* If stack grows up, then we will be placing successive
+       arguments at lower addresses, which means we need to
+       reverse the order compared to how we would normally
+       treat either big or little-endian.  For those arguments
+       that will wind up in registers, this still works for
+       HPPA (the only current STACK_GROWSUP target) since the
+       argument registers are *also* allocated in decreasing
+       order.  If another such target is added, this logic may
+       have to get more complicated to differentiate between
+       stack arguments and register arguments.  */
 #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
-        op->args[pi++] = temp_arg(arg + 1);
-        op->args[pi++] = temp_arg(arg);
+    op->args[pi++] = temp_arg(arg + 1);
+    op->args[pi++] = temp_arg(arg);
 #else
-        op->args[pi++] = temp_arg(arg);
-        op->args[pi++] = temp_arg(arg + 1);
+    op->args[pi++] = temp_arg(arg);
+    op->args[pi++] = temp_arg(arg + 1);
 #endif
-        real_args += 2;
-    }
+    real_args += 2;
+
+  }
+
+  op->args[pi++] = temp_arg(arg);
+  real_args++;
+
+  op->args[pi++] = (uintptr_t)&afl_maybe_log;
+  op->args[pi++] = flags;
+  TCGOP_CALLI(op) = real_args;
+
+  /* Make sure the fields didn't overflow.  */
+  tcg_debug_assert(TCGOP_CALLI(op) == real_args);
+  tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
+
+#if defined(__sparc__) && !defined(__arch64__) && \
+    !defined(CONFIG_TCG_INTERPRETER)
+  /* Free all of the parts we allocated above.  */
+  real_args = 0;
+  int is_64bit = orig_sizemask & (1 << 2);
+  if (is_64bit) {
+
+    tcg_temp_free_internal(args[real_args++]);
+    tcg_temp_free_internal(args[real_args++]);
+
+  } else {
 
-    op->args[pi++] = temp_arg(arg);
     real_args++;
 
-    op->args[pi++] = (uintptr_t)&afl_maybe_log;
-    op->args[pi++] = flags;
-    TCGOP_CALLI(op) = real_args;
+  }
 
-    /* Make sure the fields didn't overflow.  */
-    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
-    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
+  if (orig_sizemask & 1) {
+
+    /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
+       Note that describing these as TCGv_i64 eliminates an unnecessary
+       zero-extension that tcg_gen_concat_i32_i64 would create.  */
+    tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
+    tcg_temp_free_i64(retl);
+    tcg_temp_free_i64(reth);
+
+  }
 
-#if defined(__sparc__) && !defined(__arch64__) \
-    && !defined(CONFIG_TCG_INTERPRETER)
-    /* Free all of the parts we allocated above.  */
-    real_args = 0;
-    int is_64bit = orig_sizemask & (1 << 2);
-    if (is_64bit) {
-        tcg_temp_free_internal(args[real_args++]);
-        tcg_temp_free_internal(args[real_args++]);
-    } else {
-        real_args++;
-    }
-    if (orig_sizemask & 1) {
-        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
-           Note that describing these as TCGv_i64 eliminates an unnecessary
-           zero-extension that tcg_gen_concat_i32_i64 would create.  */
-        tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
-        tcg_temp_free_i64(retl);
-        tcg_temp_free_i64(reth);
-    }
 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
-    int is_64bit = sizemask & (1 << 2);
-    if (!is_64bit) {
-        tcg_temp_free_internal(arg);
-    }
+  int is_64bit = sizemask & (1 << 2);
+  if (!is_64bit) { tcg_temp_free_internal(arg); }
 #endif /* TCG_TARGET_EXTEND_ARGS */
+
 }
 
-void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2)
-{
-    int i, real_args, nb_rets, pi;
-    unsigned sizemask, flags;
-    TCGOp *op;
-
-    const int nargs = 3;
-    TCGTemp *args[3] = { tcgv_i64_temp( tcg_const_tl(cur_loc) ),
-                         tcgv_i64_temp(arg1),
-                         tcgv_i64_temp(arg2) };
-
-    flags = 0;
-    sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1) |
-               dh_sizemask(i64, 2) | dh_sizemask(i64, 3);
-
-#if defined(__sparc__) && !defined(__arch64__) \
-    && !defined(CONFIG_TCG_INTERPRETER)
-    /* We have 64-bit values in one register, but need to pass as two
-       separate parameters.  Split them.  */
-    int orig_sizemask = sizemask;
-    int orig_nargs = nargs;
-    TCGv_i64 retl, reth;
-    TCGTemp *split_args[MAX_OPC_PARAM];
-
-    retl = NULL;
-    reth = NULL;
-    if (sizemask != 0) {
-        for (i = real_args = 0; i < nargs; ++i) {
-            int is_64bit = sizemask & (1 << (i+1)*2);
-            if (is_64bit) {
-                TCGv_i64 orig = temp_tcgv_i64(args[i]);
-                TCGv_i32 h = tcg_temp_new_i32();
-                TCGv_i32 l = tcg_temp_new_i32();
-                tcg_gen_extr_i64_i32(l, h, orig);
-                split_args[real_args++] = tcgv_i32_temp(h);
-                split_args[real_args++] = tcgv_i32_temp(l);
-            } else {
-                split_args[real_args++] = args[i];
-            }
-        }
-        nargs = real_args;
-        args = split_args;
-        sizemask = 0;
+void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
+                                  TCGv_i64 arg1, TCGv_i64 arg2) {
+
+  int      i, real_args, nb_rets, pi;
+  unsigned sizemask, flags;
+  TCGOp *  op;
+
+  const int nargs = 3;
+  TCGTemp *args[3] = {tcgv_i64_temp(tcg_const_tl(cur_loc)), tcgv_i64_temp(arg1),
+                      tcgv_i64_temp(arg2)};
+
+  flags = 0;
+  sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1) | dh_sizemask(i64, 2) |
+             dh_sizemask(i64, 3);
+
+#if defined(__sparc__) && !defined(__arch64__) && \
+    !defined(CONFIG_TCG_INTERPRETER)
+  /* We have 64-bit values in one register, but need to pass as two
+     separate parameters.  Split them.  */
+  int      orig_sizemask = sizemask;
+  int      orig_nargs = nargs;
+  TCGv_i64 retl, reth;
+  TCGTemp *split_args[MAX_OPC_PARAM];
+
+  retl = NULL;
+  reth = NULL;
+  if (sizemask != 0) {
+
+    for (i = real_args = 0; i < nargs; ++i) {
+
+      int is_64bit = sizemask & (1 << (i + 1) * 2);
+      if (is_64bit) {
+
+        TCGv_i64 orig = temp_tcgv_i64(args[i]);
+        TCGv_i32 h = tcg_temp_new_i32();
+        TCGv_i32 l = tcg_temp_new_i32();
+        tcg_gen_extr_i64_i32(l, h, orig);
+        split_args[real_args++] = tcgv_i32_temp(h);
+        split_args[real_args++] = tcgv_i32_temp(l);
+
+      } else {
+
+        split_args[real_args++] = args[i];
+
+      }
+
     }
+
+    nargs = real_args;
+    args = split_args;
+    sizemask = 0;
+
+  }
+
 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
-    for (i = 0; i < nargs; ++i) {
-        int is_64bit = sizemask & (1 << (i+1)*2);
-        int is_signed = sizemask & (2 << (i+1)*2);
-        if (!is_64bit) {
-            TCGv_i64 temp = tcg_temp_new_i64();
-            TCGv_i64 orig = temp_tcgv_i64(args[i]);
-            if (is_signed) {
-                tcg_gen_ext32s_i64(temp, orig);
-            } else {
-                tcg_gen_ext32u_i64(temp, orig);
-            }
-            args[i] = tcgv_i64_temp(temp);
-        }
+  for (i = 0; i < nargs; ++i) {
+
+    int is_64bit = sizemask & (1 << (i + 1) * 2);
+    int is_signed = sizemask & (2 << (i + 1) * 2);
+    if (!is_64bit) {
+
+      TCGv_i64 temp = tcg_temp_new_i64();
+      TCGv_i64 orig = temp_tcgv_i64(args[i]);
+      if (is_signed) {
+
+        tcg_gen_ext32s_i64(temp, orig);
+
+      } else {
+
+        tcg_gen_ext32u_i64(temp, orig);
+
+      }
+
+      args[i] = tcgv_i64_temp(temp);
+
     }
+
+  }
+
 #endif /* TCG_TARGET_EXTEND_ARGS */
 
-    op = tcg_emit_op(INDEX_op_call);
+  op = tcg_emit_op(INDEX_op_call);
 
-    pi = 0;
-    nb_rets = 0;
-    TCGOP_CALLO(op) = nb_rets;
+  pi = 0;
+  nb_rets = 0;
+  TCGOP_CALLO(op) = nb_rets;
+
+  real_args = 0;
+  for (i = 0; i < nargs; i++) {
+
+    int is_64bit = sizemask & (1 << (i + 1) * 2);
+    if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
 
-    real_args = 0;
-    for (i = 0; i < nargs; i++) {
-        int is_64bit = sizemask & (1 << (i+1)*2);
-        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
-            /* some targets want aligned 64 bit args */
-            if (real_args & 1) {
-                op->args[pi++] = TCG_CALL_DUMMY_ARG;
-                real_args++;
-            }
+      /* some targets want aligned 64 bit args */
+      if (real_args & 1) {
+
+        op->args[pi++] = TCG_CALL_DUMMY_ARG;
+        real_args++;
+
+      }
+
 #endif
-           /* If stack grows up, then we will be placing successive
-              arguments at lower addresses, which means we need to
-              reverse the order compared to how we would normally
-              treat either big or little-endian.  For those arguments
-              that will wind up in registers, this still works for
-              HPPA (the only current STACK_GROWSUP target) since the
-              argument registers are *also* allocated in decreasing
-              order.  If another such target is added, this logic may
-              have to get more complicated to differentiate between
-              stack arguments and register arguments.  */
+      /* If stack grows up, then we will be placing successive
+         arguments at lower addresses, which means we need to
+         reverse the order compared to how we would normally
+         treat either big or little-endian.  For those arguments
+         that will wind up in registers, this still works for
+         HPPA (the only current STACK_GROWSUP target) since the
+         argument registers are *also* allocated in decreasing
+         order.  If another such target is added, this logic may
+         have to get more complicated to differentiate between
+         stack arguments and register arguments.  */
 #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
-            op->args[pi++] = temp_arg(args[i] + 1);
-            op->args[pi++] = temp_arg(args[i]);
+      op->args[pi++] = temp_arg(args[i] + 1);
+      op->args[pi++] = temp_arg(args[i]);
 #else
-            op->args[pi++] = temp_arg(args[i]);
-            op->args[pi++] = temp_arg(args[i] + 1);
+      op->args[pi++] = temp_arg(args[i]);
+      op->args[pi++] = temp_arg(args[i] + 1);
 #endif
-            real_args += 2;
-            continue;
-        }
+      real_args += 2;
+      continue;
 
-        op->args[pi++] = temp_arg(args[i]);
-        real_args++;
-    }
-    op->args[pi++] = (uintptr_t)func;
-    op->args[pi++] = flags;
-    TCGOP_CALLI(op) = real_args;
-
-    /* Make sure the fields didn't overflow.  */
-    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
-    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
-
-#if defined(__sparc__) && !defined(__arch64__) \
-    && !defined(CONFIG_TCG_INTERPRETER)
-    /* Free all of the parts we allocated above.  */
-    for (i = real_args = 0; i < orig_nargs; ++i) {
-        int is_64bit = orig_sizemask & (1 << (i+1)*2);
-        if (is_64bit) {
-            tcg_temp_free_internal(args[real_args++]);
-            tcg_temp_free_internal(args[real_args++]);
-        } else {
-            real_args++;
-        }
     }
-    if (orig_sizemask & 1) {
-        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
-           Note that describing these as TCGv_i64 eliminates an unnecessary
-           zero-extension that tcg_gen_concat_i32_i64 would create.  */
-        tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
-        tcg_temp_free_i64(retl);
-        tcg_temp_free_i64(reth);
+
+    op->args[pi++] = temp_arg(args[i]);
+    real_args++;
+
+  }
+
+  op->args[pi++] = (uintptr_t)func;
+  op->args[pi++] = flags;
+  TCGOP_CALLI(op) = real_args;
+
+  /* Make sure the fields didn't overflow.  */
+  tcg_debug_assert(TCGOP_CALLI(op) == real_args);
+  tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
+
+#if defined(__sparc__) && !defined(__arch64__) && \
+    !defined(CONFIG_TCG_INTERPRETER)
+  /* Free all of the parts we allocated above.  */
+  for (i = real_args = 0; i < orig_nargs; ++i) {
+
+    int is_64bit = orig_sizemask & (1 << (i + 1) * 2);
+    if (is_64bit) {
+
+      tcg_temp_free_internal(args[real_args++]);
+      tcg_temp_free_internal(args[real_args++]);
+
+    } else {
+
+      real_args++;
+
     }
+
+  }
+
+  if (orig_sizemask & 1) {
+
+    /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
+       Note that describing these as TCGv_i64 eliminates an unnecessary
+       zero-extension that tcg_gen_concat_i32_i64 would create.  */
+    tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
+    tcg_temp_free_i64(retl);
+    tcg_temp_free_i64(reth);
+
+  }
+
 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
-    for (i = 0; i < nargs; ++i) {
-        int is_64bit = sizemask & (1 << (i+1)*2);
-        if (!is_64bit) {
-            tcg_temp_free_internal(args[i]);
-        }
-    }
+  for (i = 0; i < nargs; ++i) {
+
+    int is_64bit = sizemask & (1 << (i + 1) * 2);
+    if (!is_64bit) { tcg_temp_free_internal(args[i]); }
+
+  }
+
 #endif /* TCG_TARGET_EXTEND_ARGS */
+
 }
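
Both helpers in this file hand-assemble a TCG INDEX_op_call instead of
going through QEMU's normal helper registration, so they must encode
argument sizes the same way tcg_gen_callN() does: two bits per slot in
sizemask, slot 0 for the return value and slot i+1 for argument i, the
low bit meaning "64-bit" and the high bit "signed". A standalone sketch
of that encoding (plain C, no QEMU headers; the macro names below are
made up, only the bit layout is QEMU's):

  #include <stdio.h>

  #define SLOT_64BIT(slot)  (1u << ((slot) * 2))
  #define SLOT_SIGNED(slot) (2u << ((slot) * 2))

  int main(void) {

    /* void fn(i64, i64, i64): slot 0 = return, slots 1..3 = args. */
    unsigned sizemask = SLOT_64BIT(1) | SLOT_64BIT(2) | SLOT_64BIT(3);

    for (int i = 0; i < 3; i++)
      printf("arg %d: is_64bit=%d is_signed=%d\n", i,
             !!(sizemask & (1u << ((i + 1) * 2))),
             !!(sizemask & (2u << ((i + 1) * 2))));

    return 0;

  }

One caveat worth recording: the 32-bit SPARC branches in both functions
still assign to nargs and args (in the second function, a const int and
an array), which appears to be a leftover from tcg_gen_callN() and would
not compile there; that path is only reachable on 32-bit SPARC hosts
built without the TCG interpreter.
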
 
diff --git a/qemu_mode/patches/afl-qemu-translate-inl.h b/qemu_mode/patches/afl-qemu-translate-inl.h
index ffe43dba..9abaa961 100644
--- a/qemu_mode/patches/afl-qemu-translate-inl.h
+++ b/qemu_mode/patches/afl-qemu-translate-inl.h
@@ -36,8 +36,8 @@
 
 /* Declared in afl-qemu-cpu-inl.h */
 extern unsigned char *afl_area_ptr;
-extern unsigned int afl_inst_rms;
-extern abi_ulong afl_start_code, afl_end_code;
+extern unsigned int   afl_inst_rms;
+extern abi_ulong      afl_start_code, afl_end_code;
 
 void tcg_gen_afl_maybe_log_call(target_ulong cur_loc);
 
@@ -59,14 +59,16 @@ static void afl_gen_trace(target_ulong cur_loc) {
   /* Optimize for cur_loc > afl_end_code, which is the most likely case on
      Linux systems. */
 
-  if (cur_loc > afl_end_code || cur_loc < afl_start_code /*|| !afl_area_ptr*/) // not needed because of static dummy buffer
+  if (cur_loc > afl_end_code ||
+      cur_loc < afl_start_code /*|| !afl_area_ptr*/)  // not needed because of
+                                                      // static dummy buffer
     return;
 
   /* Looks like QEMU always maps to fixed locations, so ASLR is not a
      concern. Phew. But instruction addresses may be aligned. Let's mangle
      the value to get something quasi-uniform. */
 
-  cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+  cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
   cur_loc &= MAP_SIZE - 1;
 
   /* Implement probabilistic instrumentation by looking at scrambled block
@@ -75,5 +77,6 @@ static void afl_gen_trace(target_ulong cur_loc) {
   if (cur_loc >= afl_inst_rms) return;
 
   tcg_gen_afl_maybe_log_call(cur_loc);
-  
+
 }
+
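
The shift-and-XOR line above is the entire block-address hash: it smears
aligned guest addresses across the 64 kB map, and the AFL_INST_RATIO gate
then falls out of a single compare against a precomputed threshold. Both
steps in a self-contained sketch (MAP_SIZE is 1 << 16 as in config.h;
afl_inst_rms would be MAP_SIZE scaled by AFL_INST_RATIO / 100):

  #include <stdio.h>

  #define MAP_SIZE (1 << 16)

  int main(void) {

    unsigned long cur_loc = 0x400f30;        /* hypothetical block address */
    unsigned long afl_inst_rms = MAP_SIZE;   /* i.e. a 100% ratio */

    cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
    cur_loc &= MAP_SIZE - 1;

    if (cur_loc >= afl_inst_rms)
      puts("block skipped (probabilistic instrumentation)");
    else
      printf("log call emitted for map index %lu\n", cur_loc);

    return 0;

  }
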
diff --git a/afl-analyze.c b/src/afl-analyze.c
index 0e8c9fb0..e3014256 100644
--- a/afl-analyze.c
+++ b/src/afl-analyze.c
@@ -22,7 +22,7 @@
 #define AFL_MAIN
 
 #ifdef __ANDROID__
-  #include "android-ashmem.h"
+#  include "android-ashmem.h"
 #endif
 #include "config.h"
 #include "types.h"
@@ -30,7 +30,7 @@
 #include "alloc-inl.h"
 #include "hash.h"
 #include "sharedmem.h"
-#include "afl-common.h"
+#include "common.h"
 
 #include <stdio.h>
 #include <unistd.h>
@@ -50,61 +50,59 @@
 #include <sys/types.h>
 #include <sys/resource.h>
 
-static s32 child_pid;                 /* PID of the tested program         */
+static s32 child_pid;                  /* PID of the tested program         */
 
-       u8* trace_bits;                /* SHM with instrumentation bitmap   */
+u8* trace_bits;                        /* SHM with instrumentation bitmap   */
 
-static u8 *in_file,                   /* Analyzer input test case          */
-          *prog_in,                   /* Targeted program input file       */
-          *target_path,               /* Path to target binary             */
-          *doc_path;                  /* Path to docs                      */
+static u8 *in_file,                    /* Analyzer input test case          */
+    *prog_in,                          /* Targeted program input file       */
+    *target_path,                      /* Path to target binary             */
+    *doc_path;                         /* Path to docs                      */
 
-static u8 *in_data;                   /* Input data for analysis           */
+static u8* in_data;                    /* Input data for analysis           */
 
-static u32 in_len,                    /* Input data length                 */
-           orig_cksum,                /* Original checksum                 */
-           total_execs,               /* Total number of execs             */
-           exec_hangs,                /* Total number of hangs             */
-           exec_tmout = EXEC_TIMEOUT; /* Exec timeout (ms)                 */
+static u32 in_len,                     /* Input data length                 */
+    orig_cksum,                        /* Original checksum                 */
+    total_execs,                       /* Total number of execs             */
+    exec_hangs,                        /* Total number of hangs             */
+    exec_tmout = EXEC_TIMEOUT;         /* Exec timeout (ms)                 */
 
-static u64 mem_limit = MEM_LIMIT;     /* Memory limit (MB)                 */
+static u64 mem_limit = MEM_LIMIT;      /* Memory limit (MB)                 */
 
-static s32 dev_null_fd = -1;          /* FD to /dev/null                   */
+static s32 dev_null_fd = -1;           /* FD to /dev/null                   */
 
-static u8  edges_only,                /* Ignore hit counts?                */
-           use_hex_offsets,           /* Show hex offsets?                 */
-           use_stdin = 1;             /* Use stdin for program input?      */
-
-static volatile u8
-           stop_soon,                 /* Ctrl-C pressed?                   */
-           child_timed_out;           /* Child timed out?                  */
+static u8 edges_only,                  /* Ignore hit counts?                */
+    use_hex_offsets,                   /* Show hex offsets?                 */
+    use_stdin = 1;                     /* Use stdin for program input?      */
 
+static volatile u8 stop_soon,          /* Ctrl-C pressed?                   */
+    child_timed_out;                   /* Child timed out?                  */
 
 /* Constants used for describing byte behavior. */
 
-#define RESP_NONE       0x00          /* Changing byte is a no-op.         */
-#define RESP_MINOR      0x01          /* Some changes have no effect.      */
-#define RESP_VARIABLE   0x02          /* Changes produce variable paths.   */
-#define RESP_FIXED      0x03          /* Changes produce fixed patterns.   */
-
-#define RESP_LEN        0x04          /* Potential length field            */
-#define RESP_CKSUM      0x05          /* Potential checksum                */
-#define RESP_SUSPECT    0x06          /* Potential "suspect" blob          */
+#define RESP_NONE 0x00     /* Changing byte is a no-op.         */
+#define RESP_MINOR 0x01    /* Some changes have no effect.      */
+#define RESP_VARIABLE 0x02 /* Changes produce variable paths.   */
+#define RESP_FIXED 0x03    /* Changes produce fixed patterns.   */
 
+#define RESP_LEN 0x04     /* Potential length field            */
+#define RESP_CKSUM 0x05   /* Potential checksum                */
+#define RESP_SUSPECT 0x06 /* Potential "suspect" blob          */
 
-/* Classify tuple counts. This is a slow & naive version, but good enough here. */
+/* Classify tuple counts. This is a slow & naive version, but good enough here.
+ */
 
 static u8 count_class_lookup[256] = {
 
-  [0]           = 0,
-  [1]           = 1,
-  [2]           = 2,
-  [3]           = 4,
-  [4 ... 7]     = 8,
-  [8 ... 15]    = 16,
-  [16 ... 31]   = 32,
-  [32 ... 127]  = 64,
-  [128 ... 255] = 128
+    [0] = 0,
+    [1] = 1,
+    [2] = 2,
+    [3] = 4,
+    [4 ... 7] = 8,
+    [8 ... 15] = 16,
+    [16 ... 31] = 32,
+    [32 ... 127] = 64,
+    [128 ... 255] = 128
 
 };
 
@@ -115,61 +113,62 @@ static void classify_counts(u8* mem) {
   if (edges_only) {
 
     while (i--) {
+
       if (*mem) *mem = 1;
       mem++;
+
     }
 
   } else {
 
     while (i--) {
+
       *mem = count_class_lookup[*mem];
       mem++;
+
     }
 
   }
 
 }
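
The designated-initializer table driving the non-edges-only path is just
a closed form of "round hit counts into coarse power-of-two buckets", so
that two runs differing only in loop-count jitter classify identically.
The same buckets in procedural form (hypothetical helper, equivalent
output for every input):

  static u8 count_class(u8 cnt) {

    if (cnt == 0) return 0;
    if (cnt == 1) return 1;
    if (cnt == 2) return 2;
    if (cnt == 3) return 4;
    if (cnt <= 7) return 8;
    if (cnt <= 15) return 16;
    if (cnt <= 31) return 32;
    if (cnt <= 127) return 64;
    return 128;

  }
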
 
-
 /* See if any bytes are set in the bitmap. */
 
 static inline u8 anything_set(void) {
 
   u32* ptr = (u32*)trace_bits;
-  u32  i   = (MAP_SIZE >> 2);
+  u32  i = (MAP_SIZE >> 2);
 
-  while (i--) if (*(ptr++)) return 1;
+  while (i--)
+    if (*(ptr++)) return 1;
 
   return 0;
 
 }
 
-
 /* Get rid of temp files (atexit handler). */
 
 static void at_exit_handler(void) {
 
-  unlink(prog_in); /* Ignore errors */
+  unlink(prog_in);                                         /* Ignore errors */
 
 }
 
-
 /* Read initial file. */
 
 static void read_initial_file(void) {
 
   struct stat st;
-  s32 fd = open(in_file, O_RDONLY);
+  s32         fd = open(in_file, O_RDONLY);
 
   if (fd < 0) PFATAL("Unable to open '%s'", in_file);
 
-  if (fstat(fd, &st) || !st.st_size)
-    FATAL("Zero-sized input file.");
+  if (fstat(fd, &st) || !st.st_size) FATAL("Zero-sized input file.");
 
   if (st.st_size >= TMIN_MAX_FILE)
     FATAL("Input file is too large (%u MB max)", TMIN_MAX_FILE / 1024 / 1024);
 
-  in_len  = st.st_size;
+  in_len = st.st_size;
   in_data = ck_alloc_nozero(in_len);
 
   ck_read(fd, in_data, in_len, in_file);
@@ -180,14 +179,13 @@ static void read_initial_file(void) {
 
 }
 
-
 /* Write output file. */
 
 static s32 write_to_file(u8* path, u8* mem, u32 len) {
 
   s32 ret;
 
-  unlink(path); /* Ignore errors */
+  unlink(path);                                            /* Ignore errors */
 
   ret = open(path, O_RDWR | O_CREAT | O_EXCL, 0600);
 
@@ -201,7 +199,6 @@ static s32 write_to_file(u8* path, u8* mem, u32 len) {
 
 }
 
-
 /* Handle timeout signal. */
 
 static void handle_timeout(int sig) {
@@ -211,14 +208,13 @@ static void handle_timeout(int sig) {
 
 }
 
-
 /* Execute target application. Returns exec checksum, or 0 if program
    times out. */
 
 static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
 
   static struct itimerval it;
-  int status = 0;
+  int                     status = 0;
 
   s32 prog_in_fd;
   u32 cksum;
@@ -237,8 +233,7 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
     struct rlimit r;
 
     if (dup2(use_stdin ? prog_in_fd : dev_null_fd, 0) < 0 ||
-        dup2(dev_null_fd, 1) < 0 ||
-        dup2(dev_null_fd, 2) < 0) {
+        dup2(dev_null_fd, 1) < 0 || dup2(dev_null_fd, 2) < 0) {
 
       *(u32*)trace_bits = EXEC_FAIL_SIG;
       PFATAL("dup2() failed");
@@ -254,18 +249,18 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
 
 #ifdef RLIMIT_AS
 
-      setrlimit(RLIMIT_AS, &r); /* Ignore errors */
+      setrlimit(RLIMIT_AS, &r);                            /* Ignore errors */
 
 #else
 
-      setrlimit(RLIMIT_DATA, &r); /* Ignore errors */
+      setrlimit(RLIMIT_DATA, &r);                          /* Ignore errors */
 
 #endif /* ^RLIMIT_AS */
 
     }
 
     r.rlim_max = r.rlim_cur = 0;
-    setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
+    setrlimit(RLIMIT_CORE, &r);                            /* Ignore errors */
 
     execv(target_path, argv);
 
@@ -303,8 +298,10 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
   total_execs++;
 
   if (stop_soon) {
+
     SAYF(cRST cLRD "\n+++ Analysis aborted by user +++\n" cRST);
     exit(1);
+
   }
 
   /* Always discard inputs that time out. */
@@ -335,7 +332,6 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
 
 }
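
run_target() combines three classic pieces: dup2() plumbing so the child
reads the candidate input, rlimits that cap memory and suppress core
dumps, and an interval timer whose SIGALRM fires handle_timeout() (set up
earlier in this file) to kill a stuck child. The timer half in isolation,
as a hedged sketch with exec_tmout in milliseconds:

  static struct itimerval it;

  it.it_value.tv_sec = (exec_tmout / 1000);
  it.it_value.tv_usec = (exec_tmout % 1000) * 1000;
  setitimer(ITIMER_REAL, &it, NULL);         /* arm before waitpid() */

  /* ... waitpid(child_pid, &status, 0) ... */

  it.it_value.tv_sec = 0;
  it.it_value.tv_usec = 0;
  setitimer(ITIMER_REAL, &it, NULL);         /* disarm afterwards */
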
 
-
 #ifdef USE_COLOR
 
 /* Helper function to display a human-readable character. */
@@ -353,24 +349,25 @@ static void show_char(u8 val) {
 
 }
 
-
 /* Show the legend */
 
 static void show_legend(void) {
 
-  SAYF("    " cLGR bgGRA " 01 " cRST " - no-op block              "
-              cBLK bgLGN " 01 " cRST " - suspected length field\n"
-       "    " cBRI bgGRA " 01 " cRST " - superficial content      "
-              cBLK bgYEL " 01 " cRST " - suspected cksum or magic int\n"
-       "    " cBLK bgCYA " 01 " cRST " - critical stream          "
-              cBLK bgLRD " 01 " cRST " - suspected checksummed block\n"
+  SAYF("    " cLGR bgGRA " 01 " cRST " - no-op block              " cBLK bgLGN
+       " 01 " cRST
+       " - suspected length field\n"
+       "    " cBRI bgGRA " 01 " cRST " - superficial content      " cBLK bgYEL
+       " 01 " cRST
+       " - suspected cksum or magic int\n"
+       "    " cBLK bgCYA " 01 " cRST " - critical stream          " cBLK bgLRD
+       " 01 " cRST
+       " - suspected checksummed block\n"
        "    " cBLK bgMGN " 01 " cRST " - \"magic value\" section\n\n");
 
 }
 
 #endif /* USE_COLOR */
 
-
 /* Interpret and report a pattern in the input file. */
 
 static void dump_hex(u8* buf, u32 len, u8* b_data) {
@@ -385,7 +382,7 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) {
     u32 rlen = 1;
 #endif /* ^USE_COLOR */
 
-    u8  rtype = b_data[i] & 0x0f;
+    u8 rtype = b_data[i] & 0x0f;
 
     /* Look ahead to determine the length of run. */
 
@@ -404,51 +401,61 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) {
 
         case 2: {
 
-            u16 val = *(u16*)(in_data + i);
+          u16 val = *(u16*)(in_data + i);
+
+          /* Small integers may be length fields. */
 
-            /* Small integers may be length fields. */
+          if (val && (val <= in_len || SWAP16(val) <= in_len)) {
 
-            if (val && (val <= in_len || SWAP16(val) <= in_len)) {
-              rtype = RESP_LEN;
-              break;
-            }
+            rtype = RESP_LEN;
+            break;
+
+          }
 
-            /* Uniform integers may be checksums. */
+          /* Uniform integers may be checksums. */
 
-            if (val && abs(in_data[i] - in_data[i + 1]) > 32) {
-              rtype = RESP_CKSUM;
-              break;
-            }
+          if (val && abs(in_data[i] - in_data[i + 1]) > 32) {
 
+            rtype = RESP_CKSUM;
             break;
 
           }
 
+          break;
+
+        }
+
         case 4: {
 
-            u32 val = *(u32*)(in_data + i);
+          u32 val = *(u32*)(in_data + i);
 
-            /* Small integers may be length fields. */
+          /* Small integers may be length fields. */
 
-            if (val && (val <= in_len || SWAP32(val) <= in_len)) {
-              rtype = RESP_LEN;
-              break;
-            }
+          if (val && (val <= in_len || SWAP32(val) <= in_len)) {
 
-            /* Uniform integers may be checksums. */
+            rtype = RESP_LEN;
+            break;
 
-            if (val && (in_data[i] >> 7 != in_data[i + 1] >> 7 ||
-                in_data[i] >> 7 != in_data[i + 2] >> 7 ||
-                in_data[i] >> 7 != in_data[i + 3] >> 7)) {
-              rtype = RESP_CKSUM;
-              break;
-            }
+          }
 
+          /* Uniform integers may be checksums. */
+
+          if (val && (in_data[i] >> 7 != in_data[i + 1] >> 7 ||
+                      in_data[i] >> 7 != in_data[i + 2] >> 7 ||
+                      in_data[i] >> 7 != in_data[i + 3] >> 7)) {
+
+            rtype = RESP_CKSUM;
             break;
 
           }
 
-        case 1: case 3: case 5 ... MAX_AUTO_EXTRA - 1: break;
+          break;
+
+        }
+
+        case 1:
+        case 3:
+        case 5 ... MAX_AUTO_EXTRA - 1: break;
 
         default: rtype = RESP_SUSPECT;
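
Restated compactly, the two heuristics in this switch are: a 16- or
32-bit value that, in either byte order, does not exceed the input length
is flagged as a possible length field, and one whose bytes look unrelated
to each other is flagged as a possible checksum or magic integer. The
16-bit case as a sketch (classify16 is a made-up name; SWAP16 and the
RESP_* constants are the ones used in this file):

  static u8 classify16(u16 val, u32 in_len, u8 b0, u8 b1) {

    if (val && (val <= in_len || SWAP16(val) <= in_len))
      return RESP_LEN;                       /* plausible length field */

    if (val && abs(b0 - b1) > 32)
      return RESP_CKSUM;                     /* bytes look unrelated */

    return RESP_FIXED;                       /* keep the prior verdict */

  }
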
 
@@ -477,19 +484,22 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) {
 
       switch (rtype) {
 
-        case RESP_NONE:     SAYF(cLGR bgGRA); break;
-        case RESP_MINOR:    SAYF(cBRI bgGRA); break;
+        case RESP_NONE: SAYF(cLGR bgGRA); break;
+        case RESP_MINOR: SAYF(cBRI bgGRA); break;
         case RESP_VARIABLE: SAYF(cBLK bgCYA); break;
-        case RESP_FIXED:    SAYF(cBLK bgMGN); break;
-        case RESP_LEN:      SAYF(cBLK bgLGN); break;
-        case RESP_CKSUM:    SAYF(cBLK bgYEL); break;
-        case RESP_SUSPECT:  SAYF(cBLK bgLRD); break;
+        case RESP_FIXED: SAYF(cBLK bgMGN); break;
+        case RESP_LEN: SAYF(cBLK bgLGN); break;
+        case RESP_CKSUM: SAYF(cBLK bgYEL); break;
+        case RESP_SUSPECT: SAYF(cBLK bgLRD); break;
 
       }
 
       show_char(in_data[i + off]);
 
-      if (off != rlen - 1 && (i + off + 1) % 16) SAYF(" "); else SAYF(cRST " ");
+      if (off != rlen - 1 && (i + off + 1) % 16)
+        SAYF(" ");
+      else
+        SAYF(cRST " ");
 
     }
 
@@ -502,13 +512,13 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) {
 
     switch (rtype) {
 
-      case RESP_NONE:     SAYF("no-op block\n"); break;
-      case RESP_MINOR:    SAYF("superficial content\n"); break;
+      case RESP_NONE: SAYF("no-op block\n"); break;
+      case RESP_MINOR: SAYF("superficial content\n"); break;
       case RESP_VARIABLE: SAYF("critical stream\n"); break;
-      case RESP_FIXED:    SAYF("\"magic value\" section\n"); break;
-      case RESP_LEN:      SAYF("suspected length field\n"); break;
-      case RESP_CKSUM:    SAYF("suspected cksum or magic int\n"); break;
-      case RESP_SUSPECT:  SAYF("suspected checksummed block\n"); break;
+      case RESP_FIXED: SAYF("\"magic value\" section\n"); break;
+      case RESP_LEN: SAYF("suspected length field\n"); break;
+      case RESP_CKSUM: SAYF("suspected cksum or magic int\n"); break;
+      case RESP_SUSPECT: SAYF("suspected checksummed block\n"); break;
 
     }
 
@@ -524,8 +534,6 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) {
 
 }
 
-
-
 /* Actually analyze! */
 
 static void analyze(char** argv) {
@@ -536,7 +544,7 @@ static void analyze(char** argv) {
   u8* b_data = ck_alloc(in_len + 1);
   u8  seq_byte = 0;
 
-  b_data[in_len] = 0xff; /* Intentional terminator. */
+  b_data[in_len] = 0xff;                         /* Intentional terminator. */
 
   ACTF("Analyzing input file (this may take a while)...\n");
 
@@ -587,12 +595,15 @@ static void analyze(char** argv) {
 
       b_data[i] = RESP_FIXED;
 
-    } else b_data[i] = RESP_VARIABLE;
+    } else
+
+      b_data[i] = RESP_VARIABLE;
 
     /* When all checksums change, flip most significant bit of b_data. */
 
-    if (prev_xff != xor_ff && prev_x01 != xor_01 &&
-        prev_s10 != sub_10 && prev_a10 != add_10) seq_byte ^= 0x80;
+    if (prev_xff != xor_ff && prev_x01 != xor_01 && prev_s10 != sub_10 &&
+        prev_a10 != add_10)
+      seq_byte ^= 0x80;
 
     b_data[i] |= seq_byte;
 
@@ -601,7 +612,7 @@ static void analyze(char** argv) {
     prev_s10 = sub_10;
     prev_a10 = add_10;
 
-  } 
+  }
 
   dump_hex(in_data, in_len, b_data);
 
@@ -618,8 +629,6 @@ static void analyze(char** argv) {
 
 }
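
For context, the loop that feeds this code probes every byte offset with
four small perturbations -- XOR 0xff, XOR 0x01, subtract 0x10, add 0x10 --
and compares the resulting execution-path checksums against the baseline.
A simplified rendering of one iteration (names taken from this file, the
orchestration trimmed):

  in_data[i] ^= 0xff; xor_ff = run_target(argv, in_data, in_len, 0);
  in_data[i] ^= 0xfe; xor_01 = run_target(argv, in_data, in_len, 0);
  in_data[i] ^= 0x01;                        /* back to the original byte */

  in_data[i] -= 0x10; sub_10 = run_target(argv, in_data, in_len, 0);
  in_data[i] += 0x20; add_10 = run_target(argv, in_data, in_len, 0);
  in_data[i] -= 0x10;                        /* restore */

  /* All four equal to orig_cksum -> no-op byte; some equal -> superficial
     content; all four equal to each other but not the baseline -> fixed
     "magic" pattern; anything else -> critical stream. */
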
 
-
-
 /* Handle Ctrl-C and the like. */
 
 static void handle_stop_sig(int sig) {
@@ -630,7 +639,6 @@ static void handle_stop_sig(int sig) {
 
 }
 
-
 /* Do basic preparations - persistent fds, filenames, etc. */
 
 static void set_up_environment(void) {
@@ -674,18 +682,20 @@ static void set_up_environment(void) {
   if (x) {
 
     if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR)))
-      FATAL("Custom MSAN_OPTIONS set without exit_code="
-            STRINGIFY(MSAN_ERROR) " - please fix!");
+      FATAL("Custom MSAN_OPTIONS set without exit_code=" STRINGIFY(
+          MSAN_ERROR) " - please fix!");
 
     if (!strstr(x, "symbolize=0"))
       FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!");
 
   }
 
-  setenv("ASAN_OPTIONS", "abort_on_error=1:"
-                         "detect_leaks=0:"
-                         "symbolize=0:"
-                         "allocator_may_return_null=1", 0);
+  setenv("ASAN_OPTIONS",
+         "abort_on_error=1:"
+         "detect_leaks=0:"
+         "symbolize=0:"
+         "allocator_may_return_null=1",
+         0);
 
   setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
                          "symbolize=0:"
@@ -694,21 +704,22 @@ static void set_up_environment(void) {
                          "msan_track_origins=0", 0);
 
   if (getenv("AFL_PRELOAD")) {
+
     setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);
     setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);
+
   }
 
 }
 
-
 /* Setup signal handlers, duh. */
 
 static void setup_signal_handlers(void) {
 
   struct sigaction sa;
 
-  sa.sa_handler   = NULL;
-  sa.sa_flags     = SA_RESTART;
+  sa.sa_handler = NULL;
+  sa.sa_flags = SA_RESTART;
   sa.sa_sigaction = NULL;
 
   sigemptyset(&sa.sa_mask);
@@ -727,43 +738,42 @@ static void setup_signal_handlers(void) {
 
 }
 
-
 /* Display usage hints. */
 
 static void usage(u8* argv0) {
 
-  SAYF("\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
+  SAYF(
+      "\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
 
-       "Required parameters:\n\n"
+      "Required parameters:\n\n"
 
-       "  -i file       - input test case to be analyzed by the tool\n"
+      "  -i file       - input test case to be analyzed by the tool\n"
 
-       "Execution control settings:\n\n"
+      "Execution control settings:\n\n"
 
-       "  -f file       - input file read by the tested program (stdin)\n"
-       "  -t msec       - timeout for each run (%u ms)\n"
-       "  -m megs       - memory limit for child process (%u MB)\n"
-       "  -Q            - use binary-only instrumentation (QEMU mode)\n"
-       "  -U            - use unicorn-based instrumentation (Unicorn mode)\n\n"
+      "  -f file       - input file read by the tested program (stdin)\n"
+      "  -t msec       - timeout for each run (%d ms)\n"
+      "  -m megs       - memory limit for child process (%d MB)\n"
+      "  -Q            - use binary-only instrumentation (QEMU mode)\n"
+      "  -U            - use unicorn-based instrumentation (Unicorn mode)\n\n"
 
-       "Analysis settings:\n\n"
+      "Analysis settings:\n\n"
 
-       "  -e            - look for edge coverage only, ignore hit counts\n\n"
+      "  -e            - look for edge coverage only, ignore hit counts\n\n"
 
-       "For additional tips, please consult %s/README.\n\n",
+      "For additional tips, please consult %s/README.\n\n",
 
-       argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);
+      argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);
 
   exit(1);
 
 }
 
-
 /* Find binary. */
 
 static void find_binary(u8* fname) {
 
-  u8* env_path = 0;
+  u8*         env_path = 0;
   struct stat st;
 
   if (strchr(fname, '/') || !(env_path = getenv("PATH"))) {
@@ -786,7 +796,9 @@ static void find_binary(u8* fname) {
         memcpy(cur_elem, env_path, delim - env_path);
         delim++;
 
-      } else cur_elem = ck_strdup(env_path);
+      } else
+
+        cur_elem = ck_strdup(env_path);
 
       env_path = delim;
 
@@ -798,7 +810,8 @@ static void find_binary(u8* fname) {
       ck_free(cur_elem);
 
       if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
-          (st.st_mode & 0111) && st.st_size >= 4) break;
+          (st.st_mode & 0111) && st.st_size >= 4)
+        break;
 
       ck_free(target_path);
       target_path = 0;
@@ -811,13 +824,12 @@ static void find_binary(u8* fname) {
 
 }
 
-
 /* Fix up argv for QEMU. */
 
 static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
   char** new_argv = ck_alloc(sizeof(char*) * (argc + 4));
-  u8 *tmp, *cp, *rsl, *own_copy;
+  u8 *   tmp, *cp, *rsl, *own_copy;
 
   memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc);
 
@@ -832,8 +844,7 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
     cp = alloc_printf("%s/afl-qemu-trace", tmp);
 
-    if (access(cp, X_OK))
-      FATAL("Unable to find '%s'", tmp);
+    if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp);
 
     target_path = new_argv[0] = cp;
     return new_argv;
@@ -857,7 +868,9 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
     }
 
-  } else ck_free(own_copy);
+  } else
+
+    ck_free(own_copy);
 
   if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) {
 
@@ -882,7 +895,7 @@ int main(int argc, char** argv) {
 
   SAYF(cCYA "afl-analyze" VERSION cRST " by <lcamtuf@google.com>\n");
 
-  while ((opt = getopt(argc,argv,"+i:f:m:t:eQU")) > 0)
+  while ((opt = getopt(argc, argv, "+i:f:m:t:eQU")) > 0)
 
     switch (opt) {
 
@@ -896,7 +909,7 @@ int main(int argc, char** argv) {
 
         if (prog_in) FATAL("Multiple -f options not supported");
         use_stdin = 0;
-        prog_in   = optarg;
+        prog_in = optarg;
         break;
 
       case 'e':
@@ -907,40 +920,41 @@ int main(int argc, char** argv) {
 
       case 'm': {
 
-          u8 suffix = 'M';
+        u8 suffix = 'M';
 
-          if (mem_limit_given) FATAL("Multiple -m options not supported");
-          mem_limit_given = 1;
+        if (mem_limit_given) FATAL("Multiple -m options not supported");
+        mem_limit_given = 1;
 
-          if (!strcmp(optarg, "none")) {
+        if (!strcmp(optarg, "none")) {
 
-            mem_limit = 0;
-            break;
+          mem_limit = 0;
+          break;
 
-          }
+        }
 
-          if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
-              optarg[0] == '-') FATAL("Bad syntax used for -m");
+        if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
+            optarg[0] == '-')
+          FATAL("Bad syntax used for -m");
 
-          switch (suffix) {
+        switch (suffix) {
 
-            case 'T': mem_limit *= 1024 * 1024; break;
-            case 'G': mem_limit *= 1024; break;
-            case 'k': mem_limit /= 1024; break;
-            case 'M': break;
+          case 'T': mem_limit *= 1024 * 1024; break;
+          case 'G': mem_limit *= 1024; break;
+          case 'k': mem_limit /= 1024; break;
+          case 'M': break;
 
-            default:  FATAL("Unsupported suffix or bad syntax for -m");
+          default: FATAL("Unsupported suffix or bad syntax for -m");
 
-          }
+        }
 
-          if (mem_limit < 5) FATAL("Dangerously low value of -m");
+        if (mem_limit < 5) FATAL("Dangerously low value of -m");
 
-          if (sizeof(rlim_t) == 4 && mem_limit > 2000)
-            FATAL("Value of -m out of range on 32-bit systems");
+        if (sizeof(rlim_t) == 4 && mem_limit > 2000)
+          FATAL("Value of -m out of range on 32-bit systems");
 
-        }
+      }
 
-        break;
+      break;
 
       case 't':
 
@@ -970,9 +984,7 @@ int main(int argc, char** argv) {
         unicorn_mode = 1;
         break;
 
-      default:
-
-        usage(argv[0]);
+      default: usage(argv[0]);
 
     }
 
diff --git a/afl-as.c b/src/afl-as.c
index 94595f24..57f4c4a3 100644
--- a/afl-as.c
+++ b/src/afl-as.c
@@ -48,39 +48,38 @@
 #include <sys/wait.h>
 #include <sys/time.h>
 
-static u8** as_params;          /* Parameters passed to the real 'as'   */
+static u8** as_params;              /* Parameters passed to the real 'as'   */
 
-static u8*  input_file;         /* Originally specified input file      */
-static u8*  modified_file;      /* Instrumented file for the real 'as'  */
+static u8* input_file;              /* Originally specified input file      */
+static u8* modified_file;           /* Instrumented file for the real 'as'  */
 
-static u8   be_quiet,           /* Quiet mode (no stderr output)        */
-            clang_mode,         /* Running in clang mode?               */
-            pass_thru,          /* Just pass data through?              */
-            just_version,       /* Just show version?                   */
-            sanitizer;          /* Using ASAN / MSAN                    */
+static u8 be_quiet,                 /* Quiet mode (no stderr output)        */
+    clang_mode,                     /* Running in clang mode?               */
+    pass_thru,                      /* Just pass data through?              */
+    just_version,                   /* Just show version?                   */
+    sanitizer;                      /* Using ASAN / MSAN                    */
 
-static u32  inst_ratio = 100,   /* Instrumentation probability (%)      */
-            as_par_cnt = 1;     /* Number of params to 'as'             */
+static u32 inst_ratio = 100,        /* Instrumentation probability (%)      */
+    as_par_cnt = 1;                 /* Number of params to 'as'             */
 
-/* If we don't find --32 or --64 in the command line, default to 
+/* If we don't find --32 or --64 in the command line, default to
    instrumentation for whichever mode we were compiled with. This is not
    perfect, but should do the trick for almost all use cases. */
 
 #ifdef __x86_64__
 
-static u8   use_64bit = 1;
+static u8 use_64bit = 1;
 
 #else
 
-static u8   use_64bit = 0;
+static u8 use_64bit = 0;
 
-#ifdef __APPLE__
-#  error "Sorry, 32-bit Apple platforms are not supported."
-#endif /* __APPLE__ */
+#  ifdef __APPLE__
+#    error "Sorry, 32-bit Apple platforms are not supported."
+#  endif /* __APPLE__ */
 
 #endif /* ^__x86_64__ */
 
-
 /* Examine and modify parameters to pass to 'as'. Note that the file name
    is always the last parameter passed by GCC, so we exploit this property
    to keep the code simple. */
@@ -134,8 +133,10 @@ static void edit_params(int argc, char** argv) {
 
   for (i = 1; i < argc - 1; i++) {
 
-    if (!strcmp(argv[i], "--64")) use_64bit = 1;
-    else if (!strcmp(argv[i], "--32")) use_64bit = 0;
+    if (!strcmp(argv[i], "--64"))
+      use_64bit = 1;
+    else if (!strcmp(argv[i], "--32"))
+      use_64bit = 0;
 
 #ifdef __APPLE__
 
@@ -143,7 +144,8 @@ static void edit_params(int argc, char** argv) {
 
     if (!strcmp(argv[i], "-arch") && i + 1 < argc) {
 
-      if (!strcmp(argv[i + 1], "x86_64")) use_64bit = 1;
+      if (!strcmp(argv[i + 1], "x86_64"))
+        use_64bit = 1;
       else if (!strcmp(argv[i + 1], "i386"))
         FATAL("Sorry, 32-bit Apple platforms are not supported.");
 
@@ -181,13 +183,17 @@ static void edit_params(int argc, char** argv) {
   if (input_file[0] == '-') {
 
     if (!strcmp(input_file + 1, "-version")) {
+
       just_version = 1;
       modified_file = input_file;
       goto wrap_things_up;
+
     }
 
-    if (input_file[1]) FATAL("Incorrect use (not called through afl-gcc?)");
-      else input_file = NULL;
+    if (input_file[1])
+      FATAL("Incorrect use (not called through afl-gcc?)");
+    else
+      input_file = NULL;
 
   } else {
 
@@ -197,22 +203,21 @@ static void edit_params(int argc, char** argv) {
        NSS. */
 
     if (strncmp(input_file, tmp_dir, strlen(tmp_dir)) &&
-        strncmp(input_file, "/var/tmp/", 9) &&
-        strncmp(input_file, "/tmp/", 5)) pass_thru = 1;
+        strncmp(input_file, "/var/tmp/", 9) && strncmp(input_file, "/tmp/", 5))
+      pass_thru = 1;
 
   }
 
-  modified_file = alloc_printf("%s/.afl-%u-%u.s", tmp_dir, getpid(),
-                               (u32)time(NULL));
+  modified_file =
+      alloc_printf("%s/.afl-%u-%u.s", tmp_dir, getpid(), (u32)time(NULL));
 
 wrap_things_up:
 
   as_params[as_par_cnt++] = modified_file;
-  as_params[as_par_cnt]   = NULL;
+  as_params[as_par_cnt] = NULL;
 
 }
 
-
 /* Process input file, generate modified_file. Insert instrumentation in all
    the appropriate places. */
 
@@ -222,11 +227,11 @@ static void add_instrumentation(void) {
 
   FILE* inf;
   FILE* outf;
-  s32 outfd;
-  u32 ins_lines = 0;
+  s32   outfd;
+  u32   ins_lines = 0;
 
-  u8  instr_ok = 0, skip_csect = 0, skip_next_label = 0,
-      skip_intel = 0, skip_app = 0, instrument_next = 0;
+  u8 instr_ok = 0, skip_csect = 0, skip_next_label = 0, skip_intel = 0,
+     skip_app = 0, instrument_next = 0;
 
 #ifdef __APPLE__
 
@@ -239,7 +244,9 @@ static void add_instrumentation(void) {
     inf = fopen(input_file, "r");
     if (!inf) PFATAL("Unable to read '%s'", input_file);
 
-  } else inf = stdin;
+  } else
+
+    inf = stdin;
 
   outfd = open(modified_file, O_WRONLY | O_EXCL | O_CREAT, 0600);
 
@@ -247,7 +254,7 @@ static void add_instrumentation(void) {
 
   outf = fdopen(outfd, "w");
 
-  if (!outf) PFATAL("fdopen() failed");  
+  if (!outf) PFATAL("fdopen() failed");
 
   while (fgets(line, MAX_LINE, inf)) {
 
@@ -284,22 +291,26 @@ static void add_instrumentation(void) {
          around them, so we use that as a signal. */
 
       if (!clang_mode && instr_ok && !strncmp(line + 2, "p2align ", 8) &&
-          isdigit(line[10]) && line[11] == '\n') skip_next_label = 1;
+          isdigit(line[10]) && line[11] == '\n')
+        skip_next_label = 1;
 
       if (!strncmp(line + 2, "text\n", 5) ||
           !strncmp(line + 2, "section\t.text", 13) ||
           !strncmp(line + 2, "section\t__TEXT,__text", 21) ||
           !strncmp(line + 2, "section __TEXT,__text", 21)) {
+
         instr_ok = 1;
-        continue; 
+        continue;
+
       }
 
       if (!strncmp(line + 2, "section\t", 8) ||
-          !strncmp(line + 2, "section ", 8) ||
-          !strncmp(line + 2, "bss\n", 4) ||
+          !strncmp(line + 2, "section ", 8) || !strncmp(line + 2, "bss\n", 4) ||
           !strncmp(line + 2, "data\n", 5)) {
+
         instr_ok = 0;
         continue;
+
       }
 
     }
@@ -354,8 +365,9 @@ static void add_instrumentation(void) {
 
      */
 
-    if (skip_intel || skip_app || skip_csect || !instr_ok ||
-        line[0] == '#' || line[0] == ' ') continue;
+    if (skip_intel || skip_app || skip_csect || !instr_ok || line[0] == '#' ||
+        line[0] == ' ')
+      continue;
 
     /* Conditional branch instruction (jnz, etc). We append the instrumentation
        right after the branch (to instrument the not-taken path) and at the
@@ -404,15 +416,16 @@ static void add_instrumentation(void) {
 
         /* Apple: L<num> / LBB<num> */
 
-        if ((isdigit(line[1]) || (clang_mode && !strncmp(line, "LBB", 3)))
-            && R(100) < inst_ratio) {
+        if ((isdigit(line[1]) || (clang_mode && !strncmp(line, "LBB", 3))) &&
+            R(100) < inst_ratio) {
 
 #else
 
         /* Apple: .L<num> / .LBB<num> */
 
-        if ((isdigit(line[2]) || (clang_mode && !strncmp(line + 1, "LBB", 3)))
-            && R(100) < inst_ratio) {
+        if ((isdigit(line[2]) ||
+             (clang_mode && !strncmp(line + 1, "LBB", 3))) &&
+            R(100) < inst_ratio) {
 
 #endif /* __APPLE__ */
 
@@ -427,7 +440,10 @@ static void add_instrumentation(void) {
              .Lfunc_begin0-style exception handling calculations (a problem on
              MacOS X). */
 
-          if (!skip_next_label) instrument_next = 1; else skip_next_label = 0;
+          if (!skip_next_label)
+            instrument_next = 1;
+          else
+            skip_next_label = 0;
 
         }
 
@@ -436,34 +452,34 @@ static void add_instrumentation(void) {
         /* Function label (always instrumented, deferred mode). */
 
         instrument_next = 1;
-    
+
       }
 
     }
 
   }
 
-  if (ins_lines)
-    fputs(use_64bit ? main_payload_64 : main_payload_32, outf);
+  if (ins_lines) fputs(use_64bit ? main_payload_64 : main_payload_32, outf);
 
   if (input_file) fclose(inf);
   fclose(outf);
 
   if (!be_quiet) {
 
-    if (!ins_lines) WARNF("No instrumentation targets found%s.",
-                          pass_thru ? " (pass-thru mode)" : "");
-    else OKF("Instrumented %u locations (%s-bit, %s mode, ratio %u%%).",
-             ins_lines, use_64bit ? "64" : "32",
-             getenv("AFL_HARDEN") ? "hardened" : 
-             (sanitizer ? "ASAN/MSAN" : "non-hardened"),
-             inst_ratio);
- 
+    if (!ins_lines)
+      WARNF("No instrumentation targets found%s.",
+            pass_thru ? " (pass-thru mode)" : "");
+    else
+      OKF("Instrumented %u locations (%s-bit, %s mode, ratio %u%%).", ins_lines,
+          use_64bit ? "64" : "32",
+          getenv("AFL_HARDEN") ? "hardened"
+                               : (sanitizer ? "ASAN/MSAN" : "non-hardened"),
+          inst_ratio);
+
   }
 
 }
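
For orientation, the two instrumentation sites this function looks for
boil down to cheap string tests on each assembly line: a tab-indented
conditional branch (anything starting with 'j' other than 'jmp'), and the
block labels handled above. The branch case, condensed from the body of
this loop (trampoline_fmt_64/32 and R() come from afl-as.h):

  if (line[0] == '\t' && line[1] == 'j' && line[2] != 'm' &&
      R(100) < inst_ratio) {

    fprintf(outf, use_64bit ? trampoline_fmt_64 : trampoline_fmt_32,
            R(MAP_SIZE));
    ins_lines++;

  }
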
 
-
 /* Main entry point */
 
 int main(int argc, char** argv) {
@@ -473,7 +489,7 @@ int main(int argc, char** argv) {
   int status;
   u8* inst_ratio_str = getenv("AFL_INST_RATIO");
 
-  struct timeval tv;
+  struct timeval  tv;
   struct timezone tz;
 
   clang_mode = !!getenv(CLANG_ENV_VAR);
@@ -481,19 +497,26 @@ int main(int argc, char** argv) {
   if (isatty(2) && !getenv("AFL_QUIET")) {
 
     SAYF(cCYA "afl-as" VERSION cRST " by <lcamtuf@google.com>\n");
- 
-  } else be_quiet = 1;
+
+  } else
+
+    be_quiet = 1;
 
   if (argc < 2) {
 
-    SAYF("\n"
-         "This is a helper application for afl-fuzz. It is a wrapper around GNU 'as',\n"
-         "executed by the toolchain whenever using afl-gcc or afl-clang. You probably\n"
-         "don't want to run this program directly.\n\n"
+    SAYF(
+        "\n"
+        "This is a helper application for afl-fuzz. It is a wrapper around GNU "
+        "'as',\n"
+        "executed by the toolchain whenever using afl-gcc or afl-clang. You "
+        "probably\n"
+        "don't want to run this program directly.\n\n"
 
-         "Rarely, when dealing with extremely complex projects, it may be advisable to\n"
-         "set AFL_INST_RATIO to a value less than 100 in order to reduce the odds of\n"
-         "instrumenting every discovered branch.\n\n");
+        "Rarely, when dealing with extremely complex projects, it may be "
+        "advisable to\n"
+        "set AFL_INST_RATIO to a value less than 100 in order to reduce the "
+        "odds of\n"
+        "instrumenting every discovered branch.\n\n");
 
     exit(1);
 
@@ -509,7 +532,7 @@ int main(int argc, char** argv) {
 
   if (inst_ratio_str) {
 
-    if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || inst_ratio > 100) 
+    if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || inst_ratio > 100)
       FATAL("Bad value of AFL_INST_RATIO (must be between 0 and 100)");
 
   }
@@ -524,9 +547,10 @@ int main(int argc, char** argv) {
      that... */
 
   if (getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) {
+
     sanitizer = 1;
-    if (!getenv("AFL_INST_RATIO"))
-      inst_ratio /= 3;
+    if (!getenv("AFL_INST_RATIO")) inst_ratio /= 3;
+
   }
 
   if (!just_version) add_instrumentation();
diff --git a/afl-common.c b/src/afl-common.c
index 1c5e5bfe..9f1f45eb 100644
--- a/afl-common.c
+++ b/src/afl-common.c
@@ -13,22 +13,29 @@
 
 /* Detect @@ in args. */
 #ifndef __glibc__
-#include <unistd.h>
+#  include <unistd.h>
 #endif
+
 void detect_file_args(char** argv, u8* prog_in) {
 
   u32 i = 0;
 #ifdef __GLIBC__
-  u8* cwd = getcwd(NULL, 0); /* non portable glibc extension */
+  u8* cwd = getcwd(NULL, 0);                /* non-portable glibc extension */
 #else
-  u8* cwd;
-  char *buf;
-  long size = pathconf(".", _PC_PATH_MAX);
-  if ((buf = (char *)malloc((size_t)size)) != NULL) {
-    cwd = getcwd(buf, (size_t)size); /* portable version */
+  u8*   cwd;
+  char* buf;
+  long  size = pathconf(".", _PC_PATH_MAX);
+  if ((buf = (char*)malloc((size_t)size)) != NULL) {
+
+    cwd = getcwd(buf, (size_t)size);                    /* portable version */
+
   } else {
+
     PFATAL("getcwd() failed");
+    cwd = 0;                                          /* for dumb compilers */
+
   }
+
 #endif
 
   if (!cwd) PFATAL("getcwd() failed");
@@ -45,8 +52,10 @@ void detect_file_args(char** argv, u8* prog_in) {
 
       /* Be sure that we're always using fully-qualified paths. */
 
-      if (prog_in[0] == '/') aa_subst = prog_in;
-      else aa_subst = alloc_printf("%s/%s", cwd, prog_in);
+      if (prog_in[0] == '/')
+        aa_subst = prog_in;
+      else
+        aa_subst = alloc_printf("%s/%s", cwd, prog_in);
 
       /* Construct a replacement argv value. */
 
@@ -63,7 +72,7 @@ void detect_file_args(char** argv, u8* prog_in) {
 
   }
 
-  free(cwd); /* not tracked */
+  free(cwd);                                                 /* not tracked */
 
 }
 
diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c
new file mode 100644
index 00000000..152ae802
--- /dev/null
+++ b/src/afl-forkserver.c
@@ -0,0 +1,430 @@
+#include "config.h"
+#include "types.h"
+#include "debug.h"
+#include "forkserver.h"
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <errno.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <sys/wait.h>
+#include <sys/resource.h>
+
+/* A program that includes afl-forkserver needs to define these. */
+extern u8  uses_asan;
+extern u8 *trace_bits;
+extern s32 forksrv_pid, child_pid, fsrv_ctl_fd, fsrv_st_fd;
+extern s32 out_fd, out_dir_fd, dev_urandom_fd,
+    dev_null_fd;                                /* initialize these with -1 */
+extern u32   exec_tmout;
+extern u64   mem_limit;
+extern u8 *  out_file, *target_path, *doc_path;
+extern FILE *plot_file;
+
+/* Needed internally; it can also be defined and read as extern in the
+   main source. */
+u8 child_timed_out;
+
+/* Describe integer as memory size. */
+
+u8 *forkserver_DMS(u64 val) {
+
+  static u8 tmp[12][16];
+  static u8 cur;
+
+#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast)    \
+  do {                                                    \
+                                                          \
+    if (val < (_divisor) * (_limit_mult)) {               \
+                                                          \
+      sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \
+      return tmp[cur];                                    \
+                                                          \
+    }                                                     \
+                                                          \
+  } while (0)
+
+  cur = (cur + 1) % 12;
+
+  /* 0-9999 */
+  CHK_FORMAT(1, 10000, "%llu B", u64);
+
+  /* 10.0k - 99.9k */
+  CHK_FORMAT(1024, 99.95, "%0.01f kB", double);
+
+  /* 100k - 999k */
+  CHK_FORMAT(1024, 1000, "%llu kB", u64);
+
+  /* 1.00M - 9.99M */
+  CHK_FORMAT(1024 * 1024, 9.995, "%0.02f MB", double);
+
+  /* 10.0M - 99.9M */
+  CHK_FORMAT(1024 * 1024, 99.95, "%0.01f MB", double);
+
+  /* 100M - 999M */
+  CHK_FORMAT(1024 * 1024, 1000, "%llu MB", u64);
+
+  /* 1.00G - 9.99G */
+  CHK_FORMAT(1024LL * 1024 * 1024, 9.995, "%0.02f GB", double);
+
+  /* 10.0G - 99.9G */
+  CHK_FORMAT(1024LL * 1024 * 1024, 99.95, "%0.01f GB", double);
+
+  /* 100G - 999G */
+  CHK_FORMAT(1024LL * 1024 * 1024, 1000, "%llu GB", u64);
+
+  /* 1.00T - 9.99T */
+  CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 9.995, "%0.02f TB", double);
+
+  /* 10.0T - 99.9T */
+  CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 99.95, "%0.01f TB", double);
+
+#undef CHK_FORMAT
+
+  /* 100T+ */
+  strcpy(tmp[cur], "infty");
+  return tmp[cur];
+
+}
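+
+/* Sample conversions, for illustration only (these values are not in the
+   original source):
+
+     forkserver_DMS(5000)        -> "5000 B"   (below the 10000 B cutoff)
+     forkserver_DMS(50000)       -> "48.8 kB"  (50000 / 1024.0 = 48.83)
+     forkserver_DMS(50 << 20)    -> "50.0 MB"
+     forkserver_DMS(200LL << 40) -> "infty"    (past the ~100 TB ceiling)
+
+   The rotating tmp[12][16] buffer keeps up to 12 results valid at once,
+   so several calls can feed a single printf(). */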
+
+/* The timeout handler. */
+
+void handle_timeout(int sig) {
+
+  if (child_pid > 0) {
+
+    child_timed_out = 1;
+    kill(child_pid, SIGKILL);
+
+  } else if (child_pid == -1 && forksrv_pid > 0) {
+
+    child_timed_out = 1;
+    kill(forksrv_pid, SIGKILL);
+
+  }
+
+}
+
+/* Spin up fork server (instrumented mode only). The idea is explained here:
+
+   http://lcamtuf.blogspot.com/2014/10/fuzzing-binaries-without-execve.html
+
+   In essence, the instrumentation allows us to skip execve(), and just keep
+   cloning a stopped child. So, we just execute once, and then send commands
+   through a pipe. The other part of this logic is in afl-as.h / llvm_mode */
+
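+/* A simplified sketch of the injected half of this protocol - the real
+   implementation lives in afl-as.h / llvm_mode and differs in details:
+
+     static void forkserver_loop(void) {
+
+       u8 tmp[4];
+       write(FORKSRV_FD + 1, tmp, 4);          // "hello" - we are alive
+
+       while (read(FORKSRV_FD, tmp, 4) == 4) { // wait for a run command
+
+         pid_t child = fork();
+         if (!child) return;                   // child resumes the target
+
+         write(FORKSRV_FD + 1, &child, 4);     // report the child pid
+         int status;
+         waitpid(child, &status, 0);
+         write(FORKSRV_FD + 1, &status, 4);    // report the exit status
+
+       }
+
+     }
+
+   init_forkserver() below implements the fuzzer-side half of this
+   handshake. */
+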
+void init_forkserver(char **argv) {
+
+  static struct itimerval it;
+  int                     st_pipe[2], ctl_pipe[2];
+  int                     status;
+  s32                     rlen;
+
+  ACTF("Spinning up the fork server...");
+
+  if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed");
+
+  child_timed_out = 0;
+  forksrv_pid = fork();
+
+  if (forksrv_pid < 0) PFATAL("fork() failed");
+
+  if (!forksrv_pid) {
+
+    /* CHILD PROCESS */
+
+    struct rlimit r;
+
+    /* Umpf. On OpenBSD, the default fd limit for root users is set to
+       soft 128. Let's try to fix that... */
+
+    if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) {
+
+      r.rlim_cur = FORKSRV_FD + 2;
+      setrlimit(RLIMIT_NOFILE, &r);                        /* Ignore errors */
+
+    }
+
+    if (mem_limit) {
+
+      r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
+
+#ifdef RLIMIT_AS
+      setrlimit(RLIMIT_AS, &r);                            /* Ignore errors */
+#else
+      /* This takes care of OpenBSD, which doesn't have RLIMIT_AS, but
+         according to reliable sources, RLIMIT_DATA covers anonymous
+         maps - so we should be getting good protection against OOM bugs. */
+
+      setrlimit(RLIMIT_DATA, &r);                          /* Ignore errors */
+#endif /* ^RLIMIT_AS */
+
+    }
+
+    /* Dumping cores is slow and can lead to anomalies if SIGKILL is delivered
+       before the dump is complete. */
+
+    //    r.rlim_max = r.rlim_cur = 0;
+    //    setrlimit(RLIMIT_CORE, &r);                      /* Ignore errors */
+
+    /* Isolate the process and configure standard descriptors. If out_file is
+       specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
+
+    setsid();
+
+    if (!getenv("AFL_DEBUG_CHILD_OUTPUT")) {
+
+      dup2(dev_null_fd, 1);
+      dup2(dev_null_fd, 2);
+
+    }
+
+    if (out_file) {
+
+      dup2(dev_null_fd, 0);
+
+    } else {
+
+      dup2(out_fd, 0);
+      close(out_fd);
+
+    }
+
+    /* Set up control and status pipes, close the unneeded original fds. */
+
+    if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) PFATAL("dup2() failed");
+    if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) PFATAL("dup2() failed");
+
+    close(ctl_pipe[0]);
+    close(ctl_pipe[1]);
+    close(st_pipe[0]);
+    close(st_pipe[1]);
+
+    close(out_dir_fd);
+    close(dev_null_fd);
+    close(dev_urandom_fd);
+    close(plot_file == NULL ? -1 : fileno(plot_file));
+
+    /* This should improve performance a bit, since it stops the linker from
+       doing extra work post-fork(). */
+
+    if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0);
+
+    /* Set sane defaults for ASAN if nothing else specified. */
+
+    setenv("ASAN_OPTIONS",
+           "abort_on_error=1:"
+           "detect_leaks=0:"
+           "symbolize=0:"
+           "allocator_may_return_null=1",
+           0);
+
+    /* MSAN is tricky, because it doesn't support abort_on_error=1 at this
+       point. So, we do this in a very hacky way. */
+
+    setenv("MSAN_OPTIONS",
+           "exit_code=" STRINGIFY(MSAN_ERROR) ":"
+                                              "symbolize=0:"
+                                              "abort_on_error=1:"
+                                              "allocator_may_return_null=1:"
+                                              "msan_track_origins=0",
+           0);
+
+    execv(target_path, argv);
+
+    /* Use a distinctive bitmap signature to tell the parent about execv()
+       falling through. */
+
+    *(u32 *)trace_bits = EXEC_FAIL_SIG;
+    exit(0);
+
+  }
+
+  /* PARENT PROCESS */
+
+  /* Close the unneeded endpoints. */
+
+  close(ctl_pipe[0]);
+  close(st_pipe[1]);
+
+  fsrv_ctl_fd = ctl_pipe[1];
+  fsrv_st_fd = st_pipe[0];
+
+  /* Wait for the fork server to come up, but don't wait too long. */
+
+  if (exec_tmout) {
+
+    it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000);
+    it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;
+
+  }
+
+  setitimer(ITIMER_REAL, &it, NULL);
+
+  rlen = read(fsrv_st_fd, &status, 4);
+
+  it.it_value.tv_sec = 0;
+  it.it_value.tv_usec = 0;
+
+  setitimer(ITIMER_REAL, &it, NULL);
+
+  /* If we have a four-byte "hello" message from the server, we're all set.
+     Otherwise, try to figure out what went wrong. */
+
+  if (rlen == 4) {
+
+    OKF("All right - fork server is up.");
+    return;
+
+  }
+
+  if (child_timed_out)
+    FATAL("Timeout while initializing fork server (adjusting -t may help)");
+
+  if (waitpid(forksrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
+
+  if (WIFSIGNALED(status)) {
+
+    if (mem_limit && mem_limit < 500 && uses_asan) {
+
+      SAYF("\n" cLRD "[-] " cRST
+           "Whoops, the target binary crashed suddenly, "
+           "before receiving any input\n"
+           "    from the fuzzer! Since it seems to be built with ASAN and you "
+           "have a\n"
+           "    restrictive memory limit configured, this is expected; please "
+           "read\n"
+           "    %s/notes_for_asan.txt for help.\n",
+           doc_path);
+
+    } else if (!mem_limit) {
+
+      SAYF("\n" cLRD "[-] " cRST
+           "Whoops, the target binary crashed suddenly, "
+           "before receiving any input\n"
+           "    from the fuzzer! There are several probable explanations:\n\n"
+
+           "    - The binary is just buggy and explodes entirely on its own. "
+           "If so, you\n"
+           "      need to fix the underlying problem or find a better "
+           "replacement.\n\n"
+
+           MSG_FORK_ON_APPLE
+
+           "    - Less likely, there is a horrible bug in the fuzzer. If other "
+           "options\n"
+           "      fail, poke <afl-users@googlegroups.com> for troubleshooting "
+           "tips.\n");
+
+    } else {
+
+      SAYF("\n" cLRD "[-] " cRST
+           "Whoops, the target binary crashed suddenly, "
+           "before receiving any input\n"
+           "    from the fuzzer! There are several probable explanations:\n\n"
+
+           "    - The current memory limit (%s) is too restrictive, causing "
+           "the\n"
+           "      target to hit an OOM condition in the dynamic linker. Try "
+           "bumping up\n"
+           "      the limit with the -m setting in the command line. A simple "
+           "way to confirm\n"
+           "      this diagnosis would be:\n\n"
+
+           MSG_ULIMIT_USAGE
+           " /path/to/fuzzed_app )\n\n"
+
+           "      Tip: you can use http://jwilk.net/software/recidivm to "
+           "quickly\n"
+           "      estimate the required amount of virtual memory for the "
+           "binary.\n\n"
+
+           "    - The binary is just buggy and explodes entirely on its own. "
+           "If so, you\n"
+           "      need to fix the underlying problem or find a better "
+           "replacement.\n\n"
+
+           MSG_FORK_ON_APPLE
+
+           "    - Less likely, there is a horrible bug in the fuzzer. If other "
+           "options\n"
+           "      fail, poke <afl-users@googlegroups.com> for troubleshooting "
+           "tips.\n",
+           forkserver_DMS(mem_limit << 20), mem_limit - 1);
+
+    }
+
+    FATAL("Fork server crashed with signal %d", WTERMSIG(status));
+
+  }
+
+  if (*(u32 *)trace_bits == EXEC_FAIL_SIG)
+    FATAL("Unable to execute target application ('%s')", argv[0]);
+
+  if (mem_limit && mem_limit < 500 && uses_asan) {
+
+    SAYF("\n" cLRD "[-] " cRST
+         "Hmm, looks like the target binary terminated "
+         "before we could complete a\n"
+         "    handshake with the injected code. Since it seems to be built "
+         "with ASAN and\n"
+         "    you have a restrictive memory limit configured, this is "
+         "expected; please\n"
+         "    read %s/notes_for_asan.txt for help.\n",
+         doc_path);
+
+  } else if (!mem_limit) {
+
+    SAYF("\n" cLRD "[-] " cRST
+         "Hmm, looks like the target binary terminated "
+         "before we could complete a\n"
+         "    handshake with the injected code. Perhaps there is a horrible "
+         "bug in the\n"
+         "    fuzzer. Poke <afl-users@googlegroups.com> for troubleshooting "
+         "tips.\n");
+
+  } else {
+
+    SAYF(
+        "\n" cLRD "[-] " cRST
+        "Hmm, looks like the target binary terminated "
+        "before we could complete a\n"
+        "    handshake with the injected code. There are %s probable "
+        "explanations:\n\n"
+
+        "%s"
+        "    - The current memory limit (%s) is too restrictive, causing an "
+        "OOM\n"
+        "      fault in the dynamic linker. This can be fixed with the -m "
+        "option. A\n"
+        "      simple way to confirm the diagnosis may be:\n\n"
+
+        MSG_ULIMIT_USAGE
+        " /path/to/fuzzed_app )\n\n"
+
+        "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
+        "      estimate the required amount of virtual memory for the "
+        "binary.\n\n"
+
+        "    - Less likely, there is a horrible bug in the fuzzer. If other "
+        "options\n"
+        "      fail, poke <afl-users@googlegroups.com> for troubleshooting "
+        "tips.\n",
+        getenv(DEFER_ENV_VAR) ? "three" : "two",
+        getenv(DEFER_ENV_VAR)
+            ? "    - You are using deferred forkserver, but __AFL_INIT() is "
+              "never\n"
+              "      reached before the program terminates.\n\n"
+            : "",
+        forkserver_DMS(mem_limit << 20), mem_limit - 1);
+
+  }
+
+  FATAL("Fork server handshake failed");
+
+}
+
diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c
new file mode 100644
index 00000000..be187fff
--- /dev/null
+++ b/src/afl-fuzz-bitmap.c
@@ -0,0 +1,708 @@
+/*
+   american fuzzy lop - fuzzer code
+   --------------------------------
+
+   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+   Forkserver design by Jann Horn <jannhorn@googlemail.com>
+
+   Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This is the real deal: the program takes an instrumented binary and
+   attempts a variety of basic fuzzing tricks, paying close attention to
+   how they affect the execution path.
+
+ */
+
+#include "afl-fuzz.h"
+
+/* Write bitmap to file. The bitmap is useful mostly for the secret
+   -B option, to focus a separate fuzzing session on a particular
+   interesting input without rediscovering all the others. */
+
+void write_bitmap(void) {
+
+  u8* fname;
+  s32 fd;
+
+  if (!bitmap_changed) return;
+  bitmap_changed = 0;
+
+  fname = alloc_printf("%s/fuzz_bitmap", out_dir);
+  fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);
+
+  if (fd < 0) PFATAL("Unable to open '%s'", fname);
+
+  ck_write(fd, virgin_bits, MAP_SIZE, fname);
+
+  close(fd);
+  ck_free(fname);
+
+}
+
+/* Read bitmap from file. This is for the -B option again. */
+
+void read_bitmap(u8* fname) {
+
+  s32 fd = open(fname, O_RDONLY);
+
+  if (fd < 0) PFATAL("Unable to open '%s'", fname);
+
+  ck_read(fd, virgin_bits, MAP_SIZE, fname);
+
+  close(fd);
+
+}
+
+/* Check if the current execution path brings anything new to the table.
+   Update virgin bits to reflect the finds. Returns 1 if the only change is
+   the hit-count for a particular tuple; 2 if there are new tuples seen.
+   Updates the map, so subsequent calls will always return 0.
+
+   This function is called after every exec() on a fairly large buffer, so
+   it needs to be fast. We do this in 32-bit and 64-bit flavors. */
+
+u8 has_new_bits(u8* virgin_map) {
+
+#ifdef __x86_64__
+
+  u64* current = (u64*)trace_bits;
+  u64* virgin = (u64*)virgin_map;
+
+  u32 i = (MAP_SIZE >> 3);
+
+#else
+
+  u32* current = (u32*)trace_bits;
+  u32* virgin = (u32*)virgin_map;
+
+  u32 i = (MAP_SIZE >> 2);
+
+#endif /* ^__x86_64__ */
+
+  u8 ret = 0;
+
+  while (i--) {
+
+    /* Optimize for (*current & *virgin) == 0 - i.e., no bits in the current
+       bitmap that have not already been cleared from the virgin map - since
+       this will almost always be the case. */
+
+    if (unlikely(*current) && unlikely(*current & *virgin)) {
+
+      if (likely(ret < 2)) {
+
+        u8* cur = (u8*)current;
+        u8* vir = (u8*)virgin;
+
+        /* Looks like we have not found any new bytes yet; see if any non-zero
+           bytes in current[] are pristine in virgin[]. */
+
+#ifdef __x86_64__
+
+        if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
+            (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) ||
+            (cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) ||
+            (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff))
+          ret = 2;
+        else
+          ret = 1;
+
+#else
+
+        if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
+            (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff))
+          ret = 2;
+        else
+          ret = 1;
+
+#endif /* ^__x86_64__ */
+
+      }
+
+      *virgin &= ~*current;
+
+    }
+
+    ++current;
+    ++virgin;
+
+  }
+
+  if (ret && virgin_map == virgin_bits) bitmap_changed = 1;
+
+  return ret;
+
+}
+
+/* Count the number of bits set in the provided bitmap. Used for the status
+   screen several times every second, does not have to be fast. */
+
+u32 count_bits(u8* mem) {
+
+  u32* ptr = (u32*)mem;
+  u32  i = (MAP_SIZE >> 2);
+  u32  ret = 0;
+
+  while (i--) {
+
+    u32 v = *(ptr++);
+
+    /* This gets called on the inverse, virgin bitmap; optimize for sparse
+       data. */
+
+    if (v == 0xffffffff) {
+
+      ret += 32;
+      continue;
+
+    }
+
+    v -= ((v >> 1) & 0x55555555);
+    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
+    ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24;
+
+  }
+
+  return ret;
+
+}
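+
+/* The three lines above are the classic SWAR popcount. Worked example
+   (illustration only), for v = 0x0000000F:
+
+     v -= (v >> 1) & 0x55555555;                      // 0xF - 0x5 = 0xA
+     v = (v & 0x33333333) + ((v >> 2) & 0x33333333);  // 0x2 + 0x2 = 0x4
+     (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24;      // yields 4
+
+   Each step widens the per-field bit counts (pairs, then nibbles); the
+   final multiply by 0x01010101 sums all byte counts into the top byte. */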
+
+#define FF(_b) (0xff << ((_b) << 3))
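+
+/* For example, FF(0) == 0x000000ff and FF(2) == 0x00ff0000: FF(b) masks
+   byte b of a u32. */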
+
+/* Count the number of bytes set in the bitmap. Called fairly sporadically,
+   mostly to update the status screen or calibrate and examine confirmed
+   new paths. */
+
+u32 count_bytes(u8* mem) {
+
+  u32* ptr = (u32*)mem;
+  u32  i = (MAP_SIZE >> 2);
+  u32  ret = 0;
+
+  while (i--) {
+
+    u32 v = *(ptr++);
+
+    if (!v) continue;
+    if (v & FF(0)) ++ret;
+    if (v & FF(1)) ++ret;
+    if (v & FF(2)) ++ret;
+    if (v & FF(3)) ++ret;
+
+  }
+
+  return ret;
+
+}
+
+/* Count the number of non-255 bytes set in the bitmap. Used strictly for the
+   status screen, several calls per second or so. */
+
+u32 count_non_255_bytes(u8* mem) {
+
+  u32* ptr = (u32*)mem;
+  u32  i = (MAP_SIZE >> 2);
+  u32  ret = 0;
+
+  while (i--) {
+
+    u32 v = *(ptr++);
+
+    /* This is called on the virgin bitmap, so optimize for the most likely
+       case. */
+
+    if (v == 0xffffffff) continue;
+    if ((v & FF(0)) != FF(0)) ++ret;
+    if ((v & FF(1)) != FF(1)) ++ret;
+    if ((v & FF(2)) != FF(2)) ++ret;
+    if ((v & FF(3)) != FF(3)) ++ret;
+
+  }
+
+  return ret;
+
+}
+
+/* Destructively simplify trace by eliminating hit count information
+   and replacing it with 0x80 or 0x01 depending on whether the tuple
+   is hit or not. Called on every new crash or timeout, should be
+   reasonably fast. */
+
+const u8 simplify_lookup[256] = {
+
+    [0] = 1, [1 ... 255] = 128
+
+};
+
+#ifdef __x86_64__
+
+void simplify_trace(u64* mem) {
+
+  u32 i = MAP_SIZE >> 3;
+
+  while (i--) {
+
+    /* Optimize for sparse bitmaps. */
+
+    if (unlikely(*mem)) {
+
+      u8* mem8 = (u8*)mem;
+
+      mem8[0] = simplify_lookup[mem8[0]];
+      mem8[1] = simplify_lookup[mem8[1]];
+      mem8[2] = simplify_lookup[mem8[2]];
+      mem8[3] = simplify_lookup[mem8[3]];
+      mem8[4] = simplify_lookup[mem8[4]];
+      mem8[5] = simplify_lookup[mem8[5]];
+      mem8[6] = simplify_lookup[mem8[6]];
+      mem8[7] = simplify_lookup[mem8[7]];
+
+    } else
+
+      *mem = 0x0101010101010101ULL;
+
+    ++mem;
+
+  }
+
+}
+
+#else
+
+void simplify_trace(u32* mem) {
+
+  u32 i = MAP_SIZE >> 2;
+
+  while (i--) {
+
+    /* Optimize for sparse bitmaps. */
+
+    if (unlikely(*mem)) {
+
+      u8* mem8 = (u8*)mem;
+
+      mem8[0] = simplify_lookup[mem8[0]];
+      mem8[1] = simplify_lookup[mem8[1]];
+      mem8[2] = simplify_lookup[mem8[2]];
+      mem8[3] = simplify_lookup[mem8[3]];
+
+    } else
+
+      *mem = 0x01010101;
+
+    ++mem;
+
+  }
+
+}
+
+#endif /* ^__x86_64__ */
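+
+/* Effect of simplify_trace, by example (made-up values): a map word with
+   hit counts { 03, 00, 41, 00, 00, 00, 00, 01 } becomes
+   { 80, 01, 80, 01, 01, 01, 01, 80 } - every tuple collapses to "hit"
+   (0x80) or "not hit" (0x01), so traces that differ only in how often
+   they hit the same edges now hash identically. */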
+
+/* Destructively classify execution counts in a trace. This is used as a
+   preprocessing step for any newly acquired traces. Called on every exec,
+   must be fast. */
+
+static const u8 count_class_lookup8[256] = {
+
+    [0] = 0,
+    [1] = 1,
+    [2] = 2,
+    [3] = 4,
+    [4 ... 7] = 8,
+    [8 ... 15] = 16,
+    [16 ... 31] = 32,
+    [32 ... 127] = 64,
+    [128 ... 255] = 128
+
+};
+
+static u16 count_class_lookup16[65536];
+
+void init_count_class16(void) {
+
+  u32 b1, b2;
+
+  for (b1 = 0; b1 < 256; b1++)
+    for (b2 = 0; b2 < 256; b2++)
+      count_class_lookup16[(b1 << 8) + b2] =
+          (count_class_lookup8[b1] << 8) | count_class_lookup8[b2];
+
+}
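+
+/* Illustration: a tuple executed 5 times lands in the 4 ... 7 bucket and
+   is stored as 8; executed 200 times, it becomes 128. The 16-bit table
+   built here just precomputes the 8-bit lookup for every byte pair, so
+   classify_counts() below translates two counters per table access. */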
+
+#ifdef __x86_64__
+
+void classify_counts(u64* mem) {
+
+  u32 i = MAP_SIZE >> 3;
+
+  while (i--) {
+
+    /* Optimize for sparse bitmaps. */
+
+    if (unlikely(*mem)) {
+
+      u16* mem16 = (u16*)mem;
+
+      mem16[0] = count_class_lookup16[mem16[0]];
+      mem16[1] = count_class_lookup16[mem16[1]];
+      mem16[2] = count_class_lookup16[mem16[2]];
+      mem16[3] = count_class_lookup16[mem16[3]];
+
+    }
+
+    ++mem;
+
+  }
+
+}
+
+#else
+
+void classify_counts(u32* mem) {
+
+  u32 i = MAP_SIZE >> 2;
+
+  while (i--) {
+
+    /* Optimize for sparse bitmaps. */
+
+    if (unlikely(*mem)) {
+
+      u16* mem16 = (u16*)mem;
+
+      mem16[0] = count_class_lookup16[mem16[0]];
+      mem16[1] = count_class_lookup16[mem16[1]];
+
+    }
+
+    ++mem;
+
+  }
+
+}
+
+#endif /* ^__x86_64__ */
+
+/* Compact trace bytes into a smaller bitmap. We effectively just drop the
+   count information here. This is called only sporadically, for some
+   new paths. */
+
+void minimize_bits(u8* dst, u8* src) {
+
+  u32 i = 0;
+
+  while (i < MAP_SIZE) {
+
+    if (*(src++)) dst[i >> 3] |= 1 << (i & 7);
+    ++i;
+
+  }
+
+}
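+
+/* For instance (hypothetical 8-byte map): src = { 0, 3, 0, 0, 1, 0, 0, 9 }
+   sets bits 1, 4 and 7, so dst[0] becomes 0x92 (binary 10010010). */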
+
+#ifndef SIMPLE_FILES
+
+/* Construct a file name for a new test case, capturing the operation
+   that led to its discovery. Uses a static buffer. */
+
+u8* describe_op(u8 hnb) {
+
+  static u8 ret[256];
+
+  if (syncing_party) {
+
+    sprintf(ret, "sync:%s,src:%06u", syncing_party, syncing_case);
+
+  } else {
+
+    sprintf(ret, "src:%06u", current_entry);
+
+    sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - start_time);
+
+    if (splicing_with >= 0) sprintf(ret + strlen(ret), "+%06d", splicing_with);
+
+    sprintf(ret + strlen(ret), ",op:%s", stage_short);
+
+    if (stage_cur_byte >= 0) {
+
+      sprintf(ret + strlen(ret), ",pos:%d", stage_cur_byte);
+
+      if (stage_val_type != STAGE_VAL_NONE)
+        sprintf(ret + strlen(ret), ",val:%s%+d",
+                (stage_val_type == STAGE_VAL_BE) ? "be:" : "", stage_cur_val);
+
+    } else
+
+      sprintf(ret + strlen(ret), ",rep:%d", stage_cur_val);
+
+  }
+
+  if (hnb == 2) strcat(ret, ",+cov");
+
+  return ret;
+
+}
+
+#endif /* !SIMPLE_FILES */
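+
+/* A produced name might look like this (values are made up):
+
+     "src:000042,time:35218,op:havoc,rep:8,+cov"
+
+   which the callers in save_if_interesting() prefix with an id, giving
+   queue entries such as
+   "id:000107,src:000042,time:35218,op:havoc,rep:8,+cov". */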
+
+/* Write a message accompanying the crash directory :-) */
+
+static void write_crash_readme(void) {
+
+  u8*   fn = alloc_printf("%s/crashes/README.txt", out_dir);
+  s32   fd;
+  FILE* f;
+
+  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
+  ck_free(fn);
+
+  /* Do not die on errors here - that would be impolite. */
+
+  if (fd < 0) return;
+
+  f = fdopen(fd, "w");
+
+  if (!f) {
+
+    close(fd);
+    return;
+
+  }
+
+  fprintf(
+      f,
+      "Command line used to find this crash:\n\n"
+
+      "%s\n\n"
+
+      "If you can't reproduce a bug outside of afl-fuzz, be sure to set the "
+      "same\n"
+      "memory limit. The limit used for this fuzzing session was %s.\n\n"
+
+      "Need a tool to minimize test cases before investigating the crashes or "
+      "sending\n"
+      "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n"
+
+      "Found any cool bugs in open-source tools using afl-fuzz? If yes, please "
+      "drop\n"
+      "a mail at <afl-users@googlegroups.com> once the issues are fixed.\n\n"
+
+      "  https://github.com/vanhauser-thc/AFLplusplus\n\n",
+
+      orig_cmdline, DMS(mem_limit << 20));                 /* ignore errors */
+
+  fclose(f);
+
+}
+
+/* Check if the result of an execve() during routine fuzzing is interesting,
+   save or queue the input test case for further analysis if so. Returns 1 if
+   entry is saved, 0 otherwise. */
+
+u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
+
+  if (len == 0) return 0;
+
+  u8* fn = "";
+  u8  hnb;
+  s32 fd;
+  u8  keeping = 0, res;
+
+  /* Update path frequency. */
+  u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+
+  struct queue_entry* q = queue;
+  while (q) {
+
+    if (q->exec_cksum == cksum) q->n_fuzz = q->n_fuzz + 1;
+
+    q = q->next;
+
+  }
+
+  if (fault == crash_mode) {
+
+    /* Keep only if there are new bits in the map, add to queue for
+       future fuzzing, etc. */
+
+    if (!(hnb = has_new_bits(virgin_bits))) {
+
+      if (crash_mode) ++total_crashes;
+      return 0;
+
+    }
+
+#ifndef SIMPLE_FILES
+
+    fn = alloc_printf("%s/queue/id:%06u,%s", out_dir, queued_paths,
+                      describe_op(hnb));
+
+#else
+
+    fn = alloc_printf("%s/queue/id_%06u", out_dir, queued_paths);
+
+#endif /* ^!SIMPLE_FILES */
+
+    add_to_queue(fn, len, 0);
+
+    if (hnb == 2) {
+
+      queue_top->has_new_cov = 1;
+      ++queued_with_cov;
+
+    }
+
+    queue_top->exec_cksum = cksum;
+
+    /* Try to calibrate inline; this also calls update_bitmap_score() when
+       successful. */
+
+    res = calibrate_case(argv, queue_top, mem, queue_cycle - 1, 0);
+
+    if (res == FAULT_ERROR) FATAL("Unable to execute target application");
+
+    fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
+    if (fd < 0) PFATAL("Unable to create '%s'", fn);
+    ck_write(fd, mem, len, fn);
+    close(fd);
+
+    keeping = 1;
+
+  }
+
+  switch (fault) {
+
+    case FAULT_TMOUT:
+
+      /* Timeouts are not very interesting, but we're still obliged to keep
+         a handful of samples. We use the presence of new bits in the
+         hang-specific bitmap as a signal of uniqueness. In "dumb" mode, we
+         just keep everything. */
+
+      ++total_tmouts;
+
+      if (unique_hangs >= KEEP_UNIQUE_HANG) return keeping;
+
+      if (!dumb_mode) {
+
+#ifdef __x86_64__
+        simplify_trace((u64*)trace_bits);
+#else
+        simplify_trace((u32*)trace_bits);
+#endif /* ^__x86_64__ */
+
+        if (!has_new_bits(virgin_tmout)) return keeping;
+
+      }
+
+      ++unique_tmouts;
+
+      /* Before saving, we make sure that it's a genuine hang by re-running
+         the target with a more generous timeout (unless the default timeout
+         is already generous). */
+
+      if (exec_tmout < hang_tmout) {
+
+        u8 new_fault;
+        write_to_testcase(mem, len);
+        new_fault = run_target(argv, hang_tmout);
+
+        /* A corner case that one user reported bumping into: increasing the
+           timeout actually uncovers a crash. Make sure we don't discard it if
+           so. */
+
+        if (!stop_soon && new_fault == FAULT_CRASH) goto keep_as_crash;
+
+        if (stop_soon || new_fault != FAULT_TMOUT) return keeping;
+
+      }
+
+#ifndef SIMPLE_FILES
+
+      fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir, unique_hangs,
+                        describe_op(0));
+
+#else
+
+      fn = alloc_printf("%s/hangs/id_%06llu", out_dir, unique_hangs);
+
+#endif /* ^!SIMPLE_FILES */
+
+      ++unique_hangs;
+
+      last_hang_time = get_cur_time();
+
+      break;
+
+    case FAULT_CRASH:
+
+    keep_as_crash:
+
+      /* This is handled in a manner roughly similar to timeouts,
+         except for slightly different limits and no need to re-run test
+         cases. */
+
+      ++total_crashes;
+
+      if (unique_crashes >= KEEP_UNIQUE_CRASH) return keeping;
+
+      if (!dumb_mode) {
+
+#ifdef __x86_64__
+        simplify_trace((u64*)trace_bits);
+#else
+        simplify_trace((u32*)trace_bits);
+#endif /* ^__x86_64__ */
+
+        if (!has_new_bits(virgin_crash)) return keeping;
+
+      }
+
+      if (!unique_crashes) write_crash_readme();
+
+#ifndef SIMPLE_FILES
+
+      fn = alloc_printf("%s/crashes/id:%06llu,sig:%02u,%s", out_dir,
+                        unique_crashes, kill_signal, describe_op(0));
+
+#else
+
+      fn = alloc_printf("%s/crashes/id_%06llu_%02u", out_dir, unique_crashes,
+                        kill_signal);
+
+#endif /* ^!SIMPLE_FILES */
+
+      ++unique_crashes;
+
+      last_crash_time = get_cur_time();
+      last_crash_execs = total_execs;
+
+      break;
+
+    case FAULT_ERROR: FATAL("Unable to execute target application");
+
+    default: return keeping;
+
+  }
+
+  /* If we're here, we apparently want to save the crash or hang
+     test case, too. */
+
+  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
+  if (fd < 0) PFATAL("Unable to create '%s'", fn);
+  ck_write(fd, mem, len, fn);
+  close(fd);
+
+  ck_free(fn);
+
+  return keeping;
+
+}
+
diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c
new file mode 100644
index 00000000..f43c86f4
--- /dev/null
+++ b/src/afl-fuzz-extras.c
@@ -0,0 +1,485 @@
+/*
+   american fuzzy lop - fuzzer code
+   --------------------------------
+
+   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+   Forkserver design by Jann Horn <jannhorn@googlemail.com>
+
+   Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This is the real deal: the program takes an instrumented binary and
+   attempts a variety of basic fuzzing tricks, paying close attention to
+   how they affect the execution path.
+
+ */
+
+#include "afl-fuzz.h"
+
+/* Helper function for load_extras. */
+
+static int compare_extras_len(const void* p1, const void* p2) {
+
+  struct extra_data *e1 = (struct extra_data*)p1, *e2 = (struct extra_data*)p2;
+
+  return e1->len - e2->len;
+
+}
+
+static int compare_extras_use_d(const void* p1, const void* p2) {
+
+  struct extra_data *e1 = (struct extra_data*)p1, *e2 = (struct extra_data*)p2;
+
+  return e2->hit_cnt - e1->hit_cnt;
+
+}
+
+/* Read extras from a file, sort by size. */
+
+void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
+
+  FILE* f;
+  u8    buf[MAX_LINE];
+  u8*   lptr;
+  u32   cur_line = 0;
+
+  f = fopen(fname, "r");
+
+  if (!f) PFATAL("Unable to open '%s'", fname);
+
+  while ((lptr = fgets(buf, MAX_LINE, f))) {
+
+    u8 *rptr, *wptr;
+    u32 klen = 0;
+
+    ++cur_line;
+
+    /* Trim on left and right. */
+
+    while (isspace(*lptr))
+      ++lptr;
+
+    rptr = lptr + strlen(lptr) - 1;
+    while (rptr >= lptr && isspace(*rptr))
+      --rptr;
+    ++rptr;
+    *rptr = 0;
+
+    /* Skip empty lines and comments. */
+
+    if (!*lptr || *lptr == '#') continue;
+
+    /* All other lines must end with '"', which we can consume. */
+
+    --rptr;
+
+    if (rptr < lptr || *rptr != '"')
+      FATAL("Malformed name=\"value\" pair in line %u.", cur_line);
+
+    *rptr = 0;
+
+    /* Skip alphanumerics and dashes (label). */
+
+    while (isalnum(*lptr) || *lptr == '_')
+      ++lptr;
+
+    /* If @number follows, parse that. */
+
+    if (*lptr == '@') {
+
+      ++lptr;
+      if (atoi(lptr) > dict_level) continue;
+      while (isdigit(*lptr))
+        ++lptr;
+
+    }
+
+    /* Skip whitespace and = signs. */
+
+    while (isspace(*lptr) || *lptr == '=')
+      ++lptr;
+
+    /* Consume opening '"'. */
+
+    if (*lptr != '"')
+      FATAL("Malformed name=\"keyword\" pair in line %u.", cur_line);
+
+    ++lptr;
+
+    if (!*lptr) FATAL("Empty keyword in line %u.", cur_line);
+
+    /* Okay, let's allocate memory and copy data between "...", handling
+       \xNN escaping, \\, and \". */
+
+    extras =
+        ck_realloc_block(extras, (extras_cnt + 1) * sizeof(struct extra_data));
+
+    wptr = extras[extras_cnt].data = ck_alloc(rptr - lptr);
+
+    while (*lptr) {
+
+      char* hexdigits = "0123456789abcdef";
+
+      switch (*lptr) {
+
+        case 1 ... 31:
+        case 128 ... 255:
+          FATAL("Non-printable characters in line %u.", cur_line);
+
+        case '\\':
+
+          ++lptr;
+
+          if (*lptr == '\\' || *lptr == '"') {
+
+            *(wptr++) = *(lptr++);
+            klen++;
+            break;
+
+          }
+
+          if (*lptr != 'x' || !isxdigit(lptr[1]) || !isxdigit(lptr[2]))
+            FATAL("Invalid escaping (not \\xNN) in line %u.", cur_line);
+
+          *(wptr++) = ((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) |
+                      (strchr(hexdigits, tolower(lptr[2])) - hexdigits);
+
+          lptr += 3;
+          ++klen;
+
+          break;
+
+        default: *(wptr++) = *(lptr++); ++klen;
+
+      }
+
+    }
+
+    extras[extras_cnt].len = klen;
+
+    if (extras[extras_cnt].len > MAX_DICT_FILE)
+      FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line, DMS(klen),
+            DMS(MAX_DICT_FILE));
+
+    if (*min_len > klen) *min_len = klen;
+    if (*max_len < klen) *max_len = klen;
+
+    ++extras_cnt;
+
+  }
+
+  fclose(f);
+
+}
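+
+/* The file format parsed above, by example (a made-up dictionary):
+
+     # comments and blank lines are skipped
+     header_png = "\x89PNG\x0d\x0a"
+     keyword_rare@2 = "IDAT"
+
+   The optional @number is a level: entries whose level exceeds the one
+   given via -x dir@level are ignored. Values may use \\, \" and \xNN
+   escapes; any other non-printable byte is rejected. */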
+
+/* Read extras from the extras directory and sort them by size. */
+
+void load_extras(u8* dir) {
+
+  DIR*           d;
+  struct dirent* de;
+  u32            min_len = MAX_DICT_FILE, max_len = 0, dict_level = 0;
+  u8*            x;
+
+  /* If the name ends with @, extract level and continue. */
+
+  if ((x = strchr(dir, '@'))) {
+
+    *x = 0;
+    dict_level = atoi(x + 1);
+
+  }
+
+  ACTF("Loading extra dictionary from '%s' (level %u)...", dir, dict_level);
+
+  d = opendir(dir);
+
+  if (!d) {
+
+    if (errno == ENOTDIR) {
+
+      load_extras_file(dir, &min_len, &max_len, dict_level);
+      goto check_and_sort;
+
+    }
+
+    PFATAL("Unable to open '%s'", dir);
+
+  }
+
+  if (x) FATAL("Dictionary levels not supported for directories.");
+
+  while ((de = readdir(d))) {
+
+    struct stat st;
+    u8*         fn = alloc_printf("%s/%s", dir, de->d_name);
+    s32         fd;
+
+    if (lstat(fn, &st) || access(fn, R_OK)) PFATAL("Unable to access '%s'", fn);
+
+    /* This also takes care of . and .. */
+    if (!S_ISREG(st.st_mode) || !st.st_size) {
+
+      ck_free(fn);
+      continue;
+
+    }
+
+    if (st.st_size > MAX_DICT_FILE)
+      FATAL("Extra '%s' is too big (%s, limit is %s)", fn, DMS(st.st_size),
+            DMS(MAX_DICT_FILE));
+
+    if (min_len > st.st_size) min_len = st.st_size;
+    if (max_len < st.st_size) max_len = st.st_size;
+
+    extras =
+        ck_realloc_block(extras, (extras_cnt + 1) * sizeof(struct extra_data));
+
+    extras[extras_cnt].data = ck_alloc(st.st_size);
+    extras[extras_cnt].len = st.st_size;
+
+    fd = open(fn, O_RDONLY);
+
+    if (fd < 0) PFATAL("Unable to open '%s'", fn);
+
+    ck_read(fd, extras[extras_cnt].data, st.st_size, fn);
+
+    close(fd);
+    ck_free(fn);
+
+    ++extras_cnt;
+
+  }
+
+  closedir(d);
+
+check_and_sort:
+
+  if (!extras_cnt) FATAL("No usable files in '%s'", dir);
+
+  qsort(extras, extras_cnt, sizeof(struct extra_data), compare_extras_len);
+
+  OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt, DMS(min_len),
+      DMS(max_len));
+
+  if (max_len > 32)
+    WARNF("Some tokens are relatively large (%s) - consider trimming.",
+          DMS(max_len));
+
+  if (extras_cnt > MAX_DET_EXTRAS)
+    WARNF("More than %d tokens - will use them probabilistically.",
+          MAX_DET_EXTRAS);
+
+}
+
+/* Helper function for maybe_add_auto() */
+
+static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) {
+
+  while (len--)
+    if (tolower(*(m1++)) ^ tolower(*(m2++))) return 1;
+  return 0;
+
+}
+
+/* Maybe add automatic extra. */
+
+void maybe_add_auto(u8* mem, u32 len) {
+
+  u32 i;
+
+  /* Allow users to specify that they don't want auto dictionaries. */
+
+  if (!MAX_AUTO_EXTRAS || !USE_AUTO_EXTRAS) return;
+
+  /* Skip runs of identical bytes. */
+
+  for (i = 1; i < len; ++i)
+    if (mem[0] ^ mem[i]) break;
+
+  if (i == len) return;
+
+  /* Reject builtin interesting values. */
+
+  if (len == 2) {
+
+    i = sizeof(interesting_16) >> 1;
+
+    while (i--)
+      if (*((u16*)mem) == interesting_16[i] ||
+          *((u16*)mem) == SWAP16(interesting_16[i]))
+        return;
+
+  }
+
+  if (len == 4) {
+
+    i = sizeof(interesting_32) >> 2;
+
+    while (i--)
+      if (*((u32*)mem) == interesting_32[i] ||
+          *((u32*)mem) == SWAP32(interesting_32[i]))
+        return;
+
+  }
+
+  /* Reject anything that matches existing extras. Do a case-insensitive
+     match. We optimize by exploiting the fact that extras[] are sorted
+     by size. */
+
+  for (i = 0; i < extras_cnt; ++i)
+    if (extras[i].len >= len) break;
+
+  for (; i < extras_cnt && extras[i].len == len; ++i)
+    if (!memcmp_nocase(extras[i].data, mem, len)) return;
+
+  /* Last but not least, check a_extras[] for matches. There are no
+     guarantees of a particular sort order. */
+
+  auto_changed = 1;
+
+  for (i = 0; i < a_extras_cnt; ++i) {
+
+    if (a_extras[i].len == len && !memcmp_nocase(a_extras[i].data, mem, len)) {
+
+      a_extras[i].hit_cnt++;
+      goto sort_a_extras;
+
+    }
+
+  }
+
+  /* At this point, looks like we're dealing with a new entry. So, let's
+     append it if we have room. Otherwise, let's randomly evict some other
+     entry from the bottom half of the list. */
+
+  if (a_extras_cnt < MAX_AUTO_EXTRAS) {
+
+    a_extras = ck_realloc_block(a_extras,
+                                (a_extras_cnt + 1) * sizeof(struct extra_data));
+
+    a_extras[a_extras_cnt].data = ck_memdup(mem, len);
+    a_extras[a_extras_cnt].len = len;
+    ++a_extras_cnt;
+
+  } else {
+
+    i = MAX_AUTO_EXTRAS / 2 + UR((MAX_AUTO_EXTRAS + 1) / 2);
+
+    ck_free(a_extras[i].data);
+
+    a_extras[i].data = ck_memdup(mem, len);
+    a_extras[i].len = len;
+    a_extras[i].hit_cnt = 0;
+
+  }
+
+sort_a_extras:
+
+  /* First, sort all auto extras by use count, descending order. */
+
+  qsort(a_extras, a_extras_cnt, sizeof(struct extra_data),
+        compare_extras_use_d);
+
+  /* Then, sort the top USE_AUTO_EXTRAS entries by size. */
+
+  qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt), sizeof(struct extra_data),
+        compare_extras_len);
+
+}
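+
+/* Design note: when the list is full, the victim index
+   MAX_AUTO_EXTRAS / 2 + UR((MAX_AUTO_EXTRAS + 1) / 2) always falls in the
+   bottom half of the use-count-sorted array, so frequently hit tokens in
+   the top half are never evicted. */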
+
+/* Save automatically generated extras. */
+
+void save_auto(void) {
+
+  u32 i;
+
+  if (!auto_changed) return;
+  auto_changed = 0;
+
+  for (i = 0; i < MIN(USE_AUTO_EXTRAS, a_extras_cnt); ++i) {
+
+    u8* fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", out_dir, i);
+    s32 fd;
+
+    fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);
+
+    if (fd < 0) PFATAL("Unable to create '%s'", fn);
+
+    ck_write(fd, a_extras[i].data, a_extras[i].len, fn);
+
+    close(fd);
+    ck_free(fn);
+
+  }
+
+}
+
+/* Load automatically generated extras. */
+
+void load_auto(void) {
+
+  u32 i;
+
+  for (i = 0; i < USE_AUTO_EXTRAS; ++i) {
+
+    u8  tmp[MAX_AUTO_EXTRA + 1];
+    u8* fn = alloc_printf("%s/.state/auto_extras/auto_%06u", in_dir, i);
+    s32 fd, len;
+
+    fd = open(fn, O_RDONLY, 0600);
+
+    if (fd < 0) {
+
+      if (errno != ENOENT) PFATAL("Unable to open '%s'", fn);
+      ck_free(fn);
+      break;
+
+    }
+
+    /* We read one byte more to cheaply detect tokens that are too
+       long (and skip them). */
+
+    len = read(fd, tmp, MAX_AUTO_EXTRA + 1);
+
+    if (len < 0) PFATAL("Unable to read from '%s'", fn);
+
+    if (len >= MIN_AUTO_EXTRA && len <= MAX_AUTO_EXTRA)
+      maybe_add_auto(tmp, len);
+
+    close(fd);
+    ck_free(fn);
+
+  }
+
+  if (i)
+    OKF("Loaded %u auto-discovered dictionary tokens.", i);
+  else
+    OKF("No auto-generated dictionary tokens to reuse.");
+
+}
+
+/* Destroy extras. */
+
+void destroy_extras(void) {
+
+  u32 i;
+
+  for (i = 0; i < extras_cnt; ++i)
+    ck_free(extras[i].data);
+
+  ck_free(extras);
+
+  for (i = 0; i < a_extras_cnt; ++i)
+    ck_free(a_extras[i].data);
+
+  ck_free(a_extras);
+
+}
+
diff --git a/src/afl-fuzz-globals.c b/src/afl-fuzz-globals.c
new file mode 100644
index 00000000..8fded173
--- /dev/null
+++ b/src/afl-fuzz-globals.c
@@ -0,0 +1,257 @@
+/*
+   american fuzzy lop - fuzzer code
+   --------------------------------
+
+   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+   Forkserver design by Jann Horn <jannhorn@googlemail.com>
+
+   Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This is the real deal: the program takes an instrumented binary and
+   attempts a variety of basic fuzzing tricks, paying close attention to
+   how they affect the execution path.
+
+ */
+
+#include "afl-fuzz.h"
+
+/* MOpt:
+   Lots of globals, but mostly for the status UI and other things where it
+   really makes no sense to haul them around as function parameters. */
+u64 limit_time_puppet, orig_hit_cnt_puppet, last_limit_time_start,
+    tmp_pilot_time, total_pacemaker_time, total_puppet_find, temp_puppet_find,
+    most_time_key, most_time, most_execs_key, most_execs, old_hit_count;
+
+s32 SPLICE_CYCLES_puppet, limit_time_sig, key_puppet, key_module;
+
+double w_init = 0.9, w_end = 0.3, w_now;
+
+s32 g_now;
+s32 g_max = 5000;
+
+u64 tmp_core_time;
+s32 swarm_now;
+
+double x_now[swarm_num][operator_num], L_best[swarm_num][operator_num],
+    eff_best[swarm_num][operator_num], G_best[operator_num],
+    v_now[swarm_num][operator_num], probability_now[swarm_num][operator_num],
+    swarm_fitness[swarm_num];
+
+u64 stage_finds_puppet[swarm_num]
+                      [operator_num],   /* Patterns found per fuzz stage    */
+    stage_finds_puppet_v2[swarm_num][operator_num],
+    stage_cycles_puppet_v2[swarm_num][operator_num],
+    stage_cycles_puppet_v3[swarm_num][operator_num],
+    stage_cycles_puppet[swarm_num][operator_num],
+    operator_finds_puppet[operator_num],
+    core_operator_finds_puppet[operator_num],
+    core_operator_finds_puppet_v2[operator_num],
+    core_operator_cycles_puppet[operator_num],
+    core_operator_cycles_puppet_v2[operator_num],
+    core_operator_cycles_puppet_v3[operator_num];   /* Execs per fuzz stage */
+
+double period_pilot_tmp = 5000.0;
+s32    key_lv;
+
+u8 *in_dir,                             /* Input directory with test cases  */
+    *out_dir,                           /* Working & output directory       */
+    *tmp_dir,                           /* Temporary directory for input    */
+    *sync_dir,                          /* Synchronization directory        */
+    *sync_id,                           /* Fuzzer ID                        */
+    *power_name,                        /* Power schedule name              */
+    *use_banner,                        /* Display banner                   */
+    *in_bitmap,                         /* Input bitmap                     */
+    *file_extension,                    /* File extension                   */
+    *orig_cmdline;                      /* Original command line            */
+u8 *doc_path,                           /* Path to documentation dir        */
+    *target_path,                       /* Path to target binary            */
+    *out_file;                          /* File to fuzz, if any             */
+
+u32 exec_tmout = EXEC_TIMEOUT;          /* Configurable exec timeout (ms)   */
+u32 hang_tmout = EXEC_TIMEOUT;          /* Timeout used for hang det (ms)   */
+
+u64 mem_limit = MEM_LIMIT;              /* Memory cap for child (MB)        */
+
+u8 cal_cycles = CAL_CYCLES,             /* Calibration cycles defaults      */
+    cal_cycles_long = CAL_CYCLES_LONG, debug,                 /* Debug mode */
+    python_only;                        /* Python-only mode                 */
+
+u32 stats_update_freq = 1;              /* Stats update frequency (execs)   */
+
+char *power_names[POWER_SCHEDULES_NUM] = {"explore", "fast", "coe",
+                                          "lin",     "quad", "exploit"};
+
+u8 schedule = EXPLORE;                  /* Power schedule (default: EXPLORE)*/
+u8 havoc_max_mult = HAVOC_MAX_MULT;
+
+u8 skip_deterministic,                  /* Skip deterministic stages?       */
+    force_deterministic,                /* Force deterministic stages?      */
+    use_splicing,                       /* Recombine input files?           */
+    dumb_mode,                          /* Run in non-instrumented mode?    */
+    score_changed,                      /* Scoring for favorites changed?   */
+    kill_signal,                        /* Signal that killed the child     */
+    resuming_fuzz,                      /* Resuming an older fuzzing job?   */
+    timeout_given,                      /* Specific timeout given?          */
+    not_on_tty,                         /* stdout is not a tty              */
+    term_too_small,                     /* terminal dimensions too small    */
+    no_forkserver,                      /* Disable forkserver?              */
+    crash_mode,                         /* Crash mode! Yeah!                */
+    in_place_resume,                    /* Attempt in-place resume?         */
+    auto_changed,                       /* Auto-generated tokens changed?   */
+    no_cpu_meter_red,                   /* Feng shui on the status screen   */
+    no_arith,                           /* Skip most arithmetic ops         */
+    shuffle_queue,                      /* Shuffle input queue?             */
+    bitmap_changed = 1,                 /* Time to update bitmap?           */
+    qemu_mode,                          /* Running in QEMU mode?            */
+    unicorn_mode,                       /* Running in Unicorn mode?         */
+    skip_requested,                     /* Skip request, via SIGUSR1        */
+    run_over10m,                        /* Run time over 10 minutes?        */
+    persistent_mode,                    /* Running in persistent mode?      */
+    deferred_mode,                      /* Deferred forkserver mode?        */
+    fixed_seed,                         /* do not reseed                    */
+    fast_cal,                           /* Try to calibrate faster?         */
+    uses_asan;                          /* Target uses ASAN?                */
+
+s32 out_fd,                             /* Persistent fd for out_file       */
+#ifndef HAVE_ARC4RANDOM
+    dev_urandom_fd = -1,                /* Persistent fd for /dev/urandom   */
+#endif
+    dev_null_fd = -1,                   /* Persistent fd for /dev/null      */
+    fsrv_ctl_fd,                        /* Fork server control pipe (write) */
+    fsrv_st_fd;                         /* Fork server status pipe (read)   */
+
+s32 forksrv_pid,                        /* PID of the fork server           */
+    child_pid = -1,                     /* PID of the fuzzed program        */
+    out_dir_fd = -1;                    /* FD of the lock file              */
+
+u8 *trace_bits;                         /* SHM with instrumentation bitmap  */
+
+u8 virgin_bits[MAP_SIZE],               /* Regions yet untouched by fuzzing */
+    virgin_tmout[MAP_SIZE],             /* Bits we haven't seen in tmouts   */
+    virgin_crash[MAP_SIZE];             /* Bits we haven't seen in crashes  */
+
+u8 var_bytes[MAP_SIZE];                 /* Bytes that appear to be variable */
+
+volatile u8 stop_soon,                  /* Ctrl-C pressed?                  */
+    clear_screen = 1,                   /* Window resized?                  */
+    child_timed_out;                    /* Traced process timed out?        */
+
+u32 queued_paths,                       /* Total number of queued testcases */
+    queued_variable,                    /* Testcases with variable behavior */
+    queued_at_start,                    /* Total number of initial inputs   */
+    queued_discovered,                  /* Items discovered during this run */
+    queued_imported,                    /* Items imported via -S            */
+    queued_favored,                     /* Paths deemed favorable           */
+    queued_with_cov,                    /* Paths with new coverage bytes    */
+    pending_not_fuzzed,                 /* Queued but not done yet          */
+    pending_favored,                    /* Pending favored paths            */
+    cur_skipped_paths,                  /* Abandoned inputs in cur cycle    */
+    cur_depth,                          /* Current path depth               */
+    max_depth,                          /* Max path depth                   */
+    useless_at_start,                   /* Number of useless starting paths */
+    var_byte_count,                     /* Bitmap bytes with var behavior   */
+    current_entry,                      /* Current queue entry ID           */
+    havoc_div = 1;                      /* Cycle count divisor for havoc    */
+
+u64 total_crashes,                      /* Total number of crashes          */
+    unique_crashes,                     /* Crashes with unique signatures   */
+    total_tmouts,                       /* Total number of timeouts         */
+    unique_tmouts,                      /* Timeouts with unique signatures  */
+    unique_hangs,                       /* Hangs with unique signatures     */
+    total_execs,                        /* Total execve() calls             */
+    slowest_exec_ms,                    /* Slowest non-hang testcase (ms)   */
+    start_time,                         /* Unix start time (ms)             */
+    last_path_time,                     /* Time for most recent path (ms)   */
+    last_crash_time,                    /* Time for most recent crash (ms)  */
+    last_hang_time,                     /* Time for most recent hang (ms)   */
+    last_crash_execs,                   /* Exec counter at last crash       */
+    queue_cycle,                        /* Queue round counter              */
+    cycles_wo_finds,                    /* Cycles without any new paths     */
+    trim_execs,                         /* Execs done to trim input files   */
+    bytes_trim_in,                      /* Bytes coming into the trimmer    */
+    bytes_trim_out,                     /* Bytes coming out of the trimmer  */
+    blocks_eff_total,                   /* Blocks subject to effector maps  */
+    blocks_eff_select;                  /* Blocks selected as fuzzable      */
+
+u32 subseq_tmouts;                      /* Number of timeouts in a row      */
+
+u8 *stage_name = "init",                /* Name of the current fuzz stage   */
+    *stage_short,                       /* Short stage name                 */
+    *syncing_party;                     /* Currently syncing with...        */
+
+s32 stage_cur, stage_max;               /* Stage progression                */
+s32 splicing_with = -1;                 /* Splicing with which test case?   */
+
+u32 master_id, master_max;              /* Master instance job splitting    */
+
+u32 syncing_case;                       /* Syncing with case #...           */
+
+s32 stage_cur_byte,                     /* Byte offset of current stage op  */
+    stage_cur_val;                      /* Value used for stage op          */
+
+u8 stage_val_type;                      /* Value type (STAGE_VAL_*)         */
+
+u64 stage_finds[32],                    /* Patterns found per fuzz stage    */
+    stage_cycles[32];                   /* Execs per fuzz stage             */
+
+#ifndef HAVE_ARC4RANDOM
+u32 rand_cnt;                           /* Random number counter            */
+#endif
+
+u64 total_cal_us,                       /* Total calibration time (us)      */
+    total_cal_cycles;                   /* Total calibration cycles         */
+
+u64 total_bitmap_size,                  /* Total bit count for all bitmaps  */
+    total_bitmap_entries;               /* Number of bitmaps counted        */
+
+s32 cpu_core_count;                     /* CPU core count                   */
+
+#ifdef HAVE_AFFINITY
+
+s32 cpu_aff = -1;                       /* Selected CPU core                */
+
+#endif /* HAVE_AFFINITY */
+
+FILE *plot_file;                        /* Gnuplot output file              */
+
+struct queue_entry *queue,              /* Fuzzing queue (linked list)      */
+    *queue_cur,                         /* Current offset within the queue  */
+    *queue_top,                         /* Top of the list                  */
+    *q_prev100;                         /* Previous 100 marker              */
+
+struct queue_entry *top_rated[MAP_SIZE]; /* Top entries for bitmap bytes     */
+
+struct extra_data *extras;              /* Extra tokens to fuzz with        */
+u32                extras_cnt;          /* Total number of tokens read      */
+
+struct extra_data *a_extras;            /* Automatically selected extras    */
+u32                a_extras_cnt;        /* Total number of tokens available */
+
+u8 *(*post_handler)(u8 *buf, u32 *len);
+
+/* hooks for the custom mutator function */
+size_t (*custom_mutator)(u8 *data, size_t size, u8 *mutated_out,
+                         size_t max_size, unsigned int seed);
+size_t (*pre_save_handler)(u8 *data, size_t size, u8 **new_data);
+
+/* Interesting values, as per config.h */
+
+s8  interesting_8[] = {INTERESTING_8};
+s16 interesting_16[] = {INTERESTING_8, INTERESTING_16};
+s32 interesting_32[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32};
+
+/* Python stuff */
+#ifdef USE_PYTHON
+
+PyObject *py_module;
+PyObject *py_functions[PY_FUNC_COUNT];
+
+#endif
+
diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c
new file mode 100644
index 00000000..8a3ee6fa
--- /dev/null
+++ b/src/afl-fuzz-init.c
@@ -0,0 +1,2064 @@
+/*
+   american fuzzy lop - fuzzer code
+   --------------------------------
+
+   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+   Forkserver design by Jann Horn <jannhorn@googlemail.com>
+
+   Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This is the real deal: the program takes an instrumented binary and
+   attempts a variety of basic fuzzing tricks, paying close attention to
+   how they affect the execution path.
+
+ */
+
+#include "afl-fuzz.h"
+
+#ifdef HAVE_AFFINITY
+
+/* Scan the list of processes bound to specific cores and bind to a free
+   one; aborts via FATAL() if every core is taken. Assumes an upper bound
+   of 4k CPUs. */
+
+void bind_to_free_cpu(void) {
+
+  DIR*           d;
+  struct dirent* de;
+  cpu_set_t      c;
+
+  u8  cpu_used[4096] = {0};
+  u32 i;
+
+  if (cpu_core_count < 2) return;
+
+  if (getenv("AFL_NO_AFFINITY")) {
+
+    WARNF("Not binding to a CPU core (AFL_NO_AFFINITY set).");
+    return;
+
+  }
+
+  d = opendir("/proc");
+
+  if (!d) {
+
+    WARNF("Unable to access /proc - can't scan for free CPU cores.");
+    return;
+
+  }
+
+  ACTF("Checking CPU core loadout...");
+
+  /* Introduce some jitter, in case multiple AFL tasks are doing the same
+     thing at the same time... */
+
+  usleep(R(1000) * 250);
+
+  /* Scan all /proc/<pid>/status entries, checking for Cpus_allowed_list.
+     Flag all processes bound to a specific CPU using cpu_used[]. This will
+     fail for some exotic binding setups, but is likely good enough in almost
+     all real-world use cases. */
+
+  while ((de = readdir(d))) {
+
+    u8*   fn;
+    FILE* f;
+    u8    tmp[MAX_LINE];
+    u8    has_vmsize = 0;
+
+    if (!isdigit(de->d_name[0])) continue;
+
+    fn = alloc_printf("/proc/%s/status", de->d_name);
+
+    if (!(f = fopen(fn, "r"))) {
+
+      ck_free(fn);
+      continue;
+
+    }
+
+    while (fgets(tmp, MAX_LINE, f)) {
+
+      u32 hval;
+
+      /* Processes without VmSize are probably kernel tasks. */
+
+      if (!strncmp(tmp, "VmSize:\t", 8)) has_vmsize = 1;
+
+      if (!strncmp(tmp, "Cpus_allowed_list:\t", 19) && !strchr(tmp, '-') &&
+          !strchr(tmp, ',') && sscanf(tmp + 19, "%u", &hval) == 1 &&
+          hval < sizeof(cpu_used) && has_vmsize) {
+
+        cpu_used[hval] = 1;
+        break;
+
+      }
+
+    }
+
+    ck_free(fn);
+    fclose(f);
+
+  }
+
+  closedir(d);
+
+  for (i = 0; i < cpu_core_count; ++i)
+    if (!cpu_used[i]) break;
+
+  if (i == cpu_core_count) {
+
+    SAYF("\n" cLRD "[-] " cRST
+         "Uh-oh, looks like all %d CPU cores on your system are allocated to\n"
+         "    other instances of afl-fuzz (or similar CPU-locked tasks). "
+         "Starting\n"
+         "    another fuzzer on this machine is probably a bad plan, but if "
+         "you are\n"
+         "    absolutely sure, you can set AFL_NO_AFFINITY and try again.\n",
+         cpu_core_count);
+
+    FATAL("No more free CPU cores");
+
+  }
+
+  OKF("Found a free CPU core, binding to #%u.", i);
+
+  cpu_aff = i;
+
+  CPU_ZERO(&c);
+  CPU_SET(i, &c);
+
+  if (sched_setaffinity(0, sizeof(c), &c)) PFATAL("sched_setaffinity failed");
+
+}
+
+#endif /* HAVE_AFFINITY */
+
+/* Load postprocessor, if available. */
+
+void setup_post(void) {
+
+  void* dh;
+  u8*   fn = getenv("AFL_POST_LIBRARY");
+  u32   tlen = 6;
+
+  if (!fn) return;
+
+  ACTF("Loading postprocessor from '%s'...", fn);
+
+  dh = dlopen(fn, RTLD_NOW);
+  if (!dh) FATAL("%s", dlerror());
+
+  post_handler = dlsym(dh, "afl_postprocess");
+  if (!post_handler) FATAL("Symbol 'afl_postprocess' not found.");
+
+  /* Do a quick test. It's better to segfault now than later =) */
+
+  post_handler("hello", &tlen);
+
+  OKF("Postprocessor installed successfully.");
+
+}
+
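+/* A minimal postprocessor sketch (illustrative only; the buffer/length
+   prototype is inferred from the smoke test above):
+
+     // gcc -shared -fPIC -o post.so post.c
+     // AFL_POST_LIBRARY=./post.so afl-fuzz ...
+     typedef unsigned char u8;
+     typedef unsigned int  u32;
+
+     const u8* afl_postprocess(const u8* in_buf, u32* len) {
+
+       // A real hook would patch lengths, checksums etc. before the case
+       // is executed; returning the buffer unchanged is a valid no-op.
+       return in_buf;
+
+     }
+
+   In stock AFL, returning NULL asks the fuzzer to skip the test case. */
+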
+void setup_custom_mutator(void) {
+
+  void* dh;
+  u8*   fn = getenv("AFL_CUSTOM_MUTATOR_LIBRARY");
+
+  if (!fn) return;
+
+  ACTF("Loading custom mutator library from '%s'...", fn);
+
+  dh = dlopen(fn, RTLD_NOW);
+  if (!dh) FATAL("%s", dlerror());
+
+  custom_mutator = dlsym(dh, "afl_custom_mutator");
+  if (!custom_mutator) FATAL("Symbol 'afl_custom_mutator' not found.");
+
+  pre_save_handler = dlsym(dh, "afl_pre_save_handler");
+  //  if (!pre_save_handler) WARNF("Symbol 'afl_pre_save_handler' not found.");
+
+  OKF("Custom mutator installed successfully.");
+
+}
+
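+/* Sketch of a matching library (illustrative only). afl_pre_save_handler
+   must fit the size_t (*)(u8*, size_t, u8**) prototype declared among the
+   fuzzer globals; afl_custom_mutator's prototype lives in the headers:
+
+     // gcc -shared -fPIC -o mutator.so mutator.c
+     // AFL_CUSTOM_MUTATOR_LIBRARY=./mutator.so afl-fuzz ...
+     #include <stddef.h>
+     typedef unsigned char u8;
+
+     size_t afl_pre_save_handler(u8* data, size_t size, u8** new_data) {
+
+       // No-op: hand the buffer back unchanged. A real hook could fix up
+       // framing or checksums before the case is written out.
+       *new_data = data;
+       return size;
+
+     }
+
+   The pre-save hook is optional - note the commented-out WARNF() above. */
+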
+/* Shuffle an array of pointers. Might be slightly biased. */
+
+static void shuffle_ptrs(void** ptrs, u32 cnt) {
+
+  u32 i;
+
+  for (i = 0; i < cnt - 1; ++i) {
+
+    u32   j = i + UR(cnt - i);
+    void* s = ptrs[i];
+    ptrs[i] = ptrs[j];
+    ptrs[j] = s;
+
+  }
+
+}
+
+/* Read all testcases from the input directory, then queue them for testing.
+   Called at startup. */
+
+void read_testcases(void) {
+
+  struct dirent** nl;
+  s32             nl_cnt;
+  u32             i;
+  u8*             fn1;
+
+  /* Auto-detect non-in-place resumption attempts. */
+
+  fn1 = alloc_printf("%s/queue", in_dir);
+  if (!access(fn1, F_OK))
+    in_dir = fn1;
+  else
+    ck_free(fn1);
+
+  ACTF("Scanning '%s'...", in_dir);
+
+  /* We use scandir() + alphasort() rather than readdir() because otherwise,
+     the ordering of test cases would vary somewhat randomly and would be
+     difficult to control. */
+
+  nl_cnt = scandir(in_dir, &nl, NULL, alphasort);
+
+  if (nl_cnt < 0) {
+
+    if (errno == ENOENT || errno == ENOTDIR)
+
+      SAYF("\n" cLRD "[-] " cRST
+           "The input directory does not seem to be valid - try again. The "
+           "fuzzer needs\n"
+           "    one or more test case to start with - ideally, a small file "
+           "under 1 kB\n"
+           "    or so. The cases must be stored as regular files directly in "
+           "the input\n"
+           "    directory.\n");
+
+    PFATAL("Unable to open '%s'", in_dir);
+
+  }
+
+  if (shuffle_queue && nl_cnt > 1) {
+
+    ACTF("Shuffling queue...");
+    shuffle_ptrs((void**)nl, nl_cnt);
+
+  }
+
+  for (i = 0; i < nl_cnt; ++i) {
+
+    struct stat st;
+
+    u8* fn2 = alloc_printf("%s/%s", in_dir, nl[i]->d_name);
+    u8* dfn =
+        alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name);
+
+    u8 passed_det = 0;
+
+    free(nl[i]);                                             /* not tracked */
+
+    if (lstat(fn2, &st) || access(fn2, R_OK))
+      PFATAL("Unable to access '%s'", fn2);
+
+    /* This also takes care of . and .. */
+
+    if (!S_ISREG(st.st_mode) || !st.st_size || strstr(fn2, "/README.txt")) {
+
+      ck_free(fn2);
+      ck_free(dfn);
+      continue;
+
+    }
+
+    if (st.st_size > MAX_FILE)
+      FATAL("Test case '%s' is too big (%s, limit is %s)", fn2, DMS(st.st_size),
+            DMS(MAX_FILE));
+
+    /* Check for metadata that indicates that deterministic fuzzing
+       is complete for this entry. We don't want to repeat deterministic
+       fuzzing when resuming aborted scans, because it would be pointless
+       and probably very time-consuming. */
+
+    if (!access(dfn, F_OK)) passed_det = 1;
+    ck_free(dfn);
+
+    add_to_queue(fn2, st.st_size, passed_det);
+
+  }
+
+  free(nl);                                                  /* not tracked */
+
+  if (!queued_paths) {
+
+    SAYF("\n" cLRD "[-] " cRST
+         "Looks like there are no valid test cases in the input directory! The "
+         "fuzzer\n"
+         "    needs one or more test case to start with - ideally, a small "
+         "file under\n"
+         "    1 kB or so. The cases must be stored as regular files directly "
+         "in the\n"
+         "    input directory.\n");
+
+    FATAL("No usable test cases in '%s'", in_dir);
+
+  }
+
+  last_path_time = 0;
+  queued_at_start = queued_paths;
+
+}
+
+/* Examine map coverage. Called once, for first test case. */
+
+static void check_map_coverage(void) {
+
+  u32 i;
+
+  if (count_bytes(trace_bits) < 100) return;
+
+  for (i = (1 << (MAP_SIZE_POW2 - 1)); i < MAP_SIZE; ++i)
+    if (trace_bits[i]) return;
+
+  WARNF("Recompile binary with newer version of afl to improve coverage!");
+
+}
+
+/* Perform dry run of all test cases to confirm that the app is working as
+   expected. This is done only for the initial inputs, and only once. */
+
+void perform_dry_run(char** argv) {
+
+  struct queue_entry* q = queue;
+  u32                 cal_failures = 0;
+  u8*                 skip_crashes = getenv("AFL_SKIP_CRASHES");
+
+  while (q) {
+
+    u8* use_mem;
+    u8  res;
+    s32 fd;
+
+    u8* fn = strrchr(q->fname, '/') + 1;
+
+    ACTF("Attempting dry run with '%s'...", fn);
+
+    fd = open(q->fname, O_RDONLY);
+    if (fd < 0) PFATAL("Unable to open '%s'", q->fname);
+
+    use_mem = ck_alloc_nozero(q->len);
+
+    if (read(fd, use_mem, q->len) != q->len)
+      FATAL("Short read from '%s'", q->fname);
+
+    close(fd);
+
+    res = calibrate_case(argv, q, use_mem, 0, 1);
+    ck_free(use_mem);
+
+    if (stop_soon) return;
+
+    if (res == crash_mode || res == FAULT_NOBITS)
+      SAYF(cGRA "    len = %u, map size = %u, exec speed = %llu us\n" cRST,
+           q->len, q->bitmap_size, q->exec_us);
+
+    switch (res) {
+
+      case FAULT_NONE:
+
+        if (q == queue) check_map_coverage();
+
+        if (crash_mode) FATAL("Test case '%s' does *NOT* crash", fn);
+
+        break;
+
+      case FAULT_TMOUT:
+
+        if (timeout_given) {
+
+          /* The -t nn+ syntax in the command line sets timeout_given to '2' and
+             instructs afl-fuzz to tolerate but skip queue entries that time
+             out. */
+
+          if (timeout_given > 1) {
+
+            WARNF("Test case results in a timeout (skipping)");
+            q->cal_failed = CAL_CHANCES;
+            ++cal_failures;
+            break;
+
+          }
+
+          SAYF("\n" cLRD "[-] " cRST
+               "The program took more than %u ms to process one of the initial "
+               "test cases.\n"
+               "    Usually, the right thing to do is to relax the -t option - "
+               "or to delete it\n"
+               "    altogether and allow the fuzzer to auto-calibrate. That "
+               "said, if you know\n"
+               "    what you are doing and want to simply skip the unruly test "
+               "cases, append\n"
+               "    '+' at the end of the value passed to -t ('-t %u+').\n",
+               exec_tmout, exec_tmout);
+
+          FATAL("Test case '%s' results in a timeout", fn);
+
+        } else {
+
+          SAYF("\n" cLRD "[-] " cRST
+               "The program took more than %u ms to process one of the initial "
+               "test cases.\n"
+               "    This is bad news; raising the limit with the -t option is "
+               "possible, but\n"
+               "    will probably make the fuzzing process extremely slow.\n\n"
+
+               "    If this test case is just a fluke, the other option is to "
+               "just avoid it\n"
+               "    altogether, and find one that is less of a CPU hog.\n",
+               exec_tmout);
+
+          FATAL("Test case '%s' results in a timeout", fn);
+
+        }
+
+      case FAULT_CRASH:
+
+        if (crash_mode) break;
+
+        if (skip_crashes) {
+
+          WARNF("Test case results in a crash (skipping)");
+          q->cal_failed = CAL_CHANCES;
+          ++cal_failures;
+          break;
+
+        }
+
+        if (mem_limit) {
+
+          SAYF("\n" cLRD "[-] " cRST
+               "Oops, the program crashed with one of the test cases provided. "
+               "There are\n"
+               "    several possible explanations:\n\n"
+
+               "    - The test case causes known crashes under normal working "
+               "conditions. If\n"
+               "      so, please remove it. The fuzzer should be seeded with "
+               "interesting\n"
+               "      inputs - but not ones that cause an outright crash.\n\n"
+
+               "    - The current memory limit (%s) is too low for this "
+               "program, causing\n"
+               "      it to die due to OOM when parsing valid files. To fix "
+               "this, try\n"
+               "      bumping it up with the -m setting in the command line. "
+               "If in doubt,\n"
+               "      try something along the lines of:\n\n"
+
+               MSG_ULIMIT_USAGE
+               " /path/to/binary [...] <testcase )\n\n"
+
+               "      Tip: you can use http://jwilk.net/software/recidivm to "
+               "quickly\n"
+               "      estimate the required amount of virtual memory for the "
+               "binary. Also,\n"
+               "      if you are using ASAN, see %s/notes_for_asan.txt.\n\n"
+
+               MSG_FORK_ON_APPLE
+
+               "    - Least likely, there is a horrible bug in the fuzzer. If "
+               "other options\n"
+               "      fail, poke <afl-users@googlegroups.com> for "
+               "troubleshooting tips.\n",
+               DMS(mem_limit << 20), mem_limit - 1, doc_path);
+
+        } else {
+
+          SAYF("\n" cLRD "[-] " cRST
+               "Oops, the program crashed with one of the test cases provided. "
+               "There are\n"
+               "    several possible explanations:\n\n"
+
+               "    - The test case causes known crashes under normal working "
+               "conditions. If\n"
+               "      so, please remove it. The fuzzer should be seeded with "
+               "interesting\n"
+               "      inputs - but not ones that cause an outright crash.\n\n"
+
+               MSG_FORK_ON_APPLE
+
+               "    - Least likely, there is a horrible bug in the fuzzer. If "
+               "other options\n"
+               "      fail, poke <afl-users@googlegroups.com> for "
+               "troubleshooting tips.\n");
+
+        }
+
+#undef MSG_ULIMIT_USAGE
+#undef MSG_FORK_ON_APPLE
+
+        FATAL("Test case '%s' results in a crash", fn);
+
+      case FAULT_ERROR:
+
+        FATAL("Unable to execute target application ('%s')", argv[0]);
+
+      case FAULT_NOINST: FATAL("No instrumentation detected");
+
+      case FAULT_NOBITS:
+
+        ++useless_at_start;
+
+        if (!in_bitmap && !shuffle_queue)
+          WARNF("No new instrumentation output, test case may be useless.");
+
+        break;
+
+    }
+
+    if (q->var_behavior) WARNF("Instrumentation output varies across runs.");
+
+    q = q->next;
+
+  }
+
+  if (cal_failures) {
+
+    if (cal_failures == queued_paths)
+      FATAL("All test cases time out%s, giving up!",
+            skip_crashes ? " or crash" : "");
+
+    WARNF("Skipped %u test cases (%0.02f%%) due to timeouts%s.", cal_failures,
+          ((double)cal_failures) * 100 / queued_paths,
+          skip_crashes ? " or crashes" : "");
+
+    if (cal_failures * 5 > queued_paths)
+      WARNF(cLRD "High percentage of rejected test cases, check settings!");
+
+  }
+
+  OKF("All test cases processed.");
+
+}
+
+/* Helper function: link() if possible, copy otherwise. */
+
+static void link_or_copy(u8* old_path, u8* new_path) {
+
+  s32 i = link(old_path, new_path);
+  s32 sfd, dfd;
+  u8* tmp;
+
+  if (!i) return;
+
+  sfd = open(old_path, O_RDONLY);
+  if (sfd < 0) PFATAL("Unable to open '%s'", old_path);
+
+  dfd = open(new_path, O_WRONLY | O_CREAT | O_EXCL, 0600);
+  if (dfd < 0) PFATAL("Unable to create '%s'", new_path);
+
+  tmp = ck_alloc(64 * 1024);
+
+  while ((i = read(sfd, tmp, 64 * 1024)) > 0)
+    ck_write(dfd, tmp, i, new_path);
+
+  if (i < 0) PFATAL("read() failed");
+
+  ck_free(tmp);
+  close(sfd);
+  close(dfd);
+
+}
+
+/* Create hard links for input test cases in the output directory, choosing
+   good names and pivoting accordingly. */
+
+void pivot_inputs(void) {
+
+  struct queue_entry* q = queue;
+  u32                 id = 0;
+
+  ACTF("Creating hard links for all input files...");
+
+  while (q) {
+
+    u8 *nfn, *rsl = strrchr(q->fname, '/');
+    u32 orig_id;
+
+    if (!rsl)
+      rsl = q->fname;
+    else
+      ++rsl;
+
+    /* If the original file name conforms to the syntax and the recorded
+       ID matches the one we'd assign, just use the original file name.
+       This is valuable for resuming fuzzing runs. */
+
+    if (!strncmp(rsl, CASE_PREFIX, 3) &&
+        sscanf(rsl + 3, "%06u", &orig_id) == 1 && orig_id == id) {
+
+      u8* src_str;
+      u32 src_id;
+
+      resuming_fuzz = 1;
+      nfn = alloc_printf("%s/queue/%s", out_dir, rsl);
+
+      /* Since we're at it, let's also try to find parent and figure out the
+         appropriate depth for this entry. */
+
+      src_str = strchr(rsl + 3, ':');
+
+      if (src_str && sscanf(src_str + 1, "%06u", &src_id) == 1) {
+
+        struct queue_entry* s = queue;
+        while (src_id-- && s)
+          s = s->next;
+        if (s) q->depth = s->depth + 1;
+
+        if (max_depth < q->depth) max_depth = q->depth;
+
+      }
+
+    } else {
+
+      /* No dice - invent a new name, capturing the original one as a
+         substring. */
+
+#ifndef SIMPLE_FILES
+
+      u8* use_name = strstr(rsl, ",orig:");
+
+      if (use_name)
+        use_name += 6;
+      else
+        use_name = rsl;
+      nfn = alloc_printf("%s/queue/id:%06u,orig:%s", out_dir, id, use_name);
+
+#else
+
+      nfn = alloc_printf("%s/queue/id_%06u", out_dir, id);
+
+#endif /* ^!SIMPLE_FILES */
+
+    }
+
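+    /* e.g. a seed named "seed.txt" pivots to "id:000000,orig:seed.txt"
+       (or "id_000000" when built with SIMPLE_FILES). */
+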
+    /* Pivot to the new queue entry. */
+
+    link_or_copy(q->fname, nfn);
+    ck_free(q->fname);
+    q->fname = nfn;
+
+    /* Make sure that the passed_det value carries over, too. */
+
+    if (q->passed_det) mark_as_det_done(q);
+
+    q = q->next;
+    ++id;
+
+  }
+
+  if (in_place_resume) nuke_resume_dir();
+
+}
+
+/* When resuming, try to find the queue position to start from. This only
+   works when we can locate the original fuzzer_stats. */
+
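+/* fuzzer_stats is a flat "key : value" text file, so a strstr() + atoi()
+   scan is all we need. Relevant excerpt (values hypothetical):
+
+       cur_path          : 143
+       exec_timeout   : 80
+
+   The padding matters - the keys are matched verbatim below. */
+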
+u32 find_start_position(void) {
+
+  static u8 tmp[4096];                   /* Ought to be enough for anybody. */
+
+  u8 *fn, *off;
+  s32 fd, i;
+  u32 ret;
+
+  if (!resuming_fuzz) return 0;
+
+  if (in_place_resume)
+    fn = alloc_printf("%s/fuzzer_stats", out_dir);
+  else
+    fn = alloc_printf("%s/../fuzzer_stats", in_dir);
+
+  fd = open(fn, O_RDONLY);
+  ck_free(fn);
+
+  if (fd < 0) return 0;
+
+  i = read(fd, tmp, sizeof(tmp) - 1);
+  (void)i;                                                 /* Ignore errors */
+  close(fd);
+
+  off = strstr(tmp, "cur_path          : ");
+  if (!off) return 0;
+
+  ret = atoi(off + 20);
+  if (ret >= queued_paths) ret = 0;
+  return ret;
+
+}
+
+/* The same, but for timeouts. The idea is that when resuming sessions
+   without -t given, we reuse the timeout recorded in fuzzer_stats instead
+   of auto-scaling from scratch, so random flukes can't make it creep up
+   across resumes. */
+
+void find_timeout(void) {
+
+  static u8 tmp[4096];                   /* Ought to be enough for anybody. */
+
+  u8 *fn, *off;
+  s32 fd, i;
+  u32 ret;
+
+  if (!resuming_fuzz) return;
+
+  if (in_place_resume)
+    fn = alloc_printf("%s/fuzzer_stats", out_dir);
+  else
+    fn = alloc_printf("%s/../fuzzer_stats", in_dir);
+
+  fd = open(fn, O_RDONLY);
+  ck_free(fn);
+
+  if (fd < 0) return;
+
+  i = read(fd, tmp, sizeof(tmp) - 1);
+  (void)i;                                                 /* Ignore errors */
+  close(fd);
+
+  off = strstr(tmp, "exec_timeout   : ");
+  if (!off) return;
+
+  ret = atoi(off + 17);
+  if (ret <= 4) return;
+
+  exec_tmout = ret;
+  timeout_given = 3;
+
+}
+
+/* A helper function for maybe_delete_out_dir(), deleting all prefixed
+   files in a directory. */
+
+static u8 delete_files(u8* path, u8* prefix) {
+
+  DIR*           d;
+  struct dirent* d_ent;
+
+  d = opendir(path);
+
+  if (!d) return 0;
+
+  while ((d_ent = readdir(d))) {
+
+    if (d_ent->d_name[0] != '.' &&
+        (!prefix || !strncmp(d_ent->d_name, prefix, strlen(prefix)))) {
+
+      u8* fname = alloc_printf("%s/%s", path, d_ent->d_name);
+      if (unlink(fname)) PFATAL("Unable to delete '%s'", fname);
+      ck_free(fname);
+
+    }
+
+  }
+
+  closedir(d);
+
+  return !!rmdir(path);
+
+}
+
+/* Get the number of runnable processes, with some simple smoothing. */
+
+double get_runnable_processes(void) {
+
+  static double res;
+
+#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+
+  /* I don't see any portable sysctl or so that would quickly give us the
+     number of runnable processes; the 1-minute load average can be a
+     semi-decent approximation, though. */
+
+  if (getloadavg(&res, 1) != 1) return 0;
+
+#else
+
+  /* On Linux, /proc/stat is probably the best way; load averages are
+     computed in funny ways and sometimes don't reflect extremely short-lived
+     processes well. */
+
+  FILE* f = fopen("/proc/stat", "r");
+  u8 tmp[1024];
+  u32 val = 0;
+
+  if (!f) return 0;
+
+  while (fgets(tmp, sizeof(tmp), f)) {
+
+    if (!strncmp(tmp, "procs_running ", 14) ||
+        !strncmp(tmp, "procs_blocked ", 14))
+      val += atoi(tmp + 14);
+
+  }
+
+  fclose(f);
+
+  if (!res) {
+
+    res = val;
+
+  } else {
+
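+    /* Exponential moving average: each new sample contributes a weight of
+       1 / AVG_SMOOTHING. */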
+    res = res * (1.0 - 1.0 / AVG_SMOOTHING) +
+          ((double)val) * (1.0 / AVG_SMOOTHING);
+
+  }
+
+#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */
+
+  return res;
+
+}
+
+/* Delete the temporary directory used for in-place session resume. */
+
+void nuke_resume_dir(void) {
+
+  u8* fn;
+
+  fn = alloc_printf("%s/_resume/.state/deterministic_done", out_dir);
+  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  fn = alloc_printf("%s/_resume/.state/auto_extras", out_dir);
+  if (delete_files(fn, "auto_")) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  fn = alloc_printf("%s/_resume/.state/redundant_edges", out_dir);
+  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  fn = alloc_printf("%s/_resume/.state/variable_behavior", out_dir);
+  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  fn = alloc_printf("%s/_resume/.state", out_dir);
+  if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  fn = alloc_printf("%s/_resume", out_dir);
+  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  return;
+
+dir_cleanup_failed:
+
+  FATAL("_resume directory cleanup failed");
+
+}
+
+/* Delete fuzzer output directory if we recognize it as ours, if the fuzzer
+   is not currently running, and if the last run time isn't too great. */
+
+void maybe_delete_out_dir(void) {
+
+  FILE* f;
+  u8*   fn = alloc_printf("%s/fuzzer_stats", out_dir);
+
+  /* See if the output directory is locked. If yes, bail out. If not,
+     create a lock that will persist for the lifetime of the process
+     (this requires leaving the descriptor open). */
+
+  out_dir_fd = open(out_dir, O_RDONLY);
+  if (out_dir_fd < 0) PFATAL("Unable to open '%s'", out_dir);
+
+#ifndef __sun
+
+  if (flock(out_dir_fd, LOCK_EX | LOCK_NB) && errno == EWOULDBLOCK) {
+
+    SAYF("\n" cLRD "[-] " cRST
+         "Looks like the job output directory is being actively used by "
+         "another\n"
+         "    instance of afl-fuzz. You will need to choose a different %s\n"
+         "    or stop the other process first.\n",
+         sync_id ? "fuzzer ID" : "output location");
+
+    FATAL("Directory '%s' is in use", out_dir);
+
+  }
+
+#endif /* !__sun */
+
+  f = fopen(fn, "r");
+
+  if (f) {
+
+    u64 start_time2, last_update;
+
+    if (fscanf(f,
+               "start_time     : %llu\n"
+               "last_update    : %llu\n",
+               &start_time2, &last_update) != 2)
+      FATAL("Malformed data in '%s'", fn);
+
+    fclose(f);
+
+    /* Let's see how much work is at stake. */
+
+    if (!in_place_resume && last_update - start_time2 > OUTPUT_GRACE * 60) {
+
+      SAYF("\n" cLRD "[-] " cRST
+           "The job output directory already exists and contains the results "
+           "of more\n"
+           "    than %d minutes worth of fuzzing. To avoid data loss, afl-fuzz "
+           "will *NOT*\n"
+           "    automatically delete this data for you.\n\n"
+
+           "    If you wish to start a new session, remove or rename the "
+           "directory manually,\n"
+           "    or specify a different output location for this job. To resume "
+           "the old\n"
+           "    session, put '-' as the input directory in the command line "
+           "('-i -') and\n"
+           "    try again.\n",
+           OUTPUT_GRACE);
+
+      FATAL("At-risk data found in '%s'", out_dir);
+
+    }
+
+  }
+
+  ck_free(fn);
+
+  /* The idea for in-place resume is pretty simple: we temporarily move the old
+     queue/ to a new location that gets deleted once import to the new queue/
+     is finished. If _resume/ already exists, the current queue/ may be
+     incomplete due to an earlier abort, so we want to use the old _resume/
+     dir instead, and we let rename() fail silently. */
+
+  if (in_place_resume) {
+
+    u8* orig_q = alloc_printf("%s/queue", out_dir);
+
+    in_dir = alloc_printf("%s/_resume", out_dir);
+
+    rename(orig_q, in_dir);                                /* Ignore errors */
+
+    OKF("Output directory exists, will attempt session resume.");
+
+    ck_free(orig_q);
+
+  } else {
+
+    OKF("Output directory exists but deemed OK to reuse.");
+
+  }
+
+  ACTF("Deleting old session data...");
+
+  /* Okay, let's get the ball rolling! First, we need to get rid of the entries
+     in <out_dir>/.synced/.../id:*, if any are present. */
+
+  if (!in_place_resume) {
+
+    fn = alloc_printf("%s/.synced", out_dir);
+    if (delete_files(fn, NULL)) goto dir_cleanup_failed;
+    ck_free(fn);
+
+  }
+
+  /* Next, we need to clean up <out_dir>/queue/.state/ subdirectories: */
+
+  fn = alloc_printf("%s/queue/.state/deterministic_done", out_dir);
+  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  fn = alloc_printf("%s/queue/.state/auto_extras", out_dir);
+  if (delete_files(fn, "auto_")) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  fn = alloc_printf("%s/queue/.state/redundant_edges", out_dir);
+  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  fn = alloc_printf("%s/queue/.state/variable_behavior", out_dir);
+  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  /* Then, get rid of the .state subdirectory itself (should be empty by now)
+     and everything matching <out_dir>/queue/id:*. */
+
+  fn = alloc_printf("%s/queue/.state", out_dir);
+  if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  fn = alloc_printf("%s/queue", out_dir);
+  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  /* All right, let's do <out_dir>/crashes/id:* and <out_dir>/hangs/id:*. */
+
+  if (!in_place_resume) {
+
+    fn = alloc_printf("%s/crashes/README.txt", out_dir);
+    unlink(fn);                                            /* Ignore errors */
+    ck_free(fn);
+
+  }
+
+  fn = alloc_printf("%s/crashes", out_dir);
+
+  /* Make backup of the crashes directory if it's not empty and if we're
+     doing in-place resume. */
+
+  if (in_place_resume && rmdir(fn)) {
+
+    time_t     cur_t = time(0);
+    struct tm* t = localtime(&cur_t);
+
+#ifndef SIMPLE_FILES
+
+    u8* nfn = alloc_printf("%s.%04d-%02d-%02d-%02d:%02d:%02d", fn,
+                           t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
+                           t->tm_hour, t->tm_min, t->tm_sec);
+
+#else
+
+    u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, t->tm_year + 1900,
+                           t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min,
+                           t->tm_sec);
+
+#endif /* ^!SIMPLE_FILES */
+
+    rename(fn, nfn);                                      /* Ignore errors. */
+    ck_free(nfn);
+
+  }
+
+  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  fn = alloc_printf("%s/hangs", out_dir);
+
+  /* Backup hangs, too. */
+
+  if (in_place_resume && rmdir(fn)) {
+
+    time_t     cur_t = time(0);
+    struct tm* t = localtime(&cur_t);
+
+#ifndef SIMPLE_FILES
+
+    u8* nfn = alloc_printf("%s.%04d-%02d-%02d-%02d:%02d:%02d", fn,
+                           t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
+                           t->tm_hour, t->tm_min, t->tm_sec);
+
+#else
+
+    u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, t->tm_year + 1900,
+                           t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min,
+                           t->tm_sec);
+
+#endif /* ^!SIMPLE_FILES */
+
+    rename(fn, nfn);                                      /* Ignore errors. */
+    ck_free(nfn);
+
+  }
+
+  if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  /* And now, for some finishing touches. */
+
+  if (file_extension) {
+
+    fn = alloc_printf("%s/.cur_input.%s", out_dir, file_extension);
+
+  } else {
+
+    fn = alloc_printf("%s/.cur_input", out_dir);
+
+  }
+
+  if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  fn = alloc_printf("%s/fuzz_bitmap", out_dir);
+  if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  if (!in_place_resume) {
+
+    fn = alloc_printf("%s/fuzzer_stats", out_dir);
+    if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
+    ck_free(fn);
+
+  }
+
+  fn = alloc_printf("%s/plot_data", out_dir);
+  if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  fn = alloc_printf("%s/cmdline", out_dir);
+  if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
+  ck_free(fn);
+
+  OKF("Output dir cleanup successful.");
+
+  /* Wow... is that all? If yes, celebrate! */
+
+  return;
+
+dir_cleanup_failed:
+
+  SAYF("\n" cLRD "[-] " cRST
+       "Whoops, the fuzzer tried to reuse your output directory, but bumped "
+       "into\n"
+       "    some files that shouldn't be there or that couldn't be removed - "
+       "so it\n"
+       "    decided to abort! This happened while processing this path:\n\n"
+
+       "    %s\n\n"
+       "    Please examine and manually delete the files, or specify a "
+       "different\n"
+       "    output location for the tool.\n",
+       fn);
+
+  FATAL("Output directory cleanup failed");
+
+}
+
+/* Prepare output directories and fds. */
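+/* The resulting layout is roughly:
+
+     <out_dir>/queue/.state/{deterministic_done,auto_extras,
+                             redundant_edges,variable_behavior}/
+     <out_dir>/{crashes,hangs}/
+     <out_dir>/plot_data
+
+   plus <out_dir>/.synced/ when cooperating fuzzers (-M/-S) are in play. */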
+
+void setup_dirs_fds(void) {
+
+  u8* tmp;
+  s32 fd;
+
+  ACTF("Setting up output directories...");
+
+  if (sync_id && mkdir(sync_dir, 0700) && errno != EEXIST)
+    PFATAL("Unable to create '%s'", sync_dir);
+
+  if (mkdir(out_dir, 0700)) {
+
+    if (errno != EEXIST) PFATAL("Unable to create '%s'", out_dir);
+
+    maybe_delete_out_dir();
+
+  } else {
+
+    if (in_place_resume)
+      FATAL("Resume attempted but old output directory not found");
+
+    out_dir_fd = open(out_dir, O_RDONLY);
+
+#ifndef __sun
+
+    if (out_dir_fd < 0 || flock(out_dir_fd, LOCK_EX | LOCK_NB))
+      PFATAL("Unable to flock() output directory.");
+
+#endif /* !__sun */
+
+  }
+
+  /* Queue directory for any starting & discovered paths. */
+
+  tmp = alloc_printf("%s/queue", out_dir);
+  if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
+  ck_free(tmp);
+
+  /* Top-level directory for queue metadata used for session
+     resume and related tasks. */
+
+  tmp = alloc_printf("%s/queue/.state/", out_dir);
+  if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
+  ck_free(tmp);
+
+  /* Directory for flagging queue entries that went through
+     deterministic fuzzing in the past. */
+
+  tmp = alloc_printf("%s/queue/.state/deterministic_done/", out_dir);
+  if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
+  ck_free(tmp);
+
+  /* Directory with the auto-selected dictionary entries. */
+
+  tmp = alloc_printf("%s/queue/.state/auto_extras/", out_dir);
+  if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
+  ck_free(tmp);
+
+  /* The set of paths currently deemed redundant. */
+
+  tmp = alloc_printf("%s/queue/.state/redundant_edges/", out_dir);
+  if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
+  ck_free(tmp);
+
+  /* The set of paths showing variable behavior. */
+
+  tmp = alloc_printf("%s/queue/.state/variable_behavior/", out_dir);
+  if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
+  ck_free(tmp);
+
+  /* Sync directory for keeping track of cooperating fuzzers. */
+
+  if (sync_id) {
+
+    tmp = alloc_printf("%s/.synced/", out_dir);
+
+    if (mkdir(tmp, 0700) && (!in_place_resume || errno != EEXIST))
+      PFATAL("Unable to create '%s'", tmp);
+
+    ck_free(tmp);
+
+  }
+
+  /* All recorded crashes. */
+
+  tmp = alloc_printf("%s/crashes", out_dir);
+  if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
+  ck_free(tmp);
+
+  /* All recorded hangs. */
+
+  tmp = alloc_printf("%s/hangs", out_dir);
+  if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
+  ck_free(tmp);
+
+  /* Generally useful file descriptors. */
+
+  dev_null_fd = open("/dev/null", O_RDWR);
+  if (dev_null_fd < 0) PFATAL("Unable to open /dev/null");
+
+#ifndef HAVE_ARC4RANDOM
+  dev_urandom_fd = open("/dev/urandom", O_RDONLY);
+  if (dev_urandom_fd < 0) PFATAL("Unable to open /dev/urandom");
+#endif
+
+  /* Gnuplot output file. */
+
+  tmp = alloc_printf("%s/plot_data", out_dir);
+  fd = open(tmp, O_WRONLY | O_CREAT | O_EXCL, 0600);
+  if (fd < 0) PFATAL("Unable to create '%s'", tmp);
+  ck_free(tmp);
+
+  plot_file = fdopen(fd, "w");
+  if (!plot_file) PFATAL("fdopen() failed");
+
+  fprintf(plot_file,
+          "# unix_time, cycles_done, cur_path, paths_total, "
+          "pending_total, pending_favs, map_size, unique_crashes, "
+          "unique_hangs, max_depth, execs_per_sec\n");
+  /* ignore errors */
+
+}
+
+void setup_cmdline_file(char** argv) {
+
+  u8* tmp;
+  s32 fd;
+  u32 i = 0;
+
+  FILE* cmdline_file = NULL;
+
+  /* Store the command line to reproduce our findings */
+  tmp = alloc_printf("%s/cmdline", out_dir);
+  fd = open(tmp, O_WRONLY | O_CREAT | O_EXCL, 0600);
+  if (fd < 0) PFATAL("Unable to create '%s'", tmp);
+  ck_free(tmp);
+
+  cmdline_file = fdopen(fd, "w");
+  if (!cmdline_file) PFATAL("fdopen() failed");
+
+  while (argv[i]) {
+
+    fprintf(cmdline_file, "%s\n", argv[i]);
+    ++i;
+
+  }
+
+  fclose(cmdline_file);
+
+}
+
+/* Set up the output file for fuzzed data, if not using -f. */
+
+void setup_stdio_file(void) {
+
+  u8* fn;
+  if (file_extension) {
+
+    fn = alloc_printf("%s/.cur_input.%s", out_dir, file_extension);
+
+  } else {
+
+    fn = alloc_printf("%s/.cur_input", out_dir);
+
+  }
+
+  unlink(fn);                                              /* Ignore errors */
+
+  out_fd = open(fn, O_RDWR | O_CREAT | O_EXCL, 0600);
+
+  if (out_fd < 0) PFATAL("Unable to create '%s'", fn);
+
+  ck_free(fn);
+
+}
+
+/* Make sure that core dumps don't go to a program. */
+
+void check_crash_handling(void) {
+
+#ifdef __APPLE__
+
+  /* Yuck! There appears to be no simple C API to query for the state of
+     loaded daemons on MacOS X, and I'm a bit hesitant to do something
+     more sophisticated, such as disabling crash reporting via Mach ports,
+     until I get a box to test the code. So, for now, we check for crash
+     reporting the awful way. */
+
+  if (system("launchctl list 2>/dev/null | grep -q '\\.ReportCrash$'")) return;
+
+  SAYF(
+      "\n" cLRD "[-] " cRST
+      "Whoops, your system is configured to forward crash notifications to an\n"
+      "    external crash reporting utility. This will cause issues due to "
+      "the\n"
+      "    extended delay between the fuzzed binary malfunctioning and this "
+      "fact\n"
+      "    being relayed to the fuzzer via the standard waitpid() API.\n\n"
+      "    To avoid having crashes misinterpreted as timeouts, please run the\n"
+      "    following commands:\n\n"
+
+      "    SL=/System/Library; PL=com.apple.ReportCrash\n"
+      "    launchctl unload -w ${SL}/LaunchAgents/${PL}.plist\n"
+      "    sudo launchctl unload -w ${SL}/LaunchDaemons/${PL}.Root.plist\n");
+
+  if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES"))
+    FATAL("Crash reporter detected");
+
+#else
+
+  /* This is Linux specific, but I don't think there's anything equivalent on
+   *BSD, so we can just let it slide for now. */
+
+  s32 fd = open("/proc/sys/kernel/core_pattern", O_RDONLY);
+  u8 fchar;
+
+  if (fd < 0) return;
+
+  ACTF("Checking core_pattern...");
+
+  if (read(fd, &fchar, 1) == 1 && fchar == '|') {
+
+    SAYF(
+        "\n" cLRD "[-] " cRST
+        "Hmm, your system is configured to send core dump notifications to an\n"
+        "    external utility. This will cause issues: there will be an "
+        "extended delay\n"
+        "    between stumbling upon a crash and having this information "
+        "relayed to the\n"
+        "    fuzzer via the standard waitpid() API.\n\n"
+
+        "    To avoid having crashes misinterpreted as timeouts, please log in "
+        "as root\n"
+        "    and temporarily modify /proc/sys/kernel/core_pattern, like so:\n\n"
+
+        "    echo core >/proc/sys/kernel/core_pattern\n");
+
+    if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES"))
+      FATAL("Pipe at the beginning of 'core_pattern'");
+
+  }
+
+  close(fd);
+
+#endif /* ^__APPLE__ */
+
+}
+
+/* Check CPU governor. */
+
+void check_cpu_governor(void) {
+
+#ifdef __linux__
+  FILE* f;
+  u8    tmp[128];
+  u64   min = 0, max = 0;
+
+  if (getenv("AFL_SKIP_CPUFREQ")) return;
+
+  if (cpu_aff > 0)
+    snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpu", cpu_aff,
+             "/cpufreq/scaling_governor");
+  else
+    snprintf(tmp, sizeof(tmp), "%s",
+             "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor");
+  f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "r");
+  if (!f) {
+
+    if (cpu_aff > 0)
+      snprintf(tmp, sizeof(tmp), "%s%d%s",
+               "/sys/devices/system/cpu/cpufreq/policy", cpu_aff,
+               "/scaling_governor");
+    else
+      snprintf(tmp, sizeof(tmp), "%s",
+               "/sys/devices/system/cpu/cpufreq/policy0/scaling_governor");
+    f = fopen(tmp, "r");
+
+  }
+
+  if (!f) {
+
+    WARNF("Could not check CPU scaling governor");
+    return;
+
+  }
+
+  ACTF("Checking CPU scaling governor...");
+
+  if (!fgets(tmp, 128, f)) PFATAL("fgets() failed");
+
+  fclose(f);
+
+  if (!strncmp(tmp, "perf", 4)) return;
+
+  f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq", "r");
+
+  if (f) {
+
+    if (fscanf(f, "%llu", &min) != 1) min = 0;
+    fclose(f);
+
+  }
+
+  f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq", "r");
+
+  if (f) {
+
+    if (fscanf(f, "%llu", &max) != 1) max = 0;
+    fclose(f);
+
+  }
+
+  if (min == max) return;
+
+  SAYF("\n" cLRD "[-] " cRST
+       "Whoops, your system uses on-demand CPU frequency scaling, adjusted\n"
+       "    between %llu and %llu MHz. Unfortunately, the scaling algorithm in "
+       "the\n"
+       "    kernel is imperfect and can miss the short-lived processes spawned "
+       "by\n"
+       "    afl-fuzz. To keep things moving, run these commands as root:\n\n"
+
+       "    cd /sys/devices/system/cpu\n"
+       "    echo performance | tee cpu*/cpufreq/scaling_governor\n\n"
+
+       "    You can later go back to the original state by replacing "
+       "'performance' with\n"
+       "    'ondemand'. If you don't want to change the settings, set "
+       "AFL_SKIP_CPUFREQ\n"
+       "    to make afl-fuzz skip this check - but expect some performance "
+       "drop.\n",
+       min / 1024, max / 1024);
+
+  FATAL("Suboptimal CPU scaling governor");
+#endif
+
+}
+
+/* Count the number of logical CPU cores. */
+
+void get_core_count(void) {
+
+#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+
+  size_t s = sizeof(cpu_core_count);
+
+  /* On *BSD systems, we can just use a sysctl to get the number of CPUs. */
+
+#  ifdef __APPLE__
+
+  if (sysctlbyname("hw.logicalcpu", &cpu_core_count, &s, NULL, 0) < 0) return;
+
+#  else
+
+  int s_name[2] = {CTL_HW, HW_NCPU};
+
+  if (sysctl(s_name, 2, &cpu_core_count, &s, NULL, 0) < 0) return;
+
+#  endif /* ^__APPLE__ */
+
+#else
+
+#  ifdef HAVE_AFFINITY
+
+  cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN);
+
+#  else
+
+  FILE* f = fopen("/proc/stat", "r");
+  u8    tmp[1024];
+
+  if (!f) return;
+
+  while (fgets(tmp, sizeof(tmp), f))
+    if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) ++cpu_core_count;
+
+  fclose(f);
+
+#  endif /* ^HAVE_AFFINITY */
+
+#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */
+
+  if (cpu_core_count > 0) {
+
+    u32 cur_runnable = 0;
+
+    cur_runnable = (u32)get_runnable_processes();
+
+#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+
+    /* Add ourselves, since the 1-minute average doesn't include that yet. */
+
+    ++cur_runnable;
+
+#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */
+
+    OKF("You have %d CPU core%s and %u runnable tasks (utilization: %0.0f%%).",
+        cpu_core_count, cpu_core_count > 1 ? "s" : "", cur_runnable,
+        cur_runnable * 100.0 / cpu_core_count);
+
+    if (cpu_core_count > 1) {
+
+      if (cur_runnable > cpu_core_count * 1.5) {
+
+        WARNF("System under apparent load, performance may be spotty.");
+
+      } else if (cur_runnable + 1 <= cpu_core_count) {
+
+        OKF("Try parallel jobs - see %s/parallel_fuzzing.txt.", doc_path);
+
+      }
+
+    }
+
+  } else {
+
+    cpu_core_count = 0;
+    WARNF("Unable to figure out the number of CPU cores.");
+
+  }
+
+}
+
+/* Validate and fix up out_dir and sync_dir when using -S. */
+
+void fix_up_sync(void) {
+
+  u8* x = sync_id;
+
+  if (dumb_mode) FATAL("-S / -M and -n are mutually exclusive");
+
+  if (skip_deterministic) {
+
+    if (force_deterministic) FATAL("use -S instead of -M -d");
+    // else
+    //  FATAL("-S already implies -d");
+
+  }
+
+  while (*x) {
+
+    if (!isalnum(*x) && *x != '_' && *x != '-')
+      FATAL("Non-alphanumeric fuzzer ID specified via -S or -M");
+
+    ++x;
+
+  }
+
+  if (strlen(sync_id) > 32) FATAL("Fuzzer ID too long");
+
+  x = alloc_printf("%s/%s", out_dir, sync_id);
+
+  sync_dir = out_dir;
+  out_dir = x;
+
+  if (!force_deterministic) {
+
+    skip_deterministic = 1;
+    use_splicing = 1;
+
+  }
+
+}
+
+/* Handle screen resize (SIGWINCH). */
+
+static void handle_resize(int sig) {
+
+  clear_screen = 1;
+
+}
+
+/* Check ASAN options. */
+
+void check_asan_opts(void) {
+
+  u8* x = getenv("ASAN_OPTIONS");
+
+  if (x) {
+
+    if (!strstr(x, "abort_on_error=1"))
+      FATAL("Custom ASAN_OPTIONS set without abort_on_error=1 - please fix!");
+
+    if (!strstr(x, "symbolize=0"))
+      FATAL("Custom ASAN_OPTIONS set without symbolize=0 - please fix!");
+
+  }
+
+  x = getenv("MSAN_OPTIONS");
+
+  if (x) {
+
+    if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR)))
+      FATAL("Custom MSAN_OPTIONS set without exit_code=" STRINGIFY(
+          MSAN_ERROR) " - please fix!");
+
+    if (!strstr(x, "symbolize=0"))
+      FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!");
+
+  }
+
+}
+
+/* Handle stop signal (Ctrl-C, etc). */
+
+static void handle_stop_sig(int sig) {
+
+  stop_soon = 1;
+
+  if (child_pid > 0) kill(child_pid, SIGKILL);
+  if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL);
+
+}
+
+/* Handle skip request (SIGUSR1). */
+
+static void handle_skipreq(int sig) {
+
+  skip_requested = 1;
+
+}
+
+/* Do a PATH search and find the target binary to see that it exists and
+   isn't a shell script - a common and painful mistake. We also check for
+   a valid ELF header and for evidence of AFL instrumentation. */
+
+void check_binary(u8* fname) {
+
+  u8*         env_path = 0;
+  struct stat st;
+
+  s32 fd;
+  u8* f_data;
+  u32 f_len = 0;
+
+  ACTF("Validating target binary...");
+
+  if (strchr(fname, '/') || !(env_path = getenv("PATH"))) {
+
+    target_path = ck_strdup(fname);
+    if (stat(target_path, &st) || !S_ISREG(st.st_mode) ||
+        !(st.st_mode & 0111) || (f_len = st.st_size) < 4)
+      FATAL("Program '%s' not found or not executable", fname);
+
+  } else {
+
+    while (env_path) {
+
+      u8 *cur_elem, *delim = strchr(env_path, ':');
+
+      if (delim) {
+
+        cur_elem = ck_alloc(delim - env_path + 1);
+        memcpy(cur_elem, env_path, delim - env_path);
+        ++delim;
+
+      } else
+
+        cur_elem = ck_strdup(env_path);
+
+      env_path = delim;
+
+      if (cur_elem[0])
+        target_path = alloc_printf("%s/%s", cur_elem, fname);
+      else
+        target_path = ck_strdup(fname);
+
+      ck_free(cur_elem);
+
+      if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
+          (st.st_mode & 0111) && (f_len = st.st_size) >= 4)
+        break;
+
+      ck_free(target_path);
+      target_path = 0;
+
+    }
+
+    if (!target_path) FATAL("Program '%s' not found or not executable", fname);
+
+  }
+
+  if (getenv("AFL_SKIP_BIN_CHECK")) return;
+
+  /* Check for blatant user errors. */
+
+  if ((!strncmp(target_path, "/tmp/", 5) && !strchr(target_path + 5, '/')) ||
+      (!strncmp(target_path, "/var/tmp/", 9) && !strchr(target_path + 9, '/')))
+    FATAL("Please don't keep binaries in /tmp or /var/tmp");
+
+  fd = open(target_path, O_RDONLY);
+
+  if (fd < 0) PFATAL("Unable to open '%s'", target_path);
+
+  f_data = mmap(0, f_len, PROT_READ, MAP_PRIVATE, fd, 0);
+
+  if (f_data == MAP_FAILED) PFATAL("Unable to mmap file '%s'", target_path);
+
+  close(fd);
+
+  if (f_data[0] == '#' && f_data[1] == '!') {
+
+    SAYF("\n" cLRD "[-] " cRST
+         "Oops, the target binary looks like a shell script. Some build "
+         "systems will\n"
+         "    sometimes generate shell stubs for dynamically linked programs; "
+         "try static\n"
+         "    library mode (./configure --disable-shared) if that's the "
+         "case.\n\n"
+
+         "    Another possible cause is that you are actually trying to use a "
+         "shell\n"
+         "    wrapper around the fuzzed component. Invoking shell can slow "
+         "down the\n"
+         "    fuzzing process by a factor of 20x or more; it's best to write "
+         "the wrapper\n"
+         "    in a compiled language instead.\n");
+
+    FATAL("Program '%s' is a shell script", target_path);
+
+  }
+
+#ifndef __APPLE__
+
+  if (f_data[0] != 0x7f || memcmp(f_data + 1, "ELF", 3))
+    FATAL("Program '%s' is not an ELF binary", target_path);
+
+#else
+
+#  if !defined(__arm__) && !defined(__arm64__)
+  if (f_data[0] != 0xCF || f_data[1] != 0xFA || f_data[2] != 0xED)
+    FATAL("Program '%s' is not a 64-bit Mach-O binary", target_path);
+#  endif
+
+#endif /* ^!__APPLE__ */
+
+  if (!qemu_mode && !unicorn_mode && !dumb_mode &&
+      !memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) {
+
+    SAYF(
+        "\n" cLRD "[-] " cRST
+        "Looks like the target binary is not instrumented! The fuzzer depends "
+        "on\n"
+        "    compile-time instrumentation to isolate interesting test cases "
+        "while\n"
+        "    mutating the input data. For more information, and for tips on "
+        "how to\n"
+        "    instrument binaries, please see %s/README.\n\n"
+
+        "    When source code is not available, you may be able to leverage "
+        "QEMU\n"
+        "    mode support. Consult the README for tips on how to enable this.\n"
+
+        "    (It is also possible to use afl-fuzz as a traditional, \"dumb\" "
+        "fuzzer.\n"
+        "    For that, you can use the -n option - but expect much worse "
+        "results.)\n",
+        doc_path);
+
+    FATAL("No instrumentation detected");
+
+  }
+
+  if ((qemu_mode || unicorn_mode) &&
+      memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) {
+
+    SAYF("\n" cLRD "[-] " cRST
+         "This program appears to be instrumented with afl-gcc, but is being "
+         "run in\n"
+         "    QEMU or Unicorn mode (-Q or -U). This is probably not what you "
+         "want -\n"
+         "    this setup will be slow and offer no practical benefits.\n");
+
+    FATAL("Instrumentation found in -Q or -U mode");
+
+  }
+
+  if (memmem(f_data, f_len, "libasan.so", 10) ||
+      memmem(f_data, f_len, "__msan_init", 11))
+    uses_asan = 1;
+
+  /* Detect persistent & deferred init signatures in the binary. */
+
+  if (memmem(f_data, f_len, PERSIST_SIG, strlen(PERSIST_SIG) + 1)) {
+
+    OKF(cPIN "Persistent mode binary detected.");
+    setenv(PERSIST_ENV_VAR, "1", 1);
+    persistent_mode = 1;
+
+  } else if (getenv("AFL_PERSISTENT")) {
+
+    WARNF("AFL_PERSISTENT is no longer supported and may misbehave!");
+
+  }
+
+  if (memmem(f_data, f_len, DEFER_SIG, strlen(DEFER_SIG) + 1)) {
+
+    OKF(cPIN "Deferred forkserver binary detected.");
+    setenv(DEFER_ENV_VAR, "1", 1);
+    deferred_mode = 1;
+
+  } else if (getenv("AFL_DEFER_FORKSRV")) {
+
+    WARNF("AFL_DEFER_FORKSRV is no longer supported and may misbehave!");
+
+  }
+
+  if (munmap(f_data, f_len)) PFATAL("munmap() failed");
+
+}
+
+/* Trim and possibly create a banner for the run. */
+
+void fix_up_banner(u8* name) {
+
+  if (!use_banner) {
+
+    if (sync_id) {
+
+      use_banner = sync_id;
+
+    } else {
+
+      u8* trim = strrchr(name, '/');
+      if (!trim)
+        use_banner = name;
+      else
+        use_banner = trim + 1;
+
+    }
+
+  }
+
+  if (strlen(use_banner) > 32) {
+
+    u8* tmp = ck_alloc(36);
+    sprintf(tmp, "%.32s...", use_banner);
+    use_banner = tmp;
+
+  }
+
+}
+
+/* Check if we're on TTY. */
+
+void check_if_tty(void) {
+
+  struct winsize ws;
+
+  if (getenv("AFL_NO_UI")) {
+
+    OKF("Disabling the UI because AFL_NO_UI is set.");
+    not_on_tty = 1;
+    return;
+
+  }
+
+  if (ioctl(1, TIOCGWINSZ, &ws)) {
+
+    if (errno == ENOTTY) {
+
+      OKF("Looks like we're not running on a tty, so I'll be a bit less "
+          "verbose.");
+      not_on_tty = 1;
+
+    }
+
+    return;
+
+  }
+
+}
+
+/* Set up signal handlers. More complicated than it needs to be, because
+   libc on Solaris doesn't resume interrupted read() calls, sets
+   SA_RESETHAND when you call siginterrupt(), and does other stupid things. */
+
+void setup_signal_handlers(void) {
+
+  struct sigaction sa;
+
+  sa.sa_handler = NULL;
+  sa.sa_flags = SA_RESTART;
+  sa.sa_sigaction = NULL;
+
+  sigemptyset(&sa.sa_mask);
+
+  /* Various ways of saying "stop". */
+
+  sa.sa_handler = handle_stop_sig;
+  sigaction(SIGHUP, &sa, NULL);
+  sigaction(SIGINT, &sa, NULL);
+  sigaction(SIGTERM, &sa, NULL);
+
+  /* Exec timeout notifications. */
+
+  sa.sa_handler = handle_timeout;
+  sigaction(SIGALRM, &sa, NULL);
+
+  /* Window resize */
+
+  sa.sa_handler = handle_resize;
+  sigaction(SIGWINCH, &sa, NULL);
+
+  /* SIGUSR1: skip entry */
+
+  sa.sa_handler = handle_skipreq;
+  sigaction(SIGUSR1, &sa, NULL);
+
+  /* Things we don't care about. */
+
+  sa.sa_handler = SIG_IGN;
+  sigaction(SIGTSTP, &sa, NULL);
+  sigaction(SIGPIPE, &sa, NULL);
+
+}
+
+/* Rewrite argv for QEMU. */
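+/* The rewritten vector is { afl-qemu-trace, "--", target_path,
+   original argv[1..] }, so the target becomes an ordinary argument of the
+   QEMU tracer. */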
+
+char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
+
+  char** new_argv = ck_alloc(sizeof(char*) * (argc + 4));
+  u8 *   tmp, *cp, *rsl, *own_copy;
+
+  memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc);
+
+  new_argv[2] = target_path;
+  new_argv[1] = "--";
+
+  /* Now we need to actually find the QEMU binary to put in argv[0]. */
+
+  tmp = getenv("AFL_PATH");
+
+  if (tmp) {
+
+    cp = alloc_printf("%s/afl-qemu-trace", tmp);
+
+    if (access(cp, X_OK)) FATAL("Unable to find '%s'", cp);
+
+    target_path = new_argv[0] = cp;
+    return new_argv;
+
+  }
+
+  own_copy = ck_strdup(own_loc);
+  rsl = strrchr(own_copy, '/');
+
+  if (rsl) {
+
+    *rsl = 0;
+
+    cp = alloc_printf("%s/afl-qemu-trace", own_copy);
+    ck_free(own_copy);
+
+    if (!access(cp, X_OK)) {
+
+      target_path = new_argv[0] = cp;
+      return new_argv;
+
+    }
+
+  } else
+
+    ck_free(own_copy);
+
+  if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) {
+
+    target_path = new_argv[0] = ck_strdup(BIN_PATH "/afl-qemu-trace");
+    return new_argv;
+
+  }
+
+  SAYF("\n" cLRD "[-] " cRST
+       "Oops, unable to find the 'afl-qemu-trace' binary. The binary must be "
+       "built\n"
+       "    separately by following the instructions in qemu_mode/README.qemu. "
+       "If you\n"
+       "    already have the binary installed, you may need to specify "
+       "AFL_PATH in the\n"
+       "    environment.\n\n"
+
+       "    Of course, even without QEMU, afl-fuzz can still work with "
+       "binaries that are\n"
+       "    instrumented at compile time with afl-gcc. It is also possible to "
+       "use it as a\n"
+       "    traditional \"dumb\" fuzzer by specifying '-n' in the command "
+       "line.\n");
+
+  FATAL("Failed to locate 'afl-qemu-trace'.");
+
+}
+
+/* Make a copy of the current command line. */
+
+void save_cmdline(u32 argc, char** argv) {
+
+  u32 len = 1, i;
+  u8* buf;
+
+  for (i = 0; i < argc; ++i)
+    len += strlen(argv[i]) + 1;
+
+  buf = orig_cmdline = ck_alloc(len);
+
+  for (i = 0; i < argc; ++i) {
+
+    u32 l = strlen(argv[i]);
+
+    memcpy(buf, argv[i], l);
+    buf += l;
+
+    if (i != argc - 1) *(buf++) = ' ';
+
+  }
+
+  *buf = 0;
+
+}
+
diff --git a/src/afl-fuzz-misc.c b/src/afl-fuzz-misc.c
new file mode 100644
index 00000000..eb0cc187
--- /dev/null
+++ b/src/afl-fuzz-misc.c
@@ -0,0 +1,183 @@
+/*
+   american fuzzy lop - fuzzer code
+   --------------------------------
+
+   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+   Forkserver design by Jann Horn <jannhorn@googlemail.com>
+
+   Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This is the real deal: the program takes an instrumented binary and
+   attempts a variety of basic fuzzing tricks, paying close attention to
+   how they affect the execution path.
+
+ */
+
+#include "afl-fuzz.h"
+
+/* Describe integer. Uses 12 cyclic static buffers for return values. The value
+   returned should be five characters or less for all the integers we reasonably
+   expect to see. */
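+
+/* e.g. DI(1234) -> "1234", DI(12345) -> "12.3k", DI(3000000) -> "3.00M". */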
+
+u8* DI(u64 val) {
+
+  static u8 tmp[12][16];
+  static u8 cur;
+
+  cur = (cur + 1) % 12;
+
+#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast)    \
+  do {                                                    \
+                                                          \
+    if (val < (_divisor) * (_limit_mult)) {               \
+                                                          \
+      sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \
+      return tmp[cur];                                    \
+                                                          \
+    }                                                     \
+                                                          \
+  } while (0)
+
+  /* 0-9999 */
+  CHK_FORMAT(1, 10000, "%llu", u64);
+
+  /* 10.0k - 99.9k */
+  CHK_FORMAT(1000, 99.95, "%0.01fk", double);
+
+  /* 100k - 999k */
+  CHK_FORMAT(1000, 1000, "%lluk", u64);
+
+  /* 1.00M - 9.99M */
+  CHK_FORMAT(1000 * 1000, 9.995, "%0.02fM", double);
+
+  /* 10.0M - 99.9M */
+  CHK_FORMAT(1000 * 1000, 99.95, "%0.01fM", double);
+
+  /* 100M - 999M */
+  CHK_FORMAT(1000 * 1000, 1000, "%lluM", u64);
+
+  /* 1.00G - 9.99G */
+  CHK_FORMAT(1000LL * 1000 * 1000, 9.995, "%0.02fG", double);
+
+  /* 10.0G - 99.9G */
+  CHK_FORMAT(1000LL * 1000 * 1000, 99.95, "%0.01fG", double);
+
+  /* 100G - 999G */
+  CHK_FORMAT(1000LL * 1000 * 1000, 1000, "%lluG", u64);
+
+  /* 1.00T - 9.99T */
+  CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 9.995, "%0.02fT", double);
+
+  /* 10.0T - 99.9T */
+  CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 99.95, "%0.01fT", double);
+
+  /* 100T+ */
+  strcpy(tmp[cur], "infty");
+  return tmp[cur];
+
+}
+
+/* Describe float. Similar to the above, except with a single
+   static buffer. */
+
+u8* DF(double val) {
+
+  static u8 tmp[16];
+
+  if (val < 99.995) {
+
+    sprintf(tmp, "%0.02f", val);
+    return tmp;
+
+  }
+
+  if (val < 999.95) {
+
+    sprintf(tmp, "%0.01f", val);
+    return tmp;
+
+  }
+
+  return DI((u64)val);
+
+}
+
+/* Describe integer as memory size. */
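+/* e.g. DMS(1048576) -> "1.00 MB", DMS(52428800) -> "50.0 MB". */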
+
+u8* DMS(u64 val) {
+
+  static u8 tmp[12][16];
+  static u8 cur;
+
+  cur = (cur + 1) % 12;
+
+  /* 0-9999 */
+  CHK_FORMAT(1, 10000, "%llu B", u64);
+
+  /* 10.0k - 99.9k */
+  CHK_FORMAT(1024, 99.95, "%0.01f kB", double);
+
+  /* 100k - 999k */
+  CHK_FORMAT(1024, 1000, "%llu kB", u64);
+
+  /* 1.00M - 9.99M */
+  CHK_FORMAT(1024 * 1024, 9.995, "%0.02f MB", double);
+
+  /* 10.0M - 99.9M */
+  CHK_FORMAT(1024 * 1024, 99.95, "%0.01f MB", double);
+
+  /* 100M - 999M */
+  CHK_FORMAT(1024 * 1024, 1000, "%llu MB", u64);
+
+  /* 1.00G - 9.99G */
+  CHK_FORMAT(1024LL * 1024 * 1024, 9.995, "%0.02f GB", double);
+
+  /* 10.0G - 99.9G */
+  CHK_FORMAT(1024LL * 1024 * 1024, 99.95, "%0.01f GB", double);
+
+  /* 100G - 999G */
+  CHK_FORMAT(1024LL * 1024 * 1024, 1000, "%llu GB", u64);
+
+  /* 1.00T - 9.99T */
+  CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 9.995, "%0.02f TB", double);
+
+  /* 10.0T - 99.9T */
+  CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 99.95, "%0.01f TB", double);
+
+#undef CHK_FORMAT
+
+  /* 100T+ */
+  strcpy(tmp[cur], "infty");
+  return tmp[cur];
+
+}
+
+/* Describe time delta. Returns a single static buffer, 34 chars or less. */
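+/* e.g. a delta of 90061000 ms renders as "1 days, 1 hrs, 1 min, 1 sec". */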
+
+u8* DTD(u64 cur_ms, u64 event_ms) {
+
+  static u8 tmp[64];
+  u64       delta;
+  s32       t_d, t_h, t_m, t_s;
+
+  if (!event_ms) return "none seen yet";
+
+  delta = cur_ms - event_ms;
+
+  t_d = delta / 1000 / 60 / 60 / 24;
+  t_h = (delta / 1000 / 60 / 60) % 24;
+  t_m = (delta / 1000 / 60) % 60;
+  t_s = (delta / 1000) % 60;
+
+  sprintf(tmp, "%s days, %d hrs, %d min, %d sec", DI(t_d), t_h, t_m, t_s);
+  return tmp;
+
+}
+
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
new file mode 100644
index 00000000..1b7abedd
--- /dev/null
+++ b/src/afl-fuzz-one.c
@@ -0,0 +1,6024 @@
+/*
+   american fuzzy lop - fuzzer code
+   --------------------------------
+
+   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+   Forkserver design by Jann Horn <jannhorn@googlemail.com>
+
+   Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This is the real deal: the program takes an instrumented binary and
+   attempts a variety of basic fuzzing tricks, paying close attention to
+   how they affect the execution path.
+
+ */
+
+#include "afl-fuzz.h"
+
+/* MOpt */
+
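+/* Roulette-wheel selection: sele is drawn uniformly from [0, 1) and we walk
+   the cumulative probability table of the current swarm until the matching
+   bucket is found; the returned index selects the mutation operator. */
+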
+int select_algorithm(void) {
+
+  int i_puppet, j_puppet;
+
+  double sele = ((double)(UR(10000)) * 0.0001);
+  j_puppet = 0;
+  for (i_puppet = 0; i_puppet < operator_num; ++i_puppet) {
+
+    if (unlikely(i_puppet == 0)) {
+
+      if (sele < probability_now[swarm_now][i_puppet]) break;
+
+    } else {
+
+      if (sele < probability_now[swarm_now][i_puppet]) {
+
+        j_puppet = 1;
+        break;
+
+      }
+
+    }
+
+  }
+
+  if (j_puppet == 1 && sele < probability_now[swarm_now][i_puppet - 1])
+    FATAL("error select_algorithm");
+  return i_puppet;
+
+}
+
+/* Helper to choose a random block len for block operations in fuzz_one().
+   Doesn't return zero, provided that limit is > 0. */
+
+static u32 choose_block_len(u32 limit) {
+
+  u32 min_value, max_value;
+  u32 rlim = MIN(queue_cycle, 3);
+
+  if (!run_over10m) rlim = 1;
+
+  switch (UR(rlim)) {
+
+    case 0:
+      min_value = 1;
+      max_value = HAVOC_BLK_SMALL;
+      break;
+
+    case 1:
+      min_value = HAVOC_BLK_SMALL;
+      max_value = HAVOC_BLK_MEDIUM;
+      break;
+
+    default:
+
+      if (UR(10)) {
+
+        min_value = HAVOC_BLK_MEDIUM;
+        max_value = HAVOC_BLK_LARGE;
+
+      } else {
+
+        min_value = HAVOC_BLK_LARGE;
+        max_value = HAVOC_BLK_XL;
+
+      }
+
+  }
+
+  if (min_value >= limit) min_value = 1;
+
+  return min_value + UR(MIN(max_value, limit) - min_value + 1);
+
+}
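+
+/* Example (illustrative): with queue_cycle = 2 and run_over10m set, rlim =
+   2, so UR(rlim) picks either the [1, HAVOC_BLK_SMALL] or the
+   [HAVOC_BLK_SMALL, HAVOC_BLK_MEDIUM] bucket; before the ten-minute mark,
+   rlim is forced to 1 and only the small bucket is used. The final line
+   draws uniformly from the chosen bucket, clipped to limit. */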
+
+/* Helper function to see if a particular change (xor_val = old ^ new) could
+   be a product of deterministic bit flips with the lengths and stepovers
+   attempted by afl-fuzz. This is used to avoid dupes in some of the
+   deterministic fuzzing operations that follow bit flips. We also
+   return 1 if xor_val is zero, which implies that the old and attempted new
+   values are identical and the exec would be a waste of time. */
+
+static u8 could_be_bitflip(u32 xor_val) {
+
+  u32 sh = 0;
+
+  if (!xor_val) return 1;
+
+  /* Shift left until first bit set. */
+
+  while (!(xor_val & 1)) {
+
+    ++sh;
+    xor_val >>= 1;
+
+  }
+
+  /* 1-, 2-, and 4-bit patterns are OK anywhere. */
+
+  if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;
+
+  /* 8-, 16-, and 32-bit patterns are OK only if shift factor is
+     divisible by 8, since that's the stepover for these ops. */
+
+  if (sh & 7) return 0;
+
+  if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff) return 1;
+
+  return 0;
+
+}
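+
+/* Worked example (illustrative values): 0x61 -> 0x60 gives xor_val = 0x01,
+   a single-bit pattern, so we return 1 and skip the redundant exec; 0x61 ->
+   0x75 gives xor_val = 0x14, which normalizes to 0x05 at sh = 2 - neither a
+   1/2/4-bit run nor a byte-aligned 0xff pattern - so we return 0. */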
+
+/* Helper function to see if a particular value is reachable through
+   arithmetic operations. Used for similar purposes. */
+
+static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
+
+  u32 i, ov = 0, nv = 0, diffs = 0;
+
+  if (old_val == new_val) return 1;
+
+  /* See if one-byte adjustments to any byte could produce this result. */
+
+  for (i = 0; i < blen; ++i) {
+
+    u8 a = old_val >> (8 * i), b = new_val >> (8 * i);
+
+    if (a != b) {
+
+      ++diffs;
+      ov = a;
+      nv = b;
+
+    }
+
+  }
+
+  /* If only one byte differs and the values are within range, return 1. */
+
+  if (diffs == 1) {
+
+    if ((u8)(ov - nv) <= ARITH_MAX || (u8)(nv - ov) <= ARITH_MAX) return 1;
+
+  }
+
+  if (blen == 1) return 0;
+
+  /* See if two-byte adjustments to any byte would produce this result. */
+
+  diffs = 0;
+
+  for (i = 0; i < blen / 2; ++i) {
+
+    u16 a = old_val >> (16 * i), b = new_val >> (16 * i);
+
+    if (a != b) {
+
+      ++diffs;
+      ov = a;
+      nv = b;
+
+    }
+
+  }
+
+  /* If only one word differs and the values are within range, return 1. */
+
+  if (diffs == 1) {
+
+    if ((u16)(ov - nv) <= ARITH_MAX || (u16)(nv - ov) <= ARITH_MAX) return 1;
+
+    ov = SWAP16(ov);
+    nv = SWAP16(nv);
+
+    if ((u16)(ov - nv) <= ARITH_MAX || (u16)(nv - ov) <= ARITH_MAX) return 1;
+
+  }
+
+  /* Finally, let's do the same thing for dwords. */
+
+  if (blen == 4) {
+
+    if ((u32)(old_val - new_val) <= ARITH_MAX ||
+        (u32)(new_val - old_val) <= ARITH_MAX)
+      return 1;
+
+    new_val = SWAP32(new_val);
+    old_val = SWAP32(old_val);
+
+    if ((u32)(old_val - new_val) <= ARITH_MAX ||
+        (u32)(new_val - old_val) <= ARITH_MAX)
+      return 1;
+
+  }
+
+  return 0;
+
+}
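+
+/* Worked example (illustrative values): old_val = 0x11223344 and new_val =
+   0x11223347 differ in a single byte by 3 <= ARITH_MAX, so the one-byte
+   pass returns 1; a single-byte delta of 0x60 exceeds the default
+   ARITH_MAX of 35 and falls through to the 16/32-bit and byte-swapped
+   checks instead. */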
+
+/* Last but not least, a similar helper to see if insertion of an
+   interesting integer is redundant given the insertions done for
+   shorter blen. The last param (check_le) is set if the caller
+   already executed LE insertion for current blen and wants to see
+   if BE variant passed in new_val is unique. */
+
+static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) {
+
+  u32 i, j;
+
+  if (old_val == new_val) return 1;
+
+  /* See if one-byte insertions from interesting_8 over old_val could
+     produce new_val. */
+
+  for (i = 0; i < blen; ++i) {
+
+    for (j = 0; j < sizeof(interesting_8); ++j) {
+
+      u32 tval =
+          (old_val & ~(0xff << (i * 8))) | (((u8)interesting_8[j]) << (i * 8));
+
+      if (new_val == tval) return 1;
+
+    }
+
+  }
+
+  /* Bail out unless we're also asked to examine two-byte LE insertions
+     as a preparation for BE attempts. */
+
+  if (blen == 2 && !check_le) return 0;
+
+  /* See if two-byte insertions over old_val could give us new_val. */
+
+  for (i = 0; i < blen - 1; ++i) {
+
+    for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
+
+      u32 tval = (old_val & ~(0xffff << (i * 8))) |
+                 (((u16)interesting_16[j]) << (i * 8));
+
+      if (new_val == tval) return 1;
+
+      /* Continue here only if blen > 2. */
+
+      if (blen > 2) {
+
+        tval = (old_val & ~(0xffff << (i * 8))) |
+               (SWAP16(interesting_16[j]) << (i * 8));
+
+        if (new_val == tval) return 1;
+
+      }
+
+    }
+
+  }
+
+  if (blen == 4 && check_le) {
+
+    /* See if four-byte insertions could produce the same result
+       (LE only). */
+
+    for (j = 0; j < sizeof(interesting_32) / 4; ++j)
+      if (new_val == (u32)interesting_32[j]) return 1;
+
+  }
+
+  return 0;
+
+}
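+
+/* Worked example (illustrative values): old_val = 0, new_val = 0x0000ffff,
+   blen = 4: writing the two-byte interesting value -1 (0xffff) at offset 0
+   already reproduces new_val, so we return 1 and the 32-bit interest stage
+   treats that insertion as redundant. */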
+
+#ifndef IGNORE_FINDS
+
+/* Helper function to compare buffers; returns first and last differing offset.
+   We use this to find reasonable locations for splicing two files. */
+
+static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {
+
+  s32 f_loc = -1;
+  s32 l_loc = -1;
+  u32 pos;
+
+  for (pos = 0; pos < len; ++pos) {
+
+    if (*(ptr1++) != *(ptr2++)) {
+
+      if (f_loc == -1) f_loc = pos;
+      l_loc = pos;
+
+    }
+
+  }
+
+  *first = f_loc;
+  *last = l_loc;
+
+  return;
+
+}
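+
+/* Example (illustrative): for ptr1 = "abcdef", ptr2 = "abXdeY" and len = 6,
+   the buffers differ at positions 2 and 5, so *first = 2 and *last = 5;
+   the splicing code below then draws its split point between those two
+   offsets. */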
+
+#endif /* !IGNORE_FINDS */
+
+/* Take the current entry from the queue, fuzz it for a while. This
+   function is a tad too long... returns 0 if fuzzed successfully, 1 if
+   skipped or bailed out. */
+
+u8 fuzz_one_original(char** argv) {
+
+  s32 len, fd, temp_len, i, j;
+  u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
+  u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt;
+  u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
+
+  u8 ret_val = 1, doing_det = 0;
+
+  u8  a_collect[MAX_AUTO_EXTRA];
+  u32 a_len = 0;
+
+#ifdef IGNORE_FINDS
+
+  /* In IGNORE_FINDS mode, skip any entries that weren't in the
+     initial data set. */
+
+  if (queue_cur->depth > 1) return 1;
+
+#else
+
+  if (pending_favored) {
+
+    /* If we have any favored, non-fuzzed new arrivals in the queue,
+       possibly skip to them at the expense of already-fuzzed or non-favored
+       cases. */
+
+    if (((queue_cur->was_fuzzed > 0 || queue_cur->fuzz_level > 0) ||
+         !queue_cur->favored) &&
+        UR(100) < SKIP_TO_NEW_PROB)
+      return 1;
+
+  } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
+
+    /* Otherwise, still possibly skip non-favored cases, albeit less often.
+       The odds of skipping stuff are higher for already-fuzzed inputs and
+       lower for never-fuzzed entries. */
+
+    if (queue_cycle > 1 &&
+        (queue_cur->fuzz_level == 0 || queue_cur->was_fuzzed)) {
+
+      if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
+
+    } else {
+
+      if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
+
+    }
+
+  }
+
+#endif /* ^IGNORE_FINDS */
+
+  if (not_on_tty) {
+
+    ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
+         current_entry, queued_paths, unique_crashes);
+    fflush(stdout);
+
+  }
+
+  /* Map the test case into memory. */
+
+  fd = open(queue_cur->fname, O_RDONLY);
+
+  if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
+
+  len = queue_cur->len;
+
+  orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+
+  if (orig_in == MAP_FAILED)
+    PFATAL("Unable to mmap '%s' with len %d", queue_cur->fname, len);
+
+  close(fd);
+
+  /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
+     single byte anyway, so it wouldn't give us any performance or memory usage
+     benefits. */
+
+  out_buf = ck_alloc_nozero(len);
+
+  subseq_tmouts = 0;
+
+  cur_depth = queue_cur->depth;
+
+  /*******************************************
+   * CALIBRATION (only if failed earlier on) *
+   *******************************************/
+
+  if (queue_cur->cal_failed) {
+
+    u8 res = FAULT_TMOUT;
+
+    if (queue_cur->cal_failed < CAL_CHANCES) {
+
+      res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
+
+      if (res == FAULT_ERROR) FATAL("Unable to execute target application");
+
+    }
+
+    if (stop_soon || res != crash_mode) {
+
+      ++cur_skipped_paths;
+      goto abandon_entry;
+
+    }
+
+  }
+
+  /************
+   * TRIMMING *
+   ************/
+
+  if (!dumb_mode && !queue_cur->trim_done && !custom_mutator) {
+
+    u8 res = trim_case(argv, queue_cur, in_buf);
+
+    if (res == FAULT_ERROR) FATAL("Unable to execute target application");
+
+    if (stop_soon) {
+
+      ++cur_skipped_paths;
+      goto abandon_entry;
+
+    }
+
+    /* Don't retry trimming, even if it failed. */
+
+    queue_cur->trim_done = 1;
+
+    len = queue_cur->len;
+
+  }
+
+  memcpy(out_buf, in_buf, len);
+
+  /*********************
+   * PERFORMANCE SCORE *
+   *********************/
+
+  orig_perf = perf_score = calculate_score(queue_cur);
+
+  if (perf_score == 0) goto abandon_entry;
+
+  if (custom_mutator) {
+
+    stage_short = "custom";
+    stage_name = "custom mutator";
+    stage_max = len << 3;
+    stage_val_type = STAGE_VAL_NONE;
+
+    const u32 max_seed_size = 4096 * 4096;
+    u8*       mutated_buf = ck_alloc(max_seed_size);
+
+    orig_hit_cnt = queued_paths + unique_crashes;
+
+    for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+      size_t orig_size = (size_t)len;
+      size_t mutated_size = custom_mutator(out_buf, orig_size, mutated_buf,
+                                           max_seed_size, UR(UINT32_MAX));
+      if (mutated_size > 0) {
+
+        out_buf = ck_realloc(out_buf, mutated_size);
+        memcpy(out_buf, mutated_buf, mutated_size);
+        if (common_fuzz_stuff(argv, out_buf, (u32)mutated_size)) {
+
+          goto abandon_entry;
+
+        }
+
+      }
+
+    }
+
+    ck_free(mutated_buf);
+    new_hit_cnt = queued_paths + unique_crashes;
+
+    stage_finds[STAGE_CUSTOM_MUTATOR] += new_hit_cnt - orig_hit_cnt;
+    stage_cycles[STAGE_CUSTOM_MUTATOR] += stage_max;
+    goto abandon_entry;
+
+  }
+
+  /* Skip right away if -d is given, if it has not been chosen sufficiently
+     often to warrant the expensive deterministic stage (fuzz_level), or
+     if it has gone through deterministic testing in earlier, resumed runs
+     (passed_det). */
+
+  if (skip_deterministic ||
+      ((!queue_cur->passed_det) &&
+       perf_score < (queue_cur->depth * 30 <= havoc_max_mult * 100
+                         ? queue_cur->depth * 30
+                         : havoc_max_mult * 100)) ||
+      queue_cur->passed_det)
+#ifdef USE_PYTHON
+    goto python_stage;
+#else
+    goto havoc_stage;
+#endif
+
+  /* Skip deterministic fuzzing if exec path checksum puts this out of scope
+     for this master instance. */
+
+  if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
+#ifdef USE_PYTHON
+    goto python_stage;
+#else
+    goto havoc_stage;
+#endif
+
+  doing_det = 1;
+
+  /*********************************************
+   * SIMPLE BITFLIP (+dictionary construction) *
+   *********************************************/
+
+#define FLIP_BIT(_ar, _b)                   \
+  do {                                      \
+                                            \
+    u8* _arf = (u8*)(_ar);                  \
+    u32 _bf = (_b);                         \
+    _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \
+                                            \
+  } while (0)
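+
+  /* Illustrative trace: FLIP_BIT(out_buf, 10) XORs out_buf[10 >> 3] =
+     out_buf[1] with 128 >> (10 & 7) = 0x20, toggling exactly one bit;
+     applying the same call again restores the byte, which is what lets the
+     stages below flip, run the target, and flip back in place. */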
+
+  /* Single walking bit. */
+
+  stage_short = "flip1";
+  stage_max = len << 3;
+  stage_name = "bitflip 1/1";
+
+  stage_val_type = STAGE_VAL_NONE;
+
+  orig_hit_cnt = queued_paths + unique_crashes;
+
+  prev_cksum = queue_cur->exec_cksum;
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    stage_cur_byte = stage_cur >> 3;
+
+    FLIP_BIT(out_buf, stage_cur);
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+    FLIP_BIT(out_buf, stage_cur);
+
+    /* While flipping the least significant bit in every byte, pull off an extra
+       trick to detect possible syntax tokens. In essence, the idea is that if
+       you have a binary blob like this:
+
+       xxxxxxxxIHDRxxxxxxxx
+
+       ...and changing the leading and trailing bytes causes variable or no
+       changes in program flow, but touching any character in the "IHDR" string
+       always produces the same, distinctive path, it's highly likely that
+       "IHDR" is an atomically-checked magic value of special significance to
+       the fuzzed format.
+
+       We do this here, rather than as a separate stage, because it's a nice
+       way to keep the operation approximately "free" (i.e., no extra execs).
+
+       Empirically, performing the check when flipping the least significant bit
+       is advantageous, compared to doing it at the time of more disruptive
+       changes, where the program flow may be affected in more violent ways.
+
+       The caveat is that we won't generate dictionaries in the -d mode or -S
+       mode - but that's probably a fair trade-off.
+
+       This won't work particularly well with paths that exhibit variable
+       behavior, but fails gracefully, so we'll carry out the checks anyway.
+
+      */
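+
+    /* Concrete sketch (illustrative): for an input containing "IHDR",
+       flipping the LSB of 'I', 'H', 'D' and 'R' yields the same "bad
+       magic" checksum four times running, so those bytes accumulate in
+       a_collect; once a flip lands past the token, the checksum changes
+       and, as 3 <= 4 <= MAX_AUTO_EXTRA with the default MIN_AUTO_EXTRA of
+       3, the candidate token is handed to maybe_add_auto(). */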
+
+    if (!dumb_mode && (stage_cur & 7) == 7) {
+
+      u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+
+      if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
+
+        /* If at end of file and we are still collecting a string, grab the
+           final character and force output. */
+
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        ++a_len;
+
+        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
+          maybe_add_auto(a_collect, a_len);
+
+      } else if (cksum != prev_cksum) {
+
+        /* Otherwise, if the checksum has changed, see if we have something
+           worthwhile queued up, and collect that if the answer is yes. */
+
+        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
+          maybe_add_auto(a_collect, a_len);
+
+        a_len = 0;
+        prev_cksum = cksum;
+
+      }
+
+      /* Continue collecting string, but only if the bit flip actually made
+         any difference - we don't want no-op tokens. */
+
+      if (cksum != queue_cur->exec_cksum) {
+
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        ++a_len;
+
+      }
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP1] += stage_max;
+
+  /* Two walking bits. */
+
+  stage_name = "bitflip 2/1";
+  stage_short = "flip2";
+  stage_max = (len << 3) - 1;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    stage_cur_byte = stage_cur >> 3;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP2] += stage_max;
+
+  /* Four walking bits. */
+
+  stage_name = "bitflip 4/1";
+  stage_short = "flip4";
+  stage_max = (len << 3) - 3;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    stage_cur_byte = stage_cur >> 3;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, stage_cur + 2);
+    FLIP_BIT(out_buf, stage_cur + 3);
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, stage_cur + 2);
+    FLIP_BIT(out_buf, stage_cur + 3);
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP4] += stage_max;
+
+  /* Effector map setup. These macros calculate:
+
+     EFF_APOS      - position of a particular file offset in the map.
+     EFF_ALEN      - length of a map with a particular number of bytes.
+     EFF_SPAN_ALEN - map span for a sequence of bytes.
+
+   */
+
+#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
+#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
+#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
+#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1)
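+
+  /* Worked example (with the default EFF_MAP_SCALE2 of 3, i.e. one map byte
+     per 8 input bytes): EFF_APOS(9) = 1; EFF_ALEN(17) = 2 + 1 = 3, two full
+     blocks plus a remainder; and EFF_SPAN_ALEN(6, 4) = EFF_APOS(9) -
+     EFF_APOS(6) + 1 = 2, since a four-byte write at offset 6 straddles two
+     blocks. */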
+
+  /* Initialize effector map for the next step (see comments below). Always
+     flag first and last byte as doing something. */
+
+  eff_map = ck_alloc(EFF_ALEN(len));
+  eff_map[0] = 1;
+
+  if (EFF_APOS(len - 1) != 0) {
+
+    eff_map[EFF_APOS(len - 1)] = 1;
+    ++eff_cnt;
+
+  }
+
+  /* Walking byte. */
+
+  stage_name = "bitflip 8/8";
+  stage_short = "flip8";
+  stage_max = len;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    stage_cur_byte = stage_cur;
+
+    out_buf[stage_cur] ^= 0xFF;
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+    /* We also use this stage to pull off a simple trick: we identify
+       bytes that seem to have no effect on the current execution path
+       even when fully flipped - and we skip them during more expensive
+       deterministic stages, such as arithmetics or known ints. */
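+
+    /* As an illustration, a byte in a long zero-padding run usually leaves
+       the path checksum untouched even when fully inverted; its eff_map
+       slot then stays 0, and the arith 8/8 stage alone skips 2 * ARITH_MAX
+       execs for that byte. */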
+
+    if (!eff_map[EFF_APOS(stage_cur)]) {
+
+      u32 cksum;
+
+      /* If in dumb mode or if the file is very short, just flag everything
+         without wasting time on checksums. */
+
+      if (!dumb_mode && len >= EFF_MIN_LEN)
+        cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      else
+        cksum = ~queue_cur->exec_cksum;
+
+      if (cksum != queue_cur->exec_cksum) {
+
+        eff_map[EFF_APOS(stage_cur)] = 1;
+        ++eff_cnt;
+
+      }
+
+    }
+
+    out_buf[stage_cur] ^= 0xFF;
+
+  }
+
+  /* If the effector map is more than EFF_MAX_PERC dense, just flag the
+     whole thing as worth fuzzing, since we wouldn't be saving much time
+     anyway. */
+
+  if (eff_cnt != EFF_ALEN(len) &&
+      eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
+
+    memset(eff_map, 1, EFF_ALEN(len));
+
+    blocks_eff_select += EFF_ALEN(len);
+
+  } else {
+
+    blocks_eff_select += eff_cnt;
+
+  }
+
+  blocks_eff_total += EFF_ALEN(len);
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP8] += stage_max;
+
+  /* Two walking bytes. */
+
+  if (len < 2) goto skip_bitflip;
+
+  stage_name = "bitflip 16/8";
+  stage_short = "flip16";
+  stage_cur = 0;
+  stage_max = len - 1;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 1; ++i) {
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
+
+      --stage_max;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    *(u16*)(out_buf + i) ^= 0xFFFF;
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    ++stage_cur;
+
+    *(u16*)(out_buf + i) ^= 0xFFFF;
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP16] += stage_max;
+
+  if (len < 4) goto skip_bitflip;
+
+  /* Four walking bytes. */
+
+  stage_name = "bitflip 32/8";
+  stage_short = "flip32";
+  stage_cur = 0;
+  stage_max = len - 3;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 3; ++i) {
+
+    /* Let's consult the effector map... */
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
+
+      --stage_max;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    ++stage_cur;
+
+    *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP32] += stage_max;
+
+skip_bitflip:
+
+  if (no_arith) goto skip_arith;
+
+  /**********************
+   * ARITHMETIC INC/DEC *
+   **********************/
+
+  /* 8-bit arithmetics. */
+
+  stage_name = "arith 8/8";
+  stage_short = "arith8";
+  stage_cur = 0;
+  stage_max = 2 * len * ARITH_MAX;
+
+  stage_val_type = STAGE_VAL_LE;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len; ++i) {
+
+    u8 orig = out_buf[i];
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)]) {
+
+      stage_max -= 2 * ARITH_MAX;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 1; j <= ARITH_MAX; ++j) {
+
+      u8 r = orig ^ (orig + j);
+
+      /* Do arithmetic operations only if the result couldn't be a product
+         of a bitflip. */
+
+      if (!could_be_bitflip(r)) {
+
+        stage_cur_val = j;
+        out_buf[i] = orig + j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      r = orig ^ (orig - j);
+
+      if (!could_be_bitflip(r)) {
+
+        stage_cur_val = -j;
+        out_buf[i] = orig - j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      out_buf[i] = orig;
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH8] += stage_max;
+
+  /* 16-bit arithmetics, both endians. */
+
+  if (len < 2) goto skip_arith;
+
+  stage_name = "arith 16/8";
+  stage_short = "arith16";
+  stage_cur = 0;
+  stage_max = 4 * (len - 1) * ARITH_MAX;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 1; ++i) {
+
+    u16 orig = *(u16*)(out_buf + i);
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
+
+      stage_max -= 4 * ARITH_MAX;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 1; j <= ARITH_MAX; ++j) {
+
+      u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
+          r3 = orig ^ SWAP16(SWAP16(orig) + j),
+          r4 = orig ^ SWAP16(SWAP16(orig) - j);
+
+      /* Try little endian addition and subtraction first. Do it only
+         if the operation would affect more than one byte (hence the
+         & 0xff overflow checks) and if it couldn't be a product of
+         a bitflip. */
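+
+      /* Example (illustrative): for orig = 0x00ff and j = 1, (orig & 0xff)
+         + j = 0x100 > 0xff, so the add carries into the high byte and is
+         out of reach of the one-byte pass; could_be_bitflip(0x00ff ^
+         0x0100) is 0 as well, so this exec is worth spending. */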
+
+      stage_val_type = STAGE_VAL_LE;
+
+      if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
+
+        stage_cur_val = j;
+        *(u16*)(out_buf + i) = orig + j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
+
+        stage_cur_val = -j;
+        *(u16*)(out_buf + i) = orig - j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      /* Big endian comes next. Same deal. */
+
+      stage_val_type = STAGE_VAL_BE;
+
+      if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
+
+        stage_cur_val = j;
+        *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((orig >> 8) < j && !could_be_bitflip(r4)) {
+
+        stage_cur_val = -j;
+        *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      *(u16*)(out_buf + i) = orig;
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH16] += stage_max;
+
+  /* 32-bit arithmetics, both endians. */
+
+  if (len < 4) goto skip_arith;
+
+  stage_name = "arith 32/8";
+  stage_short = "arith32";
+  stage_cur = 0;
+  stage_max = 4 * (len - 3) * ARITH_MAX;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 3; ++i) {
+
+    u32 orig = *(u32*)(out_buf + i);
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
+
+      stage_max -= 4 * ARITH_MAX;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 1; j <= ARITH_MAX; ++j) {
+
+      u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
+          r3 = orig ^ SWAP32(SWAP32(orig) + j),
+          r4 = orig ^ SWAP32(SWAP32(orig) - j);
+
+      /* Little endian first. Same deal as with 16-bit: we only want to
+         try if the operation would have effect on more than two bytes. */
+
+      stage_val_type = STAGE_VAL_LE;
+
+      if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
+
+        stage_cur_val = j;
+        *(u32*)(out_buf + i) = orig + j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
+
+        stage_cur_val = -j;
+        *(u32*)(out_buf + i) = orig - j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      /* Big endian next. */
+
+      stage_val_type = STAGE_VAL_BE;
+
+      if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
+
+        stage_cur_val = j;
+        *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
+
+        stage_cur_val = -j;
+        *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      *(u32*)(out_buf + i) = orig;
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH32] += stage_max;
+
+skip_arith:
+
+  /**********************
+   * INTERESTING VALUES *
+   **********************/
+
+  stage_name = "interest 8/8";
+  stage_short = "int8";
+  stage_cur = 0;
+  stage_max = len * sizeof(interesting_8);
+
+  stage_val_type = STAGE_VAL_LE;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  /* Setting 8-bit integers. */
+
+  for (i = 0; i < len; ++i) {
+
+    u8 orig = out_buf[i];
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)]) {
+
+      stage_max -= sizeof(interesting_8);
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 0; j < sizeof(interesting_8); ++j) {
+
+      /* Skip if the value could be a product of bitflips or arithmetics. */
+
+      if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
+          could_be_arith(orig, (u8)interesting_8[j], 1)) {
+
+        --stage_max;
+        continue;
+
+      }
+
+      stage_cur_val = interesting_8[j];
+      out_buf[i] = interesting_8[j];
+
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+      out_buf[i] = orig;
+      ++stage_cur;
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST8] += stage_max;
+
+  /* Setting 16-bit integers, both endians. */
+
+  if (no_arith || len < 2) goto skip_interest;
+
+  stage_name = "interest 16/8";
+  stage_short = "int16";
+  stage_cur = 0;
+  stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 1; ++i) {
+
+    u16 orig = *(u16*)(out_buf + i);
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
+
+      stage_max -= sizeof(interesting_16);
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
+
+      stage_cur_val = interesting_16[j];
+
+      /* Skip if this could be a product of a bitflip, arithmetics,
+         or single-byte interesting value insertion. */
+
+      if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) &&
+          !could_be_arith(orig, (u16)interesting_16[j], 2) &&
+          !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
+
+        stage_val_type = STAGE_VAL_LE;
+
+        *(u16*)(out_buf + i) = interesting_16[j];
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
+          !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
+          !could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
+          !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
+
+        stage_val_type = STAGE_VAL_BE;
+
+        *(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+    }
+
+    *(u16*)(out_buf + i) = orig;
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST16] += stage_max;
+
+  if (len < 4) goto skip_interest;
+
+  /* Setting 32-bit integers, both endians. */
+
+  stage_name = "interest 32/8";
+  stage_short = "int32";
+  stage_cur = 0;
+  stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 3; i++) {
+
+    u32 orig = *(u32*)(out_buf + i);
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
+
+      stage_max -= sizeof(interesting_32) >> 1;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
+
+      stage_cur_val = interesting_32[j];
+
+      /* Skip if this could be a product of a bitflip, arithmetics,
+         or word interesting value insertion. */
+
+      if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) &&
+          !could_be_arith(orig, interesting_32[j], 4) &&
+          !could_be_interest(orig, interesting_32[j], 4, 0)) {
+
+        stage_val_type = STAGE_VAL_LE;
+
+        *(u32*)(out_buf + i) = interesting_32[j];
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
+          !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
+          !could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
+          !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
+
+        stage_val_type = STAGE_VAL_BE;
+
+        *(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+    }
+
+    *(u32*)(out_buf + i) = orig;
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST32] += stage_max;
+
+skip_interest:
+
+  /********************
+   * DICTIONARY STUFF *
+   ********************/
+
+  if (!extras_cnt) goto skip_user_extras;
+
+  /* Overwrite with user-supplied extras. */
+
+  stage_name = "user extras (over)";
+  stage_short = "ext_UO";
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
+
+  stage_val_type = STAGE_VAL_NONE;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len; ++i) {
+
+    u32 last_len = 0;
+
+    stage_cur_byte = i;
+
+    /* Extras are sorted by size, from smallest to largest. This means
+       that we don't have to worry about restoring the buffer in
+       between writes at a particular offset determined by the outer
+       loop. */
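+
+    /* E.g. (illustrative) with extras of len 3 and then len 5 tried at the
+       same offset i, the five-byte write fully covers the three-byte one,
+       so a single memcpy of last_len bytes from in_buf afterwards undoes
+       the whole inner loop. */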
+
+    for (j = 0; j < extras_cnt; ++j) {
+
+      /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
+         skip them if there's no room to insert the payload, if the token
+         is redundant, or if its entire span has no bytes set in the effector
+         map. */
+
+      if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
+          extras[j].len > len - i ||
+          !memcmp(extras[j].data, out_buf + i, extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
+
+        --stage_max;
+        continue;
+
+      }
+
+      last_len = extras[j].len;
+      memcpy(out_buf + i, extras[j].data, last_len);
+
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+      ++stage_cur;
+
+    }
+
+    /* Restore all the clobbered memory. */
+    memcpy(out_buf + i, in_buf + i, last_len);
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_UO] += stage_max;
+
+  /* Insertion of user-supplied extras. */
+
+  stage_name = "user extras (insert)";
+  stage_short = "ext_UI";
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  ex_tmp = ck_alloc(len + MAX_DICT_FILE);
+
+  for (i = 0; i <= len; ++i) {
+
+    stage_cur_byte = i;
+
+    for (j = 0; j < extras_cnt; ++j) {
+
+      if (len + extras[j].len > MAX_FILE) {
+
+        --stage_max;
+        continue;
+
+      }
+
+      /* Insert token */
+      memcpy(ex_tmp + i, extras[j].data, extras[j].len);
+
+      /* Copy tail */
+      memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
+
+      if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
+
+        ck_free(ex_tmp);
+        goto abandon_entry;
+
+      }
+
+      ++stage_cur;
+
+    }
+
+    /* Copy head */
+    ex_tmp[i] = out_buf[i];
+
+  }
+
+  ck_free(ex_tmp);
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_UI] += stage_max;
+
+skip_user_extras:
+
+  if (!a_extras_cnt) goto skip_extras;
+
+  stage_name = "auto extras (over)";
+  stage_short = "ext_AO";
+  stage_cur = 0;
+  stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
+
+  stage_val_type = STAGE_VAL_NONE;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len; ++i) {
+
+    u32 last_len = 0;
+
+    stage_cur_byte = i;
+
+    for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
+
+      /* See the comment in the earlier code; extras are sorted by size. */
+
+      if (a_extras[j].len > len - i ||
+          !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1,
+                  EFF_SPAN_ALEN(i, a_extras[j].len))) {
+
+        --stage_max;
+        continue;
+
+      }
+
+      last_len = a_extras[j].len;
+      memcpy(out_buf + i, a_extras[j].data, last_len);
+
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+      ++stage_cur;
+
+    }
+
+    /* Restore all the clobbered memory. */
+    memcpy(out_buf + i, in_buf + i, last_len);
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_AO] += stage_max;
+
+skip_extras:
+
+  /* If we made it this far without jumping to havoc_stage or abandon_entry,
+     we're properly done with deterministic steps and can mark it as such
+     in the .state/ directory. */
+
+  if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
+
+#ifdef USE_PYTHON
+python_stage:
+  /**********************************
+   * EXTERNAL MUTATORS (Python API) *
+   **********************************/
+
+  if (!py_module) goto havoc_stage;
+
+  stage_name = "python";
+  stage_short = "python";
+  stage_max = HAVOC_CYCLES * perf_score / havoc_div / 100;
+
+  if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
+
+  orig_hit_cnt = queued_paths + unique_crashes;
+
+  char*  retbuf = NULL;
+  size_t retlen = 0;
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    struct queue_entry* target;
+    u32                 tid;
+    u8*                 new_buf;
+
+  retry_external_pick:
+    /* Pick another random queue entry to pass to the external API. */
+    do {
+
+      tid = UR(queued_paths);
+
+    } while (tid == current_entry && queued_paths > 1);
+
+    target = queue;
+
+    while (tid >= 100) {
+
+      target = target->next_100;
+      tid -= 100;
+
+    }
+
+    while (tid--)
+      target = target->next;
+
+    /* Make sure that the target has a reasonable length. */
+
+    while (target && (target->len < 2 || target == queue_cur) &&
+           queued_paths > 1) {
+
+      target = target->next;
+      ++splicing_with;
+
+    }
+
+    if (!target) goto retry_external_pick;
+
+    /* Read the additional testcase into a new buffer. */
+    fd = open(target->fname, O_RDONLY);
+    if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
+    new_buf = ck_alloc_nozero(target->len);
+    ck_read(fd, new_buf, target->len, target->fname);
+    close(fd);
+
+    fuzz_py(out_buf, len, new_buf, target->len, &retbuf, &retlen);
+
+    ck_free(new_buf);
+
+    if (retbuf) {
+
+      if (!retlen) {
+
+        free(retbuf);
+        goto abandon_entry;
+
+      }
+
+      if (common_fuzz_stuff(argv, retbuf, retlen)) {
+
+        free(retbuf);
+        goto abandon_entry;
+
+      }
+
+      /* Reset retbuf/retlen */
+      free(retbuf);
+      retbuf = NULL;
+      retlen = 0;
+
+      /* If we're finding new stuff, let's run for a bit longer, limits
+         permitting. */
+
+      if (queued_paths != havoc_queued) {
+
+        if (perf_score <= havoc_max_mult * 100) {
+
+          stage_max *= 2;
+          perf_score *= 2;
+
+        }
+
+        havoc_queued = queued_paths;
+
+      }
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_PYTHON] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_PYTHON] += stage_max;
+
+  if (python_only) {
+
+    /* Skip other stages */
+    ret_val = 0;
+    goto abandon_entry;
+
+  }
+
+#endif
+
+  /****************
+   * RANDOM HAVOC *
+   ****************/
+
+havoc_stage:
+
+  stage_cur_byte = -1;
+
+  /* The havoc stage mutation code is also invoked when splicing files; if the
+     splice_cycle variable is set, generate different descriptions and such. */
+
+  if (!splice_cycle) {
+
+    stage_name = "havoc";
+    stage_short = "havoc";
+    stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
+                havoc_div / 100;
+
+  } else {
+
+    static u8 tmp[32];
+
+    perf_score = orig_perf;
+
+    sprintf(tmp, "splice %u", splice_cycle);
+    stage_name = tmp;
+    stage_short = "splice";
+    stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+
+  }
+
+  if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
+
+  temp_len = len;
+
+  orig_hit_cnt = queued_paths + unique_crashes;
+
+  havoc_queued = queued_paths;
+
+  /* We essentially just do several thousand runs (depending on perf_score)
+     where we take the input file and make random stacked tweaks. */
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
+
+    stage_cur_val = use_stacking;
+
+    for (i = 0; i < use_stacking; ++i) {
+
+      switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) {
+
+        case 0:
+
+          /* Flip a single bit somewhere. Spooky! */
+
+          FLIP_BIT(out_buf, UR(temp_len << 3));
+          break;
+
+        case 1:
+
+          /* Set byte to interesting value. */
+
+          out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
+          break;
+
+        case 2:
+
+          /* Set word to interesting value, randomly choosing endian. */
+
+          if (temp_len < 2) break;
+
+          if (UR(2)) {
+
+            *(u16*)(out_buf + UR(temp_len - 1)) =
+                interesting_16[UR(sizeof(interesting_16) >> 1)];
+
+          } else {
+
+            *(u16*)(out_buf + UR(temp_len - 1)) =
+                SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]);
+
+          }
+
+          break;
+
+        case 3:
+
+          /* Set dword to interesting value, randomly choosing endian. */
+
+          if (temp_len < 4) break;
+
+          if (UR(2)) {
+
+            *(u32*)(out_buf + UR(temp_len - 3)) =
+                interesting_32[UR(sizeof(interesting_32) >> 2)];
+
+          } else {
+
+            *(u32*)(out_buf + UR(temp_len - 3)) =
+                SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]);
+
+          }
+
+          break;
+
+        case 4:
+
+          /* Randomly subtract from byte. */
+
+          out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
+          break;
+
+        case 5:
+
+          /* Randomly add to byte. */
+
+          out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
+          break;
+
+        case 6:
+
+          /* Randomly subtract from word, random endian. */
+
+          if (temp_len < 2) break;
+
+          if (UR(2)) {
+
+            u32 pos = UR(temp_len - 1);
+
+            *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+
+          } else {
+
+            u32 pos = UR(temp_len - 1);
+            u16 num = 1 + UR(ARITH_MAX);
+
+            *(u16*)(out_buf + pos) =
+                SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
+
+          }
+
+          break;
+
+        case 7:
+
+          /* Randomly add to word, random endian. */
+
+          if (temp_len < 2) break;
+
+          if (UR(2)) {
+
+            u32 pos = UR(temp_len - 1);
+
+            *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
+
+          } else {
+
+            u32 pos = UR(temp_len - 1);
+            u16 num = 1 + UR(ARITH_MAX);
+
+            *(u16*)(out_buf + pos) =
+                SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
+
+          }
+
+          break;
+
+        case 8:
+
+          /* Randomly subtract from dword, random endian. */
+
+          if (temp_len < 4) break;
+
+          if (UR(2)) {
+
+            u32 pos = UR(temp_len - 3);
+
+            *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+
+          } else {
+
+            u32 pos = UR(temp_len - 3);
+            u32 num = 1 + UR(ARITH_MAX);
+
+            *(u32*)(out_buf + pos) =
+                SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
+
+          }
+
+          break;
+
+        case 9:
+
+          /* Randomly add to dword, random endian. */
+
+          if (temp_len < 4) break;
+
+          if (UR(2)) {
+
+            u32 pos = UR(temp_len - 3);
+
+            *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
+
+          } else {
+
+            u32 pos = UR(temp_len - 3);
+            u32 num = 1 + UR(ARITH_MAX);
+
+            *(u32*)(out_buf + pos) =
+                SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
+
+          }
+
+          break;
+
+        case 10:
+
+          /* Just set a random byte to a random value. Because,
+             why not. We use XOR with 1-255 to eliminate the
+             possibility of a no-op. */
+
+          out_buf[UR(temp_len)] ^= 1 + UR(255);
+          break;
+
+        case 11 ... 12: {
+
+          /* Delete bytes. We're making this a bit more likely
+             than insertion (the next option) in hopes of keeping
+             files reasonably small. */
+
+          u32 del_from, del_len;
+
+          if (temp_len < 2) break;
+
+          /* Don't delete too much. */
+
+          del_len = choose_block_len(temp_len - 1);
+
+          del_from = UR(temp_len - del_len + 1);
+
+          memmove(out_buf + del_from, out_buf + del_from + del_len,
+                  temp_len - del_from - del_len);
+
+          temp_len -= del_len;
+
+          break;
+
+        }
+
+        case 13:
+
+          if (temp_len + HAVOC_BLK_XL < MAX_FILE) {
+
+            /* Clone bytes (75%) or insert a block of constant bytes (25%). */
+
+            u8  actually_clone = UR(4);
+            u32 clone_from, clone_to, clone_len;
+            u8* new_buf;
+
+            if (actually_clone) {
+
+              clone_len = choose_block_len(temp_len);
+              clone_from = UR(temp_len - clone_len + 1);
+
+            } else {
+
+              clone_len = choose_block_len(HAVOC_BLK_XL);
+              clone_from = 0;
+
+            }
+
+            clone_to = UR(temp_len);
+
+            new_buf = ck_alloc_nozero(temp_len + clone_len);
+
+            /* Head */
+
+            memcpy(new_buf, out_buf, clone_to);
+
+            /* Inserted part */
+
+            if (actually_clone)
+              memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
+            else
+              memset(new_buf + clone_to,
+                     UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
+
+            /* Tail */
+            memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
+                   temp_len - clone_to);
+
+            ck_free(out_buf);
+            out_buf = new_buf;
+            temp_len += clone_len;
+
+          }
+
+          break;
+
+        case 14: {
+
+          /* Overwrite bytes with a randomly selected chunk (75%) or fixed
+             bytes (25%). */
+
+          u32 copy_from, copy_to, copy_len;
+
+          if (temp_len < 2) break;
+
+          copy_len = choose_block_len(temp_len - 1);
+
+          copy_from = UR(temp_len - copy_len + 1);
+          copy_to = UR(temp_len - copy_len + 1);
+
+          if (UR(4)) {
+
+            if (copy_from != copy_to)
+              memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
+
+          } else
+
+            memset(out_buf + copy_to, UR(2) ? UR(256) : out_buf[UR(temp_len)],
+                   copy_len);
+
+          break;
+
+        }
+
+          /* Values 15 and 16 can be selected only if there are any extras
+             present in the dictionaries. */
+
+        case 15: {
+
+          /* Overwrite bytes with an extra. */
+
+          if (!extras_cnt || (a_extras_cnt && UR(2))) {
+
+            /* No user-specified extras or odds in our favor. Let's use an
+               auto-detected one. */
+
+            u32 use_extra = UR(a_extras_cnt);
+            u32 extra_len = a_extras[use_extra].len;
+            u32 insert_at;
+
+            if (extra_len > temp_len) break;
+
+            insert_at = UR(temp_len - extra_len + 1);
+            memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len);
+
+          } else {
+
+            /* No auto extras or odds in our favor. Use the dictionary. */
+
+            u32 use_extra = UR(extras_cnt);
+            u32 extra_len = extras[use_extra].len;
+            u32 insert_at;
+
+            if (extra_len > temp_len) break;
+
+            insert_at = UR(temp_len - extra_len + 1);
+            memcpy(out_buf + insert_at, extras[use_extra].data, extra_len);
+
+          }
+
+          break;
+
+        }
+
+        case 16: {
+
+          u32 use_extra, extra_len, insert_at = UR(temp_len + 1);
+          u8* new_buf;
+
+          /* Insert an extra. Do the same dice-rolling stuff as for the
+             previous case. */
+
+          if (!extras_cnt || (a_extras_cnt && UR(2))) {
+
+            use_extra = UR(a_extras_cnt);
+            extra_len = a_extras[use_extra].len;
+
+            if (temp_len + extra_len >= MAX_FILE) break;
+
+            new_buf = ck_alloc_nozero(temp_len + extra_len);
+
+            /* Head */
+            memcpy(new_buf, out_buf, insert_at);
+
+            /* Inserted part */
+            memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len);
+
+          } else {
+
+            use_extra = UR(extras_cnt);
+            extra_len = extras[use_extra].len;
+
+            if (temp_len + extra_len >= MAX_FILE) break;
+
+            new_buf = ck_alloc_nozero(temp_len + extra_len);
+
+            /* Head */
+            memcpy(new_buf, out_buf, insert_at);
+
+            /* Inserted part */
+            memcpy(new_buf + insert_at, extras[use_extra].data, extra_len);
+
+          }
+
+          /* Tail */
+          memcpy(new_buf + insert_at + extra_len, out_buf + insert_at,
+                 temp_len - insert_at);
+
+          ck_free(out_buf);
+          out_buf = new_buf;
+          temp_len += extra_len;
+
+          break;
+
+        }
+
+      }
+
+    }
+
+    if (common_fuzz_stuff(argv, out_buf, temp_len)) goto abandon_entry;
+
+    /* out_buf might have been mangled a bit, so let's restore it to its
+       original size and shape. */
+
+    if (temp_len < len) out_buf = ck_realloc(out_buf, len);
+    temp_len = len;
+    memcpy(out_buf, in_buf, len);
+
+    /* If we're finding new stuff, let's run for a bit longer, limits
+       permitting. */
+
+    if (queued_paths != havoc_queued) {
+
+      if (perf_score <= havoc_max_mult * 100) {
+
+        stage_max *= 2;
+        perf_score *= 2;
+
+      }
+
+      havoc_queued = queued_paths;
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  if (!splice_cycle) {
+
+    stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt;
+    stage_cycles[STAGE_HAVOC] += stage_max;
+
+  } else {
+
+    stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt;
+    stage_cycles[STAGE_SPLICE] += stage_max;
+
+  }
+
+#ifndef IGNORE_FINDS
+
+  /************
+   * SPLICING *
+   ************/
+
+  /* This is a last-resort strategy triggered by a full round with no findings.
+     It takes the current input file, randomly selects another input, and
+     splices them together at some offset, then relies on the havoc
+     code to mutate that blob. */
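+
+  /* Example (illustrative): if the two cases first differ at byte 4 and
+     last differ at byte 60, split_at is drawn from [4, 60); the spliced
+     buffer keeps in_buf's head up to split_at and the target's tail from
+     there on, then re-enters the havoc loop. */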
+
+retry_splicing:
+
+  if (use_splicing && splice_cycle++ < SPLICE_CYCLES && queued_paths > 1 &&
+      queue_cur->len > 1) {
+
+    struct queue_entry* target;
+    u32                 tid, split_at;
+    u8*                 new_buf;
+    s32                 f_diff, l_diff;
+
+    /* First of all, if we've modified in_buf for havoc, let's clean that
+       up... */
+
+    if (in_buf != orig_in) {
+
+      ck_free(in_buf);
+      in_buf = orig_in;
+      len = queue_cur->len;
+
+    }
+
+    /* Pick a random queue entry and seek to it. Don't splice with yourself. */
+
+    do {
+
+      tid = UR(queued_paths);
+
+    } while (tid == current_entry);
+
+    splicing_with = tid;
+    target = queue;
+
+    while (tid >= 100) {
+
+      target = target->next_100;
+      tid -= 100;
+
+    }
+
+    while (tid--)
+      target = target->next;
+
+    /* Make sure that the target has a reasonable length. */
+
+    while (target && (target->len < 2 || target == queue_cur)) {
+
+      target = target->next;
+      ++splicing_with;
+
+    }
+
+    if (!target) goto retry_splicing;
+
+    /* Read the testcase into a new buffer. */
+
+    fd = open(target->fname, O_RDONLY);
+
+    if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
+
+    new_buf = ck_alloc_nozero(target->len);
+
+    ck_read(fd, new_buf, target->len, target->fname);
+
+    close(fd);
+
+    /* Find a suitable splicing location, somewhere between the first and
+       the last differing byte. Bail out if the difference is just a single
+       byte or so. */
+
+    locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
+
+    if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
+
+      ck_free(new_buf);
+      goto retry_splicing;
+
+    }
+
+    /* Split somewhere between the first and last differing byte. */
+
+    split_at = f_diff + UR(l_diff - f_diff);
+
+    /* Do the thing. */
+
+    len = target->len;
+    memcpy(new_buf, in_buf, split_at);
+    in_buf = new_buf;
+
+    ck_free(out_buf);
+    out_buf = ck_alloc_nozero(len);
+    memcpy(out_buf, in_buf, len);
+
+#  ifdef USE_PYTHON
+    goto python_stage;
+#  else
+    goto havoc_stage;
+#  endif
+
+  }
+
+#endif /* !IGNORE_FINDS */
+
+  ret_val = 0;
+
+abandon_entry:
+
+  splicing_with = -1;
+
+  /* Update pending_not_fuzzed count if we made it through the calibration
+     cycle and have not seen this entry before. */
+
+  if (!stop_soon && !queue_cur->cal_failed &&
+      (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) {
+
+    --pending_not_fuzzed;
+    queue_cur->was_fuzzed = 1;
+    if (queue_cur->favored) --pending_favored;
+
+  }
+
+  ++queue_cur->fuzz_level;
+
+  munmap(orig_in, queue_cur->len);
+
+  if (in_buf != orig_in) ck_free(in_buf);
+  ck_free(out_buf);
+  ck_free(eff_map);
+
+  return ret_val;
+
+#undef FLIP_BIT
+
+}
+
+/* MOpt mode */
+u8 pilot_fuzzing(char** argv) {
+
+  s32 len, fd, temp_len, i, j;
+  u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
+  u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv;
+  u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
+
+  u8 ret_val = 1, doing_det = 0;
+
+  u8  a_collect[MAX_AUTO_EXTRA];
+  u32 a_len = 0;
+
+#ifdef IGNORE_FINDS
+
+  /* In IGNORE_FINDS mode, skip any entries that weren't in the
+     initial data set. */
+
+  if (queue_cur->depth > 1) return 1;
+
+#else
+
+  if (pending_favored) {
+
+    /* If we have any favored, non-fuzzed new arrivals in the queue,
+       possibly skip to them at the expense of already-fuzzed or non-favored
+       cases. */
+
+    if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
+        UR(100) < SKIP_TO_NEW_PROB)
+      return 1;
+
+  } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
+
+    /* Otherwise, still possibly skip non-favored cases, albeit less often.
+       The odds of skipping stuff are higher for already-fuzzed inputs and
+       lower for never-fuzzed entries. */
+
+    if (queue_cycle > 1 && !queue_cur->was_fuzzed) {
+
+      if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
+
+    } else {
+
+      if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
+
+    }
+
+  }
+
+#endif /* ^IGNORE_FINDS */
+
+  if (not_on_tty) {
+
+    ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
+         current_entry, queued_paths, unique_crashes);
+    fflush(stdout);
+
+  }
+
+  /* Map the test case into memory. */
+
+  fd = open(queue_cur->fname, O_RDONLY);
+
+  if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
+
+  len = queue_cur->len;
+
+  orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+
+  if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname);
+
+  close(fd);
+
+  /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
+     single byte anyway, so it wouldn't give us any performance or memory usage
+     benefits. */
+
+  out_buf = ck_alloc_nozero(len);
+
+  subseq_tmouts = 0;
+
+  cur_depth = queue_cur->depth;
+
+  /*******************************************
+   * CALIBRATION (only if failed earlier on) *
+   *******************************************/
+
+  if (queue_cur->cal_failed) {
+
+    u8 res = FAULT_TMOUT;
+
+    if (queue_cur->cal_failed < CAL_CHANCES) {
+
+      res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
+
+      if (res == FAULT_ERROR) FATAL("Unable to execute target application");
+
+    }
+
+    if (stop_soon || res != crash_mode) {
+
+      ++cur_skipped_paths;
+      goto abandon_entry;
+
+    }
+
+  }
+
+  /************
+   * TRIMMING *
+   ************/
+
+  if (!dumb_mode && !queue_cur->trim_done) {
+
+    u8 res = trim_case(argv, queue_cur, in_buf);
+
+    if (res == FAULT_ERROR) FATAL("Unable to execute target application");
+
+    if (stop_soon) {
+
+      ++cur_skipped_paths;
+      goto abandon_entry;
+
+    }
+
+    /* Don't retry trimming, even if it failed. */
+
+    queue_cur->trim_done = 1;
+
+    len = queue_cur->len;
+
+  }
+
+  memcpy(out_buf, in_buf, len);
+
+  /*********************
+   * PERFORMANCE SCORE *
+   *********************/
+
+  orig_perf = perf_score = calculate_score(queue_cur);
+
+  /* Skip right away if -d is given, if we have done deterministic fuzzing on
+     this entry ourselves (was_fuzzed), or if it has gone through deterministic
+     testing in earlier, resumed runs (passed_det). */
+
+  if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
+    goto havoc_stage;
+
+  /* Skip deterministic fuzzing if exec path checksum puts this out of scope
+     for this master instance. */
+
+  if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
+    goto havoc_stage;
+
+  cur_ms_lv = get_cur_time();
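+
+  /* MOpt: if no new path or crash has arrived within limit_time_puppet ms,
+     give up on the deterministic stages and switch to pacemaker fuzzing. */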
+  if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) ||
+                            (last_crash_time != 0 &&
+                             cur_ms_lv - last_crash_time < limit_time_puppet) ||
+                            last_path_time == 0))) {
+
+    key_puppet = 1;
+    goto pacemaker_fuzzing;
+
+  }
+
+  doing_det = 1;
+
+  /*********************************************
+   * SIMPLE BITFLIP (+dictionary construction) *
+   *********************************************/
+
+#define FLIP_BIT(_ar, _b)                   \
+  do {                                      \
+                                            \
+    u8* _arf = (u8*)(_ar);                  \
+    u32 _bf = (_b);                         \
+    _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \
+                                            \
+  } while (0)
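+
+  /* Example (illustrative): FLIP_BIT(buf, 12) expands to
+     buf[12 >> 3] ^= (128 >> (12 & 7)), i.e. buf[1] ^= 0x08; bit offsets
+     walk each byte starting from its most significant bit. */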
+
+  /* Single walking bit. */
+
+  stage_short = "flip1";
+  stage_max = len << 3;
+  stage_name = "bitflip 1/1";
+
+  stage_val_type = STAGE_VAL_NONE;
+
+  orig_hit_cnt = queued_paths + unique_crashes;
+
+  prev_cksum = queue_cur->exec_cksum;
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    stage_cur_byte = stage_cur >> 3;
+
+    FLIP_BIT(out_buf, stage_cur);
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+    FLIP_BIT(out_buf, stage_cur);
+
+    /* While flipping the least significant bit in every byte, pull off an extra
+       trick to detect possible syntax tokens. In essence, the idea is that if
+       you have a binary blob like this:
+
+       xxxxxxxxIHDRxxxxxxxx
+
+       ...and changing the leading and trailing bytes causes variable or no
+       changes in program flow, but touching any character in the "IHDR" string
+       always produces the same, distinctive path, it's highly likely that
+       "IHDR" is an atomically-checked magic value of special significance to
+       the fuzzed format.
+
+       We do this here, rather than as a separate stage, because it's a nice
+       way to keep the operation approximately "free" (i.e., no extra execs).
+
+       Empirically, performing the check when flipping the least significant bit
+       is advantageous, compared to doing it at the time of more disruptive
+       changes, where the program flow may be affected in more violent ways.
+
+       The caveat is that we won't generate dictionaries in the -d mode or -S
+       mode - but that's probably a fair trade-off.
+
+       This won't work particularly well with paths that exhibit variable
+       behavior, but fails gracefully, so we'll carry out the checks anyway.
+
+      */
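+
+    /* Tracing the example above: flipping the lowest bit of each "IHDR" byte
+       forces the same deviated path, so the cksum stays constant and those
+       bytes accumulate in a_collect[]; when the cksum changes again, the run
+       is flushed via maybe_add_auto() if its length falls within
+       [MIN_AUTO_EXTRA, MAX_AUTO_EXTRA] (3..32 with the stock config.h). */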
+
+    if (!dumb_mode && (stage_cur & 7) == 7) {
+
+      u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+
+      if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
+
+        /* If at end of file and we are still collecting a string, grab the
+           final character and force output. */
+
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        ++a_len;
+
+        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
+          maybe_add_auto(a_collect, a_len);
+
+      } else if (cksum != prev_cksum) {
+
+        /* Otherwise, if the checksum has changed, see if we have something
+           worthwhile queued up, and collect that if the answer is yes. */
+
+        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
+          maybe_add_auto(a_collect, a_len);
+
+        a_len = 0;
+        prev_cksum = cksum;
+
+      }
+
+      /* Continue collecting string, but only if the bit flip actually made
+         any difference - we don't want no-op tokens. */
+
+      if (cksum != queue_cur->exec_cksum) {
+
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        ++a_len;
+
+      }
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP1] += stage_max;
+
+  /* Two walking bits. */
+
+  stage_name = "bitflip 2/1";
+  stage_short = "flip2";
+  stage_max = (len << 3) - 1;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    stage_cur_byte = stage_cur >> 3;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP2] += stage_max;
+
+  /* Four walking bits. */
+
+  stage_name = "bitflip 4/1";
+  stage_short = "flip4";
+  stage_max = (len << 3) - 3;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    stage_cur_byte = stage_cur >> 3;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, stage_cur + 2);
+    FLIP_BIT(out_buf, stage_cur + 3);
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, stage_cur + 2);
+    FLIP_BIT(out_buf, stage_cur + 3);
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP4] += stage_max;
+
+  /* Effector map setup. These macros calculate:
+
+     EFF_APOS      - position of a particular file offset in the map.
+     EFF_ALEN      - length of a map with a particular number of bytes.
+     EFF_SPAN_ALEN - map span for a sequence of bytes.
+
+   */
+
+#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
+#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
+#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
+#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1)
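+
+  /* Worked example, assuming the stock EFF_MAP_SCALE2 of 3 (one map byte per
+     eight file bytes): EFF_APOS(17) == 2, EFF_ALEN(17) == 3 (two full blocks
+     plus a remainder), and EFF_SPAN_ALEN(6, 4) == 2, since bytes 6..9
+     straddle two blocks. */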
+
+  /* Initialize effector map for the next step (see comments below). Always
+     flag first and last byte as doing something. */
+
+  eff_map = ck_alloc(EFF_ALEN(len));
+  eff_map[0] = 1;
+
+  if (EFF_APOS(len - 1) != 0) {
+
+    eff_map[EFF_APOS(len - 1)] = 1;
+    ++eff_cnt;
+
+  }
+
+  /* Walking byte. */
+
+  stage_name = "bitflip 8/8";
+  stage_short = "flip8";
+  stage_max = len;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    stage_cur_byte = stage_cur;
+
+    out_buf[stage_cur] ^= 0xFF;
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+    /* We also use this stage to pull off a simple trick: we identify
+       bytes that seem to have no effect on the current execution path
+       even when fully flipped - and we skip them during more expensive
+       deterministic stages, such as arithmetics or known ints. */
+
+    if (!eff_map[EFF_APOS(stage_cur)]) {
+
+      u32 cksum;
+
+      /* If in dumb mode or if the file is very short, just flag everything
+         without wasting time on checksums. */
+
+      if (!dumb_mode && len >= EFF_MIN_LEN)
+        cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      else
+        cksum = ~queue_cur->exec_cksum;
+
+      if (cksum != queue_cur->exec_cksum) {
+
+        eff_map[EFF_APOS(stage_cur)] = 1;
+        ++eff_cnt;
+
+      }
+
+    }
+
+    out_buf[stage_cur] ^= 0xFF;
+
+  }
+
+  /* If the effector map is more than EFF_MAX_PERC dense, just flag the
+     whole thing as worth fuzzing, since we wouldn't be saving much time
+     anyway. */
+
+  if (eff_cnt != EFF_ALEN(len) &&
+      eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
+
+    memset(eff_map, 1, EFF_ALEN(len));
+
+    blocks_eff_select += EFF_ALEN(len);
+
+  } else {
+
+    blocks_eff_select += eff_cnt;
+
+  }
+
+  blocks_eff_total += EFF_ALEN(len);
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP8] += stage_max;
+
+  /* Two walking bytes. */
+
+  if (len < 2) goto skip_bitflip;
+
+  stage_name = "bitflip 16/8";
+  stage_short = "flip16";
+  stage_cur = 0;
+  stage_max = len - 1;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 1; ++i) {
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
+
+      --stage_max;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    *(u16*)(out_buf + i) ^= 0xFFFF;
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    ++stage_cur;
+
+    *(u16*)(out_buf + i) ^= 0xFFFF;
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP16] += stage_max;
+
+  if (len < 4) goto skip_bitflip;
+
+  /* Four walking bytes. */
+
+  stage_name = "bitflip 32/8";
+  stage_short = "flip32";
+  stage_cur = 0;
+  stage_max = len - 3;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 3; ++i) {
+
+    /* Let's consult the effector map... */
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
+
+      --stage_max;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    ++stage_cur;
+
+    *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP32] += stage_max;
+
+skip_bitflip:
+
+  if (no_arith) goto skip_arith;
+
+  /**********************
+   * ARITHMETIC INC/DEC *
+   **********************/
+
+  /* 8-bit arithmetics. */
+
+  stage_name = "arith 8/8";
+  stage_short = "arith8";
+  stage_cur = 0;
+  stage_max = 2 * len * ARITH_MAX;
+
+  stage_val_type = STAGE_VAL_LE;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len; ++i) {
+
+    u8 orig = out_buf[i];
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)]) {
+
+      stage_max -= 2 * ARITH_MAX;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 1; j <= ARITH_MAX; ++j) {
+
+      u8 r = orig ^ (orig + j);
+
+      /* Do arithmetic operations only if the result couldn't be a product
+         of a bitflip. */
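+
+      /* E.g., adding 1 to 0x00 yields xor 0x01, a single-bit change that
+         bitflip 1/1 already tried, so it is skipped; adding 3 to 0x05 yields
+         xor 0x0D, which no bitflip stage produces, so it runs. */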
+
+      if (!could_be_bitflip(r)) {
+
+        stage_cur_val = j;
+        out_buf[i] = orig + j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      r = orig ^ (orig - j);
+
+      if (!could_be_bitflip(r)) {
+
+        stage_cur_val = -j;
+        out_buf[i] = orig - j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      out_buf[i] = orig;
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH8] += stage_max;
+
+  /* 16-bit arithmetics, both endians. */
+
+  if (len < 2) goto skip_arith;
+
+  stage_name = "arith 16/8";
+  stage_short = "arith16";
+  stage_cur = 0;
+  stage_max = 4 * (len - 1) * ARITH_MAX;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 1; ++i) {
+
+    u16 orig = *(u16*)(out_buf + i);
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
+
+      stage_max -= 4 * ARITH_MAX;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 1; j <= ARITH_MAX; ++j) {
+
+      u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
+          r3 = orig ^ SWAP16(SWAP16(orig) + j),
+          r4 = orig ^ SWAP16(SWAP16(orig) - j);
+
+      /* Try little endian addition and subtraction first. Do it only
+         if the operation would affect more than one byte (hence the
+         & 0xff overflow checks) and if it couldn't be a product of
+         a bitflip. */
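+
+      /* E.g., with orig == 0x01ff and j == 1, (orig & 0xff) + j exceeds
+         0xff, so the addition carries into the second byte and is worth
+         trying; with orig == 0x0100, only the low byte would change, and
+         arith 8/8 has already covered that. */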
+
+      stage_val_type = STAGE_VAL_LE;
+
+      if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
+
+        stage_cur_val = j;
+        *(u16*)(out_buf + i) = orig + j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
+
+        stage_cur_val = -j;
+        *(u16*)(out_buf + i) = orig - j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      /* Big endian comes next. Same deal. */
+
+      stage_val_type = STAGE_VAL_BE;
+
+      if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
+
+        stage_cur_val = j;
+        *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((orig >> 8) < j && !could_be_bitflip(r4)) {
+
+        stage_cur_val = -j;
+        *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      *(u16*)(out_buf + i) = orig;
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH16] += stage_max;
+
+  /* 32-bit arithmetics, both endians. */
+
+  if (len < 4) goto skip_arith;
+
+  stage_name = "arith 32/8";
+  stage_short = "arith32";
+  stage_cur = 0;
+  stage_max = 4 * (len - 3) * ARITH_MAX;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 3; ++i) {
+
+    u32 orig = *(u32*)(out_buf + i);
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
+
+      stage_max -= 4 * ARITH_MAX;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 1; j <= ARITH_MAX; ++j) {
+
+      u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
+          r3 = orig ^ SWAP32(SWAP32(orig) + j),
+          r4 = orig ^ SWAP32(SWAP32(orig) - j);
+
+      /* Little endian first. Same deal as with 16-bit: we only want to
+         try if the operation would have effect on more than two bytes. */
+
+      stage_val_type = STAGE_VAL_LE;
+
+      if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
+
+        stage_cur_val = j;
+        *(u32*)(out_buf + i) = orig + j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
+
+        stage_cur_val = -j;
+        *(u32*)(out_buf + i) = orig - j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      /* Big endian next. */
+
+      stage_val_type = STAGE_VAL_BE;
+
+      if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
+
+        stage_cur_val = j;
+        *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
+
+        stage_cur_val = -j;
+        *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      *(u32*)(out_buf + i) = orig;
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH32] += stage_max;
+
+skip_arith:
+
+  /**********************
+   * INTERESTING VALUES *
+   **********************/
+
+  stage_name = "interest 8/8";
+  stage_short = "int8";
+  stage_cur = 0;
+  stage_max = len * sizeof(interesting_8);
+
+  stage_val_type = STAGE_VAL_LE;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  /* Setting 8-bit integers. */
+
+  for (i = 0; i < len; ++i) {
+
+    u8 orig = out_buf[i];
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)]) {
+
+      stage_max -= sizeof(interesting_8);
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 0; j < sizeof(interesting_8); ++j) {
+
+      /* Skip if the value could be a product of bitflips or arithmetics. */
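+
+      /* E.g., with orig == 0x00, the candidate 0x80 differs by a single bit
+         and is skipped, while 100 (0x64) is neither a bitflip pattern nor
+         within ARITH_MAX (35 by default) of orig, so it is tried. */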
+
+      if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
+          could_be_arith(orig, (u8)interesting_8[j], 1)) {
+
+        --stage_max;
+        continue;
+
+      }
+
+      stage_cur_val = interesting_8[j];
+      out_buf[i] = interesting_8[j];
+
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+      out_buf[i] = orig;
+      ++stage_cur;
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST8] += stage_max;
+
+  /* Setting 16-bit integers, both endians. */
+
+  if (no_arith || len < 2) goto skip_interest;
+
+  stage_name = "interest 16/8";
+  stage_short = "int16";
+  stage_cur = 0;
+  stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 1; ++i) {
+
+    u16 orig = *(u16*)(out_buf + i);
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
+
+      stage_max -= sizeof(interesting_16);
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
+
+      stage_cur_val = interesting_16[j];
+
+      /* Skip if this could be a product of a bitflip, arithmetics,
+         or single-byte interesting value insertion. */
+
+      if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) &&
+          !could_be_arith(orig, (u16)interesting_16[j], 2) &&
+          !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
+
+        stage_val_type = STAGE_VAL_LE;
+
+        *(u16*)(out_buf + i) = interesting_16[j];
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
+          !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
+          !could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
+          !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
+
+        stage_val_type = STAGE_VAL_BE;
+
+        *(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+    }
+
+    *(u16*)(out_buf + i) = orig;
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST16] += stage_max;
+
+  if (len < 4) goto skip_interest;
+
+  /* Setting 32-bit integers, both endians. */
+
+  stage_name = "interest 32/8";
+  stage_short = "int32";
+  stage_cur = 0;
+  stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 3; ++i) {
+
+    u32 orig = *(u32*)(out_buf + i);
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
+
+      stage_max -= sizeof(interesting_32) >> 1;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
+
+      stage_cur_val = interesting_32[j];
+
+      /* Skip if this could be a product of a bitflip, arithmetics,
+         or word interesting value insertion. */
+
+      if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) &&
+          !could_be_arith(orig, interesting_32[j], 4) &&
+          !could_be_interest(orig, interesting_32[j], 4, 0)) {
+
+        stage_val_type = STAGE_VAL_LE;
+
+        *(u32*)(out_buf + i) = interesting_32[j];
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
+          !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
+          !could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
+          !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
+
+        stage_val_type = STAGE_VAL_BE;
+
+        *(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+    }
+
+    *(u32*)(out_buf + i) = orig;
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST32] += stage_max;
+
+skip_interest:
+
+  /********************
+   * DICTIONARY STUFF *
+   ********************/
+
+  if (!extras_cnt) goto skip_user_extras;
+
+  /* Overwrite with user-supplied extras. */
+
+  stage_name = "user extras (over)";
+  stage_short = "ext_UO";
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
+
+  stage_val_type = STAGE_VAL_NONE;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len; ++i) {
+
+    u32 last_len = 0;
+
+    stage_cur_byte = i;
+
+    /* Extras are sorted by size, from smallest to largest. This means
+       that we don't have to worry about restoring the buffer in
+       between writes at a particular offset determined by the outer
+       loop. */
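+
+    /* Concretely: last_len always holds the length of the most recent (and
+       thus longest) extra written at offset i, so the single memcpy() from
+       in_buf after the inner loop undoes all overwrites at once. */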
+
+    for (j = 0; j < extras_cnt; ++j) {
+
+      /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
+         skip them if there's no room to insert the payload, if the token
+         is redundant, or if its entire span has no bytes set in the effector
+         map. */
+
+      if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
+          extras[j].len > len - i ||
+          !memcmp(extras[j].data, out_buf + i, extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
+
+        --stage_max;
+        continue;
+
+      }
+
+      last_len = extras[j].len;
+      memcpy(out_buf + i, extras[j].data, last_len);
+
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+      ++stage_cur;
+
+    }
+
+    /* Restore all the clobbered memory. */
+    memcpy(out_buf + i, in_buf + i, last_len);
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_UO] += stage_max;
+
+  /* Insertion of user-supplied extras. */
+
+  stage_name = "user extras (insert)";
+  stage_short = "ext_UI";
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  ex_tmp = ck_alloc(len + MAX_DICT_FILE);
+
+  for (i = 0; i <= len; ++i) {
+
+    stage_cur_byte = i;
+
+    for (j = 0; j < extras_cnt; ++j) {
+
+      if (len + extras[j].len > MAX_FILE) {
+
+        --stage_max;
+        continue;
+
+      }
+
+      /* Insert token */
+      memcpy(ex_tmp + i, extras[j].data, extras[j].len);
+
+      /* Copy tail */
+      memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
+
+      if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
+
+        ck_free(ex_tmp);
+        goto abandon_entry;
+
+      }
+
+      ++stage_cur;
+
+    }
+
+    /* Copy head */
+    ex_tmp[i] = out_buf[i];
+
+  }
+
+  ck_free(ex_tmp);
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_UI] += stage_max;
+
+skip_user_extras:
+
+  if (!a_extras_cnt) goto skip_extras;
+
+  stage_name = "auto extras (over)";
+  stage_short = "ext_AO";
+  stage_cur = 0;
+  stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
+
+  stage_val_type = STAGE_VAL_NONE;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len; ++i) {
+
+    u32 last_len = 0;
+
+    stage_cur_byte = i;
+
+    for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
+
+      /* See the comment in the earlier code; extras are sorted by size. */
+
+      if (a_extras[j].len > len - i ||
+          !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1,
+                  EFF_SPAN_ALEN(i, a_extras[j].len))) {
+
+        --stage_max;
+        continue;
+
+      }
+
+      last_len = a_extras[j].len;
+      memcpy(out_buf + i, a_extras[j].data, last_len);
+
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+      ++stage_cur;
+
+    }
+
+    /* Restore all the clobbered memory. */
+    memcpy(out_buf + i, in_buf + i, last_len);
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_AO] += stage_max;
+
+skip_extras:
+
+  /* If we made this to here without jumping to havoc_stage or abandon_entry,
+     we're properly done with deterministic steps and can mark it as such
+     in the .state/ directory. */
+
+  if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
+
+  /****************
+   * RANDOM HAVOC *
+   ****************/
+
+havoc_stage:
+pacemaker_fuzzing:
+
+  stage_cur_byte = -1;
+
+  /* The havoc stage mutation code is also invoked when splicing files; if the
+     splice_cycle variable is set, generate different descriptions and such. */
+
+  if (!splice_cycle) {
+
+    stage_name = "MOpt-havoc";
+    stage_short = "MOpt_havoc";
+    stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
+                havoc_div / 100;
+
+  } else {
+
+    static u8 tmp[32];
+
+    perf_score = orig_perf;
+
+    sprintf(tmp, "MOpt-splice %u", splice_cycle);
+    stage_name = tmp;
+    stage_short = "MOpt_splice";
+    stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+
+  }
+
+  s32 temp_len_puppet;
+  cur_ms_lv = get_cur_time();
+
+  {
+
+    if (key_puppet == 1) {
+
+      if (unlikely(orig_hit_cnt_puppet == 0)) {
+
+        orig_hit_cnt_puppet = queued_paths + unique_crashes;
+        last_limit_time_start = get_cur_time();
+        SPLICE_CYCLES_puppet =
+            (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+             SPLICE_CYCLES_puppet_low);
+
+      }
+
+    }
+
+    {
+
+#ifndef IGNORE_FINDS
+    havoc_stage_puppet:
+#endif
+
+      stage_cur_byte = -1;
+
+      /* The havoc stage mutation code is also invoked when splicing files; if
+         the splice_cycle variable is set, generate different descriptions and
+         such. */
+
+      if (!splice_cycle) {
+
+        stage_name = "MOpt avoc";
+        stage_short = "MOpt_havoc";
+        stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+                    perf_score / havoc_div / 100;
+
+      } else {
+
+        static u8 tmp[32];
+        perf_score = orig_perf;
+        sprintf(tmp, "MOpt splice %u", splice_cycle);
+        stage_name = tmp;
+        stage_short = "MOpt_splice";
+        stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+
+      }
+
+      if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
+
+      temp_len = len;
+
+      orig_hit_cnt = queued_paths + unique_crashes;
+
+      havoc_queued = queued_paths;
+
+      for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
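+        /* Stack a random power-of-two number of mutations, 2..128 with the
+           stock HAVOC_STACK_POW2 of 7. */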
+        u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
+
+        stage_cur_val = use_stacking;
+
+        for (i = 0; i < operator_num; ++i) {
+
+          stage_cycles_puppet_v3[swarm_now][i] =
+              stage_cycles_puppet_v2[swarm_now][i];
+
+        }
+
+        for (i = 0; i < use_stacking; ++i) {
+
+          switch (select_algorithm()) {
+
+            case 0:
+              /* Flip a single bit somewhere. Spooky! */
+              FLIP_BIT(out_buf, UR(temp_len << 3));
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP1] += 1;
+              break;
+
+            case 1:
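+              /* Flip two adjacent bits. */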
+              if (temp_len < 2) break;
+              temp_len_puppet = UR(temp_len << 3);
+              FLIP_BIT(out_buf, temp_len_puppet);
+              FLIP_BIT(out_buf, temp_len_puppet + 1);
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP2] += 1;
+              break;
+
+            case 2:
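+              /* Flip four adjacent bits. */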
+              if (temp_len < 2) break;
+              temp_len_puppet = UR(temp_len << 3);
+              FLIP_BIT(out_buf, temp_len_puppet);
+              FLIP_BIT(out_buf, temp_len_puppet + 1);
+              FLIP_BIT(out_buf, temp_len_puppet + 2);
+              FLIP_BIT(out_buf, temp_len_puppet + 3);
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP4] += 1;
+              break;
+
+            case 3:
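+              /* Flip a whole random byte. */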
+              if (temp_len < 4) break;
+              out_buf[UR(temp_len)] ^= 0xFF;
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP8] += 1;
+              break;
+
+            case 4:
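+              /* Flip a word (two bytes) at a random position. */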
+              if (temp_len < 8) break;
+              *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF;
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP16] += 1;
+              break;
+
+            case 5:
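+              /* Flip a dword (four bytes) at a random position. */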
+              if (temp_len < 8) break;
+              *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF;
+              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP32] += 1;
+              break;
+
+            case 6:
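+              /* Randomly subtract from one byte, add to another. */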
+              out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
+              out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
+              stage_cycles_puppet_v2[swarm_now][STAGE_ARITH8] += 1;
+              break;
+
+            case 7:
+              /* Randomly subtract from word, random endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
+
+                u32 pos = UR(temp_len - 1);
+                *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+
+              } else {
+
+                u32 pos = UR(temp_len - 1);
+                u16 num = 1 + UR(ARITH_MAX);
+                *(u16*)(out_buf + pos) =
+                    SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
+
+              }
+
+              /* Randomly add to word, random endian. */
+              if (UR(2)) {
+
+                u32 pos = UR(temp_len - 1);
+                *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
+
+              } else {
+
+                u32 pos = UR(temp_len - 1);
+                u16 num = 1 + UR(ARITH_MAX);
+                *(u16*)(out_buf + pos) =
+                    SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
+
+              }
+
+              stage_cycles_puppet_v2[swarm_now][STAGE_ARITH16] += 1;
+              break;
+
+            case 8:
+              /* Randomly subtract from dword, random endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
+
+                u32 pos = UR(temp_len - 3);
+                *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+
+              } else {
+
+                u32 pos = UR(temp_len - 3);
+                u32 num = 1 + UR(ARITH_MAX);
+                *(u32*)(out_buf + pos) =
+                    SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
+
+              }
+
+              /* Randomly add to dword, random endian. */
+              // if (temp_len < 4) break;
+              if (UR(2)) {
+
+                u32 pos = UR(temp_len - 3);
+                *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
+
+              } else {
+
+                u32 pos = UR(temp_len - 3);
+                u32 num = 1 + UR(ARITH_MAX);
+                *(u32*)(out_buf + pos) =
+                    SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
+
+              }
+
+              stage_cycles_puppet_v2[swarm_now][STAGE_ARITH32] += 1;
+              break;
+
+            case 9:
+              /* Set byte to interesting value. */
+              if (temp_len < 4) break;
+              out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
+              stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST8] += 1;
+              break;
+
+            case 10:
+              /* Set word to interesting value, randomly choosing endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
+
+                *(u16*)(out_buf + UR(temp_len - 1)) =
+                    interesting_16[UR(sizeof(interesting_16) >> 1)];
+
+              } else {
+
+                *(u16*)(out_buf + UR(temp_len - 1)) =
+                    SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]);
+
+              }
+
+              stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST16] += 1;
+              break;
+
+            case 11:
+              /* Set dword to interesting value, randomly choosing endian. */
+
+              if (temp_len < 8) break;
+
+              if (UR(2)) {
+
+                *(u32*)(out_buf + UR(temp_len - 3)) =
+                    interesting_32[UR(sizeof(interesting_32) >> 2)];
+
+              } else {
+
+                *(u32*)(out_buf + UR(temp_len - 3)) =
+                    SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]);
+
+              }
+
+              stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST32] += 1;
+              break;
+
+            case 12:
+
+              /* Just set a random byte to a random value. Because,
+                 why not. We use XOR with 1-255 to eliminate the
+                 possibility of a no-op. */
+
+              out_buf[UR(temp_len)] ^= 1 + UR(255);
+              stage_cycles_puppet_v2[swarm_now][STAGE_RANDOMBYTE] += 1;
+              break;
+
+            case 13: {
+
+              /* Delete bytes. We're making this a bit more likely
+                 than insertion (the next option) in hopes of keeping
+                 files reasonably small. */
+
+              u32 del_from, del_len;
+
+              if (temp_len < 2) break;
+
+              /* Don't delete too much. */
+
+              del_len = choose_block_len(temp_len - 1);
+
+              del_from = UR(temp_len - del_len + 1);
+
+              memmove(out_buf + del_from, out_buf + del_from + del_len,
+                      temp_len - del_from - del_len);
+
+              temp_len -= del_len;
+              stage_cycles_puppet_v2[swarm_now][STAGE_DELETEBYTE] += 1;
+              break;
+
+            }
+
+            case 14:
+
+              if (temp_len + HAVOC_BLK_XL < MAX_FILE) {
+
+                /* Clone bytes (75%) or insert a block of constant bytes (25%).
+                 */
+
+                u8  actually_clone = UR(4);
+                u32 clone_from, clone_to, clone_len;
+                u8* new_buf;
+
+                if (actually_clone) {
+
+                  clone_len = choose_block_len(temp_len);
+                  clone_from = UR(temp_len - clone_len + 1);
+
+                } else {
+
+                  clone_len = choose_block_len(HAVOC_BLK_XL);
+                  clone_from = 0;
+
+                }
+
+                clone_to = UR(temp_len);
+
+                new_buf = ck_alloc_nozero(temp_len + clone_len);
+
+                /* Head */
+
+                memcpy(new_buf, out_buf, clone_to);
+
+                /* Inserted part */
+
+                if (actually_clone)
+                  memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
+                else
+                  memset(new_buf + clone_to,
+                         UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
+
+                /* Tail */
+                memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
+                       temp_len - clone_to);
+
+                ck_free(out_buf);
+                out_buf = new_buf;
+                temp_len += clone_len;
+                stage_cycles_puppet_v2[swarm_now][STAGE_Clone75] += 1;
+
+              }
+
+              break;
+
+            case 15: {
+
+              /* Overwrite bytes with a randomly selected chunk (75%) or fixed
+                 bytes (25%). */
+
+              u32 copy_from, copy_to, copy_len;
+
+              if (temp_len < 2) break;
+
+              copy_len = choose_block_len(temp_len - 1);
+
+              copy_from = UR(temp_len - copy_len + 1);
+              copy_to = UR(temp_len - copy_len + 1);
+
+              if (UR(4)) {
+
+                if (copy_from != copy_to)
+                  memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
+
+              } else
+
+                memset(out_buf + copy_to,
+                       UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
+              stage_cycles_puppet_v2[swarm_now][STAGE_OverWrite75] += 1;
+              break;
+
+            }
+
+          }
+
+        }
+
+        tmp_pilot_time += 1;
+
+        u64 temp_total_found = queued_paths + unique_crashes;
+
+        if (common_fuzz_stuff(argv, out_buf, temp_len))
+          goto abandon_entry_puppet;
+
+        /* out_buf might have been mangled a bit, so let's restore it to its
+           original size and shape. */
+
+        if (temp_len < len) out_buf = ck_realloc(out_buf, len);
+        temp_len = len;
+        memcpy(out_buf, in_buf, len);
+
+        /* If we're finding new stuff, let's run for a bit longer, limits
+           permitting. */
+
+        if (queued_paths != havoc_queued) {
+
+          if (perf_score <= havoc_max_mult * 100) {
+
+            stage_max *= 2;
+            perf_score *= 2;
+
+          }
+
+          havoc_queued = queued_paths;
+
+        }
+
+        if (unlikely(queued_paths + unique_crashes > temp_total_found)) {
+
+          u64 temp_temp_puppet =
+              queued_paths + unique_crashes - temp_total_found;
+          total_puppet_find = total_puppet_find + temp_temp_puppet;
+          for (i = 0; i < 16; ++i) {
+
+            if (stage_cycles_puppet_v2[swarm_now][i] >
+                stage_cycles_puppet_v3[swarm_now][i])
+              stage_finds_puppet_v2[swarm_now][i] += temp_temp_puppet;
+
+          }
+
+        }
+
+      }
+
+      new_hit_cnt = queued_paths + unique_crashes;
+
+      if (!splice_cycle) {
+
+        stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt;
+        stage_cycles[STAGE_HAVOC] += stage_max;
+
+      } else {
+
+        stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt;
+        stage_cycles[STAGE_SPLICE] += stage_max;
+
+      }
+
+#ifndef IGNORE_FINDS
+
+      /************
+       * SPLICING *
+       ************/
+
+    retry_splicing_puppet:
+
+      if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet &&
+          queued_paths > 1 && queue_cur->len > 1) {
+
+        struct queue_entry* target;
+        u32                 tid, split_at;
+        u8*                 new_buf;
+        s32                 f_diff, l_diff;
+
+        /* First of all, if we've modified in_buf for havoc, let's clean that
+           up... */
+
+        if (in_buf != orig_in) {
+
+          ck_free(in_buf);
+          in_buf = orig_in;
+          len = queue_cur->len;
+
+        }
+
+        /* Pick a random queue entry and seek to it. Don't splice with yourself.
+         */
+
+        do {
+
+          tid = UR(queued_paths);
+
+        } while (tid == current_entry);
+
+        splicing_with = tid;
+        target = queue;
+
+        while (tid >= 100) {
+
+          target = target->next_100;
+          tid -= 100;
+
+        }
+
+        while (tid--)
+          target = target->next;
+
+        /* Make sure that the target has a reasonable length. */
+
+        while (target && (target->len < 2 || target == queue_cur)) {
+
+          target = target->next;
+          ++splicing_with;
+
+        }
+
+        if (!target) goto retry_splicing_puppet;
+
+        /* Read the testcase into a new buffer. */
+
+        fd = open(target->fname, O_RDONLY);
+
+        if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
+
+        new_buf = ck_alloc_nozero(target->len);
+
+        ck_read(fd, new_buf, target->len, target->fname);
+
+        close(fd);
+
+        /* Find a suitable splicing location, somewhere between the first and
+           the last differing byte. Bail out if the difference is just a single
+           byte or so. */
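+
+        /* locate_diffs() reports the first (f_diff) and last (l_diff)
+           differing byte positions; the split point below is then drawn
+           uniformly from [f_diff, l_diff). */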
+
+        locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
+
+        if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
+
+          ck_free(new_buf);
+          goto retry_splicing_puppet;
+
+        }
+
+        /* Split somewhere between the first and last differing byte. */
+
+        split_at = f_diff + UR(l_diff - f_diff);
+
+        /* Do the thing. */
+
+        len = target->len;
+        memcpy(new_buf, in_buf, split_at);
+        in_buf = new_buf;
+        ck_free(out_buf);
+        out_buf = ck_alloc_nozero(len);
+        memcpy(out_buf, in_buf, len);
+        goto havoc_stage_puppet;
+
+      }
+
+#endif /* !IGNORE_FINDS */
+
+      ret_val = 0;
+
+    abandon_entry:
+    abandon_entry_puppet:
+
+      if (splice_cycle >= SPLICE_CYCLES_puppet)
+        SPLICE_CYCLES_puppet =
+            (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+             SPLICE_CYCLES_puppet_low);
+
+      splicing_with = -1;
+
+      /* Update pending_not_fuzzed count if we made it through the calibration
+         cycle and have not seen this entry before. */
+
+      // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) {
+
+      //   queue_cur->was_fuzzed = 1;
+      //   --pending_not_fuzzed;
+      //   if (queue_cur->favored) --pending_favored;
+      // }
+
+      munmap(orig_in, queue_cur->len);
+
+      if (in_buf != orig_in) ck_free(in_buf);
+      ck_free(out_buf);
+      ck_free(eff_map);
+
+      if (key_puppet == 1) {
+
+        if (unlikely(queued_paths + unique_crashes >
+                     ((queued_paths + unique_crashes) * limit_time_bound +
+                      orig_hit_cnt_puppet))) {
+
+          key_puppet = 0;
+          cur_ms_lv = get_cur_time();
+          new_hit_cnt = queued_paths + unique_crashes;
+          orig_hit_cnt_puppet = 0;
+          last_limit_time_start = 0;
+
+        }
+
+      }
+
+      if (unlikely(tmp_pilot_time > period_pilot)) {
+
+        total_pacemaker_time += tmp_pilot_time;
+        new_hit_cnt = queued_paths + unique_crashes;
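+
+        /* Swarm fitness: new paths + crashes found in this pilot phase,
+           per period_pilot_tmp executions. */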
+        swarm_fitness[swarm_now] =
+            (double)(total_puppet_find - temp_puppet_find) /
+            ((double)(tmp_pilot_time) / period_pilot_tmp);
+        tmp_pilot_time = 0;
+        temp_puppet_find = total_puppet_find;
+
+        u64 temp_stage_finds_puppet = 0;
+        for (i = 0; i < operator_num; ++i) {
+
+          double temp_eff = 0.0;
+
+          if (stage_cycles_puppet_v2[swarm_now][i] >
+              stage_cycles_puppet[swarm_now][i])
+            temp_eff = (double)(stage_finds_puppet_v2[swarm_now][i] -
+                                stage_finds_puppet[swarm_now][i]) /
+                       (double)(stage_cycles_puppet_v2[swarm_now][i] -
+                                stage_cycles_puppet[swarm_now][i]);
+
+          if (eff_best[swarm_now][i] < temp_eff) {
+
+            eff_best[swarm_now][i] = temp_eff;
+            L_best[swarm_now][i] = x_now[swarm_now][i];
+
+          }
+
+          stage_finds_puppet[swarm_now][i] =
+              stage_finds_puppet_v2[swarm_now][i];
+          stage_cycles_puppet[swarm_now][i] =
+              stage_cycles_puppet_v2[swarm_now][i];
+          temp_stage_finds_puppet += stage_finds_puppet[swarm_now][i];
+
+        }
+
+        swarm_now = swarm_now + 1;
+        if (swarm_now == swarm_num) {
+
+          key_module = 1;
+          for (i = 0; i < operator_num; ++i) {
+
+            core_operator_cycles_puppet_v2[i] = core_operator_cycles_puppet[i];
+            core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet[i];
+            core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i];
+
+          }
+
+          double swarm_eff = 0.0;
+          swarm_now = 0;
+          for (i = 0; i < swarm_num; ++i) {
+
+            if (swarm_fitness[i] > swarm_eff) {
+
+              swarm_eff = swarm_fitness[i];
+              swarm_now = i;
+
+            }
+
+          }
+
+          if (swarm_now < 0 || swarm_now > swarm_num - 1)
+            PFATAL("swarm_now error number  %d", swarm_now);
+
+        }
+
+      }
+
+      return ret_val;
+
+    }
+
+  }
+
+#undef FLIP_BIT
+
+}
+
+u8 core_fuzzing(char** argv) {
+
+  int i;
+
+  if (swarm_num == 1) {
+
+    key_module = 2;
+    return 0;
+
+  }
+
+  s32 len, fd, temp_len, j;
+  u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
+  u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv;
+  u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
+
+  u8 ret_val = 1, doing_det = 0;
+
+  u8  a_collect[MAX_AUTO_EXTRA];
+  u32 a_len = 0;
+
+#ifdef IGNORE_FINDS
+
+  /* In IGNORE_FINDS mode, skip any entries that weren't in the
+     initial data set. */
+
+  if (queue_cur->depth > 1) return 1;
+
+#else
+
+  if (pending_favored) {
+
+    /* If we have any favored, non-fuzzed new arrivals in the queue,
+       possibly skip to them at the expense of already-fuzzed or non-favored
+       cases. */
+
+    if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
+        UR(100) < SKIP_TO_NEW_PROB)
+      return 1;
+
+  } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
+
+    /* Otherwise, still possibly skip non-favored cases, albeit less often.
+       The odds of skipping stuff are higher for already-fuzzed inputs and
+       lower for never-fuzzed entries. */
+
+    if (queue_cycle > 1 && !queue_cur->was_fuzzed) {
+
+      if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
+
+    } else {
+
+      if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
+
+    }
+
+  }
+
+#endif /* ^IGNORE_FINDS */
+
+  if (not_on_tty) {
+
+    ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
+         current_entry, queued_paths, unique_crashes);
+    fflush(stdout);
+
+  }
+
+  /* Map the test case into memory. */
+
+  fd = open(queue_cur->fname, O_RDONLY);
+
+  if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
+
+  len = queue_cur->len;
+
+  orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+
+  if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname);
+
+  close(fd);
+
+  /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
+     single byte anyway, so it wouldn't give us any performance or memory usage
+     benefits. */
+
+  out_buf = ck_alloc_nozero(len);
+
+  subseq_tmouts = 0;
+
+  cur_depth = queue_cur->depth;
+
+  /*******************************************
+   * CALIBRATION (only if failed earlier on) *
+   *******************************************/
+
+  if (queue_cur->cal_failed) {
+
+    u8 res = FAULT_TMOUT;
+
+    if (queue_cur->cal_failed < CAL_CHANCES) {
+
+      res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
+
+      if (res == FAULT_ERROR) FATAL("Unable to execute target application");
+
+    }
+
+    if (stop_soon || res != crash_mode) {
+
+      ++cur_skipped_paths;
+      goto abandon_entry;
+
+    }
+
+  }
+
+  /************
+   * TRIMMING *
+   ************/
+
+  if (!dumb_mode && !queue_cur->trim_done) {
+
+    u8 res = trim_case(argv, queue_cur, in_buf);
+
+    if (res == FAULT_ERROR) FATAL("Unable to execute target application");
+
+    if (stop_soon) {
+
+      ++cur_skipped_paths;
+      goto abandon_entry;
+
+    }
+
+    /* Don't retry trimming, even if it failed. */
+
+    queue_cur->trim_done = 1;
+
+    len = queue_cur->len;
+
+  }
+
+  memcpy(out_buf, in_buf, len);
+
+  /*********************
+   * PERFORMANCE SCORE *
+   *********************/
+
+  orig_perf = perf_score = calculate_score(queue_cur);
+
+  /* Skip right away if -d is given, if we have done deterministic fuzzing on
+     this entry ourselves (was_fuzzed), or if it has gone through deterministic
+     testing in earlier, resumed runs (passed_det). */
+
+  if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
+    goto havoc_stage;
+
+  /* Skip deterministic fuzzing if exec path checksum puts this out of scope
+     for this master instance. */
+
+  if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
+    goto havoc_stage;
+
+  cur_ms_lv = get_cur_time();
+  if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) ||
+                            (last_crash_time != 0 &&
+                             cur_ms_lv - last_crash_time < limit_time_puppet) ||
+                            last_path_time == 0))) {
+
+    key_puppet = 1;
+    goto pacemaker_fuzzing;
+
+  }
+
+  doing_det = 1;
+
+  /*********************************************
+   * SIMPLE BITFLIP (+dictionary construction) *
+   *********************************************/
+
+#define FLIP_BIT(_ar, _b)                   \
+  do {                                      \
+                                            \
+    u8* _arf = (u8*)(_ar);                  \
+    u32 _bf = (_b);                         \
+    _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \
+                                            \
+  } while (0)
+
+  /* Single walking bit. */
+
+  stage_short = "flip1";
+  stage_max = len << 3;
+  stage_name = "bitflip 1/1";
+
+  stage_val_type = STAGE_VAL_NONE;
+
+  orig_hit_cnt = queued_paths + unique_crashes;
+
+  prev_cksum = queue_cur->exec_cksum;
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    stage_cur_byte = stage_cur >> 3;
+
+    FLIP_BIT(out_buf, stage_cur);
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+    FLIP_BIT(out_buf, stage_cur);
+
+    /* While flipping the least significant bit in every byte, pull off an extra
+       trick to detect possible syntax tokens. In essence, the idea is that if
+       you have a binary blob like this:
+
+       xxxxxxxxIHDRxxxxxxxx
+
+       ...and changing the leading and trailing bytes causes variable or no
+       changes in program flow, but touching any character in the "IHDR" string
+       always produces the same, distinctive path, it's highly likely that
+       "IHDR" is an atomically-checked magic value of special significance to
+       the fuzzed format.
+
+       We do this here, rather than as a separate stage, because it's a nice
+       way to keep the operation approximately "free" (i.e., no extra execs).
+
+       Empirically, performing the check when flipping the least significant bit
+       is advantageous, compared to doing it at the time of more disruptive
+       changes, where the program flow may be affected in more violent ways.
+
+       The caveat is that we won't generate dictionaries in the -d mode or -S
+       mode - but that's probably a fair trade-off.
+
+       This won't work particularly well with paths that exhibit variable
+       behavior, but fails gracefully, so we'll carry out the checks anyway.
+
+      */
+
+    if (!dumb_mode && (stage_cur & 7) == 7) {
+
+      u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+
+      if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
+
+        /* If at end of file and we are still collecting a string, grab the
+           final character and force output. */
+
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        ++a_len;
+
+        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
+          maybe_add_auto(a_collect, a_len);
+
+      } else if (cksum != prev_cksum) {
+
+        /* Otherwise, if the checksum has changed, see if we have something
+           worthwhile queued up, and collect that if the answer is yes. */
+
+        if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
+          maybe_add_auto(a_collect, a_len);
+
+        a_len = 0;
+        prev_cksum = cksum;
+
+      }
+
+      /* Continue collecting string, but only if the bit flip actually made
+         any difference - we don't want no-op tokens. */
+
+      if (cksum != queue_cur->exec_cksum) {
+
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        ++a_len;
+
+      }
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP1] += stage_max;
+
+  /* Two walking bits. */
+
+  stage_name = "bitflip 2/1";
+  stage_short = "flip2";
+  stage_max = (len << 3) - 1;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    stage_cur_byte = stage_cur >> 3;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP2] += stage_max;
+
+  /* Four walking bits. */
+
+  stage_name = "bitflip 4/1";
+  stage_short = "flip4";
+  stage_max = (len << 3) - 3;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    stage_cur_byte = stage_cur >> 3;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, stage_cur + 2);
+    FLIP_BIT(out_buf, stage_cur + 3);
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, stage_cur + 2);
+    FLIP_BIT(out_buf, stage_cur + 3);
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP4] += stage_max;
+
+  /* Effector map setup. These macros calculate:
+
+     EFF_APOS      - position of a particular file offset in the map.
+     EFF_ALEN      - length of a map with a particular number of bytes.
+     EFF_SPAN_ALEN - map span for a sequence of bytes.
+
+   */
+
+#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
+#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
+#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
+#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1)
+
+  /* Initialize effector map for the next step (see comments below). Always
+     flag first and last byte as doing something. */
+
+  eff_map = ck_alloc(EFF_ALEN(len));
+  eff_map[0] = 1;
+
+  if (EFF_APOS(len - 1) != 0) {
+
+    eff_map[EFF_APOS(len - 1)] = 1;
+    ++eff_cnt;
+
+  }
+
+  /* Walking byte. */
+
+  stage_name = "bitflip 8/8";
+  stage_short = "flip8";
+  stage_max = len;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    stage_cur_byte = stage_cur;
+
+    out_buf[stage_cur] ^= 0xFF;
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+    /* We also use this stage to pull off a simple trick: we identify
+       bytes that seem to have no effect on the current execution path
+       even when fully flipped - and we skip them during more expensive
+       deterministic stages, such as arithmetics or known ints. */
+
+    if (!eff_map[EFF_APOS(stage_cur)]) {
+
+      u32 cksum;
+
+      /* If in dumb mode or if the file is very short, just flag everything
+         without wasting time on checksums. */
+
+      if (!dumb_mode && len >= EFF_MIN_LEN)
+        cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      else
+        cksum = ~queue_cur->exec_cksum;
+
+      if (cksum != queue_cur->exec_cksum) {
+
+        eff_map[EFF_APOS(stage_cur)] = 1;
+        ++eff_cnt;
+
+      }
+
+    }
+
+    out_buf[stage_cur] ^= 0xFF;
+
+  }
+
+  /* If the effector map is more than EFF_MAX_PERC dense, just flag the
+     whole thing as worth fuzzing, since we wouldn't be saving much time
+     anyway. */
+
+  if (eff_cnt != EFF_ALEN(len) &&
+      eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
+
+    memset(eff_map, 1, EFF_ALEN(len));
+
+    blocks_eff_select += EFF_ALEN(len);
+
+  } else {
+
+    blocks_eff_select += eff_cnt;
+
+  }
+
+  blocks_eff_total += EFF_ALEN(len);
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP8] += stage_max;
+
+  /* Two walking bytes. */
+
+  if (len < 2) goto skip_bitflip;
+
+  stage_name = "bitflip 16/8";
+  stage_short = "flip16";
+  stage_cur = 0;
+  stage_max = len - 1;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 1; ++i) {
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
+
+      --stage_max;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    *(u16*)(out_buf + i) ^= 0xFFFF;
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    ++stage_cur;
+
+    *(u16*)(out_buf + i) ^= 0xFFFF;
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP16] += stage_max;
+
+  if (len < 4) goto skip_bitflip;
+
+  /* Four walking bytes. */
+
+  stage_name = "bitflip 32/8";
+  stage_short = "flip32";
+  stage_cur = 0;
+  stage_max = len - 3;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 3; ++i) {
+
+    /* Let's consult the effector map... */
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
+
+      --stage_max;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+
+    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    ++stage_cur;
+
+    *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_FLIP32] += stage_max;
+
+skip_bitflip:
+
+  if (no_arith) goto skip_arith;
+
+  /**********************
+   * ARITHMETIC INC/DEC *
+   **********************/
+
+  /* 8-bit arithmetics. */
+
+  stage_name = "arith 8/8";
+  stage_short = "arith8";
+  stage_cur = 0;
+  stage_max = 2 * len * ARITH_MAX;
+
+  stage_val_type = STAGE_VAL_LE;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len; ++i) {
+
+    u8 orig = out_buf[i];
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)]) {
+
+      stage_max -= 2 * ARITH_MAX;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 1; j <= ARITH_MAX; ++j) {
+
+      u8 r = orig ^ (orig + j);
+
+      /* Do arithmetic operations only if the result couldn't be a product
+         of a bitflip. */
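+      /* (E.g. orig = 0x01, j = 1 gives r = 0x03 - two adjacent flipped
+         bits, already covered by the walking bitflips - and is skipped,
+         while orig = 0x0f, j = 1 gives r = 0x1f and gets tried.) */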
+
+      if (!could_be_bitflip(r)) {
+
+        stage_cur_val = j;
+        out_buf[i] = orig + j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      r = orig ^ (orig - j);
+
+      if (!could_be_bitflip(r)) {
+
+        stage_cur_val = -j;
+        out_buf[i] = orig - j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      out_buf[i] = orig;
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH8] += stage_max;
+
+  /* 16-bit arithmetics, both endians. */
+
+  if (len < 2) goto skip_arith;
+
+  stage_name = "arith 16/8";
+  stage_short = "arith16";
+  stage_cur = 0;
+  stage_max = 4 * (len - 1) * ARITH_MAX;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 1; ++i) {
+
+    u16 orig = *(u16*)(out_buf + i);
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
+
+      stage_max -= 4 * ARITH_MAX;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 1; j <= ARITH_MAX; ++j) {
+
+      u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
+          r3 = orig ^ SWAP16(SWAP16(orig) + j),
+          r4 = orig ^ SWAP16(SWAP16(orig) - j);
+
+      /* Try little endian addition and subtraction first. Do it only
+         if the operation would affect more than one byte (hence the
+         & 0xff overflow checks) and if it couldn't be a product of
+         a bitflip. */
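+      /* (E.g. orig = 0x00ff, j = 1: the low byte overflows, the carry
+         reaches the second byte, and the resulting 0x0100 could not have
+         been produced by the earlier single-byte stages.) */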
+
+      stage_val_type = STAGE_VAL_LE;
+
+      if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
+
+        stage_cur_val = j;
+        *(u16*)(out_buf + i) = orig + j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
+
+        stage_cur_val = -j;
+        *(u16*)(out_buf + i) = orig - j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      /* Big endian comes next. Same deal. */
+
+      stage_val_type = STAGE_VAL_BE;
+
+      if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
+
+        stage_cur_val = j;
+        *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((orig >> 8) < j && !could_be_bitflip(r4)) {
+
+        stage_cur_val = -j;
+        *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      *(u16*)(out_buf + i) = orig;
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH16] += stage_max;
+
+  /* 32-bit arithmetics, both endians. */
+
+  if (len < 4) goto skip_arith;
+
+  stage_name = "arith 32/8";
+  stage_short = "arith32";
+  stage_cur = 0;
+  stage_max = 4 * (len - 3) * ARITH_MAX;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 3; ++i) {
+
+    u32 orig = *(u32*)(out_buf + i);
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
+
+      stage_max -= 4 * ARITH_MAX;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 1; j <= ARITH_MAX; ++j) {
+
+      u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j),
+          r3 = orig ^ SWAP32(SWAP32(orig) + j),
+          r4 = orig ^ SWAP32(SWAP32(orig) - j);
+
+      /* Little endian first. Same deal as with 16-bit: we only want to
+         try if the operation would have effect on more than two bytes. */
+
+      stage_val_type = STAGE_VAL_LE;
+
+      if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
+
+        stage_cur_val = j;
+        *(u32*)(out_buf + i) = orig + j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
+
+        stage_cur_val = -j;
+        *(u32*)(out_buf + i) = orig - j;
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      /* Big endian next. */
+
+      stage_val_type = STAGE_VAL_BE;
+
+      if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
+
+        stage_cur_val = j;
+        *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
+
+        stage_cur_val = -j;
+        *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      *(u32*)(out_buf + i) = orig;
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_ARITH32] += stage_max;
+
+skip_arith:
+
+  /**********************
+   * INTERESTING VALUES *
+   **********************/
+
+  stage_name = "interest 8/8";
+  stage_short = "int8";
+  stage_cur = 0;
+  stage_max = len * sizeof(interesting_8);
+
+  stage_val_type = STAGE_VAL_LE;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  /* Setting 8-bit integers. */
+
+  for (i = 0; i < len; ++i) {
+
+    u8 orig = out_buf[i];
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)]) {
+
+      stage_max -= sizeof(interesting_8);
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 0; j < sizeof(interesting_8); ++j) {
+
+      /* Skip if the value could be a product of bitflips or arithmetics. */
+
+      if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
+          could_be_arith(orig, (u8)interesting_8[j], 1)) {
+
+        --stage_max;
+        continue;
+
+      }
+
+      stage_cur_val = interesting_8[j];
+      out_buf[i] = interesting_8[j];
+
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+      out_buf[i] = orig;
+      ++stage_cur;
+
+    }
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST8] += stage_max;
+
+  /* Setting 16-bit integers, both endians. */
+
+  if (no_arith || len < 2) goto skip_interest;
+
+  stage_name = "interest 16/8";
+  stage_short = "int16";
+  stage_cur = 0;
+  stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 1; ++i) {
+
+    u16 orig = *(u16*)(out_buf + i);
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
+
+      stage_max -= sizeof(interesting_16);
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
+
+      stage_cur_val = interesting_16[j];
+
+      /* Skip if this could be a product of a bitflip, arithmetics,
+         or single-byte interesting value insertion. */
+
+      if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) &&
+          !could_be_arith(orig, (u16)interesting_16[j], 2) &&
+          !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
+
+        stage_val_type = STAGE_VAL_LE;
+
+        *(u16*)(out_buf + i) = interesting_16[j];
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
+          !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
+          !could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
+          !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
+
+        stage_val_type = STAGE_VAL_BE;
+
+        *(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+    }
+
+    *(u16*)(out_buf + i) = orig;
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST16] += stage_max;
+
+  if (len < 4) goto skip_interest;
+
+  /* Setting 32-bit integers, both endians. */
+
+  stage_name = "interest 32/8";
+  stage_short = "int32";
+  stage_cur = 0;
+  stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len - 3; ++i) {
+
+    u32 orig = *(u32*)(out_buf + i);
+
+    /* Let's consult the effector map... */
+
+    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
+        !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
+
+      stage_max -= sizeof(interesting_32) >> 1;
+      continue;
+
+    }
+
+    stage_cur_byte = i;
+
+    for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
+
+      stage_cur_val = interesting_32[j];
+
+      /* Skip if this could be a product of a bitflip, arithmetics,
+         or word interesting value insertion. */
+
+      if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) &&
+          !could_be_arith(orig, interesting_32[j], 4) &&
+          !could_be_interest(orig, interesting_32[j], 4, 0)) {
+
+        stage_val_type = STAGE_VAL_LE;
+
+        *(u32*)(out_buf + i) = interesting_32[j];
+
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+      if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
+          !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
+          !could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
+          !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
+
+        stage_val_type = STAGE_VAL_BE;
+
+        *(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
+        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+        ++stage_cur;
+
+      } else
+
+        --stage_max;
+
+    }
+
+    *(u32*)(out_buf + i) = orig;
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_INTEREST32] += stage_max;
+
+skip_interest:
+
+  /********************
+   * DICTIONARY STUFF *
+   ********************/
+
+  if (!extras_cnt) goto skip_user_extras;
+
+  /* Overwrite with user-supplied extras. */
+
+  stage_name = "user extras (over)";
+  stage_short = "ext_UO";
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
+
+  stage_val_type = STAGE_VAL_NONE;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len; ++i) {
+
+    u32 last_len = 0;
+
+    stage_cur_byte = i;
+
+    /* Extras are sorted by size, from smallest to largest. This means
+       that we don't have to worry about restoring the buffer in
+       between writes at a particular offset determined by the outer
+       loop. */
+
+    for (j = 0; j < extras_cnt; ++j) {
+
+      /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
+         skip them if there's no room to insert the payload, if the token
+         is redundant, or if its entire span has no bytes set in the effector
+         map. */
+
+      if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
+          extras[j].len > len - i ||
+          !memcmp(extras[j].data, out_buf + i, extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
+
+        --stage_max;
+        continue;
+
+      }
+
+      last_len = extras[j].len;
+      memcpy(out_buf + i, extras[j].data, last_len);
+
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+      ++stage_cur;
+
+    }
+
+    /* Restore all the clobbered memory. */
+    memcpy(out_buf + i, in_buf + i, last_len);
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_UO] += stage_max;
+
+  /* Insertion of user-supplied extras. */
+
+  stage_name = "user extras (insert)";
+  stage_short = "ext_UI";
+  stage_cur = 0;
+  stage_max = extras_cnt * len;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  ex_tmp = ck_alloc(len + MAX_DICT_FILE);
+
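+  /* Note that ex_tmp's first i bytes always mirror out_buf: the "copy head"
+     step at the bottom of the loop extends the prefix by one byte per
+     iteration, so each pass only has to append the token and the tail. */
+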
+  for (i = 0; i <= len; ++i) {
+
+    stage_cur_byte = i;
+
+    for (j = 0; j < extras_cnt; ++j) {
+
+      if (len + extras[j].len > MAX_FILE) {
+
+        --stage_max;
+        continue;
+
+      }
+
+      /* Insert token */
+      memcpy(ex_tmp + i, extras[j].data, extras[j].len);
+
+      /* Copy tail */
+      memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
+
+      if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
+
+        ck_free(ex_tmp);
+        goto abandon_entry;
+
+      }
+
+      ++stage_cur;
+
+    }
+
+    /* Copy head */
+    ex_tmp[i] = out_buf[i];
+
+  }
+
+  ck_free(ex_tmp);
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_UI] += stage_max;
+
+skip_user_extras:
+
+  if (!a_extras_cnt) goto skip_extras;
+
+  stage_name = "auto extras (over)";
+  stage_short = "ext_AO";
+  stage_cur = 0;
+  stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
+
+  stage_val_type = STAGE_VAL_NONE;
+
+  orig_hit_cnt = new_hit_cnt;
+
+  for (i = 0; i < len; ++i) {
+
+    u32 last_len = 0;
+
+    stage_cur_byte = i;
+
+    for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
+
+      /* See the comment in the earlier code; extras are sorted by size. */
+
+      if (a_extras[j].len > len - i ||
+          !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1,
+                  EFF_SPAN_ALEN(i, a_extras[j].len))) {
+
+        --stage_max;
+        continue;
+
+      }
+
+      last_len = a_extras[j].len;
+      memcpy(out_buf + i, a_extras[j].data, last_len);
+
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+      ++stage_cur;
+
+    }
+
+    /* Restore all the clobbered memory. */
+    memcpy(out_buf + i, in_buf + i, last_len);
+
+  }
+
+  new_hit_cnt = queued_paths + unique_crashes;
+
+  stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
+  stage_cycles[STAGE_EXTRAS_AO] += stage_max;
+
+skip_extras:
+
+  /* If we made it here without jumping to havoc_stage or abandon_entry,
+     we're properly done with deterministic steps and can mark it as such
+     in the .state/ directory. */
+
+  if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
+
+  /****************
+   * RANDOM HAVOC *
+   ****************/
+
+havoc_stage:
+pacemaker_fuzzing:
+
+  stage_cur_byte = -1;
+
+  /* The havoc stage mutation code is also invoked when splicing files; if the
+     splice_cycle variable is set, generate different descriptions and such. */
+
+  if (!splice_cycle) {
+
+    stage_name = "MOpt-havoc";
+    stage_short = "MOpt_havoc";
+    stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
+                havoc_div / 100;
+
+  } else {
+
+    static u8 tmp[32];
+
+    perf_score = orig_perf;
+
+    sprintf(tmp, "MOpt-core-splice %u", splice_cycle);
+    stage_name = tmp;
+    stage_short = "MOpt_core_splice";
+    stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+
+  }
+
+  s32 temp_len_puppet;
+  cur_ms_lv = get_cur_time();
+
+  // for (; swarm_now < swarm_num; ++swarm_now)
+  {
+
+    if (key_puppet == 1) {
+
+      if (unlikely(orig_hit_cnt_puppet == 0)) {
+
+        orig_hit_cnt_puppet = queued_paths + unique_crashes;
+        last_limit_time_start = get_cur_time();
+        SPLICE_CYCLES_puppet =
+            (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+             SPLICE_CYCLES_puppet_low);
+
+      }
+
+    }
+
+    {
+
+#ifndef IGNORE_FINDS
+    havoc_stage_puppet:
+#endif
+
+      stage_cur_byte = -1;
+
+      /* The havoc stage mutation code is also invoked when splicing files; if
+         the splice_cycle variable is set, generate different descriptions and
+         such. */
+
+      if (!splice_cycle) {
+
+        stage_name = "MOpt core avoc";
+        stage_short = "MOpt_core_havoc";
+        stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+                    perf_score / havoc_div / 100;
+
+      } else {
+
+        static u8 tmp[32];
+        perf_score = orig_perf;
+        sprintf(tmp, "MOpt core splice %u", splice_cycle);
+        stage_name = tmp;
+        stage_short = "MOpt_core_splice";
+        stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+
+      }
+
+      if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
+      temp_len = len;
+      orig_hit_cnt = queued_paths + unique_crashes;
+      havoc_queued = queued_paths;
+
+      for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
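+        /* Stack a power-of-two number of mutations: 2..2^HAVOC_STACK_POW2,
+           i.e. 2 to 128 with the default HAVOC_STACK_POW2 of 7. */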
+        u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
+        stage_cur_val = use_stacking;
+
+        for (i = 0; i < operator_num; ++i) {
+
+          core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet_v2[i];
+
+        }
+
+        for (i = 0; i < use_stacking; ++i) {
+
+          switch (select_algorithm()) {
+
+            case 0:
+              /* Flip a single bit somewhere. Spooky! */
+              FLIP_BIT(out_buf, UR(temp_len << 3));
+              core_operator_cycles_puppet_v2[STAGE_FLIP1] += 1;
+              break;
+
+            case 1:
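+              /* Flip two adjacent bits somewhere. */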
+              if (temp_len < 2) break;
+              temp_len_puppet = UR(temp_len << 3);
+              FLIP_BIT(out_buf, temp_len_puppet);
+              FLIP_BIT(out_buf, temp_len_puppet + 1);
+              core_operator_cycles_puppet_v2[STAGE_FLIP2] += 1;
+              break;
+
+            case 2:
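+              /* Flip four adjacent bits somewhere. */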
+              if (temp_len < 2) break;
+              temp_len_puppet = UR(temp_len << 3);
+              FLIP_BIT(out_buf, temp_len_puppet);
+              FLIP_BIT(out_buf, temp_len_puppet + 1);
+              FLIP_BIT(out_buf, temp_len_puppet + 2);
+              FLIP_BIT(out_buf, temp_len_puppet + 3);
+              core_operator_cycles_puppet_v2[STAGE_FLIP4] += 1;
+              break;
+
+            case 3:
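+              /* Flip a full byte somewhere. */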
+              if (temp_len < 4) break;
+              out_buf[UR(temp_len)] ^= 0xFF;
+              core_operator_cycles_puppet_v2[STAGE_FLIP8] += 1;
+              break;
+
+            case 4:
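+              /* Flip two adjacent bytes (a word). */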
+              if (temp_len < 8) break;
+              *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF;
+              core_operator_cycles_puppet_v2[STAGE_FLIP16] += 1;
+              break;
+
+            case 5:
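+              /* Flip four adjacent bytes (a dword). */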
+              if (temp_len < 8) break;
+              *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF;
+              core_operator_cycles_puppet_v2[STAGE_FLIP32] += 1;
+              break;
+
+            case 6:
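+              /* Randomly subtract from and add to single bytes. */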
+              out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
+              out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
+              core_operator_cycles_puppet_v2[STAGE_ARITH8] += 1;
+              break;
+
+            case 7:
+              /* Randomly subtract from word, random endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
+
+                u32 pos = UR(temp_len - 1);
+                *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+
+              } else {
+
+                u32 pos = UR(temp_len - 1);
+                u16 num = 1 + UR(ARITH_MAX);
+                *(u16*)(out_buf + pos) =
+                    SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
+
+              }
+
+              /* Randomly add to word, random endian. */
+              if (UR(2)) {
+
+                u32 pos = UR(temp_len - 1);
+                *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
+
+              } else {
+
+                u32 pos = UR(temp_len - 1);
+                u16 num = 1 + UR(ARITH_MAX);
+                *(u16*)(out_buf + pos) =
+                    SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
+
+              }
+
+              core_operator_cycles_puppet_v2[STAGE_ARITH16] += 1;
+              break;
+
+            case 8:
+              /* Randomly subtract from dword, random endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
+
+                u32 pos = UR(temp_len - 3);
+                *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+
+              } else {
+
+                u32 pos = UR(temp_len - 3);
+                u32 num = 1 + UR(ARITH_MAX);
+                *(u32*)(out_buf + pos) =
+                    SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
+
+              }
+
+              /* Randomly add to dword, random endian. */
+              if (UR(2)) {
+
+                u32 pos = UR(temp_len - 3);
+                *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
+
+              } else {
+
+                u32 pos = UR(temp_len - 3);
+                u32 num = 1 + UR(ARITH_MAX);
+                *(u32*)(out_buf + pos) =
+                    SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
+
+              }
+
+              core_operator_cycles_puppet_v2[STAGE_ARITH32] += 1;
+              break;
+
+            case 9:
+              /* Set byte to interesting value. */
+              if (temp_len < 4) break;
+              out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
+              core_operator_cycles_puppet_v2[STAGE_INTEREST8] += 1;
+              break;
+
+            case 10:
+              /* Set word to interesting value, randomly choosing endian. */
+              if (temp_len < 8) break;
+              if (UR(2)) {
+
+                *(u16*)(out_buf + UR(temp_len - 1)) =
+                    interesting_16[UR(sizeof(interesting_16) >> 1)];
+
+              } else {
+
+                *(u16*)(out_buf + UR(temp_len - 1)) =
+                    SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]);
+
+              }
+
+              core_operator_cycles_puppet_v2[STAGE_INTEREST16] += 1;
+              break;
+
+            case 11:
+              /* Set dword to interesting value, randomly choosing endian. */
+
+              if (temp_len < 8) break;
+
+              if (UR(2)) {
+
+                *(u32*)(out_buf + UR(temp_len - 3)) =
+                    interesting_32[UR(sizeof(interesting_32) >> 2)];
+
+              } else {
+
+                *(u32*)(out_buf + UR(temp_len - 3)) =
+                    SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]);
+
+              }
+
+              core_operator_cycles_puppet_v2[STAGE_INTEREST32] += 1;
+              break;
+
+            case 12:
+
+              /* Just set a random byte to a random value. Because,
+                 why not. We use XOR with 1-255 to eliminate the
+                 possibility of a no-op. */
+
+              out_buf[UR(temp_len)] ^= 1 + UR(255);
+              core_operator_cycles_puppet_v2[STAGE_RANDOMBYTE] += 1;
+              break;
+
+            case 13: {
+
+              /* Delete bytes. We're making this a bit more likely
+                 than insertion (the next option) in hopes of keeping
+                 files reasonably small. */
+
+              u32 del_from, del_len;
+
+              if (temp_len < 2) break;
+
+              /* Don't delete too much. */
+
+              del_len = choose_block_len(temp_len - 1);
+
+              del_from = UR(temp_len - del_len + 1);
+
+              memmove(out_buf + del_from, out_buf + del_from + del_len,
+                      temp_len - del_from - del_len);
+
+              temp_len -= del_len;
+              core_operator_cycles_puppet_v2[STAGE_DELETEBYTE] += 1;
+              break;
+
+            }
+
+            case 14:
+
+              if (temp_len + HAVOC_BLK_XL < MAX_FILE) {
+
+                /* Clone bytes (75%) or insert a block of constant bytes (25%).
+                 */
+
+                u8  actually_clone = UR(4);
+                u32 clone_from, clone_to, clone_len;
+                u8* new_buf;
+
+                if (actually_clone) {
+
+                  clone_len = choose_block_len(temp_len);
+                  clone_from = UR(temp_len - clone_len + 1);
+
+                } else {
+
+                  clone_len = choose_block_len(HAVOC_BLK_XL);
+                  clone_from = 0;
+
+                }
+
+                clone_to = UR(temp_len);
+
+                new_buf = ck_alloc_nozero(temp_len + clone_len);
+
+                /* Head */
+
+                memcpy(new_buf, out_buf, clone_to);
+
+                /* Inserted part */
+
+                if (actually_clone)
+                  memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
+                else
+                  memset(new_buf + clone_to,
+                         UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
+
+                /* Tail */
+                memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
+                       temp_len - clone_to);
+
+                ck_free(out_buf);
+                out_buf = new_buf;
+                temp_len += clone_len;
+                core_operator_cycles_puppet_v2[STAGE_Clone75] += 1;
+
+              }
+
+              break;
+
+            case 15: {
+
+              /* Overwrite bytes with a randomly selected chunk (75%) or fixed
+                 bytes (25%). */
+
+              u32 copy_from, copy_to, copy_len;
+
+              if (temp_len < 2) break;
+
+              copy_len = choose_block_len(temp_len - 1);
+
+              copy_from = UR(temp_len - copy_len + 1);
+              copy_to = UR(temp_len - copy_len + 1);
+
+              if (UR(4)) {
+
+                if (copy_from != copy_to)
+                  memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
+
+              } else
+
+                memset(out_buf + copy_to,
+                       UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
+              core_operator_cycles_puppet_v2[STAGE_OverWrite75] += 1;
+              break;
+
+            }
+
+          }
+
+        }
+
+        tmp_core_time += 1;
+
+        u64 temp_total_found = queued_paths + unique_crashes;
+
+        if (common_fuzz_stuff(argv, out_buf, temp_len))
+          goto abandon_entry_puppet;
+
+        /* out_buf might have been mangled a bit, so let's restore it to its
+           original size and shape. */
+
+        if (temp_len < len) out_buf = ck_realloc(out_buf, len);
+        temp_len = len;
+        memcpy(out_buf, in_buf, len);
+
+        /* If we're finding new stuff, let's run for a bit longer, limits
+           permitting. */
+
+        if (queued_paths != havoc_queued) {
+
+          if (perf_score <= havoc_max_mult * 100) {
+
+            stage_max *= 2;
+            perf_score *= 2;
+
+          }
+
+          havoc_queued = queued_paths;
+
+        }
+
+        if (unlikely(queued_paths + unique_crashes > temp_total_found)) {
+
+          u64 temp_temp_puppet =
+              queued_paths + unique_crashes - temp_total_found;
+          total_puppet_find = total_puppet_find + temp_temp_puppet;
+          for (i = 0; i < operator_num; ++i) {
+
+            if (core_operator_cycles_puppet_v2[i] >
+                core_operator_cycles_puppet_v3[i])
+              core_operator_finds_puppet_v2[i] += temp_temp_puppet;
+
+          }
+
+        }
+
+      }
+
+      new_hit_cnt = queued_paths + unique_crashes;
+
+#ifndef IGNORE_FINDS
+
+      /************
+       * SPLICING *
+       ************/
+
+    retry_splicing_puppet:
+
+      if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet &&
+          queued_paths > 1 && queue_cur->len > 1) {
+
+        struct queue_entry* target;
+        u32                 tid, split_at;
+        u8*                 new_buf;
+        s32                 f_diff, l_diff;
+
+        /* First of all, if we've modified in_buf for havoc, let's clean that
+           up... */
+
+        if (in_buf != orig_in) {
+
+          ck_free(in_buf);
+          in_buf = orig_in;
+          len = queue_cur->len;
+
+        }
+
+        /* Pick a random queue entry and seek to it. Don't splice with yourself.
+         */
+
+        do {
+
+          tid = UR(queued_paths);
+
+        } while (tid == current_entry);
+
+        splicing_with = tid;
+        target = queue;
+
+        while (tid >= 100) {
+
+          target = target->next_100;
+          tid -= 100;
+
+        }
+
+        while (tid--)
+          target = target->next;
+
+        /* Make sure that the target has a reasonable length. */
+
+        while (target && (target->len < 2 || target == queue_cur)) {
+
+          target = target->next;
+          ++splicing_with;
+
+        }
+
+        if (!target) goto retry_splicing_puppet;
+
+        /* Read the testcase into a new buffer. */
+
+        fd = open(target->fname, O_RDONLY);
+
+        if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
+
+        new_buf = ck_alloc_nozero(target->len);
+
+        ck_read(fd, new_buf, target->len, target->fname);
+
+        close(fd);
+
+        /* Find a suitable splicing location, somewhere between the first and
+           the last differing byte. Bail out if the difference is just a single
+           byte or so. */
+
+        locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
+
+        if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
+
+          ck_free(new_buf);
+          goto retry_splicing_puppet;
+
+        }
+
+        /* Split somewhere between the first and last differing byte. */
+
+        split_at = f_diff + UR(l_diff - f_diff);
+
+        /* Do the thing. */
+
+        len = target->len;
+        memcpy(new_buf, in_buf, split_at);
+        in_buf = new_buf;
+        ck_free(out_buf);
+        out_buf = ck_alloc_nozero(len);
+        memcpy(out_buf, in_buf, len);
+
+        goto havoc_stage_puppet;
+
+      }
+
+#endif /* !IGNORE_FINDS */
+
+      ret_val = 0;
+    abandon_entry:
+    abandon_entry_puppet:
+
+      if (splice_cycle >= SPLICE_CYCLES_puppet)
+        SPLICE_CYCLES_puppet =
+            (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+             SPLICE_CYCLES_puppet_low);
+
+      splicing_with = -1;
+
+      munmap(orig_in, queue_cur->len);
+
+      if (in_buf != orig_in) ck_free(in_buf);
+      ck_free(out_buf);
+      ck_free(eff_map);
+
+      if (key_puppet == 1) {
+
+        if (unlikely(queued_paths + unique_crashes >
+                     ((queued_paths + unique_crashes) * limit_time_bound +
+                      orig_hit_cnt_puppet))) {
+
+          key_puppet = 0;
+          cur_ms_lv = get_cur_time();
+          new_hit_cnt = queued_paths + unique_crashes;
+          orig_hit_cnt_puppet = 0;
+          last_limit_time_start = 0;
+
+        }
+
+      }
+
+      if (unlikely(tmp_core_time > period_core)) {
+
+        total_pacemaker_time += tmp_core_time;
+        tmp_core_time = 0;
+        temp_puppet_find = total_puppet_find;
+        new_hit_cnt = queued_paths + unique_crashes;
+
+        u64 temp_stage_finds_puppet = 0;
+        for (i = 0; i < operator_num; ++i) {
+
+          core_operator_finds_puppet[i] = core_operator_finds_puppet_v2[i];
+          core_operator_cycles_puppet[i] = core_operator_cycles_puppet_v2[i];
+          temp_stage_finds_puppet += core_operator_finds_puppet[i];
+
+        }
+
+        key_module = 2;
+
+        old_hit_count = new_hit_cnt;
+
+      }
+
+      return ret_val;
+
+    }
+
+  }
+
+#undef FLIP_BIT
+
+}
+
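+/* Update the PSO (particle swarm optimization) state that steers MOpt's
+   operator selection. Each swarm's position x_now[swarm][op] encodes the
+   selection probability of one mutation operator and is moved by the classic
+   rule v = w*v + c*(L_best - x) + c*(G_best - x) with a linearly decaying
+   inertia weight w_now; positions are then renormalized into the cumulative
+   distribution probability_now[][] that select_algorithm() samples. */
+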
+void pso_updating(void) {
+
+  g_now += 1;
+  if (g_now > g_max) g_now = 0;
+  w_now = (w_init - w_end) * (g_max - g_now) / (g_max) + w_end;
+  int tmp_swarm, i, j;
+  u64 temp_operator_finds_puppet = 0;
+  for (i = 0; i < operator_num; ++i) {
+
+    operator_finds_puppet[i] = core_operator_finds_puppet[i];
+
+    for (j = 0; j < swarm_num; ++j) {
+
+      operator_finds_puppet[i] =
+          operator_finds_puppet[i] + stage_finds_puppet[j][i];
+
+    }
+
+    temp_operator_finds_puppet =
+        temp_operator_finds_puppet + operator_finds_puppet[i];
+
+  }
+
+  for (i = 0; i < operator_num; ++i) {
+
+    if (operator_finds_puppet[i])
+      G_best[i] = (double)((double)(operator_finds_puppet[i]) /
+                           (double)(temp_operator_finds_puppet));
+
+  }
+
+  for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) {
+
+    double x_temp = 0.0;
+    for (i = 0; i < operator_num; ++i) {
+
+      probability_now[tmp_swarm][i] = 0.0;
+      v_now[tmp_swarm][i] =
+          w_now * v_now[tmp_swarm][i] +
+          RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) +
+          RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
+      x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
+      if (x_now[tmp_swarm][i] > v_max)
+        x_now[tmp_swarm][i] = v_max;
+      else if (x_now[tmp_swarm][i] < v_min)
+        x_now[tmp_swarm][i] = v_min;
+      x_temp += x_now[tmp_swarm][i];
+
+    }
+
+    for (i = 0; i < operator_num; ++i) {
+
+      x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
+      if (likely(i != 0))
+        probability_now[tmp_swarm][i] =
+            probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
+      else
+        probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
+
+    }
+
+    if (probability_now[tmp_swarm][operator_num - 1] < 0.99 ||
+        probability_now[tmp_swarm][operator_num - 1] > 1.01)
+      FATAL("ERROR probability");
+
+  }
+
+  swarm_now = 0;
+  key_module = 0;
+
+}
+
+/* The larger change for the MOpt implementation: the original fuzz_one() was
+   renamed to fuzz_one_original(). All documentation references to fuzz_one()
+   therefore mean fuzz_one_original(). */
+
+u8 fuzz_one(char** argv) {
+
+  int key_val_lv = 0;
+  if (limit_time_sig == 0) {
+
+    key_val_lv = fuzz_one_original(argv);
+
+  } else {
+
+    if (key_module == 0)
+      key_val_lv = pilot_fuzzing(argv);
+    else if (key_module == 1)
+      key_val_lv = core_fuzzing(argv);
+    else if (key_module == 2)
+      pso_updating();
+
+  }
+
+  return key_val_lv;
+
+}
+
diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c
new file mode 100644
index 00000000..e22291b5
--- /dev/null
+++ b/src/afl-fuzz-python.c
@@ -0,0 +1,402 @@
+/*
+   american fuzzy lop - fuzzer code
+   --------------------------------
+
+   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+   Forkserver design by Jann Horn <jannhorn@googlemail.com>
+
+   Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This is the real deal: the program takes an instrumented binary and
+   attempts a variety of basic fuzzing tricks, paying close attention to
+   how they affect the execution path.
+
+ */
+
+#include "afl-fuzz.h"
+
+/* Python stuff */
+#ifdef USE_PYTHON
+
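+/* The module named by AFL_PYTHON_MODULE is expected to expose init(seed)
+   and fuzz(buf, add_buf), plus - optionally - init_trim(buf),
+   post_trim(success) and trim(). These are exactly the attributes resolved
+   below; fuzz() and trim() must return a bytearray. (Derived from the
+   lookups and PyByteArray_* calls in this file.) */
+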
+int init_py(void) {
+
+  Py_Initialize();
+  u8* module_name = getenv("AFL_PYTHON_MODULE");
+
+  if (module_name) {
+
+    PyObject* py_name = PyString_FromString(module_name);
+
+    py_module = PyImport_Import(py_name);
+    Py_DECREF(py_name);
+
+    if (py_module != NULL) {
+
+      u8 py_notrim = 0;
+      py_functions[PY_FUNC_INIT] = PyObject_GetAttrString(py_module, "init");
+      py_functions[PY_FUNC_FUZZ] = PyObject_GetAttrString(py_module, "fuzz");
+      py_functions[PY_FUNC_INIT_TRIM] =
+          PyObject_GetAttrString(py_module, "init_trim");
+      py_functions[PY_FUNC_POST_TRIM] =
+          PyObject_GetAttrString(py_module, "post_trim");
+      py_functions[PY_FUNC_TRIM] = PyObject_GetAttrString(py_module, "trim");
+
+      for (u8 py_idx = 0; py_idx < PY_FUNC_COUNT; ++py_idx) {
+
+        if (!py_functions[py_idx] || !PyCallable_Check(py_functions[py_idx])) {
+
+          if (py_idx >= PY_FUNC_INIT_TRIM && py_idx <= PY_FUNC_TRIM) {
+
+            // Implementing the trim API is optional for now
+            if (PyErr_Occurred()) PyErr_Print();
+            py_notrim = 1;
+
+          } else {
+
+            if (PyErr_Occurred()) PyErr_Print();
+            fprintf(stderr,
+                    "Cannot find/call function with index %d in external "
+                    "Python module.\n",
+                    py_idx);
+            return 1;
+
+          }
+
+        }
+
+      }
+
+      if (py_notrim) {
+
+        py_functions[PY_FUNC_INIT_TRIM] = NULL;
+        py_functions[PY_FUNC_POST_TRIM] = NULL;
+        py_functions[PY_FUNC_TRIM] = NULL;
+        WARNF(
+            "Python module does not implement trim API, standard trimming will "
+            "be used.");
+
+      }
+
+      PyObject *py_args, *py_value;
+
+      /* Provide the init function a seed for the Python RNG */
+      py_args = PyTuple_New(1);
+      py_value = PyInt_FromLong(UR(0xFFFFFFFF));
+      if (!py_value) {
+
+        Py_DECREF(py_args);
+        fprintf(stderr, "Cannot convert argument\n");
+        return 1;
+
+      }
+
+      PyTuple_SetItem(py_args, 0, py_value);
+
+      py_value = PyObject_CallObject(py_functions[PY_FUNC_INIT], py_args);
+
+      Py_DECREF(py_args);
+
+      if (py_value == NULL) {
+
+        PyErr_Print();
+        fprintf(stderr, "Call failed\n");
+        return 1;
+
+      }
+
+    } else {
+
+      PyErr_Print();
+      fprintf(stderr, "Failed to load \"%s\"\n", module_name);
+      return 1;
+
+    }
+
+  }
+
+  return 0;
+
+}
+
+void finalize_py(void) {
+
+  if (py_module != NULL) {
+
+    u32 i;
+    for (i = 0; i < PY_FUNC_COUNT; ++i)
+      Py_XDECREF(py_functions[i]);
+
+    Py_DECREF(py_module);
+
+  }
+
+  Py_Finalize();
+
+}
+
+void fuzz_py(char* buf, size_t buflen, char* add_buf, size_t add_buflen,
+             char** ret, size_t* retlen) {
+
+  if (py_module != NULL) {
+
+    PyObject *py_args, *py_value;
+    py_args = PyTuple_New(2);
+    py_value = PyByteArray_FromStringAndSize(buf, buflen);
+    if (!py_value) {
+
+      Py_DECREF(py_args);
+      fprintf(stderr, "Cannot convert argument\n");
+      return;
+
+    }
+
+    PyTuple_SetItem(py_args, 0, py_value);
+
+    py_value = PyByteArray_FromStringAndSize(add_buf, add_buflen);
+    if (!py_value) {
+
+      Py_DECREF(py_args);
+      fprintf(stderr, "Cannot convert argument\n");
+      return;
+
+    }
+
+    PyTuple_SetItem(py_args, 1, py_value);
+
+    py_value = PyObject_CallObject(py_functions[PY_FUNC_FUZZ], py_args);
+
+    Py_DECREF(py_args);
+
+    if (py_value != NULL) {
+
+      *retlen = PyByteArray_Size(py_value);
+      *ret = malloc(*retlen);
+      memcpy(*ret, PyByteArray_AsString(py_value), *retlen);
+      Py_DECREF(py_value);
+
+    } else {
+
+      PyErr_Print();
+      fprintf(stderr, "Call failed\n");
+      return;
+
+    }
+
+  }
+
+}
+
+u32 init_trim_py(char* buf, size_t buflen) {
+
+  PyObject *py_args, *py_value;
+
+  py_args = PyTuple_New(1);
+  py_value = PyByteArray_FromStringAndSize(buf, buflen);
+  if (!py_value) {
+
+    Py_DECREF(py_args);
+    FATAL("Failed to convert arguments");
+
+  }
+
+  PyTuple_SetItem(py_args, 0, py_value);
+
+  py_value = PyObject_CallObject(py_functions[PY_FUNC_INIT_TRIM], py_args);
+  Py_DECREF(py_args);
+
+  if (py_value != NULL) {
+
+    u32 retcnt = PyInt_AsLong(py_value);
+    Py_DECREF(py_value);
+    return retcnt;
+
+  } else {
+
+    PyErr_Print();
+    FATAL("Call failed");
+
+  }
+
+}
+
+u32 post_trim_py(char success) {
+
+  PyObject *py_args, *py_value;
+
+  py_args = PyTuple_New(1);
+
+  py_value = PyBool_FromLong(success);
+  if (!py_value) {
+
+    Py_DECREF(py_args);
+    FATAL("Failed to convert arguments");
+
+  }
+
+  PyTuple_SetItem(py_args, 0, py_value);
+
+  py_value = PyObject_CallObject(py_functions[PY_FUNC_POST_TRIM], py_args);
+  Py_DECREF(py_args);
+
+  if (py_value != NULL) {
+
+    u32 retcnt = PyInt_AsLong(py_value);
+    Py_DECREF(py_value);
+    return retcnt;
+
+  } else {
+
+    PyErr_Print();
+    FATAL("Call failed");
+
+  }
+
+}
+
+void trim_py(char** ret, size_t* retlen) {
+
+  PyObject *py_args, *py_value;
+
+  py_args = PyTuple_New(0);
+  py_value = PyObject_CallObject(py_functions[PY_FUNC_TRIM], py_args);
+  Py_DECREF(py_args);
+
+  if (py_value != NULL) {
+
+    *retlen = PyByteArray_Size(py_value);
+    *ret = malloc(*retlen);
+    memcpy(*ret, PyByteArray_AsString(py_value), *retlen);
+    Py_DECREF(py_value);
+
+  } else {
+
+    PyErr_Print();
+    FATAL("Call failed");
+
+  }
+
+}
+
+u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) {
+
+  static u8 tmp[64];
+  static u8 clean_trace[MAP_SIZE];
+
+  u8  needs_write = 0, fault = 0;
+  u32 trim_exec = 0;
+  u32 orig_len = q->len;
+
+  stage_name = tmp;
+  bytes_trim_in += q->len;
+
+  /* Initialize trimming in the Python module */
+  stage_cur = 0;
+  stage_max = init_trim_py(in_buf, q->len);
+
+  if (not_on_tty && debug)
+    SAYF("[Python Trimming] START: Max %d iterations, %u bytes", stage_max,
+         q->len);
+
+  while (stage_cur < stage_max) {
+
+    sprintf(tmp, "ptrim %s", DI(trim_exec));
+
+    u32 cksum;
+
+    char*  retbuf = NULL;
+    size_t retlen = 0;
+
+    trim_py(&retbuf, &retlen);
+
+    if (retlen > orig_len)
+      FATAL(
+          "Trimmed data returned by Python module is larger than original "
+          "data");
+
+    write_to_testcase(retbuf, retlen);
+
+    fault = run_target(argv, exec_tmout);
+    ++trim_execs;
+
+    if (stop_soon || fault == FAULT_ERROR) goto abort_trimming;
+
+    cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+
+    if (cksum == q->exec_cksum) {
+
+      q->len = retlen;
+      memcpy(in_buf, retbuf, retlen);
+
+      /* Let's save a clean trace, which will be needed by
+         update_bitmap_score once we're done with the trimming stuff. */
+
+      if (!needs_write) {
+
+        needs_write = 1;
+        memcpy(clean_trace, trace_bits, MAP_SIZE);
+
+      }
+
+      /* Tell the Python module that the trimming was successful */
+      stage_cur = post_trim_py(1);
+
+      if (not_on_tty && debug)
+        SAYF("[Python Trimming] SUCCESS: %d/%d iterations (now at %u bytes)",
+             stage_cur, stage_max, q->len);
+
+    } else {
+
+      /* Tell the Python module that the trimming was unsuccessful */
+      stage_cur = post_trim_py(0);
+      if (not_on_tty && debug)
+        SAYF("[Python Trimming] FAILURE: %d/%d iterations", stage_cur,
+             stage_max);
+
+    }
+
+    /* trim_py() malloc's a fresh buffer on every call; free it now that its
+       contents have been consumed. */
+    free(retbuf);
+
+    /* Since this can be slow, update the screen every now and then. */
+
+    if (!(trim_exec++ % stats_update_freq)) show_stats();
+
+  }
+
+  if (not_on_tty && debug)
+    SAYF("[Python Trimming] DONE: %u bytes -> %u bytes", orig_len, q->len);
+
+  /* If we have made changes to in_buf, we also need to update the on-disk
+     version of the test case. */
+
+  if (needs_write) {
+
+    s32 fd;
+
+    unlink(q->fname);                                      /* ignore errors */
+
+    fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);
+
+    if (fd < 0) PFATAL("Unable to create '%s'", q->fname);
+
+    ck_write(fd, in_buf, q->len, q->fname);
+    close(fd);
+
+    memcpy(trace_bits, clean_trace, MAP_SIZE);
+    update_bitmap_score(q);
+
+  }
+
+abort_trimming:
+
+  bytes_trim_out += q->len;
+  return fault;
+
+}
+
+#endif /* USE_PYTHON */
+
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
new file mode 100644
index 00000000..22a9ccb0
--- /dev/null
+++ b/src/afl-fuzz-queue.c
@@ -0,0 +1,454 @@
+/*
+   american fuzzy lop - fuzzer code
+   --------------------------------
+
+   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+   Forkserver design by Jann Horn <jannhorn@googlemail.com>
+
+   Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This is the real deal: the program takes an instrumented binary and
+   attempts a variety of basic fuzzing tricks, paying close attention to
+   how they affect the execution path.
+
+ */
+
+#include "afl-fuzz.h"
+
+/* Mark deterministic checks as done for a particular queue entry. We use the
+   .state file to avoid repeating deterministic fuzzing when resuming aborted
+   scans. */
+
+void mark_as_det_done(struct queue_entry* q) {
+
+  u8* fn = strrchr(q->fname, '/');
+  s32 fd;
+
+  fn = alloc_printf("%s/queue/.state/deterministic_done/%s", out_dir, fn + 1);
+
+  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
+  if (fd < 0) PFATAL("Unable to create '%s'", fn);
+  close(fd);
+
+  ck_free(fn);
+
+  q->passed_det = 1;
+
+}
+
+/* Mark as variable. Create symlinks if possible to make it easier to examine
+   the files. */
+
+void mark_as_variable(struct queue_entry* q) {
+
+  u8 *fn = strrchr(q->fname, '/') + 1, *ldest;
+
+  ldest = alloc_printf("../../%s", fn);
+  fn = alloc_printf("%s/queue/.state/variable_behavior/%s", out_dir, fn);
+
+  if (symlink(ldest, fn)) {
+
+    s32 fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
+    if (fd < 0) PFATAL("Unable to create '%s'", fn);
+    close(fd);
+
+  }
+
+  ck_free(ldest);
+  ck_free(fn);
+
+  q->var_behavior = 1;
+
+}
+
+/* Mark / unmark as redundant (edge-only). This is not used for restoring state,
+   but may be useful for post-processing datasets. */
+
+void mark_as_redundant(struct queue_entry* q, u8 state) {
+
+  u8* fn;
+
+  if (state == q->fs_redundant) return;
+
+  q->fs_redundant = state;
+
+  fn = strrchr(q->fname, '/');
+  fn = alloc_printf("%s/queue/.state/redundant_edges/%s", out_dir, fn + 1);
+
+  if (state) {
+
+    s32 fd;
+
+    fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
+    if (fd < 0) PFATAL("Unable to create '%s'", fn);
+    close(fd);
+
+  } else {
+
+    if (unlink(fn)) PFATAL("Unable to remove '%s'", fn);
+
+  }
+
+  ck_free(fn);
+
+}
+
+/* Append new test case to the queue. */
+
+void add_to_queue(u8* fname, u32 len, u8 passed_det) {
+
+  struct queue_entry* q = ck_alloc(sizeof(struct queue_entry));
+
+  q->fname = fname;
+  q->len = len;
+  q->depth = cur_depth + 1;
+  q->passed_det = passed_det;
+  q->n_fuzz = 1;
+
+  if (q->depth > max_depth) max_depth = q->depth;
+
+  if (queue_top) {
+
+    queue_top->next = q;
+    queue_top = q;
+
+  } else
+
+    q_prev100 = queue = queue_top = q;
+
+  ++queued_paths;
+  ++pending_not_fuzzed;
+
+  cycles_wo_finds = 0;
+
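+  /* Link every 100th entry into the next_100 skip list so that seeking to a
+     random queue position (as the splicing code does) doesn't have to walk
+     the whole linked list one node at a time. */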
+  if (!(queued_paths % 100)) {
+
+    q_prev100->next_100 = q;
+    q_prev100 = q;
+
+  }
+
+  last_path_time = get_cur_time();
+
+}
+
+/* Destroy the entire queue. */
+
+void destroy_queue(void) {
+
+  struct queue_entry *q = queue, *n;
+
+  while (q) {
+
+    n = q->next;
+    ck_free(q->fname);
+    ck_free(q->trace_mini);
+    ck_free(q);
+    q = n;
+
+  }
+
+}
+
+/* When we bump into a new path, we call this to see if the path appears
+   more "favorable" than any of the existing ones. The purpose of the
+   "favorables" is to have a minimal set of paths that trigger all the bits
+   seen in the bitmap so far, and focus on fuzzing them at the expense of
+   the rest.
+
+   The first step of the process is to maintain a list of top_rated[] entries
+   for every byte in the bitmap. We win that slot if there is no previous
+   contender, or if the contender has a more favorable speed x size factor. */
+
+void update_bitmap_score(struct queue_entry* q) {
+
+  u32 i;
+  u64 fav_factor = q->exec_us * q->len;
+  u64 fuzz_p2 = next_p2(q->n_fuzz);
+
+  /* For every byte set in trace_bits[], see if there is a previous winner,
+     and how it compares to us. */
+
+  for (i = 0; i < MAP_SIZE; ++i)
+
+    if (trace_bits[i]) {
+
+      if (top_rated[i]) {
+
+        /* Faster-executing or smaller test cases are favored. */
+        u64 top_rated_fuzz_p2 = next_p2(top_rated[i]->n_fuzz);
+        u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len;
+
+        if (fuzz_p2 > top_rated_fuzz_p2) {
+
+          continue;
+
+        } else if (fuzz_p2 == top_rated_fuzz_p2) {
+
+          if (fav_factor > top_rated_fav_factor) continue;
+
+        }
+
+        /* Looks like we're going to win. Decrease ref count for the
+           previous winner, discard its trace_bits[] if necessary. */
+
+        if (!--top_rated[i]->tc_ref) {
+
+          ck_free(top_rated[i]->trace_mini);
+          top_rated[i]->trace_mini = 0;
+
+        }
+
+      }
+
+      /* Insert ourselves as the new winner. */
+
+      top_rated[i] = q;
+      ++q->tc_ref;
+
+      if (!q->trace_mini) {
+
+        q->trace_mini = ck_alloc(MAP_SIZE >> 3);
+        minimize_bits(q->trace_mini, trace_bits);
+
+      }
+
+      score_changed = 1;
+
+    }
+
+}
+
+/* The second part of the mechanism discussed above is a routine that
+   goes over top_rated[] entries, and then sequentially grabs winners for
+   previously-unseen bytes (temp_v) and marks them as favored, at least
+   until the next run. The favored entries are given more air time during
+   all fuzzing steps. */
+
+void cull_queue(void) {
+
+  struct queue_entry* q;
+  static u8           temp_v[MAP_SIZE >> 3];
+  u32                 i;
+
+  if (dumb_mode || !score_changed) return;
+
+  score_changed = 0;
+
+  memset(temp_v, 255, MAP_SIZE >> 3);
+
+  queued_favored = 0;
+  pending_favored = 0;
+
+  q = queue;
+
+  while (q) {
+
+    q->favored = 0;
+    q = q->next;
+
+  }
+
+  /* Let's see if anything in the bitmap isn't captured in temp_v.
+     If yes, and if it has a top_rated[] contender, let's use it. */
+
+  for (i = 0; i < MAP_SIZE; ++i)
+    if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {
+
+      u32 j = MAP_SIZE >> 3;
+
+      /* Remove all bits belonging to the current entry from temp_v. */
+
+      while (j--)
+        if (top_rated[i]->trace_mini[j])
+          temp_v[j] &= ~top_rated[i]->trace_mini[j];
+
+      top_rated[i]->favored = 1;
+      ++queued_favored;
+
+      if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed)
+        ++pending_favored;
+
+    }
+
+  q = queue;
+
+  while (q) {
+
+    mark_as_redundant(q, !q->favored);
+    q = q->next;
+
+  }
+
+}
+
+/* Calculate case desirability score to adjust the length of havoc fuzzing.
+   A helper function for fuzz_one(). Maybe some of these constants should
+   go into config.h. */
+
+u32 calculate_score(struct queue_entry* q) {
+
+  u32 avg_exec_us = total_cal_us / total_cal_cycles;
+  u32 avg_bitmap_size = total_bitmap_size / total_bitmap_entries;
+  u32 perf_score = 100;
+
+  /* Adjust score based on execution speed of this path, compared to the
+     global average. Multiplier ranges from 0.1x to 3x. Fast inputs are
+     less expensive to fuzz, so we're giving them more air time. */
+
+  // TODO BUG FIXME: is this really a good idea?
+  // This sounds like looking for lost keys under a street light just because
+  // the light is better there.
+  // Longer execution time means more work done on the input, deeper
+  // coverage, and thus better fuzzing, right? -mh
+
+  if (q->exec_us * 0.1 > avg_exec_us)
+    perf_score = 10;
+  else if (q->exec_us * 0.25 > avg_exec_us)
+    perf_score = 25;
+  else if (q->exec_us * 0.5 > avg_exec_us)
+    perf_score = 50;
+  else if (q->exec_us * 0.75 > avg_exec_us)
+    perf_score = 75;
+  else if (q->exec_us * 4 < avg_exec_us)
+    perf_score = 300;
+  else if (q->exec_us * 3 < avg_exec_us)
+    perf_score = 200;
+  else if (q->exec_us * 2 < avg_exec_us)
+    perf_score = 150;
+
+  /* Adjust score based on bitmap size. The working theory is that better
+     coverage translates to better targets. Multiplier from 0.25x to 3x. */
+
+  if (q->bitmap_size * 0.3 > avg_bitmap_size)
+    perf_score *= 3;
+  else if (q->bitmap_size * 0.5 > avg_bitmap_size)
+    perf_score *= 2;
+  else if (q->bitmap_size * 0.75 > avg_bitmap_size)
+    perf_score *= 1.5;
+  else if (q->bitmap_size * 3 < avg_bitmap_size)
+    perf_score *= 0.25;
+  else if (q->bitmap_size * 2 < avg_bitmap_size)
+    perf_score *= 0.5;
+  else if (q->bitmap_size * 1.5 < avg_bitmap_size)
+    perf_score *= 0.75;
+
+  /* Adjust score based on handicap. Handicap is proportional to how late
+     in the game we learned about this path. Latecomers are allowed to run
+     for a bit longer until they catch up with the rest. */
+
+  if (q->handicap >= 4) {
+
+    perf_score *= 4;
+    q->handicap -= 4;
+
+  } else if (q->handicap) {
+
+    perf_score *= 2;
+    --q->handicap;
+
+  }
+
+  /* Final adjustment based on input depth, under the assumption that fuzzing
+     deeper test cases is more likely to reveal stuff that can't be
+     discovered with traditional fuzzers. */
+
+  switch (q->depth) {
+
+    case 0 ... 3: break;
+    case 4 ... 7: perf_score *= 2; break;
+    case 8 ... 13: perf_score *= 3; break;
+    case 14 ... 25: perf_score *= 4; break;
+    default: perf_score *= 5;
+
+  }
+
+  u64 fuzz = q->n_fuzz;
+  u64 fuzz_total;
+
+  u32 n_paths, fuzz_mu;
+  u32 factor = 1;
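+
+  /* AFLFast-style power schedules: EXPLOIT always grants the maximum factor,
+     COE only spends energy on entries exercised less often than the queue
+     average, and FAST/LIN/QUAD grow energy with fuzz_level while dividing
+     by how often this entry's path has been seen (n_fuzz). */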
+
+  switch (schedule) {
+
+    case EXPLORE: break;
+
+    case EXPLOIT: factor = MAX_FACTOR; break;
+
+    case COE:
+      fuzz_total = 0;
+      n_paths = 0;
+
+      struct queue_entry* queue_it = queue;
+      while (queue_it) {
+
+        fuzz_total += queue_it->n_fuzz;
+        n_paths++;
+        queue_it = queue_it->next;
+
+      }
+
+      fuzz_mu = fuzz_total / n_paths;
+      if (fuzz <= fuzz_mu) {
+
+        if (q->fuzz_level < 16)
+          factor = ((u32)(1 << q->fuzz_level));
+        else
+          factor = MAX_FACTOR;
+
+      } else {
+
+        factor = 0;
+
+      }
+
+      break;
+
+    case FAST:
+      if (q->fuzz_level < 16) {
+
+        factor = ((u32)(1 << q->fuzz_level)) / (fuzz == 0 ? 1 : fuzz);
+
+      } else
+
+        factor = MAX_FACTOR / (fuzz == 0 ? 1 : next_p2(fuzz));
+      break;
+
+    case LIN: factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz); break;
+
+    case QUAD:
+      factor = q->fuzz_level * q->fuzz_level / (fuzz == 0 ? 1 : fuzz);
+      break;
+
+    default: FATAL("Unknown Power Schedule");
+
+  }
+
+  if (factor > MAX_FACTOR) factor = MAX_FACTOR;
+
+  perf_score *= factor / POWER_BETA;
+
+  // MOpt mode
+  if (limit_time_sig != 0 && max_depth - q->depth < 3)
+    perf_score *= 2;
+  else if (perf_score < 1)
+    perf_score =
+        1;  // Add a lower bound to AFLFast's energy assignment strategies
+
+  /* Make sure that we don't go over limit. */
+
+  if (perf_score > havoc_max_mult * 100) perf_score = havoc_max_mult * 100;
+
+  return perf_score;
+
+}
+
diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c
new file mode 100644
index 00000000..4093d991
--- /dev/null
+++ b/src/afl-fuzz-run.c
@@ -0,0 +1,801 @@
+/*
+   american fuzzy lop - fuzzer code
+   --------------------------------
+
+   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+   Forkserver design by Jann Horn <jannhorn@googlemail.com>
+
+   Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This is the real deal: the program takes an instrumented binary and
+   attempts a variety of basic fuzzing tricks, paying close attention to
+   how they affect the execution path.
+
+ */
+
+#include "afl-fuzz.h"
+
+/* Execute target application, monitoring for timeouts. Return status
+   information. The called program will update trace_bits[]. */
+
+u8 run_target(char** argv, u32 timeout) {
+
+  static struct itimerval it;
+  static u32              prev_timed_out = 0;
+  static u64              exec_ms = 0;
+
+  int status = 0;
+  u32 tb4;
+
+  child_timed_out = 0;
+
+  /* After this memset, trace_bits[] are effectively volatile, so we
+     must prevent any earlier operations from venturing into that
+     territory. */
+
+  memset(trace_bits, 0, MAP_SIZE);
+  MEM_BARRIER();
+
+  /* If we're running in "dumb" mode, we can't rely on the fork server
+     logic compiled into the target program, so we will just keep calling
+     execve(). There is a bit of code duplication between here and
+     init_forkserver(), but c'est la vie. */
+
+  if (dumb_mode == 1 || no_forkserver) {
+
+    child_pid = fork();
+
+    if (child_pid < 0) PFATAL("fork() failed");
+
+    if (!child_pid) {
+
+      struct rlimit r;
+
+      if (mem_limit) {
+
+        r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
+
+#ifdef RLIMIT_AS
+
+        setrlimit(RLIMIT_AS, &r);                          /* Ignore errors */
+
+#else
+
+        setrlimit(RLIMIT_DATA, &r);                        /* Ignore errors */
+
+#endif /* ^RLIMIT_AS */
+
+      }
+
+      r.rlim_max = r.rlim_cur = 0;
+
+      setrlimit(RLIMIT_CORE, &r);                          /* Ignore errors */
+
+      /* Isolate the process and configure standard descriptors. If out_file is
+         specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
+
+      setsid();
+
+      dup2(dev_null_fd, 1);
+      dup2(dev_null_fd, 2);
+
+      if (out_file) {
+
+        dup2(dev_null_fd, 0);
+
+      } else {
+
+        dup2(out_fd, 0);
+        close(out_fd);
+
+      }
+
+      /* On Linux, would be faster to use O_CLOEXEC. Maybe TODO. */
+
+      close(dev_null_fd);
+      close(out_dir_fd);
+#ifndef HAVE_ARC4RANDOM
+      close(dev_urandom_fd);
+#endif
+      close(fileno(plot_file));
+
+      /* Set sane defaults for ASAN if nothing else specified. */
+
+      setenv("ASAN_OPTIONS",
+             "abort_on_error=1:"
+             "detect_leaks=0:"
+             "symbolize=0:"
+             "allocator_may_return_null=1",
+             0);
+
+      setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
+                             "symbolize=0:"
+                             "msan_track_origins=0", 0);
+
+      execv(target_path, argv);
+
+      /* Use a distinctive bitmap value to tell the parent about execv()
+         falling through. */
+
+      *(u32*)trace_bits = EXEC_FAIL_SIG;
+      exit(0);
+
+    }
+
+  } else {
+
+    s32 res;
+
+    /* In non-dumb mode, we have the fork server up and running, so simply
+       tell it to have at it, and then read back PID. */
+
+    if ((res = write(fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
+
+      if (stop_soon) return 0;
+      RPFATAL(res, "Unable to request new process from fork server (OOM?)");
+
+    }
+
+    if ((res = read(fsrv_st_fd, &child_pid, 4)) != 4) {
+
+      if (stop_soon) return 0;
+      RPFATAL(res, "Unable to request new process from fork server (OOM?)");
+
+    }
+
+    if (child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
+
+  }
+
+  /* Configure timeout, as requested by the user, then wait for the child to
+     terminate. */
+
+  it.it_value.tv_sec = (timeout / 1000);
+  it.it_value.tv_usec = (timeout % 1000) * 1000;
+
+  setitimer(ITIMER_REAL, &it, NULL);
+
+  /* The SIGALRM handler simply kills the child_pid and sets child_timed_out. */
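+
+  /* For reference, that handler (installed during signal setup) is
+     essentially the following - a sketch, not necessarily a verbatim copy:
+
+       static void handle_timeout(int sig) {
+
+         if (child_pid > 0) {
+
+           child_timed_out = 1;
+           kill(child_pid, SIGKILL);
+
+         } else if (child_pid == -1 && forksrv_pid > 0) {
+
+           child_timed_out = 1;
+           kill(forksrv_pid, SIGKILL);
+
+         }
+
+       }
+  */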
+
+  if (dumb_mode == 1 || no_forkserver) {
+
+    if (waitpid(child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
+
+  } else {
+
+    s32 res;
+
+    if ((res = read(fsrv_st_fd, &status, 4)) != 4) {
+
+      if (stop_soon) return 0;
+      RPFATAL(res, "Unable to communicate with fork server (OOM?)");
+
+    }
+
+  }
+
+  if (!WIFSTOPPED(status)) child_pid = 0;
+
+  getitimer(ITIMER_REAL, &it);
+  exec_ms =
+      (u64)timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000);
+  if (slowest_exec_ms < exec_ms) slowest_exec_ms = exec_ms;
+
+  it.it_value.tv_sec = 0;
+  it.it_value.tv_usec = 0;
+
+  setitimer(ITIMER_REAL, &it, NULL);
+
+  ++total_execs;
+
+  /* Any subsequent operations on trace_bits must not be moved by the
+     compiler below this point. Past this location, trace_bits[] behave
+     very normally and do not have to be treated as volatile. */
+
+  MEM_BARRIER();
+
+  tb4 = *(u32*)trace_bits;
+
+#ifdef __x86_64__
+  classify_counts((u64*)trace_bits);
+#else
+  classify_counts((u32*)trace_bits);
+#endif /* ^__x86_64__ */
+
+  prev_timed_out = child_timed_out;
+
+  /* Report outcome to caller. */
+
+  if (WIFSIGNALED(status) && !stop_soon) {
+
+    kill_signal = WTERMSIG(status);
+
+    if (child_timed_out && kill_signal == SIGKILL) return FAULT_TMOUT;
+
+    return FAULT_CRASH;
+
+  }
+
+  /* A somewhat nasty hack for MSAN, which doesn't support abort_on_error and
+     must use a special exit code. */
+
+  if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
+
+    kill_signal = 0;
+    return FAULT_CRASH;
+
+  }
+
+  if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG)
+    return FAULT_ERROR;
+
+  return FAULT_NONE;
+
+}
+
+/* Write modified data to file for testing. If out_file is set, the old file
+   is unlinked and a new one is created. Otherwise, out_fd is rewound and
+   truncated. */
+
+void write_to_testcase(void* mem, u32 len) {
+
+  s32 fd = out_fd;
+
+  if (out_file) {
+
+    unlink(out_file);                                     /* Ignore errors. */
+
+    fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
+
+    if (fd < 0) PFATAL("Unable to create '%s'", out_file);
+
+  } else
+
+    lseek(fd, 0, SEEK_SET);
+
+  if (pre_save_handler) {
+
+    u8*    new_data;
+    size_t new_size = pre_save_handler(mem, len, &new_data);
+    ck_write(fd, new_data, new_size, out_file);
+    len = new_size;       /* so the ftruncate() below matches what we wrote */
+
+  } else {
+
+    ck_write(fd, mem, len, out_file);
+
+  }
+
+  if (!out_file) {
+
+    if (ftruncate(fd, len)) PFATAL("ftruncate() failed");
+    lseek(fd, 0, SEEK_SET);
+
+  } else
+
+    close(fd);
+
+}
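+
+/* For reference, a pre_save_handler loaded from a custom mutator library is
+   expected to look roughly like the following hypothetical example, which
+   returns the new length and points *new_data at a buffer it owns:
+
+     size_t afl_pre_save_handler(u8* data, size_t size, u8** new_data) {
+
+       static u8 buf[MAX_FILE];
+       size_t    n = size < MAX_FILE ? size : MAX_FILE;
+
+       memcpy(buf, data, n);     /* e.g. patch a length field or checksum */
+       *new_data = buf;
+       return n;
+
+     }
+
+   (Illustrative only; the exact contract belongs to the custom mutator
+   support, and the handler may rewrite or resize the buffer freely.) */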
+
+/* The same, but with an adjustable gap. Used for trimming. */
+
+void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
+
+  s32 fd = out_fd;
+  u32 tail_len = len - skip_at - skip_len;
+
+  if (out_file) {
+
+    unlink(out_file);                                     /* Ignore errors. */
+
+    fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
+
+    if (fd < 0) PFATAL("Unable to create '%s'", out_file);
+
+  } else
+
+    lseek(fd, 0, SEEK_SET);
+
+  if (skip_at) ck_write(fd, mem, skip_at, out_file);
+
+  u8* memu8 = mem;
+  if (tail_len) ck_write(fd, memu8 + skip_at + skip_len, tail_len, out_file);
+
+  if (!out_file) {
+
+    if (ftruncate(fd, len - skip_len)) PFATAL("ftruncate() failed");
+    lseek(fd, 0, SEEK_SET);
+
+  } else
+
+    close(fd);
+
+}
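+
+/* Example: len = 10, skip_at = 4, skip_len = 2 writes bytes 0..3 and 6..9,
+   producing the 8-byte deletion candidate that trim_case() then test-runs. */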
+
+/* Calibrate a new test case. This is done when processing the input directory
+   to warn about flaky or otherwise problematic test cases early on; and when
+   new paths are discovered to detect variable behavior and so on. */
+
+u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 handicap,
+                  u8 from_queue) {
+
+  static u8 first_trace[MAP_SIZE];
+
+  u8 fault = 0, new_bits = 0, var_detected = 0,
+     first_run = (q->exec_cksum == 0);
+
+  u64 start_us, stop_us;
+
+  s32 old_sc = stage_cur, old_sm = stage_max;
+  u32 use_tmout = exec_tmout;
+  u8* old_sn = stage_name;
+
+  /* Be a bit more generous about timeouts when resuming sessions, or when
+     trying to calibrate already-added finds. This helps avoid trouble due
+     to intermittent latency. */
+
+  if (!from_queue || resuming_fuzz)
+    use_tmout =
+        MAX(exec_tmout + CAL_TMOUT_ADD, exec_tmout * CAL_TMOUT_PERC / 100);
+
+  ++q->cal_failed;
+
+  stage_name = "calibration";
+  stage_max = fast_cal ? 3 : CAL_CYCLES;
+
+  /* Make sure the forkserver is up before we do anything, and let's not
+     count its spin-up time toward binary calibration. */
+
+  if (dumb_mode != 1 && !no_forkserver && !forksrv_pid) init_forkserver(argv);
+
+  if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE);
+
+  start_us = get_cur_time_us();
+
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+    u32 cksum;
+
+    if (!first_run && !(stage_cur % stats_update_freq)) show_stats();
+
+    write_to_testcase(use_mem, q->len);
+
+    fault = run_target(argv, use_tmout);
+
+    /* stop_soon is set by the handler for Ctrl+C. When it's pressed,
+       we want to bail out quickly. */
+
+    if (stop_soon || fault != crash_mode) goto abort_calibration;
+
+    if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) {
+
+      fault = FAULT_NOINST;
+      goto abort_calibration;
+
+    }
+
+    cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+
+    if (q->exec_cksum != cksum) {
+
+      u8 hnb = has_new_bits(virgin_bits);
+      if (hnb > new_bits) new_bits = hnb;
+
+      if (q->exec_cksum) {
+
+        u32 i;
+
+        for (i = 0; i < MAP_SIZE; ++i) {
+
+          if (!var_bytes[i] && first_trace[i] != trace_bits[i]) {
+
+            var_bytes[i] = 1;
+            stage_max = CAL_CYCLES_LONG;
+
+          }
+
+        }
+
+        var_detected = 1;
+
+      } else {
+
+        q->exec_cksum = cksum;
+        memcpy(first_trace, trace_bits, MAP_SIZE);
+
+      }
+
+    }
+
+  }
+
+  stop_us = get_cur_time_us();
+
+  total_cal_us += stop_us - start_us;
+  total_cal_cycles += stage_max;
+
+  /* OK, let's collect some stats about the performance of this test case.
+     This is used for fuzzing air time calculations in calculate_score(). */
+
+  q->exec_us = (stop_us - start_us) / stage_max;
+  q->bitmap_size = count_bytes(trace_bits);
+  q->handicap = handicap;
+  q->cal_failed = 0;
+
+  total_bitmap_size += q->bitmap_size;
+  ++total_bitmap_entries;
+
+  update_bitmap_score(q);
+
+  /* If this case didn't result in new output from the instrumentation, tell
+     parent. This is a non-critical problem, but something to warn the user
+     about. */
+
+  if (!dumb_mode && first_run && !fault && !new_bits) fault = FAULT_NOBITS;
+
+abort_calibration:
+
+  if (new_bits == 2 && !q->has_new_cov) {
+
+    q->has_new_cov = 1;
+    ++queued_with_cov;
+
+  }
+
+  /* Mark variable paths. */
+
+  if (var_detected) {
+
+    var_byte_count = count_bytes(var_bytes);
+
+    if (!q->var_behavior) {
+
+      mark_as_variable(q);
+      ++queued_variable;
+
+    }
+
+  }
+
+  stage_name = old_sn;
+  stage_cur = old_sc;
+  stage_max = old_sm;
+
+  if (!first_run) show_stats();
+
+  return fault;
+
+}
+
+/* Grab interesting test cases from other fuzzers. */
+
+void sync_fuzzers(char** argv) {
+
+  DIR*           sd;
+  struct dirent* sd_ent;
+  u32            sync_cnt = 0;
+
+  sd = opendir(sync_dir);
+  if (!sd) PFATAL("Unable to open '%s'", sync_dir);
+
+  stage_max = stage_cur = 0;
+  cur_depth = 0;
+
+  /* Look at the entries created for every other fuzzer in the sync directory.
+   */
+
+  while ((sd_ent = readdir(sd))) {
+
+    static u8 stage_tmp[128];
+
+    DIR*           qd;
+    struct dirent* qd_ent;
+    u8 *           qd_path, *qd_synced_path;
+    u32            min_accept = 0, next_min_accept;
+
+    s32 id_fd;
+
+    /* Skip dot files and our own output directory. */
+
+    if (sd_ent->d_name[0] == '.' || !strcmp(sync_id, sd_ent->d_name)) continue;
+
+    /* Skip anything that doesn't have a queue/ subdirectory. */
+
+    qd_path = alloc_printf("%s/%s/queue", sync_dir, sd_ent->d_name);
+
+    if (!(qd = opendir(qd_path))) {
+
+      ck_free(qd_path);
+      continue;
+
+    }
+
+    /* Retrieve the ID of the last seen test case. */
+
+    qd_synced_path = alloc_printf("%s/.synced/%s", out_dir, sd_ent->d_name);
+
+    id_fd = open(qd_synced_path, O_RDWR | O_CREAT, 0600);
+
+    if (id_fd < 0) PFATAL("Unable to create '%s'", qd_synced_path);
+
+    if (read(id_fd, &min_accept, sizeof(u32)) > 0) lseek(id_fd, 0, SEEK_SET);
+
+    next_min_accept = min_accept;
+
+    /* Show stats */
+
+    sprintf(stage_tmp, "sync %u", ++sync_cnt);
+    stage_name = stage_tmp;
+    stage_cur = 0;
+    stage_max = 0;
+
+    /* For every file queued by this fuzzer, parse the ID and see if we have
+       looked at it before; exec a test case if not. */
+
+    while ((qd_ent = readdir(qd))) {
+
+      u8*         path;
+      s32         fd;
+      struct stat st;
+
+      if (qd_ent->d_name[0] == '.' ||
+          sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 ||
+          syncing_case < min_accept)
+        continue;
+
+      /* OK, sounds like a new one. Let's give it a try. */
+
+      if (syncing_case >= next_min_accept) next_min_accept = syncing_case + 1;
+
+      path = alloc_printf("%s/%s", qd_path, qd_ent->d_name);
+
+      /* Allow this to fail in case the other fuzzer is resuming or so... */
+
+      fd = open(path, O_RDONLY);
+
+      if (fd < 0) {
+
+        ck_free(path);
+        continue;
+
+      }
+
+      if (fstat(fd, &st)) PFATAL("fstat() failed");
+
+      /* Ignore zero-sized or oversized files. */
+
+      if (st.st_size && st.st_size <= MAX_FILE) {
+
+        u8  fault;
+        u8* mem = mmap(0, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
+
+        if (mem == MAP_FAILED) PFATAL("Unable to mmap '%s'", path);
+
+        /* See what happens. We rely on save_if_interesting() to catch major
+           errors and save the test case. */
+
+        write_to_testcase(mem, st.st_size);
+
+        fault = run_target(argv, exec_tmout);
+
+        if (stop_soon) return;
+
+        syncing_party = sd_ent->d_name;
+        queued_imported += save_if_interesting(argv, mem, st.st_size, fault);
+        syncing_party = 0;
+
+        munmap(mem, st.st_size);
+
+        if (!(stage_cur++ % stats_update_freq)) show_stats();
+
+      }
+
+      ck_free(path);
+      close(fd);
+
+    }
+
+    ck_write(id_fd, &next_min_accept, sizeof(u32), qd_synced_path);
+
+    close(id_fd);
+    closedir(qd);
+    ck_free(qd_path);
+    ck_free(qd_synced_path);
+
+  }
+
+  closedir(sd);
+
+}
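+
+/* The directory layout being walked here looks like this (illustrative):
+
+     sync_dir/
+       fuzzer02/queue/id:000000,orig:seed      <- candidates scanned above
+       fuzzer03/queue/id:000017,...
+     out_dir/.synced/fuzzer02                  <- lowest ID not yet imported
+
+   so each peer's queue entries are imported at most once per ID. */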
+
+/* Trim all new test cases to save cycles when doing deterministic checks. The
+   trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of
+   file size, to keep the stage short and sweet. */
+
+u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
+
+#ifdef USE_PYTHON
+  if (py_functions[PY_FUNC_TRIM]) return trim_case_python(argv, q, in_buf);
+#endif
+
+  static u8 tmp[64];
+  static u8 clean_trace[MAP_SIZE];
+
+  u8  needs_write = 0, fault = 0;
+  u32 trim_exec = 0;
+  u32 remove_len;
+  u32 len_p2;
+
+  /* Although the trimmer will be less useful when variable behavior is
+     detected, it will still work to some extent, so we don't check for
+     this. */
+
+  if (q->len < 5) return 0;
+
+  stage_name = tmp;
+  bytes_trim_in += q->len;
+
+  /* Select initial chunk len, starting with large steps. */
+
+  len_p2 = next_p2(q->len);
+
+  remove_len = MAX(len_p2 / TRIM_START_STEPS, TRIM_MIN_BYTES);
+
+  /* Continue until the number of steps gets too high or the stepover
+     gets too small. */
+
+  while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, TRIM_MIN_BYTES)) {
+
+    u32 remove_pos = remove_len;
+
+    sprintf(tmp, "trim %s/%s", DI(remove_len), DI(remove_len));
+
+    stage_cur = 0;
+    stage_max = q->len / remove_len;
+
+    while (remove_pos < q->len) {
+
+      u32 trim_avail = MIN(remove_len, q->len - remove_pos);
+      u32 cksum;
+
+      write_with_gap(in_buf, q->len, remove_pos, trim_avail);
+
+      fault = run_target(argv, exec_tmout);
+      ++trim_execs;
+
+      if (stop_soon || fault == FAULT_ERROR) goto abort_trimming;
+
+      /* Note that we don't keep track of crashes or hangs here; maybe TODO? */
+
+      cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+
+      /* If the deletion had no impact on the trace, make it permanent. This
+         isn't perfect for variable-path inputs, but we're just making a
+         best-effort pass, so it's not a big deal if we end up with false
+         negatives every now and then. */
+
+      if (cksum == q->exec_cksum) {
+
+        u32 move_tail = q->len - remove_pos - trim_avail;
+
+        q->len -= trim_avail;
+        len_p2 = next_p2(q->len);
+
+        memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail,
+                move_tail);
+
+        /* Let's save a clean trace, which will be needed by
+           update_bitmap_score once we're done with the trimming stuff. */
+
+        if (!needs_write) {
+
+          needs_write = 1;
+          memcpy(clean_trace, trace_bits, MAP_SIZE);
+
+        }
+
+      } else
+
+        remove_pos += remove_len;
+
+      /* Since this can be slow, update the screen every now and then. */
+
+      if (!(trim_exec++ % stats_update_freq)) show_stats();
+      ++stage_cur;
+
+    }
+
+    remove_len >>= 1;
+
+  }
+
+  /* If we have made changes to in_buf, we also need to update the on-disk
+     version of the test case. */
+
+  if (needs_write) {
+
+    s32 fd;
+
+    unlink(q->fname);                                      /* ignore errors */
+
+    fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);
+
+    if (fd < 0) PFATAL("Unable to create '%s'", q->fname);
+
+    ck_write(fd, in_buf, q->len, q->fname);
+    close(fd);
+
+    memcpy(trace_bits, clean_trace, MAP_SIZE);
+    update_bitmap_score(q);
+
+  }
+
+abort_trimming:
+
+  bytes_trim_out += q->len;
+  return fault;
+
+}
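+
+/* Example with the stock settings (TRIM_START_STEPS = 16, TRIM_END_STEPS =
+   1024, TRIM_MIN_BYTES = 4): a 1000-byte input rounds to len_p2 = 1024, so
+   the chunk size starts at 1024 / 16 = 64 bytes and halves down to the
+   4-byte floor - i.e. passes with 64, 32, 16, 8, and 4-byte deletions. */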
+
+/* Write a modified test case, run program, process results. Handle
+   error conditions, returning 1 if it's time to bail out. This is
+   a helper function for fuzz_one(). */
+
+u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) {
+
+  u8 fault;
+
+  if (post_handler) {
+
+    out_buf = post_handler(out_buf, &len);
+    if (!out_buf || !len) return 0;
+
+  }
+
+  write_to_testcase(out_buf, len);
+
+  fault = run_target(argv, exec_tmout);
+
+  if (stop_soon) return 1;
+
+  if (fault == FAULT_TMOUT) {
+
+    if (subseq_tmouts++ > TMOUT_LIMIT) {
+
+      ++cur_skipped_paths;
+      return 1;
+
+    }
+
+  } else
+
+    subseq_tmouts = 0;
+
+  /* Users can hit us with SIGUSR1 to request the current input
+     to be abandoned. */
+
+  if (skip_requested) {
+
+    skip_requested = 0;
+    ++cur_skipped_paths;
+    return 1;
+
+  }
+
+  /* This handles FAULT_ERROR for us: */
+
+  queued_discovered += save_if_interesting(argv, out_buf, len, fault);
+
+  if (!(stage_cur % stats_update_freq) || stage_cur + 1 == stage_max)
+    show_stats();
+
+  return 0;
+
+}
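+
+/* Typical use from a mutation loop in fuzz_one() (a sketch; mutate_buf is a
+   hypothetical stand-in for whatever stage is running):
+
+     for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+       mutate_buf(out_buf, len);
+       if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+     }
+  */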
+
diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
new file mode 100644
index 00000000..3614599d
--- /dev/null
+++ b/src/afl-fuzz-stats.c
@@ -0,0 +1,802 @@
+/*
+   american fuzzy lop - fuzzer code
+   --------------------------------
+
+   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+   Forkserver design by Jann Horn <jannhorn@googlemail.com>
+
+   Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This is the real deal: the program takes an instrumented binary and
+   attempts a variety of basic fuzzing tricks, paying close attention to
+   how they affect the execution path.
+
+ */
+
+#include "afl-fuzz.h"
+
+/* Update stats file for unattended monitoring. */
+
+void write_stats_file(double bitmap_cvg, double stability, double eps) {
+
+  static double        last_bcvg, last_stab, last_eps;
+  static struct rusage usage;
+
+  u8*   fn = alloc_printf("%s/fuzzer_stats", out_dir);
+  s32   fd;
+  FILE* f;
+
+  fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);
+
+  if (fd < 0) PFATAL("Unable to create '%s'", fn);
+
+  ck_free(fn);
+
+  f = fdopen(fd, "w");
+
+  if (!f) PFATAL("fdopen() failed");
+
+  /* Keep last values in case we're called from another context
+     where exec/sec stats and such are not readily available. */
+
+  if (!bitmap_cvg && !stability && !eps) {
+
+    bitmap_cvg = last_bcvg;
+    stability = last_stab;
+    eps = last_eps;
+
+  } else {
+
+    last_bcvg = bitmap_cvg;
+    last_stab = stability;
+    last_eps = eps;
+
+  }
+
+  /* Fill in memory usage info; without this call, the peak_rss_mb line
+     below would always report zero. */
+
+  if (getrusage(RUSAGE_CHILDREN, &usage)) usage.ru_maxrss = 0;
+
+  fprintf(f,
+          "start_time        : %llu\n"
+          "last_update       : %llu\n"
+          "fuzzer_pid        : %d\n"
+          "cycles_done       : %llu\n"
+          "execs_done        : %llu\n"
+          "execs_per_sec     : %0.02f\n"
+          "paths_total       : %u\n"
+          "paths_favored     : %u\n"
+          "paths_found       : %u\n"
+          "paths_imported    : %u\n"
+          "max_depth         : %u\n"
+          "cur_path          : %u\n"    /* Must match find_start_position() */
+          "pending_favs      : %u\n"
+          "pending_total     : %u\n"
+          "variable_paths    : %u\n"
+          "stability         : %0.02f%%\n"
+          "bitmap_cvg        : %0.02f%%\n"
+          "unique_crashes    : %llu\n"
+          "unique_hangs      : %llu\n"
+          "last_path         : %llu\n"
+          "last_crash        : %llu\n"
+          "last_hang         : %llu\n"
+          "execs_since_crash : %llu\n"
+          "exec_timeout      : %u\n"
+          "slowest_exec_ms   : %llu\n"
+          "peak_rss_mb       : %lu\n"
+          "afl_banner        : %s\n"
+          "afl_version       : " VERSION
+          "\n"
+          "target_mode       : %s%s%s%s%s%s%s%s\n"
+          "command_line      : %s\n",
+          start_time / 1000, get_cur_time() / 1000, getpid(),
+          queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps, queued_paths,
+          queued_favored, queued_discovered, queued_imported, max_depth,
+          current_entry, pending_favored, pending_not_fuzzed, queued_variable,
+          stability, bitmap_cvg, unique_crashes, unique_hangs,
+          last_path_time / 1000, last_crash_time / 1000, last_hang_time / 1000,
+          total_execs - last_crash_execs, exec_tmout, slowest_exec_ms,
+          (unsigned long int)usage.ru_maxrss, use_banner,
+          unicorn_mode ? "unicorn " : "", qemu_mode ? "qemu " : "",
+          dumb_mode ? "dumb " : "", no_forkserver ? "no_forksrv " : "",
+          crash_mode ? "crash " : "", persistent_mode ? "persistent " : "",
+          deferred_mode ? "deferred " : "",
+          (unicorn_mode || qemu_mode || dumb_mode || no_forkserver ||
+           crash_mode || persistent_mode || deferred_mode)
+              ? ""
+              : "default",
+          orig_cmdline);
+  /* ignore errors */
+
+  fclose(f);
+
+}
+
+/* Update the plot file if there is a reason to. */
+
+void maybe_update_plot_file(double bitmap_cvg, double eps) {
+
+  static u32 prev_qp, prev_pf, prev_pnf, prev_ce, prev_md;
+  static u64 prev_qc, prev_uc, prev_uh;
+
+  if (prev_qp == queued_paths && prev_pf == pending_favored &&
+      prev_pnf == pending_not_fuzzed && prev_ce == current_entry &&
+      prev_qc == queue_cycle && prev_uc == unique_crashes &&
+      prev_uh == unique_hangs && prev_md == max_depth)
+    return;
+
+  prev_qp = queued_paths;
+  prev_pf = pending_favored;
+  prev_pnf = pending_not_fuzzed;
+  prev_ce = current_entry;
+  prev_qc = queue_cycle;
+  prev_uc = unique_crashes;
+  prev_uh = unique_hangs;
+  prev_md = max_depth;
+
+  /* Fields in the file:
+
+     unix_time, cycles_done, cur_path, paths_total, paths_not_fuzzed,
+     favored_not_fuzzed, map_size, unique_crashes, unique_hangs, max_depth,
+     execs_per_sec */
+
+  fprintf(plot_file,
+          "%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f\n",
+          get_cur_time() / 1000, queue_cycle - 1, current_entry, queued_paths,
+          pending_not_fuzzed, pending_favored, bitmap_cvg, unique_crashes,
+          unique_hangs, max_depth, eps);                   /* ignore errors */
+
+  fflush(plot_file);
+
+}
+
+/* Check terminal dimensions after resize. */
+
+static void check_term_size(void) {
+
+  struct winsize ws;
+
+  term_too_small = 0;
+
+  if (ioctl(1, TIOCGWINSZ, &ws)) return;
+
+  if (ws.ws_row == 0 || ws.ws_col == 0) return;
+  if (ws.ws_row < 24 || ws.ws_col < 79) term_too_small = 1;
+
+}
+
+/* A spiffy retro stats screen! This is called every stats_update_freq
+   execve() calls, plus in several other circumstances. */
+
+void show_stats(void) {
+
+  static u64    last_stats_ms, last_plot_ms, last_ms, last_execs;
+  static double avg_exec;
+  double        t_byte_ratio, stab_ratio;
+
+  u64 cur_ms;
+  u32 t_bytes, t_bits;
+
+  u32 banner_len, banner_pad;
+  u8  tmp[256];
+
+  cur_ms = get_cur_time();
+
+  /* If not enough time has passed since last UI update, bail out. */
+
+  if (cur_ms - last_ms < 1000 / UI_TARGET_HZ) return;
+
+  /* Check if we're past the 10 minute mark. */
+
+  if (cur_ms - start_time > 10 * 60 * 1000) run_over10m = 1;
+
+  /* Calculate smoothed exec speed stats. */
+
+  if (!last_execs) {
+
+    avg_exec = ((double)total_execs) * 1000 / (cur_ms - start_time);
+
+  } else {
+
+    double cur_avg =
+        ((double)(total_execs - last_execs)) * 1000 / (cur_ms - last_ms);
+
+    /* If there is a dramatic (5x+) jump in speed, reset the indicator
+       more quickly. */
+
+    if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec) avg_exec = cur_avg;
+
+    avg_exec = avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) +
+               cur_avg * (1.0 / AVG_SMOOTHING);
+
+  }
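+
+  /* I.e. an exponential moving average: with the stock AVG_SMOOTHING of 16,
+     each refresh blends in 1/16 of the newest execs/sec reading. */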
+
+  last_ms = cur_ms;
+  last_execs = total_execs;
+
+  /* Tell the callers when to contact us (as measured in execs). */
+
+  stats_update_freq = avg_exec / (UI_TARGET_HZ * 10);
+  if (!stats_update_freq) stats_update_freq = 1;
+
+  /* Do some bitmap stats. */
+
+  t_bytes = count_non_255_bytes(virgin_bits);
+  t_byte_ratio = ((double)t_bytes * 100) / MAP_SIZE;
+
+  if (t_bytes)
+    stab_ratio = 100 - ((double)var_byte_count) * 100 / t_bytes;
+  else
+    stab_ratio = 100;
+
+  /* Roughly every minute, update fuzzer stats and save auto tokens. */
+
+  if (cur_ms - last_stats_ms > STATS_UPDATE_SEC * 1000) {
+
+    last_stats_ms = cur_ms;
+    write_stats_file(t_byte_ratio, stab_ratio, avg_exec);
+    save_auto();
+    write_bitmap();
+
+  }
+
+  /* Every now and then, write plot data. */
+
+  if (cur_ms - last_plot_ms > PLOT_UPDATE_SEC * 1000) {
+
+    last_plot_ms = cur_ms;
+    maybe_update_plot_file(t_byte_ratio, avg_exec);
+
+  }
+
+  /* Honor AFL_EXIT_WHEN_DONE and AFL_BENCH_UNTIL_CRASH. */
+
+  if (!dumb_mode && cycles_wo_finds > 100 && !pending_not_fuzzed &&
+      getenv("AFL_EXIT_WHEN_DONE"))
+    stop_soon = 2;
+
+  if (total_crashes && getenv("AFL_BENCH_UNTIL_CRASH")) stop_soon = 2;
+
+  /* If we're not on TTY, bail out. */
+
+  if (not_on_tty) return;
+
+  /* Compute some mildly useful bitmap stats. */
+
+  t_bits = (MAP_SIZE << 3) - count_bits(virgin_bits);
+
+  /* Now, for the visuals... */
+
+  if (clear_screen) {
+
+    SAYF(TERM_CLEAR CURSOR_HIDE);
+    clear_screen = 0;
+
+    check_term_size();
+
+  }
+
+  SAYF(TERM_HOME);
+
+  if (term_too_small) {
+
+    SAYF(cBRI
+         "Your terminal is too small to display the UI.\n"
+         "Please resize terminal window to at least 79x24.\n" cRST);
+
+    return;
+
+  }
+
+  /* Let's start by drawing a centered banner. */
+
+  banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner) +
+               strlen(power_name) + 3 + 5;
+  banner_pad = (79 - banner_len) / 2;
+  memset(tmp, ' ', banner_pad);
+
+#ifdef HAVE_AFFINITY
+  sprintf(tmp + banner_pad,
+          "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]" cBLU " {%d}",
+          crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
+          use_banner, power_name, cpu_aff);
+#else
+  sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]",
+          crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
+          use_banner, power_name);
+#endif /* HAVE_AFFINITY */
+
+  SAYF("\n%s\n", tmp);
+
+  /* "Handy" shortcuts for drawing boxes... */
+
+#define bSTG bSTART cGRA
+#define bH2 bH bH
+#define bH5 bH2 bH2 bH
+#define bH10 bH5 bH5
+#define bH20 bH10 bH10
+#define bH30 bH20 bH10
+#define SP5 "     "
+#define SP10 SP5 SP5
+#define SP20 SP10 SP10
+
+  /* Lord, forgive me this. */
+
+  SAYF(SET_G1 bSTG bLT bH bSTOP cCYA
+       " process timing " bSTG bH30 bH5 bH bHB bH bSTOP cCYA
+       " overall results " bSTG bH2 bH2 bRT "\n");
+
+  if (dumb_mode) {
+
+    strcpy(tmp, cRST);
+
+  } else {
+
+    u64 min_wo_finds = (cur_ms - last_path_time) / 1000 / 60;
+
+    /* First queue cycle: don't stop now! */
+    if (queue_cycle == 1 || min_wo_finds < 15) {
+
+      strcpy(tmp, cMGN);
+
+    } else if (cycles_wo_finds < 25 || min_wo_finds < 30) {
+
+      /* Subsequent cycles, but we're still making finds. */
+      strcpy(tmp, cYEL);
+
+    } else if (cycles_wo_finds > 100 && !pending_not_fuzzed &&
+               min_wo_finds > 120) {
+
+      /* No finds for a long time and no test cases to try. */
+      strcpy(tmp, cLGN);
+
+    } else {
+
+      /* Default: cautiously OK to stop? */
+      strcpy(tmp, cLBL);
+
+    }
+
+  }
+
+  SAYF(bV bSTOP "        run time : " cRST "%-33s " bSTG bV bSTOP
+                "  cycles done : %s%-5s " bSTG              bV "\n",
+       DTD(cur_ms, start_time), tmp, DI(queue_cycle - 1));
+
+  /* We want to warn people about not seeing new paths after a full cycle,
+     except when resuming fuzzing or running in non-instrumented mode. */
+
+  if (!dumb_mode && (last_path_time || resuming_fuzz || queue_cycle == 1 ||
+                     in_bitmap || crash_mode)) {
+
+    SAYF(bV bSTOP "   last new path : " cRST "%-33s ",
+         DTD(cur_ms, last_path_time));
+
+  } else {
+
+    if (dumb_mode)
+
+      SAYF(bV bSTOP "   last new path : " cPIN "n/a" cRST
+                    " (non-instrumented mode)       ");
+
+    else
+
+      SAYF(bV bSTOP "   last new path : " cRST "none yet " cLRD
+                    "(odd, check syntax!)     ");
+
+  }
+
+  SAYF(bSTG bV bSTOP "  total paths : " cRST "%-5s " bSTG bV "\n",
+       DI(queued_paths));
+
+  /* Highlight crashes in red if found, denote going over the KEEP_UNIQUE_CRASH
+     limit with a '+' appended to the count. */
+
+  sprintf(tmp, "%s%s", DI(unique_crashes),
+          (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
+
+  SAYF(bV bSTOP " last uniq crash : " cRST "%-33s " bSTG bV bSTOP
+                " uniq crashes : %s%-6s" bSTG               bV "\n",
+       DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST, tmp);
+
+  sprintf(tmp, "%s%s", DI(unique_hangs),
+          (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
+
+  SAYF(bV bSTOP "  last uniq hang : " cRST "%-33s " bSTG bV bSTOP
+                "   uniq hangs : " cRST "%-6s" bSTG         bV "\n",
+       DTD(cur_ms, last_hang_time), tmp);
+
+  SAYF(bVR bH bSTOP            cCYA
+       " cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA
+       " map coverage " bSTG bH bHT bH20 bH2 bVL "\n");
+
+  /* This gets funny because we want to print several variable-length variables
+     together, but then cram them into a fixed-width field - so we need to
+     put them in a temporary buffer first. */
+
+  sprintf(tmp, "%s%s%u (%0.02f%%)", DI(current_entry),
+          queue_cur->favored ? "." : "*", queue_cur->fuzz_level,
+          ((double)current_entry * 100) / queued_paths);
+
+  SAYF(bV bSTOP "  now processing : " cRST "%-16s " bSTG bV bSTOP, tmp);
+
+  sprintf(tmp, "%0.02f%% / %0.02f%%",
+          ((double)queue_cur->bitmap_size) * 100 / MAP_SIZE, t_byte_ratio);
+
+  SAYF("    map density : %s%-21s" bSTG bV "\n",
+       t_byte_ratio > 70 ? cLRD : ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST),
+       tmp);
+
+  sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths),
+          ((double)cur_skipped_paths * 100) / queued_paths);
+
+  SAYF(bV bSTOP " paths timed out : " cRST "%-16s " bSTG bV, tmp);
+
+  sprintf(tmp, "%0.02f bits/tuple", t_bytes ? (((double)t_bits) / t_bytes) : 0);
+
+  SAYF(bSTOP " count coverage : " cRST "%-21s" bSTG bV "\n", tmp);
+
+  SAYF(bVR bH bSTOP            cCYA
+       " stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA
+       " findings in depth " bSTG bH10 bH5 bH2 bH2 bVL "\n");
+
+  sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored),
+          ((double)queued_favored) * 100 / queued_paths);
+
+  /* Yeah... it's still going on... halp? */
+
+  SAYF(bV bSTOP "  now trying : " cRST "%-20s " bSTG bV bSTOP
+                " favored paths : " cRST "%-22s" bSTG   bV "\n",
+       stage_name, tmp);
+
+  if (!stage_max) {
+
+    sprintf(tmp, "%s/-", DI(stage_cur));
+
+  } else {
+
+    sprintf(tmp, "%s/%s (%0.02f%%)", DI(stage_cur), DI(stage_max),
+            ((double)stage_cur) * 100 / stage_max);
+
+  }
+
+  SAYF(bV bSTOP " stage execs : " cRST "%-20s " bSTG bV bSTOP, tmp);
+
+  sprintf(tmp, "%s (%0.02f%%)", DI(queued_with_cov),
+          ((double)queued_with_cov) * 100 / queued_paths);
+
+  SAYF("  new edges on : " cRST "%-22s" bSTG bV "\n", tmp);
+
+  sprintf(tmp, "%s (%s%s unique)", DI(total_crashes), DI(unique_crashes),
+          (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
+
+  if (crash_mode) {
+
+    SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP
+                  "   new crashes : %s%-22s" bSTG         bV "\n",
+         DI(total_execs), unique_crashes ? cLRD : cRST, tmp);
+
+  } else {
+
+    SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP
+                  " total crashes : %s%-22s" bSTG         bV "\n",
+         DI(total_execs), unique_crashes ? cLRD : cRST, tmp);
+
+  }
+
+  /* Show a warning about slow execution. */
+
+  if (avg_exec < 100) {
+
+    sprintf(tmp, "%s/sec (%s)", DF(avg_exec),
+            avg_exec < 20 ? "zzzz..." : "slow!");
+
+    SAYF(bV bSTOP "  exec speed : " cLRD "%-20s ", tmp);
+
+  } else {
+
+    sprintf(tmp, "%s/sec", DF(avg_exec));
+    SAYF(bV bSTOP "  exec speed : " cRST "%-20s ", tmp);
+
+  }
+
+  sprintf(tmp, "%s (%s%s unique)", DI(total_tmouts), DI(unique_tmouts),
+          (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
+
+  SAYF(bSTG bV bSTOP "  total tmouts : " cRST "%-22s" bSTG bV "\n", tmp);
+
+  /* Aaaalmost there... hold on! */
+
+  SAYF(bVR bH cCYA                      bSTOP
+       " fuzzing strategy yields " bSTG bH10 bHT bH10 bH5 bHB bH bSTOP cCYA
+       " path geometry " bSTG bH5 bH2 bVL "\n");
+
+  if (skip_deterministic) {
+
+    strcpy(tmp, "n/a, n/a, n/a");
+
+  } else {
+
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_FLIP1]),
+            DI(stage_cycles[STAGE_FLIP1]), DI(stage_finds[STAGE_FLIP2]),
+            DI(stage_cycles[STAGE_FLIP2]), DI(stage_finds[STAGE_FLIP4]),
+            DI(stage_cycles[STAGE_FLIP4]));
+
+  }
+
+  SAYF(bV bSTOP "   bit flips : " cRST "%-36s " bSTG bV bSTOP
+                "    levels : " cRST "%-10s" bSTG       bV "\n",
+       tmp, DI(max_depth));
+
+  if (!skip_deterministic)
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_FLIP8]),
+            DI(stage_cycles[STAGE_FLIP8]), DI(stage_finds[STAGE_FLIP16]),
+            DI(stage_cycles[STAGE_FLIP16]), DI(stage_finds[STAGE_FLIP32]),
+            DI(stage_cycles[STAGE_FLIP32]));
+
+  SAYF(bV bSTOP "  byte flips : " cRST "%-36s " bSTG bV bSTOP
+                "   pending : " cRST "%-10s" bSTG       bV "\n",
+       tmp, DI(pending_not_fuzzed));
+
+  if (!skip_deterministic)
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_ARITH8]),
+            DI(stage_cycles[STAGE_ARITH8]), DI(stage_finds[STAGE_ARITH16]),
+            DI(stage_cycles[STAGE_ARITH16]), DI(stage_finds[STAGE_ARITH32]),
+            DI(stage_cycles[STAGE_ARITH32]));
+
+  SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP
+                "  pend fav : " cRST "%-10s" bSTG       bV "\n",
+       tmp, DI(pending_favored));
+
+  if (!skip_deterministic)
+    sprintf(
+        tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_INTEREST8]),
+        DI(stage_cycles[STAGE_INTEREST8]), DI(stage_finds[STAGE_INTEREST16]),
+        DI(stage_cycles[STAGE_INTEREST16]), DI(stage_finds[STAGE_INTEREST32]),
+        DI(stage_cycles[STAGE_INTEREST32]));
+
+  SAYF(bV bSTOP "  known ints : " cRST "%-36s " bSTG bV bSTOP
+                " own finds : " cRST "%-10s" bSTG       bV "\n",
+       tmp, DI(queued_discovered));
+
+  if (!skip_deterministic)
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_EXTRAS_UO]),
+            DI(stage_cycles[STAGE_EXTRAS_UO]), DI(stage_finds[STAGE_EXTRAS_UI]),
+            DI(stage_cycles[STAGE_EXTRAS_UI]), DI(stage_finds[STAGE_EXTRAS_AO]),
+            DI(stage_cycles[STAGE_EXTRAS_AO]));
+
+  SAYF(bV bSTOP "  dictionary : " cRST "%-36s " bSTG bV bSTOP
+                "  imported : " cRST "%-10s" bSTG       bV "\n",
+       tmp, sync_id ? DI(queued_imported) : (u8*)"n/a");
+
+  sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_HAVOC]),
+          DI(stage_cycles[STAGE_HAVOC]), DI(stage_finds[STAGE_SPLICE]),
+          DI(stage_cycles[STAGE_SPLICE]), DI(stage_finds[STAGE_PYTHON]),
+          DI(stage_cycles[STAGE_PYTHON]));
+
+  SAYF(bV bSTOP "       havoc : " cRST "%-36s " bSTG bV bSTOP, tmp);
+
+  if (t_bytes)
+    sprintf(tmp, "%0.02f%%", stab_ratio);
+  else
+    strcpy(tmp, "n/a");
+
+  SAYF(" stability : %s%-10s" bSTG bV "\n",
+       (stab_ratio < 85 && var_byte_count > 40)
+           ? cLRD
+           : ((queued_variable && (!persistent_mode || var_byte_count > 20))
+                  ? cMGN
+                  : cRST),
+       tmp);
+
+  if (!bytes_trim_out) {
+
+    sprintf(tmp, "n/a, ");
+
+  } else {
+
+    sprintf(tmp, "%0.02f%%/%s, ",
+            ((double)(bytes_trim_in - bytes_trim_out)) * 100 / bytes_trim_in,
+            DI(trim_execs));
+
+  }
+
+  if (!blocks_eff_total) {
+
+    u8 tmp2[128];
+
+    sprintf(tmp2, "n/a");
+    strcat(tmp, tmp2);
+
+  } else {
+
+    u8 tmp2[128];
+
+    sprintf(tmp2, "%0.02f%%",
+            ((double)(blocks_eff_total - blocks_eff_select)) * 100 /
+                blocks_eff_total);
+
+    strcat(tmp, tmp2);
+
+  }
+
+  if (custom_mutator) {
+
+    sprintf(tmp, "%s/%s", DI(stage_finds[STAGE_CUSTOM_MUTATOR]),
+            DI(stage_cycles[STAGE_CUSTOM_MUTATOR]));
+    SAYF(bV bSTOP " custom mut. : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB
+                  "\n" bLB bH30 bH20 bH2 bH bRB bSTOP cRST RESET_G1,
+         tmp);
+
+  } else {
+
+    SAYF(bV bSTOP "        trim : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB
+                  "\n" bLB bH30 bH20 bH2 bRB bSTOP cRST RESET_G1,
+         tmp);
+
+  }
+
+  /* Provide some CPU utilization stats. */
+
+  if (cpu_core_count) {
+
+    double cur_runnable = get_runnable_processes();
+    u32    cur_utilization = cur_runnable * 100 / cpu_core_count;
+
+    u8* cpu_color = cCYA;
+
+    /* If we could still run one or more processes, use green. */
+
+    if (cpu_core_count > 1 && cur_runnable + 1 <= cpu_core_count)
+      cpu_color = cLGN;
+
+    /* If we're clearly oversubscribed, use red. */
+
+    if (!no_cpu_meter_red && cur_utilization >= 150) cpu_color = cLRD;
+
+#ifdef HAVE_AFFINITY
+
+    if (cpu_aff >= 0) {
+
+      SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, MIN(cpu_aff, 999),
+           cpu_color, MIN(cur_utilization, 999));
+
+    } else {
+
+      SAYF(SP10 cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST, cpu_color,
+           MIN(cur_utilization, 999));
+
+    }
+
+#else
+
+    SAYF(SP10 cGRA "   [cpu:%s%3u%%" cGRA "]\r" cRST, cpu_color,
+         MIN(cur_utilization, 999));
+
+#endif /* ^HAVE_AFFINITY */
+
+  } else
+
+    SAYF("\r");
+
+  /* Hallelujah! */
+
+  fflush(0);
+
+}
+
+/* Display quick statistics at the end of processing the input directory,
+   plus a bunch of warnings. Some calibration stuff also ended up here,
+   along with several hardcoded constants. Maybe clean up eventually. */
+
+void show_init_stats(void) {
+
+  struct queue_entry* q = queue;
+  u32                 min_bits = 0, max_bits = 0;
+  u64                 min_us = 0, max_us = 0;
+  u64                 avg_us = 0;
+  u32                 max_len = 0;
+
+  if (total_cal_cycles) avg_us = total_cal_us / total_cal_cycles;
+
+  while (q) {
+
+    if (!min_us || q->exec_us < min_us) min_us = q->exec_us;
+    if (q->exec_us > max_us) max_us = q->exec_us;
+
+    if (!min_bits || q->bitmap_size < min_bits) min_bits = q->bitmap_size;
+    if (q->bitmap_size > max_bits) max_bits = q->bitmap_size;
+
+    if (q->len > max_len) max_len = q->len;
+
+    q = q->next;
+
+  }
+
+  SAYF("\n");
+
+  if (avg_us > ((qemu_mode || unicorn_mode) ? 50000 : 10000))
+    WARNF(cLRD "The target binary is pretty slow! See %s/perf_tips.txt.",
+          doc_path);
+
+  /* Let's keep things moving with slow binaries. */
+
+  if (avg_us > 50000)
+    havoc_div = 10;                                     /* 0-19 execs/sec   */
+  else if (avg_us > 20000)
+    havoc_div = 5;                                      /* 20-49 execs/sec  */
+  else if (avg_us > 10000)
+    havoc_div = 2;                                      /* 50-100 execs/sec */
+
+  if (!resuming_fuzz) {
+
+    if (max_len > 50 * 1024)
+      WARNF(cLRD "Some test cases are huge (%s) - see %s/perf_tips.txt!",
+            DMS(max_len), doc_path);
+    else if (max_len > 10 * 1024)
+      WARNF("Some test cases are big (%s) - see %s/perf_tips.txt.",
+            DMS(max_len), doc_path);
+
+    if (useless_at_start && !in_bitmap)
+      WARNF(cLRD "Some test cases look useless. Consider using a smaller set.");
+
+    if (queued_paths > 100)
+      WARNF(cLRD
+            "You probably have far too many input files! Consider trimming "
+            "down.");
+    else if (queued_paths > 20)
+      WARNF("You have lots of input files; try starting small.");
+
+  }
+
+  OKF("Here are some useful stats:\n\n"
+
+      cGRA "    Test case count : " cRST
+      "%u favored, %u variable, %u total\n" cGRA "       Bitmap range : " cRST
+      "%u to %u bits (average: %0.02f bits)\n" cGRA
+      "        Exec timing : " cRST "%s to %s us (average: %s us)\n",
+      queued_favored, queued_variable, queued_paths, min_bits, max_bits,
+      ((double)total_bitmap_size) /
+          (total_bitmap_entries ? total_bitmap_entries : 1),
+      DI(min_us), DI(max_us), DI(avg_us));
+
+  if (!timeout_given) {
+
+    /* Figure out the appropriate timeout. The basic idea is: 5x average or
+       1x max, rounded up to EXEC_TM_ROUND ms and capped at 1 second.
+
+       If the program is slow, the multiplier is lowered to 2x or 3x, because
+       random scheduler jitter is less likely to have any impact, and because
+       our patience is wearing thin =) */
+
+    if (avg_us > 50000)
+      exec_tmout = avg_us * 2 / 1000;
+    else if (avg_us > 10000)
+      exec_tmout = avg_us * 3 / 1000;
+    else
+      exec_tmout = avg_us * 5 / 1000;
+
+    exec_tmout = MAX(exec_tmout, max_us / 1000);
+    exec_tmout = (exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;
+
+    if (exec_tmout > EXEC_TIMEOUT) exec_tmout = EXEC_TIMEOUT;
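+
+    /* E.g. avg_us = 8000 with max_us = 30000: 5 * 8 ms = 40 ms, then
+       MAX(40, 30) = 40 ms, rounded up to 60 ms with the stock EXEC_TM_ROUND
+       of 20 ms. */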
+
+    ACTF("No -t option specified, so I'll use exec timeout of %u ms.",
+         exec_tmout);
+
+    timeout_given = 1;
+
+  } else if (timeout_given == 3) {
+
+    ACTF("Applying timeout settings from resumed session (%u ms).", exec_tmout);
+
+  }
+
+  /* In dumb mode, re-running every timing out test case with a generous time
+     limit is very expensive, so let's select a more conservative default. */
+
+  if (dumb_mode && !getenv("AFL_HANG_TMOUT"))
+    hang_tmout = MIN(EXEC_TIMEOUT, exec_tmout * 2 + 100);
+
+  OKF("All set and ready to roll!");
+
+}
+
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
new file mode 100644
index 00000000..685840c6
--- /dev/null
+++ b/src/afl-fuzz.c
@@ -0,0 +1,881 @@
+/*
+   american fuzzy lop - fuzzer code
+   --------------------------------
+
+   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+   Forkserver design by Jann Horn <jannhorn@googlemail.com>
+
+   Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This is the real deal: the program takes an instrumented binary and
+   attempts a variety of basic fuzzing tricks, paying close attention to
+   how they affect the execution path.
+
+ */
+
+#include "afl-fuzz.h"
+
+/* Display usage hints. */
+
+static void usage(u8* argv0) {
+
+#ifdef USE_PYTHON
+#  define PYTHON_SUPPORT \
+  "Compiled with Python 2.7 module support, see docs/python_mutators.txt\n"
+#else
+#  define PYTHON_SUPPORT ""
+#endif
+
+  SAYF(
+      "\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n"
+
+      "Required parameters:\n"
+      "  -i dir        - input directory with test cases\n"
+      "  -o dir        - output directory for fuzzer findings\n\n"
+
+      "Execution control settings:\n"
+      "  -p schedule   - power schedules recompute a seed's performance "
+      "score.\n"
+      "                  <explore (default), fast, coe, lin, quad, or "
+      "exploit>\n"
+      "                  see docs/power_schedules.txt\n"
+      "  -f file       - location read by the fuzzed program (stdin)\n"
+      "  -t msec       - timeout for each run (auto-scaled, 50-%d ms)\n"
+      "  -m megs       - memory limit for child process (%d MB)\n"
+      "  -Q            - use binary-only instrumentation (QEMU mode)\n"
+      "  -U            - use Unicorn-based instrumentation (Unicorn mode)\n\n"
+      "  -L minutes    - use MOpt(imize) mode and set the limit time for "
+      "entering the\n"
+      "                  pacemaker mode (minutes of no new paths, 0 = "
+      "immediately).\n"
+      "                  a recommended value is 10-60. see docs/README.MOpt\n\n"
+
+      "Fuzzing behavior settings:\n"
+      "  -d            - quick & dirty mode (skips deterministic steps)\n"
+      "  -n            - fuzz without instrumentation (dumb mode)\n"
+      "  -x dir        - optional fuzzer dictionary (see README)\n\n"
+
+      "Testing settings:\n"
+      "  -s seed       - use a fixed seed for the RNG\n"
+      "  -V seconds    - fuzz for a maximum total time of seconds then "
+      "terminate\n"
+      "  -E execs      - fuzz for a maximum number of total executions then "
+      "terminate\n\n"
+
+      "Other stuff:\n"
+      "  -T text       - text banner to show on the screen\n"
+      "  -M / -S id    - distributed mode (see parallel_fuzzing.txt)\n"
+      "  -B bitmap.txt - mutate a specific test case, use the out/fuzz_bitmap "
+      "file\n"
+      "  -C            - crash exploration mode (the peruvian rabbit thing)\n"
+      "  -e ext        - File extension for the temporarily generated test "
+      "case\n\n"
+
+      PYTHON_SUPPORT
+
+      "For additional tips, please consult %s/README\n\n",
+
+      argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);
+
+  exit(1);
+#undef PYTHON_SUPPORT
+
+}
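+
+/* A typical invocation, for reference:
+
+     ./afl-fuzz -i testcases -o findings -p fast -- ./target_app @@
+
+   where @@ is substituted with the path of the current test case; without
+   it, the target reads its input from stdin. */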
+
+#ifndef AFL_LIB
+
+static int stricmp(char const* a, char const* b) {
+
+  for (;; ++a, ++b) {
+
+    int d;
+    d = tolower(*a) - tolower(*b);
+    if (d != 0 || !*a) return d;
+
+  }
+
+}
+
+/* Main entry point */
+
+int main(int argc, char** argv) {
+
+  s32    opt;
+  u64    prev_queued = 0;
+  u32    sync_interval_cnt = 0, seek_to;
+  u8*    extras_dir = 0;
+  u8     mem_limit_given = 0;
+  u8     exit_1 = !!getenv("AFL_BENCH_JUST_ONE");
+  char** use_argv;
+  s64    init_seed;
+
+  struct timeval  tv;
+  struct timezone tz;
+
+  SAYF(cCYA
+       "afl-fuzz" VERSION cRST
+       " based on afl by <lcamtuf@google.com> and a big online community\n");
+
+  doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;
+
+  gettimeofday(&tv, &tz);
+  init_seed = tv.tv_sec ^ tv.tv_usec ^ getpid();
+
+  while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:QUe:p:s:V:E:L:")) >
+         0)
+
+    switch (opt) {
+
+      case 's': {
+
+        init_seed = strtoul(optarg, 0L, 10);
+        fixed_seed = 1;
+        break;
+
+      }
+
+      case 'p':                                           /* Power schedule */
+
+        if (!stricmp(optarg, "fast")) {
+
+          schedule = FAST;
+
+        } else if (!stricmp(optarg, "coe")) {
+
+          schedule = COE;
+
+        } else if (!stricmp(optarg, "exploit")) {
+
+          schedule = EXPLOIT;
+
+        } else if (!stricmp(optarg, "lin")) {
+
+          schedule = LIN;
+
+        } else if (!stricmp(optarg, "quad")) {
+
+          schedule = QUAD;
+
+        } else if (!stricmp(optarg, "explore") || !stricmp(optarg, "default") ||
+
+                   !stricmp(optarg, "normal") || !stricmp(optarg, "afl")) {
+
+          schedule = EXPLORE;
+
+        } else {
+
+          FATAL("Unknown -p power schedule");
+
+        }
+
+        break;
+
+      case 'e':
+
+        if (file_extension) FATAL("Multiple -e options not supported");
+
+        file_extension = optarg;
+
+        break;
+
+      case 'i':                                                /* input dir */
+
+        if (in_dir) FATAL("Multiple -i options not supported");
+        in_dir = optarg;
+
+        if (!strcmp(in_dir, "-")) in_place_resume = 1;
+
+        break;
+
+      case 'o':                                               /* output dir */
+
+        if (out_dir) FATAL("Multiple -o options not supported");
+        out_dir = optarg;
+        break;
+
+      case 'M': {                                         /* master sync ID */
+
+        u8* c;
+
+        if (sync_id) FATAL("Multiple -S or -M options not supported");
+        sync_id = ck_strdup(optarg);
+
+        if ((c = strchr(sync_id, ':'))) {
+
+          *c = 0;
+
+          if (sscanf(c + 1, "%u/%u", &master_id, &master_max) != 2 ||
+              !master_id || !master_max || master_id > master_max ||
+              master_max > 1000000)
+            FATAL("Bogus master ID passed to -M");
+
+        }
+
+        force_deterministic = 1;
+
+      }
+
+      break;
+
+      case 'S':
+
+        if (sync_id) FATAL("Multiple -S or -M options not supported");
+        sync_id = ck_strdup(optarg);
+        break;
+
+      case 'f':                                              /* target file */
+
+        if (out_file) FATAL("Multiple -f options not supported");
+        out_file = optarg;
+        break;
+
+      case 'x':                                               /* dictionary */
+
+        if (extras_dir) FATAL("Multiple -x options not supported");
+        extras_dir = optarg;
+        break;
+
+      case 't': {                                                /* timeout */
+
+        u8 suffix = 0;
+
+        if (timeout_given) FATAL("Multiple -t options not supported");
+
+        if (sscanf(optarg, "%u%c", &exec_tmout, &suffix) < 1 ||
+            optarg[0] == '-')
+          FATAL("Bad syntax used for -t");
+
+        if (exec_tmout < 5) FATAL("Dangerously low value of -t");
+
+        if (suffix == '+')
+          timeout_given = 2;
+        else
+          timeout_given = 1;
+
+        break;
+
+      }
+
+      case 'm': {                                              /* mem limit */
+
+        u8 suffix = 'M';
+
+        if (mem_limit_given) FATAL("Multiple -m options not supported");
+        mem_limit_given = 1;
+
+        if (!strcmp(optarg, "none")) {
+
+          mem_limit = 0;
+          break;
+
+        }
+
+        if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
+            optarg[0] == '-')
+          FATAL("Bad syntax used for -m");
+
+        switch (suffix) {
+
+          case 'T': mem_limit *= 1024 * 1024; break;
+          case 'G': mem_limit *= 1024; break;
+          case 'k': mem_limit /= 1024; break;
+          case 'M': break;
+
+          default: FATAL("Unsupported suffix or bad syntax for -m");
+
+        }
+
+        if (mem_limit < 5) FATAL("Dangerously low value of -m");
+
+        if (sizeof(rlim_t) == 4 && mem_limit > 2000)
+          FATAL("Value of -m out of range on 32-bit systems");
+
+      }
+
+      break;
+
+      case 'd':                                       /* skip deterministic */
+
+        if (skip_deterministic) FATAL("Multiple -d options not supported");
+        skip_deterministic = 1;
+        use_splicing = 1;
+        break;
+
+      case 'B':                                              /* load bitmap */
+
+        /* This is a secret undocumented option! It is useful if you find
+           an interesting test case during a normal fuzzing process, and want
+           to mutate it without rediscovering any of the test cases already
+           found during an earlier run.
+
+           To use this mode, you need to point -B to the fuzz_bitmap produced
+           by an earlier run for the exact same binary... and that's it.
+
+           I only used this once or twice to get variants of a particular
+           file, so I'm not making this an official setting. */
+
+        if (in_bitmap) FATAL("Multiple -B options not supported");
+
+        in_bitmap = optarg;
+        read_bitmap(in_bitmap);
+        break;
+
+      case 'C':                                               /* crash mode */
+
+        if (crash_mode) FATAL("Multiple -C options not supported");
+        crash_mode = FAULT_CRASH;
+        break;
+
+      case 'n':                                                /* dumb mode */
+
+        if (dumb_mode) FATAL("Multiple -n options not supported");
+        if (getenv("AFL_DUMB_FORKSRV"))
+          dumb_mode = 2;
+        else
+          dumb_mode = 1;
+
+        break;
+
+      case 'T':                                                   /* banner */
+
+        if (use_banner) FATAL("Multiple -T options not supported");
+        use_banner = optarg;
+        break;
+
+      case 'Q':                                                /* QEMU mode */
+
+        if (qemu_mode) FATAL("Multiple -Q options not supported");
+        qemu_mode = 1;
+
+        if (!mem_limit_given) mem_limit = MEM_LIMIT_QEMU;
+
+        break;
+
+      case 'U':                                             /* Unicorn mode */
+
+        if (unicorn_mode) FATAL("Multiple -U options not supported");
+        unicorn_mode = 1;
+
+        if (!mem_limit_given) mem_limit = MEM_LIMIT_UNICORN;
+
+        break;
+
+      case 'V': {
+
+        most_time_key = 1;
+        if (sscanf(optarg, "%llu", &most_time) < 1 || optarg[0] == '-')
+          FATAL("Bad syntax used for -V");
+
+      } break;
+
+      case 'E': {
+
+        most_execs_key = 1;
+        if (sscanf(optarg, "%llu", &most_execs) < 1 || optarg[0] == '-')
+          FATAL("Bad syntax used for -E");
+
+      } break;
+
+      case 'L': {                                              /* MOpt mode */
+
+        if (limit_time_sig) FATAL("Multiple -L options not supported");
+        limit_time_sig = 1;
+        havoc_max_mult = HAVOC_MAX_MULT_MOPT;
+
+        if (sscanf(optarg, "%llu", &limit_time_puppet) < 1 || optarg[0] == '-')
+          FATAL("Bad syntax used for -L");
+
+        u64 limit_time_puppet2 = limit_time_puppet * 60 * 1000;
+
+        if (limit_time_puppet2 < limit_time_puppet)
+          FATAL("limit_time overflow");
+        limit_time_puppet = limit_time_puppet2;
+
+        SAYF("limit_time_puppet %llu\n", limit_time_puppet);
+        swarm_now = 0;
+
+        if (limit_time_puppet == 0) key_puppet = 1;
+
+        int i;
+        int tmp_swarm = 0;
+
+        if (g_now > g_max) g_now = 0;
+        w_now = (w_init - w_end) * (g_max - g_now) / (g_max) + w_end;
+
+        for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) {
+
+          double total_puppet_temp = 0.0;
+          swarm_fitness[tmp_swarm] = 0.0;
+
+          for (i = 0; i < operator_num; ++i) {
+
+            stage_finds_puppet[tmp_swarm][i] = 0;
+            probability_now[tmp_swarm][i] = 0.0;
+            x_now[tmp_swarm][i] = ((double)(random() % 7000) * 0.0001 + 0.1);
+            total_puppet_temp += x_now[tmp_swarm][i];
+            v_now[tmp_swarm][i] = 0.1;
+            L_best[tmp_swarm][i] = 0.5;
+            G_best[i] = 0.5;
+            eff_best[tmp_swarm][i] = 0.0;
+
+          }
+
+          for (i = 0; i < operator_num; ++i) {
+
+            stage_cycles_puppet_v2[tmp_swarm][i] =
+                stage_cycles_puppet[tmp_swarm][i];
+            stage_finds_puppet_v2[tmp_swarm][i] =
+                stage_finds_puppet[tmp_swarm][i];
+            x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / total_puppet_temp;
+
+          }
+
+          double x_temp = 0.0;
+
+          for (i = 0; i < operator_num; ++i) {
+
+            probability_now[tmp_swarm][i] = 0.0;
+            v_now[tmp_swarm][i] =
+                w_now * v_now[tmp_swarm][i] +
+                RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) +
+                RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
+
+            x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
+
+            if (x_now[tmp_swarm][i] > v_max)
+              x_now[tmp_swarm][i] = v_max;
+            else if (x_now[tmp_swarm][i] < v_min)
+              x_now[tmp_swarm][i] = v_min;
+
+            x_temp += x_now[tmp_swarm][i];
+
+          }
+
+          for (i = 0; i < operator_num; ++i) {
+
+            x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
+            if (likely(i != 0))
+              probability_now[tmp_swarm][i] =
+                  probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
+            else
+              probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
+
+          }
+
+          if (probability_now[tmp_swarm][operator_num - 1] < 0.99 ||
+              probability_now[tmp_swarm][operator_num - 1] > 1.01)
+            FATAL("ERROR probability");
+
+        }
+
+        for (i = 0; i < operator_num; ++i) {
+
+          core_operator_finds_puppet[i] = 0;
+          core_operator_finds_puppet_v2[i] = 0;
+          core_operator_cycles_puppet[i] = 0;
+          core_operator_cycles_puppet_v2[i] = 0;
+          core_operator_cycles_puppet_v3[i] = 0;
+
+        }
+
+      } break;
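
/* The MOpt initialization above is a particle-swarm (PSO) step and is easier
   to follow in isolation. A minimal sketch with illustrative names -- not the
   afl++ globals -- assuming NUM_OPS mutation operators, a constant learning
   factor RAND_C, and the same clamp-then-renormalize scheme: */

#define NUM_OPS 16
#define V_MAX 1.0
#define V_MIN 0.05
#define RAND_C 0.1

static double x[NUM_OPS], v[NUM_OPS], l_best[NUM_OPS], g_best[NUM_OPS],
    prob[NUM_OPS];

static void pso_step(double w_now) {

  double sum = 0.0;
  int    i;

  for (i = 0; i < NUM_OPS; i++) {

    /* Velocity pulls each operator's weight toward its local and global
       bests; the position is then clamped to [V_MIN, V_MAX]. */
    v[i] = w_now * v[i] + RAND_C * (l_best[i] - x[i]) +
           RAND_C * (g_best[i] - x[i]);
    x[i] += v[i];

    if (x[i] > V_MAX)
      x[i] = V_MAX;
    else if (x[i] < V_MIN)
      x[i] = V_MIN;

    sum += x[i];

  }

  /* Renormalize so prob[] becomes a cumulative distribution ending at ~1.0,
     which is exactly what the "< 0.99 || > 1.01" sanity check verifies. */
  for (i = 0; i < NUM_OPS; i++) {

    x[i] /= sum;
    prob[i] = i ? prob[i - 1] + x[i] : x[i];

  }

}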
+
+      default: usage(argv[0]);
+
+    }
+
+  if (optind == argc || !in_dir || !out_dir) usage(argv[0]);
+
+  if (fixed_seed) OKF("Running with fixed seed: %u", (u32)init_seed);
+  srandom((u32)init_seed);
+  setup_signal_handlers();
+  check_asan_opts();
+
+  power_name = power_names[schedule];
+
+  if (sync_id) fix_up_sync();
+
+  if (!strcmp(in_dir, out_dir))
+    FATAL("Input and output directories can't be the same");
+
+  if ((tmp_dir = getenv("AFL_TMPDIR")) != NULL) {
+
+    char tmpfile[strlen(tmp_dir) + 16];
+    sprintf(tmpfile, "%s/%s", tmp_dir, ".cur_input");
+    if (access(tmpfile, F_OK) !=
+        -1)  // there is still a race condition here, but well ...
+      FATAL("TMP_DIR already has an existing temporary input file: %s",
+            tmpfile);
+
+  } else
+
+    tmp_dir = out_dir;
+
+  if (dumb_mode) {
+
+    if (crash_mode) FATAL("-C and -n are mutually exclusive");
+    if (qemu_mode) FATAL("-Q and -n are mutually exclusive");
+    if (unicorn_mode) FATAL("-U and -n are mutually exclusive");
+
+  }
+
+  if (getenv("AFL_NO_UI") && getenv("AFL_FORCE_UI"))
+    FATAL("AFL_NO_UI and AFL_FORCE_UI are mutually exclusive");
+
+  if (strchr(argv[optind], '/') == NULL)
+    WARNF(cLRD
+          "Target binary called without a path prefix; make sure you are "
+          "fuzzing the right binary: " cRST "%s",
+          argv[optind]);
+
+  OKF("afl++ is maintained by Marc \"van Hauser\" Heuse, Heiko \"hexcoder\" "
+      "Eissfeldt and Andrea Fioraldi");
+  OKF("afl++ is open source, get it at "
+      "https://github.com/vanhauser-thc/AFLplusplus");
+  OKF("Power schedules from github.com/mboehme/aflfast");
+  OKF("Python Mutator and llvm_mode whitelisting from github.com/choller/afl");
+  OKF("afl-tmin fork server patch from github.com/nccgroup/TriforceAFL");
+  OKF("MOpt Mutator from github.com/puppet-meteor/MOpt-AFL");
+  ACTF("Getting to work...");
+
+  switch (schedule) {
+
+    case FAST: OKF("Using exponential power schedule (FAST)"); break;
+    case COE: OKF("Using cut-off exponential power schedule (COE)"); break;
+    case EXPLOIT:
+      OKF("Using exploitation-based constant power schedule (EXPLOIT)");
+      break;
+    case LIN: OKF("Using linear power schedule (LIN)"); break;
+    case QUAD: OKF("Using quadratic power schedule (QUAD)"); break;
+    case EXPLORE:
+      OKF("Using exploration-based constant power schedule (EXPLORE)");
+      break;
+    default: FATAL("Unknown power schedule"); break;
+
+  }
+
+  if (getenv("AFL_NO_FORKSRV")) no_forkserver = 1;
+  if (getenv("AFL_NO_CPU_RED")) no_cpu_meter_red = 1;
+  if (getenv("AFL_NO_ARITH")) no_arith = 1;
+  if (getenv("AFL_SHUFFLE_QUEUE")) shuffle_queue = 1;
+  if (getenv("AFL_FAST_CAL")) fast_cal = 1;
+
+  if (getenv("AFL_HANG_TMOUT")) {
+
+    hang_tmout = atoi(getenv("AFL_HANG_TMOUT"));
+    if (!hang_tmout) FATAL("Invalid value of AFL_HANG_TMOUT");
+
+  }
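
/* Note that atoi() returns 0 both for "0" and for unparsable input, which is
   why the !hang_tmout check above rejects either. A stricter parser -- a
   sketch of an alternative, not what afl-fuzz does -- would use strtoul: */

#include <errno.h>
#include <stdlib.h>

static unsigned long parse_timeout_ms(const char* s) {

  char*         end;
  unsigned long val;

  errno = 0;
  val = strtoul(s, &end, 10);

  /* Reject overflow, empty strings, and trailing junk. */
  if (errno || end == s || *end) return 0;
  return val;

}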
+
+  if (dumb_mode == 2 && no_forkserver)
+    FATAL("AFL_DUMB_FORKSRV and AFL_NO_FORKSRV are mutually exclusive");
+
+  if (getenv("AFL_PRELOAD")) {
+
+    setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);
+    setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);
+
+  }
+
+  if (getenv("AFL_LD_PRELOAD"))
+    FATAL("Use AFL_PRELOAD instead of AFL_LD_PRELOAD");
+
+  save_cmdline(argc, argv);
+
+  fix_up_banner(argv[optind]);
+
+  check_if_tty();
+  if (getenv("AFL_FORCE_UI")) not_on_tty = 0;
+
+  if (getenv("AFL_CAL_FAST")) {
+
+    /* Use fewer calibration cycles, for slow applications */
+    cal_cycles = 3;
+    cal_cycles_long = 5;
+
+  }
+
+  if (getenv("AFL_DEBUG")) debug = 1;
+
+  if (getenv("AFL_PYTHON_ONLY")) {
+
+    /* This ensures we don't proceed to havoc/splice */
+    python_only = 1;
+
+    /* Ensure we also skip all deterministic steps */
+    skip_deterministic = 1;
+
+  }
+
+  get_core_count();
+
+#  ifdef HAVE_AFFINITY
+  bind_to_free_cpu();
+#  endif /* HAVE_AFFINITY */
+
+  check_crash_handling();
+  check_cpu_governor();
+
+  setup_post();
+  setup_custom_mutator();
+  setup_shm(dumb_mode);
+
+  if (!in_bitmap) memset(virgin_bits, 255, MAP_SIZE);
+  memset(virgin_tmout, 255, MAP_SIZE);
+  memset(virgin_crash, 255, MAP_SIZE);
+
+  init_count_class16();
+
+  setup_dirs_fds();
+
+#  ifdef USE_PYTHON
+  if (init_py()) FATAL("Failed to initialize Python module");
+#  else
+  if (getenv("AFL_PYTHON_MODULE"))
+    FATAL("Your AFL binary was built without Python support");
+#  endif
+
+  setup_cmdline_file(argv + optind);
+
+  read_testcases();
+  load_auto();
+
+  pivot_inputs();
+
+  if (extras_dir) load_extras(extras_dir);
+
+  if (!timeout_given) find_timeout();
+
+  /* If we don't have a file name chosen yet, use a safe default. */
+
+  if (!out_file) {
+
+    u32 i = optind + 1;
+    while (argv[i]) {
+
+      u8* aa_loc = strstr(argv[i], "@@");
+
+      if (aa_loc && !out_file) {
+
+        if (file_extension) {
+
+          out_file = alloc_printf("%s/.cur_input.%s", out_dir, file_extension);
+
+        } else {
+
+          out_file = alloc_printf("%s/.cur_input", out_dir);
+
+        }
+
+        detect_file_args(argv + optind + 1, out_file);
+        break;
+
+      }
+
+      ++i;
+
+    }
+
+  }
+
+  if (!out_file) setup_stdio_file();
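
/* For context: "@@" in the target's command line is a placeholder that gets
   replaced with the path of the current input file (out_file above); if no
   "@@" is present, the input is fed via stdin instead. A simplified sketch of
   the substitution that detect_file_args() performs (illustrative, not the
   shared helper itself): */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void subst_aa(char** argv, const char* path) {

  int i;

  for (i = 0; argv[i]; i++) {

    char* aa = strstr(argv[i], "@@");
    if (!aa) continue;

    /* Rebuild the argument as "<prefix><path><suffix>". */
    char* buf = malloc(strlen(argv[i]) - 2 + strlen(path) + 1);
    if (!buf) exit(1);
    sprintf(buf, "%.*s%s%s", (int)(aa - argv[i]), argv[i], path, aa + 2);
    argv[i] = buf;

  }

}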
+
+  check_binary(argv[optind]);
+
+  start_time = get_cur_time();
+
+  if (qemu_mode)
+    use_argv = get_qemu_argv(argv[0], argv + optind, argc - optind);
+  else
+    use_argv = argv + optind;
+
+  perform_dry_run(use_argv);
+
+  cull_queue();
+
+  show_init_stats();
+
+  seek_to = find_start_position();
+
+  write_stats_file(0, 0, 0);
+  save_auto();
+
+  if (stop_soon) goto stop_fuzzing;
+
+  /* Woop woop woop */
+
+  if (!not_on_tty) {
+
+    sleep(4);
+    start_time += 4000;
+    if (stop_soon) goto stop_fuzzing;
+
+  }
+
+  // Reset start_time here so the -V time limit is measured from the real
+  // fuzzing start.
+  start_time = get_cur_time();
+
+  while (1) {
+
+    u8 skipped_fuzz;
+
+    cull_queue();
+
+    if (!queue_cur) {
+
+      ++queue_cycle;
+      current_entry = 0;
+      cur_skipped_paths = 0;
+      queue_cur = queue;
+
+      while (seek_to) {
+
+        ++current_entry;
+        --seek_to;
+        queue_cur = queue_cur->next;
+
+      }
+
+      show_stats();
+
+      if (not_on_tty) {
+
+        ACTF("Entering queue cycle %llu.", queue_cycle);
+        fflush(stdout);
+
+      }
+
+      /* If we had a full queue cycle with no new finds, try
+         recombination strategies next. */
+
+      if (queued_paths == prev_queued) {
+
+        if (use_splicing)
+          ++cycles_wo_finds;
+        else
+          use_splicing = 1;
+
+      } else
+
+        cycles_wo_finds = 0;
+
+      prev_queued = queued_paths;
+
+      if (sync_id && queue_cycle == 1 && getenv("AFL_IMPORT_FIRST"))
+        sync_fuzzers(use_argv);
+
+    }
+
+    skipped_fuzz = fuzz_one(use_argv);
+
+    if (!stop_soon && sync_id && !skipped_fuzz) {
+
+      if (!(sync_interval_cnt++ % SYNC_INTERVAL)) sync_fuzzers(use_argv);
+
+    }
+
+    if (!stop_soon && exit_1) stop_soon = 2;
+
+    if (stop_soon) break;
+
+    queue_cur = queue_cur->next;
+    ++current_entry;
+
+    if (most_time_key == 1) {
+
+      u64 cur_ms_lv = get_cur_time();
+      if (most_time * 1000 < cur_ms_lv - start_time) {
+
+        most_time_key = 2;
+        break;
+
+      }
+
+    }
+
+    if (most_execs_key == 1) {
+
+      if (most_execs <= total_execs) {
+
+        most_execs_key = 2;
+        break;
+
+      }
+
+    }
+
+  }
+
+  if (queue_cur) show_stats();
+
+  /*
+   * ATTENTION - the following 10 lines were copied from a PR to Google's afl
+   * repository - and slightly fixed.
+   * These lines have nothing to do with the purpose of the original PR,
+   * though. It looks like when an exit condition was met (AFL_BENCH_JUST_ONE,
+   * AFL_EXIT_WHEN_DONE or AFL_BENCH_UNTIL_CRASH), the child and forkserver
+   * were not killed.
+   */
+  /* If we stopped programmatically, we kill the forkserver and the current
+     runner. If we stopped manually, this is done by the signal handler. */
+  if (stop_soon == 2) {
+
+    if (child_pid > 0) kill(child_pid, SIGKILL);
+    if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL);
+    /* Now that we've killed the forkserver, wait for it so that its rusage
+     * stats can still be collected. */
+    if (waitpid(forksrv_pid, NULL, 0) <= 0) { WARNF("waitpid() failed"); }
+
+  }
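
/* waitpid() above only reaps the forkserver. On systems that provide it,
   wait4() reaps and returns the child's resource usage in one call -- a
   sketch of that alternative (not what the code above does): */

#include <sys/resource.h>
#include <sys/types.h>
#include <sys/wait.h>

static long reap_and_get_maxrss(pid_t pid) {

  struct rusage ru;
  int           status;

  if (wait4(pid, &status, 0, &ru) <= 0) return -1;
  return ru.ru_maxrss;                   /* peak RSS of the reaped child */

}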
+
+  write_bitmap();
+  write_stats_file(0, 0, 0);
+  save_auto();
+
+stop_fuzzing:
+
+  SAYF(CURSOR_SHOW cLRD "\n\n+++ Testing aborted %s +++\n" cRST,
+       stop_soon == 2 ? "programmatically" : "by user");
+
+  if (most_time_key == 2) SAYF(cYEL "[!] " cRST "Time limit was reached\n");
+  if (most_execs_key == 2)
+    SAYF(cYEL "[!] " cRST "Execution limit was reached\n");
+
+  /* Running for more than 30 minutes but still doing first cycle? */
+
+  if (queue_cycle == 1 && get_cur_time() - start_time > 30 * 60 * 1000) {
+
+    SAYF("\n" cYEL "[!] " cRST
+         "Stopped during the first cycle, results may be incomplete.\n"
+         "    (For info on resuming, see %s/README)\n",
+         doc_path);
+
+  }
+
+  fclose(plot_file);
+  destroy_queue();
+  destroy_extras();
+  ck_free(target_path);
+  ck_free(sync_id);
+
+  alloc_report();
+
+#  ifdef USE_PYTHON
+  finalize_py();
+#  endif
+
+  OKF("We're done here. Have a nice day!\n");
+
+  exit(0);
+
+}
+
+#endif /* !AFL_LIB */
+
diff --git a/afl-gcc.c b/src/afl-gcc.c
index f6ededeb..750f9b72 100644
--- a/afl-gcc.c
+++ b/src/afl-gcc.c
@@ -43,19 +43,18 @@
 #include <stdlib.h>
 #include <string.h>
 
-static u8*  as_path;                /* Path to the AFL 'as' wrapper      */
-static u8** cc_params;              /* Parameters passed to the real CC  */
-static u32  cc_par_cnt = 1;         /* Param count, including argv0      */
-static u8   be_quiet,               /* Quiet mode                        */
-            clang_mode;             /* Invoked as afl-clang*?            */
-
+static u8*  as_path;                   /* Path to the AFL 'as' wrapper      */
+static u8** cc_params;                 /* Parameters passed to the real CC  */
+static u32  cc_par_cnt = 1;            /* Param count, including argv0      */
+static u8   be_quiet,                  /* Quiet mode                        */
+    clang_mode;                        /* Invoked as afl-clang*?            */
 
 /* Try to find our "fake" GNU assembler in AFL_PATH or at the location derived
    from argv[0]. If that fails, abort. */
 
 static void find_as(u8* argv0) {
 
-  u8 *afl_path = getenv("AFL_PATH");
+  u8* afl_path = getenv("AFL_PATH");
   u8 *slash, *tmp;
 
   if (afl_path) {
@@ -63,9 +62,11 @@ static void find_as(u8* argv0) {
     tmp = alloc_printf("%s/as", afl_path);
 
     if (!access(tmp, X_OK)) {
+
       as_path = afl_path;
       ck_free(tmp);
       return;
+
     }
 
     ck_free(tmp);
@@ -76,7 +77,7 @@ static void find_as(u8* argv0) {
 
   if (slash) {
 
-    u8 *dir;
+    u8* dir;
 
     *slash = 0;
     dir = ck_strdup(argv0);
@@ -85,9 +86,11 @@ static void find_as(u8* argv0) {
     tmp = alloc_printf("%s/afl-as", dir);
 
     if (!access(tmp, X_OK)) {
+
       as_path = dir;
       ck_free(tmp);
       return;
+
     }
 
     ck_free(tmp);
@@ -96,21 +99,22 @@ static void find_as(u8* argv0) {
   }
 
   if (!access(AFL_PATH "/as", X_OK)) {
+
     as_path = AFL_PATH;
     return;
+
   }
 
   FATAL("Unable to find AFL wrapper binary for 'as'. Please set AFL_PATH");
- 
-}
 
+}
 
 /* Copy argv to cc_params, making the necessary edits. */
 
 static void edit_params(u32 argc, char** argv) {
 
-  u8 fortify_set = 0, asan_set = 0;
-  u8 *name;
+  u8  fortify_set = 0, asan_set = 0;
+  u8* name;
 
 #if defined(__FreeBSD__) && defined(__x86_64__)
   u8 m32_set = 0;
@@ -119,7 +123,10 @@ static void edit_params(u32 argc, char** argv) {
   cc_params = ck_alloc((argc + 128) * sizeof(u8*));
 
   name = strrchr(argv[0], '/');
-  if (!name) name = argv[0]; else name++;
+  if (!name)
+    name = argv[0];
+  else
+    name++;
 
   if (!strncmp(name, "afl-clang", 9)) {
 
@@ -128,11 +135,15 @@ static void edit_params(u32 argc, char** argv) {
     setenv(CLANG_ENV_VAR, "1", 1);
 
     if (!strcmp(name, "afl-clang++")) {
+
       u8* alt_cxx = getenv("AFL_CXX");
       cc_params[0] = alt_cxx ? alt_cxx : (u8*)"clang++";
+
     } else {
+
       u8* alt_cc = getenv("AFL_CC");
       cc_params[0] = alt_cc ? alt_cc : (u8*)"clang";
+
     }
 
   } else {
@@ -145,16 +156,22 @@ static void edit_params(u32 argc, char** argv) {
 
 #ifdef __APPLE__
 
-    if (!strcmp(name, "afl-g++")) cc_params[0] = getenv("AFL_CXX");
-    else if (!strcmp(name, "afl-gcj")) cc_params[0] = getenv("AFL_GCJ");
-    else cc_params[0] = getenv("AFL_CC");
+    if (!strcmp(name, "afl-g++"))
+      cc_params[0] = getenv("AFL_CXX");
+    else if (!strcmp(name, "afl-gcj"))
+      cc_params[0] = getenv("AFL_GCJ");
+    else
+      cc_params[0] = getenv("AFL_CC");
 
     if (!cc_params[0]) {
 
       SAYF("\n" cLRD "[-] " cRST
-           "On Apple systems, 'gcc' is usually just a wrapper for clang. Please use the\n"
-           "    'afl-clang' utility instead of 'afl-gcc'. If you really have GCC installed,\n"
-           "    set AFL_CC or AFL_CXX to specify the correct path to that compiler.\n");
+           "On Apple systems, 'gcc' is usually just a wrapper for clang. "
+           "Please use the\n"
+           "    'afl-clang' utility instead of 'afl-gcc'. If you really have "
+           "GCC installed,\n"
+           "    set AFL_CC or AFL_CXX to specify the correct path to that "
+           "compiler.\n");
 
       FATAL("AFL_CC or AFL_CXX required on MacOS X");
 
@@ -163,14 +180,20 @@ static void edit_params(u32 argc, char** argv) {
 #else
 
     if (!strcmp(name, "afl-g++")) {
+
       u8* alt_cxx = getenv("AFL_CXX");
       cc_params[0] = alt_cxx ? alt_cxx : (u8*)"g++";
+
     } else if (!strcmp(name, "afl-gcj")) {
+
       u8* alt_cc = getenv("AFL_GCJ");
       cc_params[0] = alt_cc ? alt_cc : (u8*)"gcj";
+
     } else {
+
       u8* alt_cc = getenv("AFL_CC");
       cc_params[0] = alt_cc ? alt_cc : (u8*)"gcc";
+
     }
 
 #endif /* __APPLE__ */
@@ -178,13 +201,20 @@ static void edit_params(u32 argc, char** argv) {
   }
 
   while (--argc) {
+
     u8* cur = *(++argv);
 
     if (!strncmp(cur, "-B", 2)) {
 
       if (!be_quiet) WARNF("-B is already set, overriding");
 
-      if (!cur[2] && argc > 1) { argc--; argv++; }
+      if (!cur[2] && argc > 1) {
+
+        argc--;
+        argv++;
+
+      }
+
       continue;
 
     }
@@ -197,8 +227,8 @@ static void edit_params(u32 argc, char** argv) {
     if (!strcmp(cur, "-m32")) m32_set = 1;
 #endif
 
-    if (!strcmp(cur, "-fsanitize=address") ||
-        !strcmp(cur, "-fsanitize=memory")) asan_set = 1;
+    if (!strcmp(cur, "-fsanitize=address") || !strcmp(cur, "-fsanitize=memory"))
+      asan_set = 1;
 
     if (strstr(cur, "FORTIFY_SOURCE")) fortify_set = 1;
 
@@ -209,15 +239,13 @@ static void edit_params(u32 argc, char** argv) {
   cc_params[cc_par_cnt++] = "-B";
   cc_params[cc_par_cnt++] = as_path;
 
-  if (clang_mode)
-    cc_params[cc_par_cnt++] = "-no-integrated-as";
+  if (clang_mode) cc_params[cc_par_cnt++] = "-no-integrated-as";
 
   if (getenv("AFL_HARDEN")) {
 
     cc_params[cc_par_cnt++] = "-fstack-protector-all";
 
-    if (!fortify_set)
-      cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2";
+    if (!fortify_set) cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2";
 
   }
 
@@ -229,8 +257,7 @@ static void edit_params(u32 argc, char** argv) {
 
   } else if (getenv("AFL_USE_ASAN")) {
 
-    if (getenv("AFL_USE_MSAN"))
-      FATAL("ASAN and MSAN are mutually exclusive");
+    if (getenv("AFL_USE_MSAN")) FATAL("ASAN and MSAN are mutually exclusive");
 
     if (getenv("AFL_HARDEN"))
       FATAL("ASAN and AFL_HARDEN are mutually exclusive");
@@ -240,8 +267,7 @@ static void edit_params(u32 argc, char** argv) {
 
   } else if (getenv("AFL_USE_MSAN")) {
 
-    if (getenv("AFL_USE_ASAN"))
-      FATAL("ASAN and MSAN are mutually exclusive");
+    if (getenv("AFL_USE_ASAN")) FATAL("ASAN and MSAN are mutually exclusive");
 
     if (getenv("AFL_HARDEN"))
       FATAL("MSAN and AFL_HARDEN are mutually exclusive");
@@ -249,11 +275,10 @@ static void edit_params(u32 argc, char** argv) {
     cc_params[cc_par_cnt++] = "-U_FORTIFY_SOURCE";
     cc_params[cc_par_cnt++] = "-fsanitize=memory";
 
-
   }
 
 #ifdef USEMMAP
-    cc_params[cc_par_cnt++] = "-lrt";
+  cc_params[cc_par_cnt++] = "-lrt";
 #endif
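
/* The pattern above -- environment variables toggling extra compiler flags --
   is essentially the whole interface of the wrapper. A condensed sketch of
   the idea (illustrative names, not the real cc_params plumbing): */

#include <stdlib.h>

static void add_env_flags(const char** params, unsigned* cnt) {

  if (getenv("AFL_HARDEN")) params[(*cnt)++] = "-fstack-protector-all";

  if (getenv("AFL_USE_ASAN"))
    params[(*cnt)++] = "-fsanitize=address";
  else if (getenv("AFL_USE_MSAN"))
    params[(*cnt)++] = "-fsanitize=memory";

}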
 
   if (!getenv("AFL_DONT_OPTIMIZE")) {
@@ -264,12 +289,11 @@ static void edit_params(u32 argc, char** argv) {
        works OK. This has nothing to do with us, but let's avoid triggering
        that bug. */
 
-    if (!clang_mode || !m32_set)
-      cc_params[cc_par_cnt++] = "-g";
+    if (!clang_mode || !m32_set) cc_params[cc_par_cnt++] = "-g";
 
 #else
 
-      cc_params[cc_par_cnt++] = "-g";
+    cc_params[cc_par_cnt++] = "-g";
 
 #endif
 
@@ -300,7 +324,6 @@ static void edit_params(u32 argc, char** argv) {
 
 }
 
-
 /* Main entry point */
 
 int main(int argc, char** argv) {
@@ -308,23 +331,33 @@ int main(int argc, char** argv) {
   if (isatty(2) && !getenv("AFL_QUIET")) {
 
     SAYF(cCYA "afl-cc" VERSION cRST " by <lcamtuf@google.com>\n");
-    SAYF(cYEL "[!] " cBRI "NOTE: " cRST "afl-gcc is deprecated, llvm_mode is much faster and has more options\n");
+    SAYF(cYEL "[!] " cBRI "NOTE: " cRST
+              "afl-gcc is deprecated, llvm_mode is much faster and has more "
+              "options\n");
+
+  } else
 
-  } else be_quiet = 1;
+    be_quiet = 1;
 
   if (argc < 2) {
 
-    SAYF("\n"
-         "This is a helper application for afl-fuzz. It serves as a drop-in replacement\n"
-         "for gcc or clang, letting you recompile third-party code with the required\n"
-         "runtime instrumentation. A common use pattern would be one of the following:\n\n"
+    SAYF(
+        "\n"
+        "This is a helper application for afl-fuzz. It serves as a drop-in "
+        "replacement\n"
+        "for gcc or clang, letting you recompile third-party code with the "
+        "required\n"
+        "runtime instrumentation. A common use pattern would be one of the "
+        "following:\n\n"
 
-         "  CC=%s/afl-gcc ./configure\n"
-         "  CXX=%s/afl-g++ ./configure\n\n"
+        "  CC=%s/afl-gcc ./configure\n"
+        "  CXX=%s/afl-g++ ./configure\n\n"
 
-         "You can specify custom next-stage toolchain via AFL_CC, AFL_CXX, and AFL_AS.\n"
-         "Setting AFL_HARDEN enables hardening optimizations in the compiled code.\n\n",
-         BIN_PATH, BIN_PATH);
+        "You can specify custom next-stage toolchain via AFL_CC, AFL_CXX, and "
+        "AFL_AS.\n"
+        "Setting AFL_HARDEN enables hardening optimizations in the compiled "
+        "code.\n\n",
+        BIN_PATH, BIN_PATH);
 
     exit(1);
 
@@ -341,3 +374,4 @@ int main(int argc, char** argv) {
   return 0;
 
 }
+
diff --git a/afl-gotcpu.c b/src/afl-gotcpu.c
index 8c04b205..5aa9b35c 100644
--- a/afl-gotcpu.c
+++ b/src/afl-gotcpu.c
@@ -26,10 +26,12 @@
  */
 
 #define AFL_MAIN
-#define _GNU_SOURCE
+#ifndef _GNU_SOURCE
+#  define _GNU_SOURCE
+#endif
 
 #ifdef __ANDROID__
-  #include "android-ashmem.h"
+#  include "android-ashmem.h"
 #endif
 #include <stdio.h>
 #include <stdlib.h>
@@ -49,12 +51,11 @@
 #  define HAVE_AFFINITY 1
 #endif /* __linux__ */
 
-
 /* Get unix time in microseconds. */
 
 static u64 get_cur_time_us(void) {
 
-  struct timeval tv;
+  struct timeval  tv;
   struct timezone tz;
 
   gettimeofday(&tv, &tz);
@@ -63,7 +64,6 @@ static u64 get_cur_time_us(void) {
 
 }
 
-
 /* Get CPU usage in microseconds. */
 
 static u64 get_cpu_usage_us(void) {
@@ -77,7 +77,6 @@ static u64 get_cpu_usage_us(void) {
 
 }
 
-
 /* Measure preemption rate. */
 
 static u32 measure_preemption(u32 target_ms) {
@@ -94,14 +93,17 @@ repeat_loop:
 
   v1 = CTEST_BUSY_CYCLES;
 
-  while (v1--) v2++;
+  while (v1--)
+    v2++;
   sched_yield();
 
   en_t = get_cur_time_us();
 
   if (en_t - st_t < target_ms * 1000) {
+
     loop_repeats++;
     goto repeat_loop;
+
   }
 
   /* Let's see what percentage of this time we actually had a chance to
@@ -109,22 +111,20 @@ repeat_loop:
 
   en_c = get_cpu_usage_us();
 
-  real_delta  = (en_t - st_t) / 1000;
+  real_delta = (en_t - st_t) / 1000;
   slice_delta = (en_c - st_c) / 1000;
 
   return real_delta * 100 / slice_delta;
 
 }
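
/* measure_preemption() boils down to: burn CPU for a fixed wall-clock window,
   then compare how much CPU time the scheduler actually granted. A compressed
   sketch of the same idea (illustrative; user time only, via getrusage): */

#include <sys/resource.h>
#include <sys/time.h>

static unsigned util_percent(long window_ms) {

  struct timeval         t0, t1;
  struct rusage          r0, r1;
  volatile unsigned long burn = 0;
  long                   wall_ms, cpu_ms;

  gettimeofday(&t0, NULL);
  getrusage(RUSAGE_SELF, &r0);

  do {

    burn++;
    gettimeofday(&t1, NULL);
    wall_ms = (t1.tv_sec - t0.tv_sec) * 1000 +
              (t1.tv_usec - t0.tv_usec) / 1000;

  } while (wall_ms < window_ms);

  getrusage(RUSAGE_SELF, &r1);
  cpu_ms = (r1.ru_utime.tv_sec - r0.ru_utime.tv_sec) * 1000 +
           (r1.ru_utime.tv_usec - r0.ru_utime.tv_usec) / 1000;

  /* ~100 means the loop owned the core; larger values mean preemption. */
  return (unsigned)(wall_ms * 100 / (cpu_ms ? cpu_ms : 1));

}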
 
-
 /* Do the benchmark thing. */
 
 int main(int argc, char** argv) {
 
 #ifdef HAVE_AFFINITY
 
-  u32 cpu_cnt = sysconf(_SC_NPROCESSORS_ONLN),
-      idle_cpus = 0, maybe_cpus = 0, i;
+  u32 cpu_cnt = sysconf(_SC_NPROCESSORS_ONLN), idle_cpus = 0, maybe_cpus = 0, i;
 
   SAYF(cCYA "afl-gotcpu" VERSION cRST " by <lcamtuf@google.com>\n");
 
@@ -140,7 +140,7 @@ int main(int argc, char** argv) {
     if (!fr) {
 
       cpu_set_t c;
-      u32 util_perc;
+      u32       util_perc;
 
       CPU_ZERO(&c);
       CPU_SET(i, &c);
@@ -157,7 +157,7 @@ int main(int argc, char** argv) {
 
       } else if (util_perc < 250) {
 
-        SAYF("    Core #%u: " cYEL "CAUTION " cRST "(%u%%)\n", i, util_perc); 
+        SAYF("    Core #%u: " cYEL "CAUTION " cRST "(%u%%)\n", i, util_perc);
         exit(1);
 
       }
@@ -253,3 +253,4 @@ int main(int argc, char** argv) {
 #endif /* ^HAVE_AFFINITY */
 
 }
+
diff --git a/sharedmem.c b/src/afl-sharedmem.c
index ce3b76e6..9c7ac7c3 100644
--- a/sharedmem.c
+++ b/src/afl-sharedmem.c
@@ -5,7 +5,7 @@
 #define AFL_MAIN
 
 #ifdef __ANDROID__
-  #include "android-ashmem.h"
+#  include "android-ashmem.h"
 #endif
 #include "config.h"
 #include "types.h"
@@ -32,68 +32,79 @@
 #include <sys/mman.h>
 
 #ifndef USEMMAP
- #include <sys/ipc.h>
- #include <sys/shm.h>
+#  include <sys/ipc.h>
+#  include <sys/shm.h>
 #endif
 
-extern unsigned char*trace_bits;
+extern unsigned char *trace_bits;
 
 #ifdef USEMMAP
 /* ================ Proteas ================ */
-int g_shm_fd = -1;
+int            g_shm_fd = -1;
 unsigned char *g_shm_base = NULL;
-char g_shm_file_path[L_tmpnam];
+char           g_shm_file_path[L_tmpnam];
 /* ========================================= */
 #else
-static s32 shm_id;                    /* ID of the SHM region              */
+static s32 shm_id;                     /* ID of the SHM region              */
 #endif
 
 /* Get rid of shared memory (atexit handler). */
 
 void remove_shm(void) {
+
 #ifdef USEMMAP
   if (g_shm_base != NULL) {
+
     munmap(g_shm_base, MAP_SIZE);
     g_shm_base = NULL;
+
   }
 
   if (g_shm_fd != -1) {
+
     close(g_shm_fd);
     g_shm_fd = -1;
+
   }
+
 #else
   shmctl(shm_id, IPC_RMID, NULL);
 #endif
-}
 
+}
 
 /* Configure shared memory. */
 
 void setup_shm(unsigned char dumb_mode) {
+
 #ifdef USEMMAP
   /* generate random file name for multi instance */
 
-  /* thanks to f*cking glibc we can not use tmpnam securely, it generates a security warning that cannot be suppressed */
+  /* thanks to f*cking glibc we can not use tmpnam securely, it generates a
+   * security warning that cannot be suppressed */
   /* so we fall back to this cruder workaround */
   snprintf(g_shm_file_path, L_tmpnam, "/afl_%d_%ld", getpid(), random());
 
   /* create the shared memory segment as if it was a file */
   g_shm_fd = shm_open(g_shm_file_path, O_CREAT | O_RDWR | O_EXCL, 0600);
-  if (g_shm_fd == -1) {
-    PFATAL("shm_open() failed");
-  }
+  if (g_shm_fd == -1) { PFATAL("shm_open() failed"); }
 
   /* configure the size of the shared memory segment */
   if (ftruncate(g_shm_fd, MAP_SIZE)) {
+
     PFATAL("setup_shm(): ftruncate() failed");
+
   }
 
   /* map the shared memory segment to the address space of the process */
-  g_shm_base = mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, g_shm_fd, 0);
+  g_shm_base =
+      mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, g_shm_fd, 0);
   if (g_shm_base == MAP_FAILED) {
+
     close(g_shm_fd);
     g_shm_fd = -1;
     PFATAL("mmap() failed");
+
   }
 
   atexit(remove_shm);
@@ -108,7 +119,7 @@ void setup_shm(unsigned char dumb_mode) {
   trace_bits = g_shm_base;
 
   if (!trace_bits) PFATAL("mmap() failed");
-  
+
 #else
   u8* shm_str;
 
@@ -132,9 +143,10 @@ void setup_shm(unsigned char dumb_mode) {
   ck_free(shm_str);
 
   trace_bits = shmat(shm_id, NULL, 0);
-  
+
   if (!trace_bits) PFATAL("shmat() failed");
 
 #endif
+
 }
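
/* For contrast with the USEMMAP (shm_open/ftruncate/mmap) path above, the
   default branch follows the classic SysV shmget/shmat pattern. A
   self-contained sketch of that pattern, minus the afl-specific environment
   plumbing (the real code instead removes the segment in the atexit()
   handler): */

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#define MAP_SZ 65536

int main(void) {

  int shm_id = shmget(IPC_PRIVATE, MAP_SZ, IPC_CREAT | IPC_EXCL | 0600);
  if (shm_id < 0) { perror("shmget"); return 1; }

  unsigned char* map = shmat(shm_id, NULL, 0);
  if (map == (void*)-1) { perror("shmat"); return 1; }

  shmctl(shm_id, IPC_RMID, NULL);      /* reclaimed after the last detach */

  map[0] = 1;
  printf("attached %d bytes at %p\n", MAP_SZ, (void*)map);
  return 0;

}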
 
diff --git a/afl-showmap.c b/src/afl-showmap.c
index a490bca6..ac3d687d 100644
--- a/afl-showmap.c
+++ b/src/afl-showmap.c
@@ -24,7 +24,7 @@
 #define AFL_MAIN
 
 #ifdef __ANDROID__
-  #include "android-ashmem.h"
+#  include "android-ashmem.h"
 #endif
 #include "config.h"
 #include "types.h"
@@ -32,7 +32,7 @@
 #include "alloc-inl.h"
 #include "hash.h"
 #include "sharedmem.h"
-#include "afl-common.h"
+#include "common.h"
 
 #include <stdio.h>
 #include <unistd.h>
@@ -51,61 +51,54 @@
 #include <sys/types.h>
 #include <sys/resource.h>
 
-static s32 child_pid;                 /* PID of the tested program         */
+static s32 child_pid;                  /* PID of the tested program         */
 
-       u8* trace_bits;                /* SHM with instrumentation bitmap   */
+u8* trace_bits;                        /* SHM with instrumentation bitmap   */
 
-static u8 *out_file,                  /* Trace output file                 */
-          *doc_path,                  /* Path to docs                      */
-          *target_path,               /* Path to target binary             */
-          *at_file;                   /* Substitution string for @@        */
+static u8 *out_file,                   /* Trace output file                 */
+    *doc_path,                         /* Path to docs                      */
+    *target_path,                      /* Path to target binary             */
+    *at_file;                          /* Substitution string for @@        */
 
-static u32 exec_tmout;                /* Exec timeout (ms)                 */
+static u32 exec_tmout;                 /* Exec timeout (ms)                 */
 
-static u32 total, highest;            /* tuple content information         */
+static u32 total, highest;             /* tuple content information         */
 
-static u64 mem_limit = MEM_LIMIT;     /* Memory limit (MB)                 */
+static u64 mem_limit = MEM_LIMIT;      /* Memory limit (MB)                 */
 
-static u8  quiet_mode,                /* Hide non-essential messages?      */
-           edges_only,                /* Ignore hit counts?                */
-           raw_instr_output,          /* Do not apply AFL filters          */
-           cmin_mode,                 /* Generate output in afl-cmin mode? */
-           binary_mode,               /* Write output as a binary map      */
-           keep_cores;                /* Allow coredumps?                  */
+static u8 quiet_mode,                  /* Hide non-essential messages?      */
+    edges_only,                        /* Ignore hit counts?                */
+    raw_instr_output,                  /* Do not apply AFL filters          */
+    cmin_mode,                         /* Generate output in afl-cmin mode? */
+    binary_mode,                       /* Write output as a binary map      */
+    keep_cores;                        /* Allow coredumps?                  */
 
-static volatile u8
-           stop_soon,                 /* Ctrl-C pressed?                   */
-           child_timed_out,           /* Child timed out?                  */
-           child_crashed;             /* Child crashed?                    */
+static volatile u8 stop_soon,          /* Ctrl-C pressed?                   */
+    child_timed_out,                   /* Child timed out?                  */
+    child_crashed;                     /* Child crashed?                    */
 
 /* Classify tuple counts. Instead of mapping to individual bits, as in
    afl-fuzz.c, we map to more user-friendly numbers between 1 and 8. */
 
 static const u8 count_class_human[256] = {
 
-  [0]           = 0,
-  [1]           = 1,
-  [2]           = 2,
-  [3]           = 3,
-  [4 ... 7]     = 4,
-  [8 ... 15]    = 5,
-  [16 ... 31]   = 6,
-  [32 ... 127]  = 7,
-  [128 ... 255] = 8
+    [0] = 0,          [1] = 1,        [2] = 2,         [3] = 3,
+    [4 ... 7] = 4,    [8 ... 15] = 5, [16 ... 31] = 6, [32 ... 127] = 7,
+    [128 ... 255] = 8
 
 };
 
 static const u8 count_class_binary[256] = {
 
-  [0]           = 0,
-  [1]           = 1,
-  [2]           = 2,
-  [3]           = 4,
-  [4 ... 7]     = 8,
-  [8 ... 15]    = 16,
-  [16 ... 31]   = 32,
-  [32 ... 127]  = 64,
-  [128 ... 255] = 128
+    [0] = 0,
+    [1] = 1,
+    [2] = 2,
+    [3] = 4,
+    [4 ... 7] = 8,
+    [8 ... 15] = 16,
+    [16 ... 31] = 32,
+    [32 ... 127] = 64,
+    [128 ... 255] = 128
 
 };
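
/* These tables bucket raw hit counts into coarse classes so that, e.g., a
   loop iterating 9 vs. 12 times registers as the same behavior. The table
   form trades 256 bytes for a branch-free lookup; written out as branches,
   the human-readable mapping is equivalent to this sketch: */

static unsigned char human_class(unsigned char cnt) {

  if (cnt <= 3) return cnt;                   /* 0..3 map to themselves */
  if (cnt <= 7) return 4;
  if (cnt <= 15) return 5;
  if (cnt <= 31) return 6;
  if (cnt <= 127) return 7;
  return 8;

}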
 
@@ -116,22 +109,25 @@ static void classify_counts(u8* mem, const u8* map) {
   if (edges_only) {
 
     while (i--) {
+
       if (*mem) *mem = 1;
       mem++;
+
     }
 
   } else if (!raw_instr_output) {
 
     while (i--) {
+
       *mem = map[*mem];
       mem++;
+
     }
 
   }
 
 }
 
-
 /* Write results. */
 
 static u32 write_results(void) {
@@ -139,8 +135,8 @@ static u32 write_results(void) {
   s32 fd;
   u32 i, ret = 0;
 
-  u8  cco = !!getenv("AFL_CMIN_CRASHES_ONLY"),
-      caa = !!getenv("AFL_CMIN_ALLOW_ANY");
+  u8 cco = !!getenv("AFL_CMIN_CRASHES_ONLY"),
+     caa = !!getenv("AFL_CMIN_ALLOW_ANY");
 
   if (!strncmp(out_file, "/dev/", 5)) {
 
@@ -154,7 +150,7 @@ static u32 write_results(void) {
 
   } else {
 
-    unlink(out_file); /* Ignore errors */
+    unlink(out_file);                                      /* Ignore errors */
     fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
     if (fd < 0) PFATAL("Unable to create '%s'", out_file);
 
@@ -164,7 +160,7 @@ static u32 write_results(void) {
 
     for (i = 0; i < MAP_SIZE; i++)
       if (trace_bits[i]) ret++;
-    
+
     ck_write(fd, trace_bits, MAP_SIZE, out_file);
     close(fd);
 
@@ -178,10 +174,9 @@ static u32 write_results(void) {
 
       if (!trace_bits[i]) continue;
       ret++;
-      
+
       total += trace_bits[i];
-      if (highest < trace_bits[i])
-        highest = trace_bits[i];
+      if (highest < trace_bits[i]) highest = trace_bits[i];
 
       if (cmin_mode) {
 
@@ -190,10 +185,12 @@ static u32 write_results(void) {
 
         fprintf(f, "%u%u\n", trace_bits[i], i);
 
-      } else fprintf(f, "%06u:%u\n", i, trace_bits[i]);
+      } else
+
+        fprintf(f, "%06u:%u\n", i, trace_bits[i]);
 
     }
-  
+
     fclose(f);
 
   }
@@ -202,7 +199,6 @@ static u32 write_results(void) {
 
 }
 
-
 /* Handle timeout signal. */
 
 static void handle_timeout(int sig) {
@@ -212,16 +208,14 @@ static void handle_timeout(int sig) {
 
 }
 
-
 /* Execute target application. */
 
 static void run_target(char** argv) {
 
   static struct itimerval it;
-  int status = 0;
+  int                     status = 0;
 
-  if (!quiet_mode)
-    SAYF("-- Program output begins --\n" cRST);
+  if (!quiet_mode) SAYF("-- Program output begins --\n" cRST);
 
   MEM_BARRIER();
 
@@ -238,8 +232,10 @@ static void run_target(char** argv) {
       s32 fd = open("/dev/null", O_RDWR);
 
       if (fd < 0 || dup2(fd, 1) < 0 || dup2(fd, 2) < 0) {
+
         *(u32*)trace_bits = EXEC_FAIL_SIG;
         PFATAL("Descriptor initialization failed");
+
       }
 
       close(fd);
@@ -252,20 +248,22 @@ static void run_target(char** argv) {
 
 #ifdef RLIMIT_AS
 
-      setrlimit(RLIMIT_AS, &r); /* Ignore errors */
+      setrlimit(RLIMIT_AS, &r);                            /* Ignore errors */
 
 #else
 
-      setrlimit(RLIMIT_DATA, &r); /* Ignore errors */
+      setrlimit(RLIMIT_DATA, &r);                          /* Ignore errors */
 
 #endif /* ^RLIMIT_AS */
 
     }
 
-    if (!keep_cores) r.rlim_max = r.rlim_cur = 0;
-    else r.rlim_max = r.rlim_cur = RLIM_INFINITY;
+    if (!keep_cores)
+      r.rlim_max = r.rlim_cur = 0;
+    else
+      r.rlim_max = r.rlim_cur = RLIM_INFINITY;
 
-    setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
+    setrlimit(RLIMIT_CORE, &r);                            /* Ignore errors */
 
     if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0);
 
@@ -304,14 +302,12 @@ static void run_target(char** argv) {
   if (*(u32*)trace_bits == EXEC_FAIL_SIG)
     FATAL("Unable to execute '%s'", argv[0]);
 
-  classify_counts(trace_bits, binary_mode ?
-                  count_class_binary : count_class_human);
+  classify_counts(trace_bits,
+                  binary_mode ? count_class_binary : count_class_human);
 
-  if (!quiet_mode)
-    SAYF(cRST "-- Program output ends --\n");
+  if (!quiet_mode) SAYF(cRST "-- Program output ends --\n");
 
-  if (!child_timed_out && !stop_soon && WIFSIGNALED(status))
-    child_crashed = 1;
+  if (!child_timed_out && !stop_soon && WIFSIGNALED(status)) child_crashed = 1;
 
   if (!quiet_mode) {
 
@@ -320,14 +316,13 @@ static void run_target(char** argv) {
     else if (stop_soon)
       SAYF(cLRD "\n+++ Program aborted by user +++\n" cRST);
     else if (child_crashed)
-      SAYF(cLRD "\n+++ Program killed by signal %u +++\n" cRST, WTERMSIG(status));
+      SAYF(cLRD "\n+++ Program killed by signal %u +++\n" cRST,
+           WTERMSIG(status));
 
   }
 
-
 }
 
-
 /* Handle Ctrl-C and the like. */
 
 static void handle_stop_sig(int sig) {
@@ -338,15 +333,16 @@ static void handle_stop_sig(int sig) {
 
 }
 
-
 /* Do basic preparations - persistent fds, filenames, etc. */
 
 static void set_up_environment(void) {
 
-  setenv("ASAN_OPTIONS", "abort_on_error=1:"
-                         "detect_leaks=0:"
-                         "symbolize=0:"
-                         "allocator_may_return_null=1", 0);
+  setenv("ASAN_OPTIONS",
+         "abort_on_error=1:"
+         "detect_leaks=0:"
+         "symbolize=0:"
+         "allocator_may_return_null=1",
+         0);
 
   setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
                          "symbolize=0:"
@@ -355,21 +351,22 @@ static void set_up_environment(void) {
                          "msan_track_origins=0", 0);
 
   if (getenv("AFL_PRELOAD")) {
+
     setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);
     setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);
+
   }
 
 }
 
-
 /* Setup signal handlers, duh. */
 
 static void setup_signal_handlers(void) {
 
   struct sigaction sa;
 
-  sa.sa_handler   = NULL;
-  sa.sa_flags     = SA_RESTART;
+  sa.sa_handler = NULL;
+  sa.sa_flags = SA_RESTART;
   sa.sa_sigaction = NULL;
 
   sigemptyset(&sa.sa_mask);
@@ -388,7 +385,6 @@ static void setup_signal_handlers(void) {
 
 }
 
-
 /* Show banner. */
 
 static void show_banner(void) {
@@ -403,42 +399,43 @@ static void usage(u8* argv0) {
 
   show_banner();
 
-  SAYF("\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
+  SAYF(
+      "\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
 
-       "Required parameters:\n\n"
+      "Required parameters:\n\n"
 
-       "  -o file       - file to write the trace data to\n\n"
+      "  -o file       - file to write the trace data to\n\n"
 
-       "Execution control settings:\n\n"
+      "Execution control settings:\n\n"
 
-       "  -t msec       - timeout for each run (none)\n"
-       "  -m megs       - memory limit for child process (%u MB)\n"
-       "  -Q            - use binary-only instrumentation (QEMU mode)\n"
-       "  -U            - use Unicorn-based instrumentation (Unicorn mode)\n"
-       "                  (Not necessary, here for consistency with other afl-* tools)\n\n"  
+      "  -t msec       - timeout for each run (none)\n"
+      "  -m megs       - memory limit for child process (%d MB)\n"
+      "  -Q            - use binary-only instrumentation (QEMU mode)\n"
+      "  -U            - use Unicorn-based instrumentation (Unicorn mode)\n"
+      "                  (Not necessary, here for consistency with other afl-* "
+      "tools)\n\n"
 
-       "Other settings:\n\n"
+      "Other settings:\n\n"
 
-       "  -q            - sink program's output and don't show messages\n"
-       "  -e            - show edge coverage only, ignore hit counts\n"
-       "  -r            - show real tuple values instead of AFL filter values\n"
-       "  -c            - allow core dumps\n\n"
+      "  -q            - sink program's output and don't show messages\n"
+      "  -e            - show edge coverage only, ignore hit counts\n"
+      "  -r            - show real tuple values instead of AFL filter values\n"
+      "  -c            - allow core dumps\n\n"
 
-       "This tool displays raw tuple data captured by AFL instrumentation.\n"
-       "For additional help, consult %s/README.\n\n" cRST,
+      "This tool displays raw tuple data captured by AFL instrumentation.\n"
+      "For additional help, consult %s/README.\n\n" cRST,
 
-       argv0, MEM_LIMIT, doc_path);
+      argv0, MEM_LIMIT, doc_path);
 
   exit(1);
 
 }
 
-
 /* Find binary. */
 
 static void find_binary(u8* fname) {
 
-  u8* env_path = 0;
+  u8*         env_path = 0;
   struct stat st;
 
   if (strchr(fname, '/') || !(env_path = getenv("PATH"))) {
@@ -461,7 +458,9 @@ static void find_binary(u8* fname) {
         memcpy(cur_elem, env_path, delim - env_path);
         delim++;
 
-      } else cur_elem = ck_strdup(env_path);
+      } else
+
+        cur_elem = ck_strdup(env_path);
 
       env_path = delim;
 
@@ -473,7 +472,8 @@ static void find_binary(u8* fname) {
       ck_free(cur_elem);
 
       if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
-          (st.st_mode & 0111) && st.st_size >= 4) break;
+          (st.st_mode & 0111) && st.st_size >= 4)
+        break;
 
       ck_free(target_path);
       target_path = 0;
@@ -486,13 +486,12 @@ static void find_binary(u8* fname) {
 
 }
 
-
 /* Fix up argv for QEMU. */
 
 static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
   char** new_argv = ck_alloc(sizeof(char*) * (argc + 4));
-  u8 *tmp, *cp, *rsl, *own_copy;
+  u8 *   tmp, *cp, *rsl, *own_copy;
 
   memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc);
 
@@ -507,8 +506,7 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
     cp = alloc_printf("%s/afl-qemu-trace", tmp);
 
-    if (access(cp, X_OK))
-      FATAL("Unable to find '%s'", tmp);
+    if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp);
 
     target_path = new_argv[0] = cp;
     return new_argv;
@@ -532,7 +530,9 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
     }
 
-  } else ck_free(own_copy);
+  } else
+
+    ck_free(own_copy);
 
   if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) {
 
@@ -556,7 +556,7 @@ int main(int argc, char** argv) {
 
   doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;
 
-  while ((opt = getopt(argc,argv,"+o:m:t:A:eqZQUbcr")) > 0)
+  while ((opt = getopt(argc, argv, "+o:m:t:A:eqZQUbcr")) > 0)
 
     switch (opt) {
 
@@ -568,40 +568,41 @@ int main(int argc, char** argv) {
 
       case 'm': {
 
-          u8 suffix = 'M';
+        u8 suffix = 'M';
 
-          if (mem_limit_given) FATAL("Multiple -m options not supported");
-          mem_limit_given = 1;
+        if (mem_limit_given) FATAL("Multiple -m options not supported");
+        mem_limit_given = 1;
 
-          if (!strcmp(optarg, "none")) {
+        if (!strcmp(optarg, "none")) {
 
-            mem_limit = 0;
-            break;
+          mem_limit = 0;
+          break;
 
-          }
+        }
 
-          if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
-              optarg[0] == '-') FATAL("Bad syntax used for -m");
+        if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
+            optarg[0] == '-')
+          FATAL("Bad syntax used for -m");
 
-          switch (suffix) {
+        switch (suffix) {
 
-            case 'T': mem_limit *= 1024 * 1024; break;
-            case 'G': mem_limit *= 1024; break;
-            case 'k': mem_limit /= 1024; break;
-            case 'M': break;
+          case 'T': mem_limit *= 1024 * 1024; break;
+          case 'G': mem_limit *= 1024; break;
+          case 'k': mem_limit /= 1024; break;
+          case 'M': break;
 
-            default:  FATAL("Unsupported suffix or bad syntax for -m");
+          default: FATAL("Unsupported suffix or bad syntax for -m");
 
-          }
+        }
 
-          if (mem_limit < 5) FATAL("Dangerously low value of -m");
+        if (mem_limit < 5) FATAL("Dangerously low value of -m");
 
-          if (sizeof(rlim_t) == 4 && mem_limit > 2000)
-            FATAL("Value of -m out of range on 32-bit systems");
+        if (sizeof(rlim_t) == 4 && mem_limit > 2000)
+          FATAL("Value of -m out of range on 32-bit systems");
 
-        }
+      }
 
-        break;
+      break;
 
       case 't':
 
@@ -609,6 +610,7 @@ int main(int argc, char** argv) {
         timeout_given = 1;
 
         if (strcmp(optarg, "none")) {
+
           exec_tmout = atoi(optarg);
 
           if (exec_tmout < 20 || optarg[0] == '-')
@@ -636,7 +638,7 @@ int main(int argc, char** argv) {
         /* This is an undocumented option to write data in the syntax expected
            by afl-cmin. Nobody else should have any use for this. */
 
-        cmin_mode  = 1;
+        cmin_mode = 1;
         quiet_mode = 1;
         break;
 
@@ -675,7 +677,7 @@ int main(int argc, char** argv) {
         if (keep_cores) FATAL("Multiple -c options not supported");
         keep_cores = 1;
         break;
-      
+
       case 'r':
 
         if (raw_instr_output) FATAL("Multiple -r options not supported");
@@ -683,9 +685,7 @@ int main(int argc, char** argv) {
         raw_instr_output = 1;
         break;
 
-      default:
-
-        usage(argv[0]);
+      default: usage(argv[0]);
 
     }
 
@@ -699,8 +699,10 @@ int main(int argc, char** argv) {
   find_binary(argv[optind]);
 
   if (!quiet_mode) {
+
     show_banner();
     ACTF("Executing '%s'...\n", target_path);
+
   }
 
   detect_file_args(argv + optind, at_file);
@@ -717,7 +719,8 @@ int main(int argc, char** argv) {
   if (!quiet_mode) {
 
     if (!tcnt) FATAL("No instrumentation detected" cRST);
-    OKF("Captured %u tuples (highest value %u, total values %u) in '%s'." cRST, tcnt, highest, total, out_file);
+    OKF("Captured %u tuples (highest value %u, total values %u) in '%s'." cRST,
+        tcnt, highest, total, out_file);
 
   }
 
diff --git a/afl-tmin.c b/src/afl-tmin.c
index a36acd10..9decdb4d 100644
--- a/afl-tmin.c
+++ b/src/afl-tmin.c
@@ -22,15 +22,17 @@
 #define AFL_MAIN
 
 #ifdef __ANDROID__
-  #include "android-ashmem.h"
+#  include "android-ashmem.h"
 #endif
+
 #include "config.h"
 #include "types.h"
 #include "debug.h"
 #include "alloc-inl.h"
 #include "hash.h"
+#include "forkserver.h"
 #include "sharedmem.h"
-#include "afl-common.h"
+#include "common.h"
 
 #include <stdio.h>
 #include <unistd.h>
@@ -49,61 +51,71 @@
 #include <sys/types.h>
 #include <sys/resource.h>
 
-static s32 forksrv_pid,               /* PID of the fork server           */
-           child_pid;                 /* PID of the tested program        */
+s32 forksrv_pid,                        /* PID of the fork server           */
+    child_pid;                          /* PID of the tested program        */
 
-static s32 fsrv_ctl_fd,               /* Fork server control pipe (write) */
-           fsrv_st_fd;                /* Fork server status pipe (read)   */
+s32 fsrv_ctl_fd,                        /* Fork server control pipe (write) */
+    fsrv_st_fd;                         /* Fork server status pipe (read)   */
 
-       u8 *trace_bits;                /* SHM with instrumentation bitmap   */
-static u8 *mask_bitmap;               /* Mask for trace bits (-B)          */
+u8*        trace_bits;                 /* SHM with instrumentation bitmap   */
+static u8* mask_bitmap;                /* Mask for trace bits (-B)          */
 
-static u8 *in_file,                   /* Minimizer input test case         */
-          *out_file,                  /* Minimizer output file             */
-          *prog_in,                   /* Targeted program input file       */
-          *target_path,               /* Path to target binary             */
-          *doc_path;                  /* Path to docs                      */
+u8 *in_file,                           /* Minimizer input test case         */
+    *output_file,                      /* Minimizer output file             */
+    *out_file,                         /* Targeted program input file       */
+    *target_path,                      /* Path to target binary             */
+    *doc_path;                         /* Path to docs                      */
 
-static s32 prog_in_fd;                /* Persistent fd for prog_in         */
+s32 out_fd;                           /* Persistent fd for out_file         */
 
-static u8* in_data;                   /* Input data for trimming           */
+static u8* in_data;                    /* Input data for trimming           */
 
-static u32 in_len,                    /* Input data length                 */
-           orig_cksum,                /* Original checksum                 */
-           total_execs,               /* Total number of execs             */
-           missed_hangs,              /* Misses due to hangs               */
-           missed_crashes,            /* Misses due to crashes             */
-           missed_paths,              /* Misses due to exec path diffs     */
-           exec_tmout = EXEC_TIMEOUT; /* Exec timeout (ms)                 */
+static u32 in_len,                     /* Input data length                 */
+    orig_cksum,                        /* Original checksum                 */
+    total_execs,                       /* Total number of execs             */
+    missed_hangs,                      /* Misses due to hangs               */
+    missed_crashes,                    /* Misses due to crashes             */
+    missed_paths;                      /* Misses due to exec path diffs     */
+u32 exec_tmout = EXEC_TIMEOUT;         /* Exec timeout (ms)                 */
 
-static u64 mem_limit = MEM_LIMIT;     /* Memory limit (MB)                 */
+u64 mem_limit = MEM_LIMIT;             /* Memory limit (MB)                 */
 
-static s32 dev_null_fd = -1;          /* FD to /dev/null                   */
+s32 dev_null_fd = -1;                  /* FD to /dev/null                   */
 
-static u8  crash_mode,                /* Crash-centric mode?               */
-           exit_crash,                /* Treat non-zero exit as crash?     */
-           edges_only,                /* Ignore hit counts?                */
-           exact_mode,                /* Require path match for crashes?   */
-           use_stdin = 1;             /* Use stdin for program input?      */
+static u8 crash_mode,                  /* Crash-centric mode?               */
+    exit_crash,                        /* Treat non-zero exit as crash?     */
+    edges_only,                        /* Ignore hit counts?                */
+    exact_mode,                        /* Require path match for crashes?   */
+    use_stdin = 1;                     /* Use stdin for program input?      */
 
-static volatile u8
-           stop_soon,                 /* Ctrl-C pressed?                   */
-           child_timed_out;           /* Child timed out?                  */
+static volatile u8 stop_soon;          /* Ctrl-C pressed?                   */
 
+/*
+ * forkserver section
+ */
 
-/* Classify tuple counts. This is a slow & naive version, but good enough here. */
+/* we only need this to use afl-forkserver */
+FILE* plot_file;
+u8    uses_asan;
+s32   out_fd = -1, out_dir_fd = -1, dev_urandom_fd = -1;
+
+/* we import this as we need this information */
+extern u8 child_timed_out;
+
+/* Classify tuple counts. This is a slow & naive version, but good enough here.
+ */
 
 static const u8 count_class_lookup[256] = {
 
-  [0]           = 0,
-  [1]           = 1,
-  [2]           = 2,
-  [3]           = 4,
-  [4 ... 7]     = 8,
-  [8 ... 15]    = 16,
-  [16 ... 31]   = 32,
-  [32 ... 127]  = 64,
-  [128 ... 255] = 128
+    [0] = 0,
+    [1] = 1,
+    [2] = 2,
+    [3] = 4,
+    [4 ... 7] = 8,
+    [8 ... 15] = 16,
+    [16 ... 31] = 32,
+    [32 ... 127] = 64,
+    [128 ... 255] = 128
 
 };
 
@@ -114,22 +126,25 @@ static void classify_counts(u8* mem) {
   if (edges_only) {
 
     while (i--) {
+
       if (*mem) *mem = 1;
       mem++;
+
     }
 
   } else {
 
     while (i--) {
+
       *mem = count_class_lookup[*mem];
       mem++;
+
     }
 
   }
 
 }
 
-
 /* Apply mask to classified bitmap (if set). */
 
 static void apply_mask(u32* mem, u32* mask) {
@@ -148,25 +163,26 @@ static void apply_mask(u32* mem, u32* mask) {
 
 }
 
-
 /* See if any bytes are set in the bitmap. */
 
 static inline u8 anything_set(void) {
 
   u32* ptr = (u32*)trace_bits;
-  u32  i   = (MAP_SIZE >> 2);
+  u32  i = (MAP_SIZE >> 2);
 
-  while (i--) if (*(ptr++)) return 1;
+  while (i--)
+    if (*(ptr++)) return 1;
 
   return 0;
 
 }
 
-
 /* Get rid of temp files (atexit handler). */
 
 static void at_exit_handler(void) {
-  if (prog_in) unlink(prog_in); /* Ignore errors */
+
+  if (out_file) unlink(out_file);                          /* Ignore errors */
+
 }
 
 /* Read initial file. */
@@ -174,17 +190,16 @@ static void at_exit_handler(void) {
 static void read_initial_file(void) {
 
   struct stat st;
-  s32 fd = open(in_file, O_RDONLY);
+  s32         fd = open(in_file, O_RDONLY);
 
   if (fd < 0) PFATAL("Unable to open '%s'", in_file);
 
-  if (fstat(fd, &st) || !st.st_size)
-    FATAL("Zero-sized input file.");
+  if (fstat(fd, &st) || !st.st_size) FATAL("Zero-sized input file.");
 
   if (st.st_size >= TMIN_MAX_FILE)
     FATAL("Input file is too large (%u MB max)", TMIN_MAX_FILE / 1024 / 1024);
 
-  in_len  = st.st_size;
+  in_len = st.st_size;
   in_data = ck_alloc_nozero(in_len);
 
   ck_read(fd, in_data, in_len, in_file);
@@ -195,14 +210,13 @@ static void read_initial_file(void) {
 
 }
 
-
 /* Write output file. */
 
 static s32 write_to_file(u8* path, u8* mem, u32 len) {
 
   s32 ret;
 
-  unlink(path); /* Ignore errors */
+  unlink(path);                                            /* Ignore errors */
 
   ret = open(path, O_RDWR | O_CREAT | O_EXCL, 0600);
 
@@ -217,38 +231,40 @@ static s32 write_to_file(u8* path, u8* mem, u32 len) {
 }
 
 /* Write modified data to file for testing. If use_stdin is clear, the old file
-   is unlinked and a new one is created. Otherwise, prog_in_fd is rewound and
+   is unlinked and a new one is created. Otherwise, out_fd is rewound and
    truncated. */
 
 static void write_to_testcase(void* mem, u32 len) {
 
-  s32 fd = prog_in_fd;
+  s32 fd = out_fd;
 
   if (!use_stdin) {
 
-    unlink(prog_in); /* Ignore errors. */
+    unlink(out_file);                                     /* Ignore errors. */
 
-    fd = open(prog_in, O_WRONLY | O_CREAT | O_EXCL, 0600);
+    fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
 
-    if (fd < 0) PFATAL("Unable to create '%s'", prog_in);
+    if (fd < 0) PFATAL("Unable to create '%s'", out_file);
 
-  } else lseek(fd, 0, SEEK_SET);
+  } else
 
-  ck_write(fd, mem, len, prog_in);
+    lseek(fd, 0, SEEK_SET);
+
+  ck_write(fd, mem, len, out_file);
 
   if (use_stdin) {
 
     if (ftruncate(fd, len)) PFATAL("ftruncate() failed");
     lseek(fd, 0, SEEK_SET);
 
-  } else close(fd);
-
-}
+  } else
 
+    close(fd);
 
+}
 
 /* Handle timeout signal. */
-
+/*
 static void handle_timeout(int sig) {
 
   if (child_pid > 0) {
@@ -265,8 +281,12 @@ static void handle_timeout(int sig) {
 
 }
 
+*/
+
 /* start the app and its forkserver */
+/*
 static void init_forkserver(char **argv) {
+
   static struct itimerval it;
   int st_pipe[2], ctl_pipe[2];
   int status = 0;
@@ -283,7 +303,7 @@ static void init_forkserver(char **argv) {
 
     struct rlimit r;
 
-    if (dup2(use_stdin ? prog_in_fd : dev_null_fd, 0) < 0 ||
+    if (dup2(use_stdin ? out_fd : dev_null_fd, 0) < 0 ||
         dup2(dev_null_fd, 1) < 0 ||
         dup2(dev_null_fd, 2) < 0) {
 
@@ -293,7 +313,7 @@ static void init_forkserver(char **argv) {
     }
 
     close(dev_null_fd);
-    close(prog_in_fd);
+    close(out_fd);
 
     setsid();
 
@@ -303,20 +323,20 @@ static void init_forkserver(char **argv) {
 
 #ifdef RLIMIT_AS
 
-      setrlimit(RLIMIT_AS, &r); /* Ignore errors */
+      setrlimit(RLIMIT_AS, &r); // Ignore errors
 
 #else
 
-      setrlimit(RLIMIT_DATA, &r); /* Ignore errors */
+      setrlimit(RLIMIT_DATA, &r); // Ignore errors
 
-#endif /* ^RLIMIT_AS */
+#endif // ^RLIMIT_AS
 
     }
 
     r.rlim_max = r.rlim_cur = 0;
-    setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
+    setrlimit(RLIMIT_CORE, &r); // Ignore errors
 
-    /* Set up control and status pipes, close the unneeded original fds. */
+    // Set up control and status pipes, close the unneeded original fds.
 
     if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) PFATAL("dup2() failed");
     if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) PFATAL("dup2() failed");
@@ -333,7 +353,7 @@ static void init_forkserver(char **argv) {
 
   }
 
-  /* Close the unneeded endpoints. */
+  // Close the unneeded endpoints.
 
   close(ctl_pipe[0]);
   close(st_pipe[1]);
@@ -341,7 +361,7 @@ static void init_forkserver(char **argv) {
   fsrv_ctl_fd = ctl_pipe[1];
   fsrv_st_fd  = st_pipe[0];
 
-  /* Configure timeout, wait for child, cancel timeout. */
+  // Configure timeout, wait for child, cancel timeout.
 
   if (exec_tmout) {
 
@@ -359,12 +379,14 @@ static void init_forkserver(char **argv) {
   it.it_value.tv_usec = 0;
   setitimer(ITIMER_REAL, &it, NULL);
 
-  /* If we have a four-byte "hello" message from the server, we're all set.
-     Otherwise, try to figure out what went wrong. */
+  // If we have a four-byte "hello" message from the server, we're all set.
+  // Otherwise, try to figure out what went wrong.
 
   if (rlen == 4) {
+
     ACTF("All right - fork server is up.");
     return;
+
   }
 
   if (waitpid(forksrv_pid, &status, 0) <= 0)
@@ -384,6 +406,7 @@ static void init_forkserver(char **argv) {
 
 }
 
+*/
 
 /* Execute target application. Returns 0 if the changes are a dud, or
    1 if they should be kept. */
@@ -391,8 +414,8 @@ static void init_forkserver(char **argv) {
 static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
 
   static struct itimerval it;
-  static u32 prev_timed_out = 0;
-  int status = 0;
+  static u32              prev_timed_out = 0;
+  int                     status = 0;
 
   u32 cksum;
 
@@ -426,9 +449,8 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
 
   if (exec_tmout) {
 
-  child_timed_out = 0;
-  it.it_value.tv_sec = (exec_tmout / 1000);
-  it.it_value.tv_usec = (exec_tmout % 1000) * 1000;
+    it.it_value.tv_sec = (exec_tmout / 1000);
+    it.it_value.tv_usec = (exec_tmout % 1000) * 1000;
 
   }
 
@@ -461,7 +483,7 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
   if (stop_soon) {
 
     SAYF(cRST cLRD "\n+++ Minimization aborted by user +++\n" cRST);
-    close(write_to_file(out_file, in_data, in_len));
+    close(write_to_file(output_file, in_data, in_len));
     exit(1);
 
   }
@@ -496,9 +518,9 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
 
   } else
 
-  /* Handle non-crashing inputs appropriately. */
+      /* Handle non-crashing inputs appropriately. */
 
-  if (crash_mode) {
+      if (crash_mode) {
 
     missed_paths++;
     return 0;
@@ -510,24 +532,23 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
   if (first_run) orig_cksum = cksum;
 
   if (orig_cksum == cksum) return 1;
-  
+
   missed_paths++;
   return 0;
 
 }
 
-
 /* Find first power of two greater or equal to val. */
 
 static u32 next_p2(u32 val) {
 
   u32 ret = 1;
-  while (val > ret) ret <<= 1;
+  while (val > ret)
+    ret <<= 1;
   return ret;
 
 }
 
-
 /* Actually minimize! */
 
 static void minimize(char** argv) {
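
next_p2() rounds its argument up to the nearest power of two, e.g.
next_p2(5) == 8 and next_p2(8) == 8. A standalone check of that behavior:

    #include <assert.h>

    static unsigned int next_p2_demo(unsigned int val) {

      unsigned int ret = 1;
      while (val > ret)
        ret <<= 1;                          /* double until ret >= val */
      return ret;

    }

    int main(void) {

      assert(next_p2_demo(1) == 1);
      assert(next_p2_demo(5) == 8);
      assert(next_p2_demo(8) == 8);
      return 0;

    }
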
@@ -545,8 +566,8 @@ static void minimize(char** argv) {
    * BLOCK NORMALIZATION *
    ***********************/
 
-  set_len    = next_p2(in_len / TMIN_SET_STEPS);
-  set_pos    = 0;
+  set_len = next_p2(in_len / TMIN_SET_STEPS);
+  set_pos = 0;
 
   if (set_len < TMIN_SET_MIN_SIZE) set_len = TMIN_SET_MIN_SIZE;
 
@@ -554,7 +575,6 @@ static void minimize(char** argv) {
 
   while (set_pos < in_len) {
 
-    u8  res;
     u32 use_len = MIN(set_len, in_len - set_pos);
 
     for (i = 0; i < use_len; i++)
@@ -564,13 +584,14 @@ static void minimize(char** argv) {
 
       memcpy(tmp_buf, in_data, in_len);
       memset(tmp_buf + set_pos, '0', use_len);
-  
+
+      u8 res;
       res = run_target(argv, tmp_buf, in_len, 0);
 
       if (res) {
 
         memset(in_data + set_pos, '0', use_len);
-        changed_any = 1;
+        /* changed_any = 1; value is not used */
         alpha_del0 += use_len;
 
       }
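
The normalization stage keeps a '0'-filled block only when the target still
behaves the same. A self-contained sketch of one attempt, with
still_reproduces() as an illustrative stand-in for run_target():

    #include <string.h>

    /* Stand-in for run_target(): 1 means behavior is unchanged. */
    static int still_reproduces(unsigned char* buf, unsigned int len) {

      (void)buf;
      (void)len;
      return 1;

    }

    void normalize_block(unsigned char* in_data, unsigned char* tmp_buf,
                         unsigned int in_len, unsigned int set_pos,
                         unsigned int use_len) {

      memcpy(tmp_buf, in_data, in_len);
      memset(tmp_buf + set_pos, '0', use_len);    /* try the dull version */

      if (still_reproduces(tmp_buf, in_len))
        memset(in_data + set_pos, '0', use_len);  /* keep it */

    }
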
@@ -603,11 +624,11 @@ next_pass:
 next_del_blksize:
 
   if (!del_len) del_len = 1;
-  del_pos  = 0;
+  del_pos = 0;
   prev_del = 1;
 
-  SAYF(cGRA "    Block length = %u, remaining size = %u\n" cRST,
-       del_len, in_len);
+  SAYF(cGRA "    Block length = %u, remaining size = %u\n" cRST, del_len,
+       in_len);
 
   while (del_pos < in_len) {
 
@@ -622,8 +643,8 @@ next_del_blksize:
        very end of the buffer (tail_len > 0), and the current block is the same
        as the previous one... skip this step as a no-op. */
 
-    if (!prev_del && tail_len && !memcmp(in_data + del_pos - del_len,
-        in_data + del_pos, del_len)) {
+    if (!prev_del && tail_len &&
+        !memcmp(in_data + del_pos - del_len, in_data + del_pos, del_len)) {
 
       del_pos += del_len;
       continue;
@@ -644,11 +665,13 @@ next_del_blksize:
 
       memcpy(in_data, tmp_buf, del_pos + tail_len);
       prev_del = 1;
-      in_len   = del_pos + tail_len;
+      in_len = del_pos + tail_len;
 
       changed_any = 1;
 
-    } else del_pos += del_len;
+    } else
+
+      del_pos += del_len;
 
   }
 
@@ -662,7 +685,8 @@ next_del_blksize:
   OKF("Block removal complete, %u bytes deleted.", stage_o_len - in_len);
 
   if (!in_len && changed_any)
-    WARNF(cLRD "Down to zero bytes - check the command line and mem limit!" cRST);
+    WARNF(cLRD
+          "Down to zero bytes - check the command line and mem limit!" cRST);
 
   if (cur_pass > 1 && !changed_any) goto finalize_all;
 
@@ -670,15 +694,17 @@ next_del_blksize:
    * ALPHABET MINIMIZATION *
    *************************/
 
-  alpha_size   = 0;
-  alpha_del1   = 0;
+  alpha_size = 0;
+  alpha_del1 = 0;
   syms_removed = 0;
 
   memset(alpha_map, 0, sizeof(alpha_map));
 
   for (i = 0; i < in_len; i++) {
+
     if (!alpha_map[in_data[i]]) alpha_size++;
     alpha_map[in_data[i]]++;
+
   }
 
   ACTF(cBRI "Stage #2: " cRST "Minimizing symbols (%u code point%s)...",
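
The census above is all alpha_map holds: one counter per byte value, with
alpha_size tracking how many distinct values occur. As a standalone helper
(alpha_map must be zero-initialized by the caller):

    unsigned int count_alphabet(const unsigned char* in_data,
                                unsigned int         in_len,
                                unsigned int         alpha_map[256]) {

      unsigned int i, alpha_size = 0;

      for (i = 0; i < in_len; i++) {

        if (!alpha_map[in_data[i]]) alpha_size++;  /* first sighting */
        alpha_map[in_data[i]]++;

      }

      return alpha_size;

    }
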
@@ -687,14 +713,14 @@ next_del_blksize:
   for (i = 0; i < 256; i++) {
 
     u32 r;
-    u8 res;
+    u8  res;
 
     if (i == '0' || !alpha_map[i]) continue;
 
     memcpy(tmp_buf, in_data, in_len);
 
     for (r = 0; r < in_len; r++)
-      if (tmp_buf[r] == i) tmp_buf[r] = '0'; 
+      if (tmp_buf[r] == i) tmp_buf[r] = '0';
 
     res = run_target(argv, tmp_buf, in_len, 0);
 
@@ -712,8 +738,8 @@ next_del_blksize:
   alpha_d_total += alpha_del1;
 
   OKF("Symbol minimization finished, %u symbol%s (%u byte%s) replaced.",
-      syms_removed, syms_removed == 1 ? "" : "s",
-      alpha_del1, alpha_del1 == 1 ? "" : "s");
+      syms_removed, syms_removed == 1 ? "" : "s", alpha_del1,
+      alpha_del1 == 1 ? "" : "s");
 
   /**************************
    * CHARACTER MINIMIZATION *
@@ -740,36 +766,34 @@ next_del_blksize:
       alpha_del2++;
       changed_any = 1;
 
-    } else tmp_buf[i] = orig;
+    } else
+
+      tmp_buf[i] = orig;
 
   }
 
   alpha_d_total += alpha_del2;
 
-  OKF("Character minimization done, %u byte%s replaced.",
-      alpha_del2, alpha_del2 == 1 ? "" : "s");
+  OKF("Character minimization done, %u byte%s replaced.", alpha_del2,
+      alpha_del2 == 1 ? "" : "s");
 
   if (changed_any) goto next_pass;
 
 finalize_all:
 
-  SAYF("\n"
-       cGRA "     File size reduced by : " cRST "%0.02f%% (to %u byte%s)\n"
-       cGRA "    Characters simplified : " cRST "%0.02f%%\n"
-       cGRA "     Number of execs done : " cRST "%u\n"
-       cGRA "          Fruitless execs : " cRST "path=%u crash=%u hang=%s%u\n\n",
+  SAYF("\n" cGRA "     File size reduced by : " cRST
+       "%0.02f%% (to %u byte%s)\n" cGRA "    Characters simplified : " cRST
+       "%0.02f%%\n" cGRA "     Number of execs done : " cRST "%u\n" cGRA
+       "          Fruitless execs : " cRST "path=%u crash=%u hang=%s%u\n\n",
        100 - ((double)in_len) * 100 / orig_len, in_len, in_len == 1 ? "" : "s",
-       ((double)(alpha_d_total)) * 100 / (in_len ? in_len : 1),
-       total_execs, missed_paths, missed_crashes, missed_hangs ? cLRD : "",
-       missed_hangs);
+       ((double)(alpha_d_total)) * 100 / (in_len ? in_len : 1), total_execs,
+       missed_paths, missed_crashes, missed_hangs ? cLRD : "", missed_hangs);
 
   if (total_execs > 50 && missed_hangs * 10 > total_execs)
     WARNF(cLRD "Frequent timeouts - results may be skewed." cRST);
 
 }
 
-
-
 /* Handle Ctrl-C and the like. */
 
 static void handle_stop_sig(int sig) {
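
The "File size reduced by" figure above is 100 - in_len * 100 / orig_len,
computed in floating point. A worked example with illustrative numbers:

    #include <stdio.h>

    int main(void) {

      unsigned int orig_len = 1000, in_len = 64;  /* illustrative values */
      double       reduced = 100 - ((double)in_len) * 100 / orig_len;

      printf("File size reduced by : %0.02f%%\n", reduced);  /* 93.60% */
      return 0;

    }
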
@@ -780,7 +804,6 @@ static void handle_stop_sig(int sig) {
 
 }
 
-
 /* Do basic preparations - persistent fds, filenames, etc. */
 
 static void set_up_environment(void) {
@@ -790,7 +813,7 @@ static void set_up_environment(void) {
   dev_null_fd = open("/dev/null", O_RDWR);
   if (dev_null_fd < 0) PFATAL("Unable to open /dev/null");
 
-  if (!prog_in) {
+  if (!out_file) {
 
     u8* use_dir = ".";
 
@@ -801,16 +824,15 @@ static void set_up_environment(void) {
 
     }
 
-    prog_in = alloc_printf("%s/.afl-tmin-temp-%u", use_dir, getpid());
+    out_file = alloc_printf("%s/.afl-tmin-temp-%u", use_dir, getpid());
 
   }
 
-  unlink(prog_in);
+  unlink(out_file);
 
-  prog_in_fd = open(prog_in, O_RDWR | O_CREAT | O_EXCL, 0600);
-
-  if (prog_in_fd < 0) PFATAL("Unable to create '%s'", prog_in);
+  out_fd = open(out_file, O_RDWR | O_CREAT | O_EXCL, 0600);
 
+  if (out_fd < 0) PFATAL("Unable to create '%s'", out_file);
 
   /* Set sane defaults... */
 
@@ -831,18 +853,20 @@ static void set_up_environment(void) {
   if (x) {
 
     if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR)))
-      FATAL("Custom MSAN_OPTIONS set without exit_code="
-            STRINGIFY(MSAN_ERROR) " - please fix!");
+      FATAL("Custom MSAN_OPTIONS set without exit_code=" STRINGIFY(
+          MSAN_ERROR) " - please fix!");
 
     if (!strstr(x, "symbolize=0"))
       FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!");
 
   }
 
-  setenv("ASAN_OPTIONS", "abort_on_error=1:"
-                         "detect_leaks=0:"
-                         "symbolize=0:"
-                         "allocator_may_return_null=1", 0);
+  setenv("ASAN_OPTIONS",
+         "abort_on_error=1:"
+         "detect_leaks=0:"
+         "symbolize=0:"
+         "allocator_may_return_null=1",
+         0);
 
   setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
                          "symbolize=0:"
@@ -851,21 +875,22 @@ static void set_up_environment(void) {
                          "msan_track_origins=0", 0);
 
   if (getenv("AFL_PRELOAD")) {
+
     setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);
     setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);
+
   }
 
 }
 
-
 /* Setup signal handlers, duh. */
 
 static void setup_signal_handlers(void) {
 
   struct sigaction sa;
 
-  sa.sa_handler   = NULL;
-  sa.sa_flags     = SA_RESTART;
+  sa.sa_handler = NULL;
+  sa.sa_flags = SA_RESTART;
   sa.sa_sigaction = NULL;
 
   sigemptyset(&sa.sa_mask);
@@ -884,46 +909,46 @@ static void setup_signal_handlers(void) {
 
 }
 
-
 /* Display usage hints. */
 
 static void usage(u8* argv0) {
 
-  SAYF("\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
+  SAYF(
+      "\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
 
-       "Required parameters:\n\n"
+      "Required parameters:\n\n"
 
-       "  -i file       - input test case to be shrunk by the tool\n"
-       "  -o file       - final output location for the minimized data\n\n"
+      "  -i file       - input test case to be shrunk by the tool\n"
+      "  -o file       - final output location for the minimized data\n\n"
 
-       "Execution control settings:\n\n"
+      "Execution control settings:\n\n"
 
-       "  -f file       - input file read by the tested program (stdin)\n"
-       "  -t msec       - timeout for each run (%u ms)\n"
-       "  -m megs       - memory limit for child process (%u MB)\n"
-       "  -Q            - use binary-only instrumentation (QEMU mode)\n"
-       "  -U            - use Unicorn-based instrumentation (Unicorn mode)\n\n"
-       "                  (Not necessary, here for consistency with other afl-* tools)\n\n"
+      "  -f file       - input file read by the tested program (stdin)\n"
+      "  -t msec       - timeout for each run (%d ms)\n"
+      "  -m megs       - memory limit for child process (%d MB)\n"
+      "  -Q            - use binary-only instrumentation (QEMU mode)\n"
+      "  -U            - use Unicorn-based instrumentation (Unicorn mode)\n\n"
+      "                  (Not necessary, here for consistency with other afl-* "
+      "tools)\n\n"
 
-       "Minimization settings:\n\n"
+      "Minimization settings:\n\n"
 
-       "  -e            - solve for edge coverage only, ignore hit counts\n"
-       "  -x            - treat non-zero exit codes as crashes\n\n"
+      "  -e            - solve for edge coverage only, ignore hit counts\n"
+      "  -x            - treat non-zero exit codes as crashes\n\n"
 
-       "For additional tips, please consult %s/README.\n\n",
+      "For additional tips, please consult %s/README.\n\n",
 
-       argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);
+      argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);
 
   exit(1);
 
 }
 
-
 /* Find binary. */
 
 static void find_binary(u8* fname) {
 
-  u8* env_path = 0;
+  u8*         env_path = 0;
   struct stat st;
 
   if (strchr(fname, '/') || !(env_path = getenv("PATH"))) {
@@ -946,7 +971,9 @@ static void find_binary(u8* fname) {
         memcpy(cur_elem, env_path, delim - env_path);
         delim++;
 
-      } else cur_elem = ck_strdup(env_path);
+      } else
+
+        cur_elem = ck_strdup(env_path);
 
       env_path = delim;
 
@@ -958,7 +985,8 @@ static void find_binary(u8* fname) {
       ck_free(cur_elem);
 
       if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
-          (st.st_mode & 0111) && st.st_size >= 4) break;
+          (st.st_mode & 0111) && st.st_size >= 4)
+        break;
 
       ck_free(target_path);
       target_path = 0;
@@ -971,13 +999,12 @@ static void find_binary(u8* fname) {
 
 }
 
-
 /* Fix up argv for QEMU. */
 
 static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
   char** new_argv = ck_alloc(sizeof(char*) * (argc + 4));
-  u8 *tmp, *cp, *rsl, *own_copy;
+  u8 *   tmp, *cp, *rsl, *own_copy;
 
   memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc);
 
@@ -992,8 +1019,7 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
     cp = alloc_printf("%s/afl-qemu-trace", tmp);
 
-    if (access(cp, X_OK))
-      FATAL("Unable to find '%s'", tmp);
+    if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp);
 
     target_path = new_argv[0] = cp;
     return new_argv;
@@ -1017,7 +1043,9 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
     }
 
-  } else ck_free(own_copy);
+  } else
+
+    ck_free(own_copy);
 
   if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) {
 
@@ -1044,8 +1072,6 @@ static void read_bitmap(u8* fname) {
 
 }
 
-
-
 /* Main entry point */
 
 int main(int argc, char** argv) {
@@ -1058,7 +1084,7 @@ int main(int argc, char** argv) {
 
   SAYF(cCYA "afl-tmin" VERSION cRST " by <lcamtuf@google.com>\n");
 
-  while ((opt = getopt(argc,argv,"+i:o:f:m:t:B:xeQU")) > 0)
+  while ((opt = getopt(argc, argv, "+i:o:f:m:t:B:xeQU")) > 0)
 
     switch (opt) {
 
@@ -1070,15 +1096,15 @@ int main(int argc, char** argv) {
 
       case 'o':
 
-        if (out_file) FATAL("Multiple -o options not supported");
-        out_file = optarg;
+        if (output_file) FATAL("Multiple -o options not supported");
+        output_file = optarg;
         break;
 
       case 'f':
 
-        if (prog_in) FATAL("Multiple -f options not supported");
+        if (out_file) FATAL("Multiple -f options not supported");
         use_stdin = 0;
-        prog_in   = optarg;
+        out_file = optarg;
         break;
 
       case 'e':
@@ -1095,40 +1121,41 @@ int main(int argc, char** argv) {
 
       case 'm': {
 
-          u8 suffix = 'M';
+        u8 suffix = 'M';
 
-          if (mem_limit_given) FATAL("Multiple -m options not supported");
-          mem_limit_given = 1;
+        if (mem_limit_given) FATAL("Multiple -m options not supported");
+        mem_limit_given = 1;
 
-          if (!strcmp(optarg, "none")) {
+        if (!strcmp(optarg, "none")) {
 
-            mem_limit = 0;
-            break;
+          mem_limit = 0;
+          break;
 
-          }
+        }
 
-          if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
-              optarg[0] == '-') FATAL("Bad syntax used for -m");
+        if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
+            optarg[0] == '-')
+          FATAL("Bad syntax used for -m");
 
-          switch (suffix) {
+        switch (suffix) {
 
-            case 'T': mem_limit *= 1024 * 1024; break;
-            case 'G': mem_limit *= 1024; break;
-            case 'k': mem_limit /= 1024; break;
-            case 'M': break;
+          case 'T': mem_limit *= 1024 * 1024; break;
+          case 'G': mem_limit *= 1024; break;
+          case 'k': mem_limit /= 1024; break;
+          case 'M': break;
 
-            default:  FATAL("Unsupported suffix or bad syntax for -m");
+          default: FATAL("Unsupported suffix or bad syntax for -m");
 
-          }
+        }
 
-          if (mem_limit < 5) FATAL("Dangerously low value of -m");
+        if (mem_limit < 5) FATAL("Dangerously low value of -m");
 
-          if (sizeof(rlim_t) == 4 && mem_limit > 2000)
-            FATAL("Value of -m out of range on 32-bit systems");
+        if (sizeof(rlim_t) == 4 && mem_limit > 2000)
+          FATAL("Value of -m out of range on 32-bit systems");
 
-        }
+      }
 
-        break;
+      break;
 
       case 't':
 
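The -m suffixes normalize everything to megabytes: T multiplies by
1024 * 1024, G by 1024, k divides by 1024. A minimal sketch of just the
conversion (error handling and range checks omitted), so "2G" yields 2048
and "1T" yields 1048576:

    #include <stdio.h>

    unsigned long long parse_mem_limit(const char* arg) {

      unsigned long long mem_limit;
      char               suffix = 'M';    /* default unit: megabytes */

      if (sscanf(arg, "%llu%c", &mem_limit, &suffix) < 1) return 0;

      switch (suffix) {

        case 'T': mem_limit *= 1024 * 1024; break;
        case 'G': mem_limit *= 1024; break;
        case 'k': mem_limit /= 1024; break;
        case 'M': break;

      }

      return mem_limit;

    }
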
@@ -1158,7 +1185,7 @@ int main(int argc, char** argv) {
         unicorn_mode = 1;
         break;
 
-      case 'B': /* load bitmap */
+      case 'B':                                              /* load bitmap */
 
         /* This is a secret undocumented option! It is speculated to be useful
            if you have a baseline "boring" input file and another "interesting"
@@ -1178,13 +1205,11 @@ int main(int argc, char** argv) {
         read_bitmap(optarg);
         break;
 
-      default:
-
-        usage(argv[0]);
+      default: usage(argv[0]);
 
     }
 
-  if (optind == argc || !in_file || !out_file) usage(argv[0]);
+  if (optind == argc || !in_file || !output_file) usage(argv[0]);
 
   setup_shm(0);
   atexit(at_exit_handler);
@@ -1193,7 +1218,7 @@ int main(int argc, char** argv) {
   set_up_environment();
 
   find_binary(argv[optind]);
-  detect_file_args(argv + optind, prog_in);
+  detect_file_args(argv + optind, out_file);
 
   if (qemu_mode)
     use_argv = get_qemu_argv(argv[0], argv + optind, argc - optind);
@@ -1218,26 +1243,27 @@ int main(int argc, char** argv) {
 
   if (!crash_mode) {
 
-     OKF("Program terminates normally, minimizing in " 
-         cCYA "instrumented" cRST " mode.");
+    OKF("Program terminates normally, minimizing in " cCYA "instrumented" cRST
+        " mode.");
 
-     if (!anything_set()) FATAL("No instrumentation detected.");
+    if (!anything_set()) FATAL("No instrumentation detected.");
 
   } else {
 
-     OKF("Program exits with a signal, minimizing in " cMGN "%scrash" cRST
-         " mode.", exact_mode ? "EXACT " : "");
+    OKF("Program exits with a signal, minimizing in " cMGN "%scrash" cRST
+        " mode.",
+        exact_mode ? "EXACT " : "");
 
   }
 
   minimize(use_argv);
 
-  ACTF("Writing output to '%s'...", out_file);
+  ACTF("Writing output to '%s'...", output_file);
 
-  unlink(prog_in);
-  prog_in = NULL;
+  unlink(out_file);
+  out_file = NULL;
 
-  close(write_to_file(out_file, in_data, in_len));
+  close(write_to_file(output_file, in_data, in_len));
 
   OKF("We're done here. Have a nice day!\n");
 
diff --git a/test-instr.c b/test-instr.c
index 9107f15e..71838462 100644
--- a/test-instr.c
+++ b/test-instr.c
@@ -20,14 +20,16 @@
 
 int main(int argc, char** argv) {
 
-  char buff[8];
-  char *buf = buff;
+  char  buff[8];
+  char* buf = buff;
 
   if (argc > 1)
     buf = argv[1];
   else if (read(0, buf, sizeof(buf)) < 1) {
+
     printf("Hum?\n");
     exit(1);
+
   }
 
   if (buf[0] == '0')
@@ -40,3 +42,4 @@ int main(int argc, char** argv) {
   exit(0);
 
 }
+
diff --git a/types.h b/types.h
index 7606d4ed..67149a67 100644..120000
--- a/types.h
+++ b/types.h
@@ -1,91 +1 @@
-/*
-   american fuzzy lop - type definitions and minor macros
-   ------------------------------------------------------
-
-   Written and maintained by Michal Zalewski <lcamtuf@google.com>
-
-   Copyright 2013, 2014, 2015 Google Inc. All rights reserved.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at:
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- */
-
-#ifndef _HAVE_TYPES_H
-#define _HAVE_TYPES_H
-
-#include <stdint.h>
-#include <stdlib.h>
-
-typedef uint8_t  u8;
-typedef uint16_t u16;
-typedef uint32_t u32;
-
-/*
-
-   Ugh. There is an unintended compiler / glibc #include glitch caused by
-   combining the u64 type and %llu in format strings, necessitating a workaround.
-
-   In essence, the compiler is always looking for 'unsigned long long' for %llu.
-   On 32-bit systems, the u64 type (aliased to uint64_t) is expanded to
-   'unsigned long long' in <bits/types.h>, so everything checks out.
-
-   But on 64-bit systems, it is #ifdef'ed in the same file as 'unsigned long'.
-   Now, it only happens in circumstances where the type happens to have the
-   expected bit width, *but* the compiler does not know that... and complains
-   about 'unsigned long' being unsafe to pass to %llu.
-
- */
-
-#ifdef __x86_64__
-typedef unsigned long long u64;
-#else
-typedef uint64_t u64;
-#endif /* ^__x86_64__ */
-
-typedef int8_t   s8;
-typedef int16_t  s16;
-typedef int32_t  s32;
-typedef int64_t  s64;
-
-#ifndef MIN
-#  define MIN(_a,_b) ((_a) > (_b) ? (_b) : (_a))
-#  define MAX(_a,_b) ((_a) > (_b) ? (_a) : (_b))
-#endif /* !MIN */
-
-#define SWAP16(_x) ({ \
-    u16 _ret = (_x); \
-    (u16)((_ret << 8) | (_ret >> 8)); \
-  })
-
-#define SWAP32(_x) ({ \
-    u32 _ret = (_x); \
-    (u32)((_ret << 24) | (_ret >> 24) | \
-          ((_ret << 8) & 0x00FF0000) | \
-          ((_ret >> 8) & 0x0000FF00)); \
-  })
-
-#ifdef AFL_LLVM_PASS
-#  define AFL_R(x) (random() % (x))
-#else
-#  define R(x) (random() % (x))
-#endif /* ^AFL_LLVM_PASS */
-
-#define STRINGIFY_INTERNAL(x) #x
-#define STRINGIFY(x) STRINGIFY_INTERNAL(x)
-
-#define MEM_BARRIER() \
-  __asm__ volatile("" ::: "memory")
-
-#if __GNUC__ < 6
- #define likely(_x)   (_x)
- #define unlikely(_x) (_x)
-#else
- #define likely(_x)   __builtin_expect(!!(_x), 1)
- #define unlikely(_x)  __builtin_expect(!!(_x), 0)
-#endif
-
-#endif /* ! _HAVE_TYPES_H */
+include/types.h
\ No newline at end of file
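
types.h is now a symlink to include/types.h, where these definitions live
on. A quick sanity check of the byte-swapping helper (statement expressions
are a GCC/Clang extension):

    #include <assert.h>
    #include <stdint.h>

    #define SWAP16(_x) ({ \
        uint16_t _ret = (_x); \
        (uint16_t)((_ret << 8) | (_ret >> 8)); \
      })

    int main(void) {

      assert(SWAP16(0x1234) == 0x3412);     /* bytes exchanged */
      return 0;

    }
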
diff --git a/unicorn_mode/patches/afl-unicorn-common.h b/unicorn_mode/patches/afl-unicorn-common.h
index 6798832c..d5038d06 100644
--- a/unicorn_mode/patches/afl-unicorn-common.h
+++ b/unicorn_mode/patches/afl-unicorn-common.h
@@ -32,19 +32,17 @@
 
 #include "../../config.h"
 
-/* NeverZero */ 
+/* NeverZero */
 
 #if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO)
-#  define INC_AFL_AREA(loc) \
-    asm volatile ( \
-      "incb (%0, %1, 1)\n" \
-      "adcb $0, (%0, %1, 1)\n" \
-      : /* no out */ \
-      : "r" (afl_area_ptr), "r" (loc) \
-      : "memory", "eax" \
-    )
+#  define INC_AFL_AREA(loc)         \
+  asm volatile(                     \
+      "incb (%0, %1, 1)\n"          \
+      "adcb $0, (%0, %1, 1)\n"      \
+      : /* no out */                \
+      : "r"(afl_area_ptr), "r"(loc) \
+      : "memory", "eax")
 #else
-#  define INC_AFL_AREA(loc) \
-  afl_area_ptr[loc]++
+#  define INC_AFL_AREA(loc) afl_area_ptr[loc]++
 #endif
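
The incb/adcb pair above is the "NeverZero" counter: the carry from an
overflowing 8-bit increment is added back in, so 0xff wraps to 1 instead of
0 and a hit can never clear a map byte. A plain-C equivalent of the same
semantics:

    #include <assert.h>
    #include <stdint.h>

    static inline void inc_never_zero(uint8_t* map, unsigned int loc) {

      uint8_t v = map[loc] + 1;             /* may wrap to 0         */
      map[loc] = v ? v : 1;                 /* add the carry back in */

    }

    int main(void) {

      uint8_t map[1] = {0xff};
      inc_never_zero(map, 0);
      assert(map[0] == 1);                  /* never zero */
      return 0;

    }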
 
diff --git a/unicorn_mode/patches/afl-unicorn-cpu-inl.h b/unicorn_mode/patches/afl-unicorn-cpu-inl.h
index a713e4ca..082d6d68 100644
--- a/unicorn_mode/patches/afl-unicorn-cpu-inl.h
+++ b/unicorn_mode/patches/afl-unicorn-cpu-inl.h
@@ -44,21 +44,29 @@
    it to translate within its own context, too (this avoids translation
    overhead in the next forked-off copy). */
 
-#define AFL_UNICORN_CPU_SNIPPET1 do { \
+#define AFL_UNICORN_CPU_SNIPPET1         \
+  do {                                   \
+                                         \
     afl_request_tsl(pc, cs_base, flags); \
+                                         \
   } while (0)
 
 /* This snippet kicks in when the instruction pointer is positioned at
    _start and does the usual forkserver stuff, not very different from
    regular instrumentation injected via afl-as.h. */
 
-#define AFL_UNICORN_CPU_SNIPPET2 do { \
-    if(unlikely(afl_first_instr == 0)) { \
-      afl_setup(env->uc); \
-      afl_forkserver(env); \
-      afl_first_instr = 1; \
-    } \
-    afl_maybe_log(env->uc, tb->pc); \
+#define AFL_UNICORN_CPU_SNIPPET2          \
+  do {                                    \
+                                          \
+    if (unlikely(afl_first_instr == 0)) { \
+                                          \
+      afl_setup(env->uc);                 \
+      afl_forkserver(env);                \
+      afl_first_instr = 1;                \
+                                          \
+    }                                     \
+    afl_maybe_log(env->uc, tb->pc);       \
+                                          \
   } while (0)
 
 /* We use one additional file descriptor to relay "needs translation"
@@ -69,26 +77,28 @@
 /* Set in the child process in forkserver mode: */
 
 static unsigned char afl_fork_child;
-static unsigned int afl_forksrv_pid;
+static unsigned int  afl_forksrv_pid;
 
 /* Function declarations. */
 
-static void afl_setup(struct uc_struct* uc);
-static void afl_forkserver(CPUArchState*);
+static void        afl_setup(struct uc_struct* uc);
+static void        afl_forkserver(CPUArchState*);
 static inline void afl_maybe_log(struct uc_struct* uc, unsigned long);
 
 static void afl_wait_tsl(CPUArchState*, int);
 static void afl_request_tsl(target_ulong, target_ulong, uint64_t);
 
-static TranslationBlock *tb_find_slow(CPUArchState*, target_ulong,
-                                      target_ulong, uint64_t);
+static TranslationBlock* tb_find_slow(CPUArchState*, target_ulong, target_ulong,
+                                      uint64_t);
 
 /* Data structure passed around by the translate handlers: */
 
 struct afl_tsl {
+
   target_ulong pc;
   target_ulong cs_base;
-  uint64_t flags;
+  uint64_t     flags;
+
 };
 
 /*************************
@@ -99,8 +109,7 @@ struct afl_tsl {
 
 static void afl_setup(struct uc_struct* uc) {
 
-  char *id_str = getenv(SHM_ENV_VAR),
-       *inst_r = getenv("AFL_INST_RATIO");
+  char *id_str = getenv(SHM_ENV_VAR), *inst_r = getenv("AFL_INST_RATIO");
 
   int shm_id;
 
@@ -116,9 +125,9 @@ static void afl_setup(struct uc_struct* uc) {
     uc->afl_inst_rms = MAP_SIZE * r / 100;
 
   } else {
-  
+
     uc->afl_inst_rms = MAP_SIZE;
-    
+
   }
 
   if (id_str) {
@@ -132,22 +141,22 @@ static void afl_setup(struct uc_struct* uc) {
        so that the parent doesn't give up on us. */
 
     if (inst_r) uc->afl_area_ptr[0] = 1;
-  }
-  
-  /* Maintain for compatibility */
-  if (getenv("AFL_QEMU_COMPCOV")) {
 
-    uc->afl_compcov_level = 1;
   }
+
+  /* Maintain for compatibility */
+  if (getenv("AFL_QEMU_COMPCOV")) { uc->afl_compcov_level = 1; }
   if (getenv("AFL_COMPCOV_LEVEL")) {
 
     uc->afl_compcov_level = atoi(getenv("AFL_COMPCOV_LEVEL"));
+
   }
+
 }
 
 /* Fork server logic, invoked once we hit first emulated instruction. */
 
-static void afl_forkserver(CPUArchState *env) {
+static void afl_forkserver(CPUArchState* env) {
 
   static unsigned char tmp[4];
 
@@ -165,13 +174,13 @@ static void afl_forkserver(CPUArchState *env) {
   while (1) {
 
     pid_t child_pid;
-    int status, t_fd[2];
+    int   status, t_fd[2];
 
     /* Whoops, parent dead? */
 
     if (read(FORKSRV_FD, tmp, 4) != 4) exit(2);
 
-    /* Establish a channel with child to grab translation commands. We'll 
+    /* Establish a channel with child to grab translation commands. We'll
        read from t_fd[0], child will write to TSL_FD. */
 
     if (pipe(t_fd) || dup2(t_fd[1], TSL_FD) < 0) exit(3);
@@ -211,7 +220,6 @@ static void afl_forkserver(CPUArchState *env) {
 
 }
 
-
 /* The equivalent of the tuple logging routine from afl-as.h. */
 
 static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) {
@@ -220,14 +228,13 @@ static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) {
 
   u8* afl_area_ptr = uc->afl_area_ptr;
 
-  if(!afl_area_ptr)
-    return;
+  if (!afl_area_ptr) return;
 
   /* Looks like QEMU always maps to fixed locations, so ASAN is not a
      concern. Phew. But instruction addresses may be aligned. Let's mangle
      the value to get something quasi-uniform. */
 
-  cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+  cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
   cur_loc &= MAP_SIZE - 1;
 
   /* Implement probabilistic instrumentation by looking at scrambled block
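
The scrambling above turns (often aligned) instruction addresses into
quasi-uniform map indices. A standalone sketch, assuming the usual
65536-entry map (MAP_SIZE comes from config.h; the constant here is
illustrative):

    #include <stdio.h>

    #define DEMO_MAP_SIZE 65536

    static unsigned long mangle(unsigned long cur_loc) {

      cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);  /* mix high and low bits */
      cur_loc &= DEMO_MAP_SIZE - 1;               /* clamp to map bounds   */
      return cur_loc;

    }

    int main(void) {

      /* Two adjacent 16-byte-aligned addresses land on distant indices. */
      printf("%lu %lu\n", mangle(0x400100UL), mangle(0x400110UL));
      return 0;

    }
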
@@ -243,7 +250,6 @@ static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) {
 
 }
 
-
 /* This code is invoked whenever QEMU decides that it doesn't have a
    translation of a particular block and needs to compute it. When this happens,
    we tell the parent to mirror the operation, so that the next fork() has a
@@ -255,20 +261,19 @@ static void afl_request_tsl(target_ulong pc, target_ulong cb, uint64_t flags) {
 
   if (!afl_fork_child) return;
 
-  t.pc      = pc;
+  t.pc = pc;
   t.cs_base = cb;
-  t.flags   = flags;
+  t.flags = flags;
 
   if (write(TSL_FD, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl))
     return;
 
 }
 
-
 /* This is the other side of the same channel. Since timeouts are handled by
    afl-fuzz simply killing the child, we can just wait until the pipe breaks. */
 
-static void afl_wait_tsl(CPUArchState *env, int fd) {
+static void afl_wait_tsl(CPUArchState* env, int fd) {
 
   struct afl_tsl t;
 
@@ -276,12 +281,13 @@ static void afl_wait_tsl(CPUArchState *env, int fd) {
 
     /* Broken pipe means it's time to return to the fork server routine. */
 
-    if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl))
-      break;
+    if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) break;
 
     tb_find_slow(env, t.pc, t.cs_base, t.flags);
+
   }
 
   close(fd);
+
 }
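
The translation channel is a pipe carrying fixed-size records: the child
writes one afl_tsl per untranslated block, the parent reads until the pipe
breaks. A self-contained sketch of both ends (names and types are
illustrative stand-ins):

    #include <stdint.h>
    #include <unistd.h>

    struct tsl_demo {

      uintptr_t pc;
      uintptr_t cs_base;
      uint64_t  flags;

    };

    /* Child side: best-effort, fire-and-forget. */
    void request_tsl_demo(int fd, uintptr_t pc, uintptr_t cb,
                          uint64_t flags) {

      struct tsl_demo t = {pc, cb, flags};
      if (write(fd, &t, sizeof(t)) != sizeof(t)) return;

    }

    /* Parent side: a short read means the writer is gone. */
    void wait_tsl_demo(int fd) {

      struct tsl_demo t;

      while (read(fd, &t, sizeof(t)) == sizeof(t)) {

        /* ... translate the block at t.pc in this context ... */

      }

      close(fd);

    }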
 
diff --git a/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h b/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h
index 69877c6b..7c84058f 100644
--- a/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h
+++ b/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h
@@ -35,28 +35,23 @@
 static void afl_gen_compcov(TCGContext *s, uint64_t cur_loc, TCGv_i64 arg1,
                             TCGv_i64 arg2, TCGMemOp ot, int is_imm) {
 
-  if (!s->uc->afl_compcov_level || !s->uc->afl_area_ptr)
-    return;
-  
-  if (!is_imm && s->uc->afl_compcov_level < 2)
-    return;
+  if (!s->uc->afl_compcov_level || !s->uc->afl_area_ptr) return;
 
-  cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+  if (!is_imm && s->uc->afl_compcov_level < 2) return;
+
+  cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
   cur_loc &= MAP_SIZE - 7;
-  
+
   if (cur_loc >= s->uc->afl_inst_rms) return;
 
   switch (ot) {
-    case MO_64:
-      gen_afl_compcov_log_64(s, cur_loc, arg1, arg2);
-      break;
-    case MO_32: 
-      gen_afl_compcov_log_32(s, cur_loc, arg1, arg2);
-      break;
-    case MO_16:
-      gen_afl_compcov_log_16(s, cur_loc, arg1, arg2);
-      break;
-    default:
-      return;
+
+    case MO_64: gen_afl_compcov_log_64(s, cur_loc, arg1, arg2); break;
+    case MO_32: gen_afl_compcov_log_32(s, cur_loc, arg1, arg2); break;
+    case MO_16: gen_afl_compcov_log_16(s, cur_loc, arg1, arg2); break;
+    default: return;
+
   }
+
 }
+
diff --git a/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h b/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h
index fa4974d6..d21bbcc7 100644
--- a/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h
+++ b/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h
@@ -31,26 +31,29 @@
  */
 
 static inline void gen_afl_compcov_log_16(TCGContext *tcg_ctx, uint64_t cur_loc,
-                                          TCGv_i64 arg1, TCGv_i64 arg2)
-{
-    TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc);
-    TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc);
-    gen_helper_afl_compcov_log_16(tcg_ctx, tuc, tcur_loc, arg1, arg2);
+                                          TCGv_i64 arg1, TCGv_i64 arg2) {
+
+  TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc);
+  TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc);
+  gen_helper_afl_compcov_log_16(tcg_ctx, tuc, tcur_loc, arg1, arg2);
+
 }
 
 static inline void gen_afl_compcov_log_32(TCGContext *tcg_ctx, uint64_t cur_loc,
-                                          TCGv_i64 arg1, TCGv_i64 arg2)
-{
-    TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc);
-    TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc);
-    gen_helper_afl_compcov_log_32(tcg_ctx, tuc, tcur_loc, arg1, arg2);
+                                          TCGv_i64 arg1, TCGv_i64 arg2) {
+
+  TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc);
+  TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc);
+  gen_helper_afl_compcov_log_32(tcg_ctx, tuc, tcur_loc, arg1, arg2);
+
 }
 
 static inline void gen_afl_compcov_log_64(TCGContext *tcg_ctx, uint64_t cur_loc,
-                                          TCGv_i64 arg1, TCGv_i64 arg2)
-{
-    TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc);
-    TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc);
-    gen_helper_afl_compcov_log_64(tcg_ctx, tuc, tcur_loc, arg1, arg2);
+                                          TCGv_i64 arg1, TCGv_i64 arg2) {
+
+  TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc);
+  TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc);
+  gen_helper_afl_compcov_log_64(tcg_ctx, tuc, tcur_loc, arg1, arg2);
+
 }
 
diff --git a/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h b/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h
index 1f0667ce..95e68302 100644
--- a/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h
+++ b/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h
@@ -38,9 +38,8 @@ void HELPER(afl_compcov_log_16)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1,
 
   u8* afl_area_ptr = ((struct uc_struct*)uc_ptr)->afl_area_ptr;
 
-  if ((arg1 & 0xff) == (arg2 & 0xff)) {
-    INC_AFL_AREA(cur_loc);
-  }
+  if ((arg1 & 0xff) == (arg2 & 0xff)) { INC_AFL_AREA(cur_loc); }
+
 }
 
 void HELPER(afl_compcov_log_32)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1,
@@ -49,14 +48,17 @@ void HELPER(afl_compcov_log_32)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1,
   u8* afl_area_ptr = ((struct uc_struct*)uc_ptr)->afl_area_ptr;
 
   if ((arg1 & 0xff) == (arg2 & 0xff)) {
+
     INC_AFL_AREA(cur_loc);
     if ((arg1 & 0xffff) == (arg2 & 0xffff)) {
-      INC_AFL_AREA(cur_loc +1);
-      if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) {
-        INC_AFL_AREA(cur_loc +2);
-      }
+
+      INC_AFL_AREA(cur_loc + 1);
+      if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { INC_AFL_AREA(cur_loc + 2); }
+
     }
+
   }
+
 }
 
 void HELPER(afl_compcov_log_64)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1,
@@ -65,25 +67,40 @@ void HELPER(afl_compcov_log_64)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1,
   u8* afl_area_ptr = ((struct uc_struct*)uc_ptr)->afl_area_ptr;
 
   if ((arg1 & 0xff) == (arg2 & 0xff)) {
+
     INC_AFL_AREA(cur_loc);
     if ((arg1 & 0xffff) == (arg2 & 0xffff)) {
-      INC_AFL_AREA(cur_loc +1);
+
+      INC_AFL_AREA(cur_loc + 1);
       if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) {
-        INC_AFL_AREA(cur_loc +2);
+
+        INC_AFL_AREA(cur_loc + 2);
         if ((arg1 & 0xffffffff) == (arg2 & 0xffffffff)) {
-          INC_AFL_AREA(cur_loc +3);
+
+          INC_AFL_AREA(cur_loc + 3);
           if ((arg1 & 0xffffffffff) == (arg2 & 0xffffffffff)) {
-            INC_AFL_AREA(cur_loc +4);
+
+            INC_AFL_AREA(cur_loc + 4);
             if ((arg1 & 0xffffffffffff) == (arg2 & 0xffffffffffff)) {
-              INC_AFL_AREA(cur_loc +5);
+
+              INC_AFL_AREA(cur_loc + 5);
               if ((arg1 & 0xffffffffffffff) == (arg2 & 0xffffffffffffff)) {
-                INC_AFL_AREA(cur_loc +6);
+
+                INC_AFL_AREA(cur_loc + 6);
+
               }
+
             }
+
           }
+
         }
+
       }
+
     }
+
   }
+
 }
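
The nested helper above bumps one map entry per matching low-order byte of
the two operands, up to seven entries for a 64-bit compare. A loop-based
equivalent (plain increments here; the real helper goes through
INC_AFL_AREA() for NeverZero):

    #include <stdint.h>

    void compcov_log_64_demo(uint8_t* map, uint64_t cur_loc, uint64_t arg1,
                             uint64_t arg2) {

      uint64_t     mask = 0xff;
      unsigned int i;

      for (i = 0; i < 7; i++) {

        if ((arg1 & mask) != (arg2 & mask)) break;  /* prefix diverges     */
        map[cur_loc + i]++;                         /* one more byte equal */
        mask = (mask << 8) | 0xff;                  /* widen by one byte   */

      }

    }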