about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--GNUmakefile46
-rw-r--r--GNUmakefile.gcc_plugin12
-rw-r--r--GNUmakefile.llvm8
-rw-r--r--README.md15
-rw-r--r--TODO.md3
-rwxr-xr-xafl-system-config4
-rwxr-xr-xafl-whatsup49
-rw-r--r--custom_mutators/grammar_mutator/GRAMMAR_VERSION2
m---------custom_mutators/grammar_mutator/grammar_mutator0
-rw-r--r--docs/Changelog.md14
-rw-r--r--docs/env_variables.md29
-rw-r--r--docs/ideas.md43
-rw-r--r--docs/notes_for_asan.md7
-rw-r--r--docs/status_screen.md11
-rw-r--r--frida_mode/src/instrument.c8
-rw-r--r--frida_mode/src/ranges.c3
-rw-r--r--frida_mode/test/testinstr.c7
-rwxr-xr-xfrida_mode/test/testinstr.py49
-rw-r--r--include/afl-fuzz.h4
-rw-r--r--include/android-ashmem.h16
-rw-r--r--include/config.h4
-rw-r--r--include/envs.h3
-rw-r--r--instrumentation/afl-compiler-rt.o.c26
-rw-r--r--instrumentation/afl-llvm-lto-instrumentation.so.cc4
-rw-r--r--qemu_mode/QEMUAFL_VERSION2
-rwxr-xr-xqemu_mode/build_qemu_support.sh2
-rw-r--r--qemu_mode/libcompcov/compcovtest.cc148
m---------qemu_mode/qemuafl0
-rw-r--r--src/afl-analyze.c19
-rw-r--r--src/afl-as.c7
-rw-r--r--src/afl-cc.c14
-rw-r--r--src/afl-forkserver.c24
-rw-r--r--src/afl-fuzz-init.c23
-rw-r--r--src/afl-fuzz-queue.c6
-rw-r--r--src/afl-fuzz-redqueen.c26
-rw-r--r--src/afl-fuzz-state.c7
-rw-r--r--src/afl-fuzz-stats.c131
-rw-r--r--src/afl-fuzz.c10
-rw-r--r--src/afl-showmap.c7
-rw-r--r--src/afl-tmin.c19
-rwxr-xr-xtest/test-pre.sh1
-rw-r--r--unicorn_mode/helper_scripts/ida_context_loader.py84
-rw-r--r--utils/afl_network_proxy/afl-network-server.c5
-rw-r--r--utils/aflpp_driver/GNUmakefile10
-rw-r--r--utils/aflpp_driver/README.md30
-rw-r--r--utils/aflpp_driver/aflpp_qemu_driver_hook.c21
-rw-r--r--utils/autodict_ql/autodict-ql.py154
-rw-r--r--utils/autodict_ql/litan.py126
-rw-r--r--utils/autodict_ql/memcmp-strings.py64
-rw-r--r--utils/autodict_ql/readme.md37
-rw-r--r--utils/autodict_ql/stan-strings.py64
-rw-r--r--utils/autodict_ql/strcmp-strings.py64
-rw-r--r--utils/autodict_ql/strncmp-strings.py64
-rw-r--r--utils/qemu_persistent_hook/read_into_rdi.c2
54 files changed, 1000 insertions, 538 deletions
diff --git a/GNUmakefile b/GNUmakefile
index 963004bd..804bfe08 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -36,6 +36,11 @@ SH_PROGS    = afl-plot afl-cmin afl-cmin.bash afl-whatsup afl-system-config
 MANPAGES=$(foreach p, $(PROGS) $(SH_PROGS), $(p).8) afl-as.8
 ASAN_OPTIONS=detect_leaks=0
 
+SYS = $(shell uname -s)
+ARCH = $(shell uname -m)
+
+$(info [*] Compiling afl++ for OS $(SYS) on ARCH $(ARCH))
+
 ifdef NO_SPLICING
   override CFLAGS += -DNO_SPLICING
 endif
@@ -82,7 +87,7 @@ endif
 #  endif
 #endif
 
-ifneq "$(shell uname)" "Darwin"
+ifneq "$(SYS)" "Darwin"
   #ifeq "$(HAVE_MARCHNATIVE)" "1"
   #  SPECIAL_PERFORMANCE += -march=native
   #endif
@@ -92,7 +97,7 @@ ifneq "$(shell uname)" "Darwin"
   endif
 endif
 
-ifeq "$(shell uname)" "SunOS"
+ifeq "$(SYS)" "SunOS"
   CFLAGS_OPT += -Wno-format-truncation
   LDFLAGS = -lkstat -lrt
 endif
@@ -119,11 +124,10 @@ ifdef INTROSPECTION
   CFLAGS_OPT += -DINTROSPECTION=1
 endif
 
-
-ifneq "$(shell uname -m)" "x86_64"
- ifneq "$(patsubst i%86,i386,$(shell uname -m))" "i386"
-  ifneq "$(shell uname -m)" "amd64"
-   ifneq "$(shell uname -m)" "i86pc"
+ifneq "$(ARCH)" "x86_64"
+ ifneq "$(patsubst i%86,i386,$(ARCH))" "i386"
+  ifneq "$(ARCH)" "amd64"
+   ifneq "$(ARCH)" "i86pc"
 	AFL_NO_X86=1
    endif
   endif
@@ -141,30 +145,30 @@ override CFLAGS += -g -Wno-pointer-sign -Wno-variadic-macros -Wall -Wextra -Wpoi
 			  -I include/ -DAFL_PATH=\"$(HELPER_PATH)\" \
 			  -DBIN_PATH=\"$(BIN_PATH)\" -DDOC_PATH=\"$(DOC_PATH)\"
 
-ifeq "$(shell uname -s)" "FreeBSD"
+ifeq "$(SYS)" "FreeBSD"
   override CFLAGS  += -I /usr/local/include/
   LDFLAGS += -L /usr/local/lib/
 endif
 
-ifeq "$(shell uname -s)" "DragonFly"
+ifeq "$(SYS)" "DragonFly"
   override CFLAGS  += -I /usr/local/include/
   LDFLAGS += -L /usr/local/lib/
 endif
 
-ifeq "$(shell uname -s)" "OpenBSD"
+ifeq "$(SYS)" "OpenBSD"
   override CFLAGS  += -I /usr/local/include/ -mno-retpoline
   LDFLAGS += -Wl,-z,notext -L /usr/local/lib/
 endif
 
-ifeq "$(shell uname -s)" "NetBSD"
+ifeq "$(SYS)" "NetBSD"
   override CFLAGS  += -I /usr/pkg/include/
   LDFLAGS += -L /usr/pkg/lib/
 endif
 
-ifeq "$(shell uname -s)" "Haiku"
+ifeq "$(SYS)" "Haiku"
   SHMAT_OK=0
   override CFLAGS  += -DUSEMMAP=1 -Wno-error=format -fPIC
-  LDFLAGS += -Wno-deprecated-declarations -lgnu
+  LDFLAGS += -Wno-deprecated-declarations -lgnu -lnetwork
   SPECIAL_PERFORMANCE += -DUSEMMAP=1
 endif
 
@@ -236,24 +240,24 @@ else
     BUILD_DATE ?= $(shell date "+%Y-%m-%d")
 endif
 
-ifneq "$(filter Linux GNU%,$(shell uname))" ""
+ifneq "$(filter Linux GNU%,$(SYS))" ""
  ifndef DEBUG
   override CFLAGS += -D_FORTIFY_SOURCE=2
  endif
   LDFLAGS += -ldl -lrt -lm
 endif
 
-ifneq "$(findstring FreeBSD, $(shell uname))" ""
+ifneq "$(findstring FreeBSD, $(SYS))" ""
   override CFLAGS  += -pthread
   LDFLAGS += -lpthread
 endif
 
-ifneq "$(findstring NetBSD, $(shell uname))" ""
+ifneq "$(findstring NetBSD, $(SYS))" ""
   override CFLAGS  += -pthread
   LDFLAGS += -lpthread
 endif
 
-ifneq "$(findstring OpenBSD, $(shell uname))" ""
+ifneq "$(findstring OpenBSD, $(SYS))" ""
   override CFLAGS  += -pthread
   LDFLAGS += -lpthread
 endif
@@ -485,7 +489,7 @@ unit_clean:
 	@rm -f ./test/unittests/unit_preallocable ./test/unittests/unit_list ./test/unittests/unit_maybe_alloc test/unittests/*.o
 
 .PHONY: unit
-ifneq "$(shell uname)" "Darwin"
+ifneq "$(SYS)" "Darwin"
 unit:	unit_maybe_alloc unit_preallocable unit_list unit_clean unit_rand unit_hash
 else
 unit:
@@ -517,7 +521,7 @@ code-format:
 ifndef AFL_NO_X86
 test_build: afl-cc afl-gcc afl-as afl-showmap
 	@echo "[*] Testing the CC wrapper afl-cc and its instrumentation output..."
-	@unset AFL_MAP_SIZE AFL_USE_UBSAN AFL_USE_CFISAN AFL_USE_ASAN AFL_USE_MSAN; ASAN_OPTIONS=detect_leaks=0 AFL_INST_RATIO=100 AFL_PATH=. ./afl-cc test-instr.c -o test-instr 2>&1 || (echo "Oops, afl-cc failed"; exit 1 )
+	@unset AFL_MAP_SIZE AFL_USE_UBSAN AFL_USE_CFISAN AFL_USE_LSAN AFL_USE_ASAN AFL_USE_MSAN; ASAN_OPTIONS=detect_leaks=0 AFL_INST_RATIO=100 AFL_PATH=. ./afl-cc test-instr.c -o test-instr 2>&1 || (echo "Oops, afl-cc failed"; exit 1 )
 	ASAN_OPTIONS=detect_leaks=0 ./afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null
 	echo 1 | ASAN_OPTIONS=detect_leaks=0 ./afl-showmap -m none -q -o .test-instr1 ./test-instr
 	@rm -f test-instr
@@ -525,7 +529,7 @@ test_build: afl-cc afl-gcc afl-as afl-showmap
 	@echo
 	@echo "[+] All right, the instrumentation of afl-cc seems to be working!"
 #	@echo "[*] Testing the CC wrapper afl-gcc and its instrumentation output..."
-#	@unset AFL_MAP_SIZE AFL_USE_UBSAN AFL_USE_CFISAN AFL_USE_ASAN AFL_USE_MSAN; AFL_CC=$(CC) ASAN_OPTIONS=detect_leaks=0 AFL_INST_RATIO=100 AFL_PATH=. ./afl-gcc test-instr.c -o test-instr 2>&1 || (echo "Oops, afl-gcc failed"; exit 1 )
+#	@unset AFL_MAP_SIZE AFL_USE_UBSAN AFL_USE_CFISAN AFL_USE_LSAN AFL_USE_ASAN AFL_USE_MSAN; AFL_CC=$(CC) ASAN_OPTIONS=detect_leaks=0 AFL_INST_RATIO=100 AFL_PATH=. ./afl-gcc test-instr.c -o test-instr 2>&1 || (echo "Oops, afl-gcc failed"; exit 1 )
 #	ASAN_OPTIONS=detect_leaks=0 ./afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null
 #	echo 1 | ASAN_OPTIONS=detect_leaks=0 ./afl-showmap -m none -q -o .test-instr1 ./test-instr
 #	@rm -f test-instr
@@ -546,7 +550,7 @@ all_done: test_build
 	@test -e SanitizerCoverageLTO.so && echo "[+] LLVM LTO mode for 'afl-cc' successfully built!" || echo "[-] LLVM LTO mode for 'afl-cc'  failed to build, this would need LLVM 11+, see instrumentation/README.lto.md how to build it"
 	@test -e afl-gcc-pass.so && echo "[+] gcc_plugin for 'afl-cc' successfully built!" || echo "[-] gcc_plugin for 'afl-cc'  failed to build, unless you really need it that is fine - or read instrumentation/README.gcc_plugin.md how to build it"
 	@echo "[+] All done! Be sure to review the README.md - it's pretty short and useful."
-	@if [ "`uname`" = "Darwin" ]; then printf "\nWARNING: Fuzzing on MacOS X is slow because of the unusually high overhead of\nfork() on this OS. Consider using Linux or *BSD. You can also use VirtualBox\n(virtualbox.org) to put AFL inside a Linux or *BSD VM.\n\n"; fi
+	@if [ "`uname`" = "Darwin" ]; then printf "\nWARNING: Fuzzing on MacOS X is slow because of the unusually high overhead of\nfork() on this OS. Consider using Linux or *BSD for fuzzing software not\nspecifically for MacOS.\n\n"; fi
 	@! tty <&1 >/dev/null || printf "\033[0;30mNOTE: If you can read this, your terminal probably uses white background.\nThis will make the UI hard to read. See docs/status_screen.md for advice.\033[0m\n" 2>/dev/null
 
 .NOTPARALLEL: clean all
diff --git a/GNUmakefile.gcc_plugin b/GNUmakefile.gcc_plugin
index aa93c688..b0f90f1b 100644
--- a/GNUmakefile.gcc_plugin
+++ b/GNUmakefile.gcc_plugin
@@ -41,6 +41,8 @@ CXXEFLAGS   := $(CXXFLAGS) -Wall -std=c++11
 CC          ?= gcc
 CXX         ?= g++
 
+SYS = $(shell uname -s)
+
 ifeq "clang" "$(CC)"
         CC  = gcc
         CXX = g++
@@ -75,25 +77,25 @@ ifeq "$(TEST_MMAP)" "1"
 	override CFLAGS_SAFE += -DUSEMMAP=1
 endif
 
-ifneq "$(shell uname -s)" "Haiku"
-ifneq "$(shell uname -s)" "OpenBSD"
+ifneq "$(SYS)" "Haiku"
+ifneq "$(SYS)" "OpenBSD"
   	LDFLAGS += -lrt
 endif
 else
 	CFLAGS_SAFE += -DUSEMMAP=1
 endif
 
-ifeq "$(shell uname -s)" "OpenBSD"
+ifeq "$(SYS)" "OpenBSD"
     CC  = egcc
     CXX = eg++
     PLUGIN_FLAGS += -I/usr/local/include
 endif
 
-ifeq "$(shell uname -s)" "DragonFly"
+ifeq "$(SYS)" "DragonFly"
   	PLUGIN_FLAGS += -I/usr/local/include
 endif
 
-ifeq "$(shell uname -s)" "SunOS"
+ifeq "$(SYS)" "SunOS"
   	PLUGIN_FLAGS += -I/usr/include/gmp
 endif
 
diff --git a/GNUmakefile.llvm b/GNUmakefile.llvm
index 4b5ac520..61c17e92 100644
--- a/GNUmakefile.llvm
+++ b/GNUmakefile.llvm
@@ -30,7 +30,9 @@ BUILD_DATE  ?= $(shell date -u -d "@$(SOURCE_DATE_EPOCH)" "+%Y-%m-%d" 2>/dev/nul
 
 VERSION     = $(shell grep '^$(HASH)define VERSION ' ./config.h | cut -d '"' -f2)
 
-ifeq "$(shell uname)" "OpenBSD"
+SYS = $(shell uname -s)
+
+ifeq "$(SYS)" "OpenBSD"
   LLVM_CONFIG ?= $(BIN_PATH)/llvm-config
   HAS_OPT = $(shell test -x $(BIN_PATH)/opt && echo 0 || echo 1)
   ifeq "$(HAS_OPT)" "1"
@@ -275,13 +277,13 @@ CLANG_LFL    = `$(LLVM_CONFIG) --ldflags` $(LDFLAGS)
 
 
 # User teor2345 reports that this is required to make things work on MacOS X.
-ifeq "$(shell uname)" "Darwin"
+ifeq "$(SYS)" "Darwin"
   CLANG_LFL += -Wl,-flat_namespace -Wl,-undefined,suppress
 else
   CLANG_CPPFL += -Wl,-znodelete
 endif
 
-ifeq "$(shell uname)" "OpenBSD"
+ifeq "$(SYS)" "OpenBSD"
   CLANG_LFL += `$(LLVM_CONFIG) --libdir`/libLLVM.so
   CLANG_CPPFL += -mno-retpoline
   CFLAGS += -mno-retpoline
diff --git a/README.md b/README.md
index b0ed8634..583db85f 100644
--- a/README.md
+++ b/README.md
@@ -99,15 +99,15 @@ behaviours and defaults:
   | Ngram prev_loc Coverage  |         |     x(6)  |            |            |                  |              |
   | Context Coverage         |         |     x(6)  |            |            |                  |              |
   | Auto Dictionary          |         |     x(7)  |            |            |                  |              |
-  | Snapshot LKM Support     |         |     x(8)  |     x(8)   |            |        (x)(5)    |              |
+  | Snapshot LKM Support     |         |    (x)(8) |    (x)(8)  |            |        (x)(5)    |              |
 
-  1. default for LLVM >= 9.0, env var for older version due an efficiency bug in llvm <= 8
+  1. default for LLVM >= 9.0, env var for older version due to an efficiency bug in previous llvm versions
   2. GCC creates non-performant code, hence it is disabled in gcc_plugin
   3. (currently unassigned)
-  4. with pcguard mode and LTO mode for LLVM >= 11
+  4. with pcguard mode and LTO mode for LLVM 11 and newer
   5. upcoming, development in the branch
-  6. not compatible with LTO instrumentation and needs at least LLVM >= 4.1
-  7. automatic in LTO mode with LLVM >= 11, an extra pass for all LLVM version that writes to a file to use with afl-fuzz' `-x`
+  6. not compatible with LTO instrumentation and needs at least LLVM v4.1
+  7. automatic in LTO mode with LLVM 11 and newer, an extra pass for all LLVM version that writes to a file to use with afl-fuzz' `-x`
   8. the snapshot LKM is currently unmaintained due to too many kernel changes coming too fast :-(
 
   Among others, the following features and patches have been integrated:
@@ -605,8 +605,9 @@ Every -M/-S entry needs a unique name (that can be whatever), however the same
 For every secondary fuzzer there should be a variation, e.g.:
  * one should fuzz the target that was compiled differently: with sanitizers
    activated (`export AFL_USE_ASAN=1 ; export AFL_USE_UBSAN=1 ;
-   export AFL_USE_CFISAN=1 ; `
- * one should fuzz the target with CMPLOG/redqueen (see above)
+   export AFL_USE_CFISAN=1 ; export AFL_USE_LSAN=1`)
+ * one or two should fuzz the target with CMPLOG/redqueen (see above), at
+   least one cmplog instance should follow transformations (`-l AT`)
  * one to three fuzzers should fuzz a target compiled with laf-intel/COMPCOV
    (see above). Important note: If you run more than one laf-intel/COMPCOV
    fuzzer and you want them to share their intermediate results, the main
diff --git a/TODO.md b/TODO.md
index dc765ec4..7e420d23 100644
--- a/TODO.md
+++ b/TODO.md
@@ -2,6 +2,7 @@
 
 ## Roadmap 3.00+
 
+ - align map to 64 bytes but keep real IDs
  - Update afl->pending_not_fuzzed for MOpt
  - CPU affinity for many cores? There seems to be an issue > 96 cores
  - afl-plot to support multiple plot_data
@@ -10,9 +11,9 @@
  - intel-pt tracer
  - better autodetection of shifting runtime timeout values
  - cmplog: use colorization input for havoc?
- - cmplog: too much tainted bytes, directly add to dict and skip?
  - parallel builds for source-only targets
 
+
 ## Further down the road
 
 afl-fuzz:
diff --git a/afl-system-config b/afl-system-config
index ae37a062..5ad9d937 100755
--- a/afl-system-config
+++ b/afl-system-config
@@ -98,7 +98,9 @@ if [ "$PLATFORM" = "Darwin" ] ; then
   DONE=1
 fi
 if [ "$PLATFORM" = "Haiku" ] ; then
-  SETTINGS=~/config/settings/system/debug_server/settings
+  DEBUG_SERVER_DIR=~/config/settings/system/debug_server
+  [ ! -d ${DEBUG_SERVER_DIR} ] && mkdir -p ${DEBUG_SERVER_DIR}
+  SETTINGS=${DEBUG_SERVER_DIR}/settings
   [ -r ${SETTINGS} ] && grep -qE "default_action\s+kill" ${SETTINGS} && { echo "Nothing to do"; } || { \
     echo We change the debug_server default_action from user to silently kill; \
     [ ! -r ${SETTINGS} ] && echo "default_action kill" >${SETTINGS} || { mv ${SETTINGS} s.tmp; sed -e "s/default_action\s\s*user/default_action kill/" s.tmp > ${SETTINGS}; rm s.tmp; }; \
diff --git a/afl-whatsup b/afl-whatsup
index e92b24bd..be259829 100755
--- a/afl-whatsup
+++ b/afl-whatsup
@@ -21,29 +21,37 @@
 echo "$0 status check tool for afl-fuzz by Michal Zalewski"
 echo
 test "$1" = "-h" -o "$1" = "-hh" && {
-  echo $0 [-s] output_directory
+  echo "$0 [-s] [-d] output_directory"
   echo
   echo Options:
   echo   -s  -  skip details and output summary results only
+  echo   -d  -  include dead fuzzer stats
   echo
   exit 1
 }
 
-if [ "$1" = "-s" ]; then
+unset SUMMARY_ONLY
+unset PROCESS_DEAD
 
-  SUMMARY_ONLY=1
-  DIR="$2"
+while [ "$1" = "-s" -o "$1" = "-d" ]; do
 
-else
+  if [ "$1" = "-s" ]; then
+    SUMMARY_ONLY=1
+  fi
 
-  unset SUMMARY_ONLY
-  DIR="$1"
+  if [ "$1" = "-d" ]; then
+    PROCESS_DEAD=1
+  fi
+  
+  shift
 
-fi
+done
+
+DIR="$1"
 
 if [ "$DIR" = "" ]; then
 
-  echo "Usage: $0 [ -s ] afl_sync_dir" 1>&2
+  echo "Usage: $0 [-s] [-d] afl_sync_dir" 1>&2
   echo 1>&2
   echo "The -s option causes the tool to skip all the per-fuzzer trivia and show" 1>&2
   echo "just the summary results. See docs/parallel_fuzzing.md for additional tips." 1>&2
@@ -133,7 +141,7 @@ for i in `find . -maxdepth 2 -iname fuzzer_stats | sort`; do
   sed 's/^command_line.*$/_skip:1/;s/[ ]*:[ ]*/="/;s/$/"/' "$i" >"$TMP"
   . "$TMP"
 
-  RUN_UNIX=$((CUR_TIME - start_time))
+  RUN_UNIX=$run_time
   RUN_DAYS=$((RUN_UNIX / 60 / 60 / 24))
   RUN_HRS=$(((RUN_UNIX / 60 / 60) % 24))
 
@@ -160,7 +168,13 @@ for i in `find . -maxdepth 2 -iname fuzzer_stats | sort`; do
     fi
 
     DEAD_CNT=$((DEAD_CNT + 1))
-    continue
+    last_path=0
+
+    if [ "$PROCESS_DEAD" = "" ]; then
+
+      continue
+
+    fi
 
   fi
 
@@ -252,13 +266,24 @@ fmt_duration $TOTAL_LAST_PATH && TOTAL_LAST_PATH=$DUR_STRING
 
 test "$TOTAL_TIME" = "0" && TOTAL_TIME=1
 
+if [ "$PROCESS_DEAD" = "" ]; then
+
+  TXT="excluded from stats"
+
+else
+
+  TXT="included in stats"
+  ALIVE_CNT=$(($ALIVE_CNT - $DEAD_CNT))
+
+fi
+
 echo "Summary stats"
 echo "============="
 echo
 echo "       Fuzzers alive : $ALIVE_CNT"
 
 if [ ! "$DEAD_CNT" = "0" ]; then
-  echo "      Dead or remote : $DEAD_CNT (excluded from stats)"
+  echo "      Dead or remote : $DEAD_CNT ($TXT)"
 fi
 
 echo "      Total run time : $FMT_TIME"
diff --git a/custom_mutators/grammar_mutator/GRAMMAR_VERSION b/custom_mutators/grammar_mutator/GRAMMAR_VERSION
index a3fe6bb1..c7c1948d 100644
--- a/custom_mutators/grammar_mutator/GRAMMAR_VERSION
+++ b/custom_mutators/grammar_mutator/GRAMMAR_VERSION
@@ -1 +1 @@
-b3c4fcf
+a2d4e4a
diff --git a/custom_mutators/grammar_mutator/grammar_mutator b/custom_mutators/grammar_mutator/grammar_mutator
-Subproject b3c4fcfa6ae28918bc410f7747135eafd4fb726
+Subproject a2d4e4ab966f0581219fbb282f5ac8c89e85ead
diff --git a/docs/Changelog.md b/docs/Changelog.md
index 91d1a8cc..9c9a3976 100644
--- a/docs/Changelog.md
+++ b/docs/Changelog.md
@@ -11,6 +11,8 @@ sending a mail to <afl-users+subscribe@googlegroups.com>.
 ### Version ++3.13a (development)
   - frida_mode - new mode that uses frida to fuzz binary-only targets,
     thanks to @WorksButNotTested!
+  - create a fuzzing dictionary with the help of CodeQL thanks to
+    @microsvuln! see utils/autodict_ql
   - afl-fuzz:
     - added patch by @realmadsci to support @@ as part of command line
       options, e.g. `afl-fuzz ... -- ./target --infile=@@`
@@ -18,9 +20,21 @@ sending a mail to <afl-users+subscribe@googlegroups.com>.
       to allow replay of non-reproducable crashes, see
       AFL_PERSISTENT_RECORD in config.h and docs/envs.h
     - default cmplog level (-l) is now 2, better efficiency.
+    - cmplog level 3 (-l 3) now performs redqueen on everything.
+      use with care.
+    - better fuzzing strategy yields for enabled options
     - ensure one fuzzer sync per cycle
+    - fix afl_custom_queue_new_entry original file name when syncing
+      from fuzzers
+    - added AFL_EXIT_ON_SEED_ISSUES env that will exit if a seed in
+      -i dir crashes the target or results in a timeout. By default
+      afl++ ignores these and uses them for splicing instead.
   - afl-cc:
+    - Leak Sanitizer (AFL_USE_LSAN) added by Joshua Rogers, thanks!
     - Removed InsTrim instrumentation as it is not as good as PCGUARD
+    - Removed automatic linking with -lc++ for LTO mode
+  - utils/aflpp_driver/aflpp_qemu_driver_hook fixed to work with qemu mode
+  - add -d (add dead fuzzer stats) to afl-whatsup
 
 ### Version ++3.12c (release)
   - afl-fuzz:
diff --git a/docs/env_variables.md b/docs/env_variables.md
index 899b36cc..0100ffac 100644
--- a/docs/env_variables.md
+++ b/docs/env_variables.md
@@ -55,7 +55,7 @@ make fairly broad use of environmental variables instead:
     overridden.
 
   - Setting `AFL_USE_ASAN` automatically enables ASAN, provided that your
-    compiler supports that. Note that fuzzing with ASAN is mildly challenging
+    compiler supports it. Note that fuzzing with ASAN is mildly challenging
     - see [notes_for_asan.md](notes_for_asan.md).
 
     (You can also enable MSAN via `AFL_USE_MSAN`; ASAN and MSAN come with the
@@ -64,6 +64,13 @@ make fairly broad use of environmental variables instead:
     there is the Control Flow Integrity sanitizer that can be activated by
     `AFL_USE_CFISAN=1`)
 
+  - Setting `AFL_USE_LSAN` automatically enables Leak-Sanitizer, provided
+    that your compiler supports it. To perform a leak check within your
+    program at a certain point (such as at the end of an __AFL_LOOP),
+    you can run the macro __AFL_LEAK_CHECK(); which will cause
+    an abort if any memory is leaked (you can combine this with the
+    LSAN_OPTIONS=suppressions option to suppress some known leaks).
+
   - Setting `AFL_CC`, `AFL_CXX`, and `AFL_AS` lets you use alternate downstream
     compilation tools, rather than the default 'clang', 'gcc', or 'as' binaries
     in your `$PATH`.
@@ -277,6 +284,9 @@ checks or alter some of the more exotic semantics of the tool:
     normally indicated by the cycle counter in the UI turning green. May be
     convenient for some types of automated jobs.
 
+  - `AFL_EXIT_ON_SEED_ISSUES` will restore the vanilla afl-fuzz behaviour
+    which does not allow crashes or timeout seeds in the initial -i corpus.
+
   - `AFL_MAP_SIZE` sets the size of the shared map that afl-fuzz, afl-showmap,
     afl-tmin and afl-analyze create to gather instrumentation data from
     the target. This must be equal or larger than the size the target was
@@ -372,8 +382,8 @@ checks or alter some of the more exotic semantics of the tool:
     may complain of high load prematurely, especially on systems with low core
     counts. To avoid the alarming red color, you can set `AFL_NO_CPU_RED`.
 
-  - In QEMU mode (-Q), Unicorn mode (-U) and Frida mode (-O), `AFL_PATH` will
-    be searched for afl-qemu-trace.
+  - In QEMU mode (-Q) and Frida mode (-O), `AFL_PATH` will
+    be searched for afl-qemu-trace and afl-frida-trace.so.
 
   - In QEMU mode (-Q), setting `AFL_QEMU_CUSTOM_BIN` cause afl-fuzz to skip
     prepending `afl-qemu-trace` to your command line. Use this if you wish to use a
@@ -627,7 +637,14 @@ optimal values if not already present in the environment:
     msan_track_origins=0
     allocator_may_return_null=1
 ```
-  Be sure to include the first one when customizing anything, since some
-    MSAN versions don't call `abort()` on error, and we need a way to detect
-    faults.
+  - Similarly, the default `LSAN_OPTIONS` are set to:
+```
+    exit_code=23
+    fast_unwind_on_malloc=0
+    symbolize=0
+    print_suppressions=0
+```
+  Be sure to include the first ones for LSAN and MSAN when customizing
+     anything, since some MSAN and LSAN versions don't call `abort()` on
+     error, and we need a way to detect faults.
 
diff --git a/docs/ideas.md b/docs/ideas.md
index 0130cf61..e25d3ba6 100644
--- a/docs/ideas.md
+++ b/docs/ideas.md
@@ -3,42 +3,6 @@
 In the following, we describe a variety of ideas that could be implemented
 for future AFL++ versions.
 
-# GSoC 2021
-
-All GSoC 2021 projects will be in the Rust development language!
-
-## UI for libaflrs
-
-Write a user interface to libaflrs, the upcoming backend of afl++.
-This might look like the afl-fuzz UI, but you can improve on it - and should!
-
-## Schedulers for libaflrs
-
-Schedulers is a mechanism that selects items from the fuzzing corpus based
-on strategy and randomness. One scheduler might focus on long paths,
-another on rarity of edges disocvered, still another on a combination on
-things. Some of the schedulers in afl++ have to be ported, but you are free
-to come up with your own if you want to - and see how it performs.
-
-## Forkserver support for libaflrs
-
-The current libaflrs implementation fuzzes in-memory, however obviously we
-want to support afl instrumented binaries as well.
-Hence a forkserver support needs to be implemented - forking off the target
-and talking to the target via a socketpair and the communication protocol
-within.
-
-## More Observers for libaflrs
-
-An observer is measuring functionality that looks at the target being fuzzed
-and documents something about it. In traditional fuzzing this is the coverage
-in the target, however we want to add various more observers, e.g. stack depth,
-heap usage, etc. - this is a topic for an experienced Rust developer.
-
-# Generic ideas and wishlist - NOT PART OF GSoC 2021 !
-
-The below list is not part of GSoC 2021.
-
 ## Analysis software
 
 Currently analysis is done by using afl-plot, which is rather outdated.
@@ -65,6 +29,13 @@ the current Unicorn instrumentation.
 
 Mentor: any
 
+## Support other programming languages
+
+Other programming languages also use llvm hence they could (easily?) be supported
+for fuzzing, e.g. mono, swift, go, kotlin native, fortran, ...
+
+Mentor: vanhauser-thc
+
 ## Machine Learning
 
 Something with machine learning, better than [NEUZZ](https://github.com/dongdongshe/neuzz) :-)
diff --git a/docs/notes_for_asan.md b/docs/notes_for_asan.md
index 2b3bc028..f55aeaf2 100644
--- a/docs/notes_for_asan.md
+++ b/docs/notes_for_asan.md
@@ -28,6 +28,13 @@ Note that ASAN is incompatible with -static, so be mindful of that.
 
 (You can also use AFL_USE_MSAN=1 to enable MSAN instead.)
 
+When compiling with AFL_USE_LSAN, the leak sanitizer will normally run
+when the program exits. In order to utilize this check at different times,
+such as at the end of a loop, you may use the macro __AFL_LEAK_CHECK();.
+This macro will report a crash in afl-fuzz if any memory is left leaking
+at this stage. You can also use LSAN_OPTIONS and a suppressions file
+for more fine-tuned checking, however make sure you keep exitcode=23.
+
 NOTE: if you run several secondary instances, only one should run the target
 compiled with ASAN (and UBSAN, CFISAN), the others should run the target with
 no sanitizers compiled in.
diff --git a/docs/status_screen.md b/docs/status_screen.md
index 0329d960..e3abcc5f 100644
--- a/docs/status_screen.md
+++ b/docs/status_screen.md
@@ -251,8 +251,9 @@ exceed it by a margin sufficient to be classified as hangs.
   | arithmetics : 53/2.54M, 0/537k, 0/55.2k             |
   |  known ints : 8/322k, 12/1.32M, 10/1.70M            |
   |  dictionary : 9/52k, 1/53k, 1/24k                   |
-  |       havoc : 1903/20.0M, 0/0                       |
-  |        trim : 20.31%/9201, 17.05%                   |
+  |havoc/splice : 1903/20.0M, 0/0                       |
+  |py/custom/rq : unused, 53/2.54M, unused              |
+  |    trim/eff : 20.31%/9201, 17.05%                   |
   +-----------------------------------------------------+
 ```
 
@@ -268,6 +269,12 @@ goal. Finally, the third number shows the proportion of bytes that, although
 not possible to remove, were deemed to have no effect and were excluded from
 some of the more expensive deterministic fuzzing steps.
 
+Note that when deterministic mutation mode is off (which is the default
+because it is not very efficient) the first five lines display
+"disabled (default, enable with -D)".
+
+Only what is activated will have counter shown.
+
 ### Path geometry
 
 ```
diff --git a/frida_mode/src/instrument.c b/frida_mode/src/instrument.c
index 042fdab8..22910062 100644
--- a/frida_mode/src/instrument.c
+++ b/frida_mode/src/instrument.c
@@ -174,7 +174,13 @@ void instrument_coverage_optimize(const cs_insn *   instr,
 
 static void on_basic_block(GumCpuContext *context, gpointer user_data) {
 
-  /* Avoid stack operations in potentially performance critical code */
+  /*
+   * This function is performance critical as it is called to instrument every
+   * basic block. By moving our print buffer to a global, we avoid it affecting
+   * the critical path with additional stack adjustments if tracing is not
+   * enabled. If tracing is enabled, then we're printing a load of diagnostic
+   * information so this overhead is unlikely to be noticeable.
+   */
   static char buffer[200];
   int         len;
   guint64     current_pc = (guint64)user_data;
diff --git a/frida_mode/src/ranges.c b/frida_mode/src/ranges.c
index fc14710f..49ef5a62 100644
--- a/frida_mode/src/ranges.c
+++ b/frida_mode/src/ranges.c
@@ -29,8 +29,7 @@ static void convert_address_token(gchar *token, GumMemoryRange *range) {
   gchar **tokens;
   int     token_count;
   tokens = g_strsplit(token, "-", 2);
-  for (token_count = 0; tokens[token_count] != NULL; token_count++)
-    ;
+  for (token_count = 0; tokens[token_count] != NULL; token_count++) {}
 
   if (token_count != 2) {
 
diff --git a/frida_mode/test/testinstr.c b/frida_mode/test/testinstr.c
index 2c3d5144..37d47f91 100644
--- a/frida_mode/test/testinstr.c
+++ b/frida_mode/test/testinstr.c
@@ -78,6 +78,13 @@ int main(int argc, char **argv) {
     }
 
     buf = malloc(len);
+    if (buf == NULL) {
+
+      perror("malloc");
+      break;
+
+    }
+
     n_read = read(fd, buf, len);
     if (n_read != len) {
 
diff --git a/frida_mode/test/testinstr.py b/frida_mode/test/testinstr.py
index 8f5fe886..f648808b 100755
--- a/frida_mode/test/testinstr.py
+++ b/frida_mode/test/testinstr.py
@@ -1,32 +1,49 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
 import argparse
 from elftools.elf.elffile import ELFFile
 
+
 def process_file(file, section, base):
-    with open(file, 'rb') as f:
+    with open(file, "rb") as f:
         for sect in ELFFile(f).iter_sections():
-            if (sect.name == section):
-                start = base + sect.header['sh_offset']
-                end = start + sect.header['sh_size']
-                print ("0x%016x-0x%016x" % (start, end))
+            if sect.name == section:
+                start = base + sect.header["sh_offset"]
+                end = start + sect.header["sh_size"]
+                print("0x%016x-0x%016x" % (start, end))
                 return
 
-    print ("Section '%s' not found in '%s'" % (section, file))
+    print("Section '%s' not found in '%s'" % (section, file))
+
 
 def hex_value(x):
     return int(x, 16)
 
+
 def main():
-    parser = argparse.ArgumentParser(description='Process some integers.')
-    parser.add_argument('-f', '--file', dest='file', type=str,
-                    help='elf file name', required=True)
-    parser.add_argument('-s', '--section', dest='section', type=str,
-                    help='elf section name', required=True)
-    parser.add_argument('-b', '--base', dest='base', type=hex_value,
-                    help='elf base address', required=True)
+    parser = argparse.ArgumentParser(description="Process some integers.")
+    parser.add_argument(
+        "-f", "--file", dest="file", type=str, help="elf file name", required=True
+    )
+    parser.add_argument(
+        "-s",
+        "--section",
+        dest="section",
+        type=str,
+        help="elf section name",
+        required=True,
+    )
+    parser.add_argument(
+        "-b",
+        "--base",
+        dest="base",
+        type=hex_value,
+        help="elf base address",
+        required=True,
+    )
 
     args = parser.parse_args()
-    process_file (args.file, args.section, args.base)
+    process_file(args.file, args.section, args.base)
+
 
 if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()
diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h
index 40a7fc85..f201782a 100644
--- a/include/afl-fuzz.h
+++ b/include/afl-fuzz.h
@@ -384,7 +384,8 @@ typedef struct afl_env_vars {
       afl_dumb_forksrv, afl_import_first, afl_custom_mutator_only, afl_no_ui,
       afl_force_ui, afl_i_dont_care_about_missing_crashes, afl_bench_just_one,
       afl_bench_until_crash, afl_debug_child, afl_autoresume, afl_cal_fast,
-      afl_cycle_schedules, afl_expand_havoc, afl_statsd, afl_cmplog_only_new;
+      afl_cycle_schedules, afl_expand_havoc, afl_statsd, afl_cmplog_only_new,
+      afl_exit_on_seed_issues;
 
   u8 *afl_tmpdir, *afl_custom_mutator_library, *afl_python_module, *afl_path,
       *afl_hang_tmout, *afl_forksrv_init_tmout, *afl_skip_crashes, *afl_preload,
@@ -483,7 +484,6 @@ typedef struct afl_state {
       no_unlink,                        /* do not unlink cur_input          */
       debug,                            /* Debug mode                       */
       custom_only,                      /* Custom mutator only mode         */
-      python_only,                      /* Python-only mode                 */
       is_main_node,                     /* if this is the main node         */
       is_secondary_node;                /* if this is a secondary instance  */
 
diff --git a/include/android-ashmem.h b/include/android-ashmem.h
index 44fe556a..1bfd3220 100644
--- a/include/android-ashmem.h
+++ b/include/android-ashmem.h
@@ -13,12 +13,14 @@
     #include <stdio.h>
     #define ASHMEM_DEVICE "/dev/ashmem"
 
-int shmdt(const void* address) {
-#if defined(SYS_shmdt)
+int shmdt(const void *address) {
+
+    #if defined(SYS_shmdt)
   return syscall(SYS_shmdt, address);
-#else
+    #else
   return syscall(SYS_ipc, SHMDT, 0, 0, 0, address, 0);
-#endif
+    #endif
+
 }
 
 int shmctl(int __shmid, int __cmd, struct shmid_ds *__buf) {
@@ -26,7 +28,7 @@ int shmctl(int __shmid, int __cmd, struct shmid_ds *__buf) {
   int ret = 0;
   if (__cmd == IPC_RMID) {
 
-    int length = ioctl(__shmid, ASHMEM_GET_SIZE, NULL);
+    int               length = ioctl(__shmid, ASHMEM_GET_SIZE, NULL);
     struct ashmem_pin pin = {0, length};
     ret = ioctl(__shmid, ASHMEM_UNPIN, &pin);
     close(__shmid);
@@ -77,6 +79,6 @@ void *shmat(int __shmid, const void *__shmaddr, int __shmflg) {
 
 }
 
-  #endif /* !_ANDROID_ASHMEM_H */
-#endif /* !__ANDROID__ */
+  #endif                                              /* !_ANDROID_ASHMEM_H */
+#endif                                                      /* !__ANDROID__ */
 
diff --git a/include/config.h b/include/config.h
index 75f363f7..aa24ea6c 100644
--- a/include/config.h
+++ b/include/config.h
@@ -406,6 +406,10 @@
 
 #define MSAN_ERROR 86
 
+/* Distinctive exit code used to indicate LSAN trip condition: */
+
+#define LSAN_ERROR 23
+
 /* Designated file descriptors for forkserver commands (the application will
    use FORKSRV_FD and FORKSRV_FD + 1): */
 
diff --git a/include/envs.h b/include/envs.h
index f7c8b460..ebe98257 100644
--- a/include/envs.h
+++ b/include/envs.h
@@ -26,6 +26,7 @@ static char *afl_environment_variables[] = {
     "AFL_BENCH_UNTIL_CRASH",
     "AFL_CAL_FAST",
     "AFL_CC",
+    "AFL_CC_COMPILER",
     "AFL_CMIN_ALLOW_ANY",
     "AFL_CMIN_CRASHES_ONLY",
     "AFL_CMPLOG_ONLY_NEW",
@@ -48,6 +49,7 @@ static char *afl_environment_variables[] = {
     "AFL_DUMB_FORKSRV",
     "AFL_ENTRYPOINT",
     "AFL_EXIT_WHEN_DONE",
+    "AFL_EXIT_ON_SEED_ISSUES",
     "AFL_FAST_CAL",
     "AFL_FORCE_UI",
     "AFL_FRIDA_DEBUG_MAPS",
@@ -181,6 +183,7 @@ static char *afl_environment_variables[] = {
     "AFL_USE_TRACE_PC",
     "AFL_USE_UBSAN",
     "AFL_USE_CFISAN",
+    "AFL_USE_LSAN",
     "AFL_WINE_PATH",
     "AFL_NO_SNAPSHOT",
     "AFL_EXPAND_HAVOC_NOW",
diff --git a/instrumentation/afl-compiler-rt.o.c b/instrumentation/afl-compiler-rt.o.c
index 36b9c2f2..552bbea8 100644
--- a/instrumentation/afl-compiler-rt.o.c
+++ b/instrumentation/afl-compiler-rt.o.c
@@ -34,8 +34,10 @@
 #include <errno.h>
 
 #include <sys/mman.h>
-#include <sys/syscall.h>
 #ifndef __HAIKU__
+  #include <sys/syscall.h>
+#endif
+#ifndef USEMMAP
   #include <sys/shm.h>
 #endif
 #include <sys/wait.h>
@@ -76,6 +78,10 @@
   #define MAP_INITIAL_SIZE MAP_SIZE
 #endif
 
+#if defined(__HAIKU__)
+  extern ssize_t _kern_write(int fd, off_t pos, const void *buffer,	size_t bufferSize);
+#endif // HAIKU
+
 u8   __afl_area_initial[MAP_INITIAL_SIZE];
 u8 * __afl_area_ptr_dummy = __afl_area_initial;
 u8 * __afl_area_ptr = __afl_area_initial;
@@ -1138,6 +1144,18 @@ void __afl_manual_init(void) {
 
 __attribute__((constructor())) void __afl_auto_init(void) {
 
+#ifdef __ANDROID__
+  // Disable handlers in linker/debuggerd, check include/debuggerd/handler.h
+  signal(SIGABRT, SIG_DFL);
+  signal(SIGBUS, SIG_DFL);
+  signal(SIGFPE, SIG_DFL);
+  signal(SIGILL, SIG_DFL);
+  signal(SIGSEGV, SIG_DFL);
+  signal(SIGSTKFLT, SIG_DFL);
+  signal(SIGSYS, SIG_DFL);
+  signal(SIGTRAP, SIG_DFL);
+#endif
+
   if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) return;
 
   if (getenv(DEFER_ENV_VAR)) return;
@@ -1736,7 +1754,11 @@ static int area_is_valid(void *ptr, size_t len) {
 
   if (unlikely(!ptr || __asan_region_is_poisoned(ptr, len))) { return 0; }
 
-  long r = syscall(SYS_write, __afl_dummy_fd[1], ptr, len);
+  #ifndef __HAIKU__
+    long r = syscall(SYS_write, __afl_dummy_fd[1], ptr, len);
+  #else
+    long r = _kern_write(__afl_dummy_fd[1], -1, ptr, len);
+  #endif // HAIKU
 
   if (r <= 0 || r > len) return 0;
 
diff --git a/instrumentation/afl-llvm-lto-instrumentation.so.cc b/instrumentation/afl-llvm-lto-instrumentation.so.cc
index 50306224..f6cdbe9e 100644
--- a/instrumentation/afl-llvm-lto-instrumentation.so.cc
+++ b/instrumentation/afl-llvm-lto-instrumentation.so.cc
@@ -92,7 +92,7 @@ class AFLLTOPass : public ModulePass {
   uint32_t afl_global_id = 1, autodictionary = 1;
   uint32_t function_minimum_size = 1;
   uint32_t inst_blocks = 0, inst_funcs = 0, total_instr = 0;
-  uint64_t map_addr = 0x10000;
+  unsigned long long int map_addr = 0x10000;
   char *   skip_nozero = NULL;
 
 };
@@ -176,7 +176,7 @@ bool AFLLTOPass::runOnModule(Module &M) {
 
   }
 
-  if (debug) { fprintf(stderr, "map address is 0x%lx\n", map_addr); }
+  if (debug) { fprintf(stderr, "map address is 0x%llx\n", map_addr); }
 
   /* Get/set the globals for the SHM region. */
 
diff --git a/qemu_mode/QEMUAFL_VERSION b/qemu_mode/QEMUAFL_VERSION
index 8d95c359..0fb33ae2 100644
--- a/qemu_mode/QEMUAFL_VERSION
+++ b/qemu_mode/QEMUAFL_VERSION
@@ -1 +1 @@
-ddc4a9748d
+d73b0336b4
diff --git a/qemu_mode/build_qemu_support.sh b/qemu_mode/build_qemu_support.sh
index 38085389..6436d43a 100755
--- a/qemu_mode/build_qemu_support.sh
+++ b/qemu_mode/build_qemu_support.sh
@@ -360,6 +360,8 @@ if ! command -v "$CROSS" > /dev/null ; then
     make -C unsigaction && echo "[+] unsigaction ready"
     echo "[+] Building libqasan ..."
     make -C libqasan && echo "[+] unsigaction ready"
+    echo "[+] Building qemu libfuzzer helpers ..."
+    make -C ../utils/aflpp_driver
   else
     echo "[!] Cross compiler $CROSS could not be found, cannot compile libcompcov libqasan and unsigaction"
   fi
diff --git a/qemu_mode/libcompcov/compcovtest.cc b/qemu_mode/libcompcov/compcovtest.cc
index d70bba91..b446ebfa 100644
--- a/qemu_mode/libcompcov/compcovtest.cc
+++ b/qemu_mode/libcompcov/compcovtest.cc
@@ -1,67 +1,83 @@
-/////////////////////////////////////////////////////////////////////////

-//

-// Author: Mateusz Jurczyk (mjurczyk@google.com)

-//

-// Copyright 2019-2020 Google LLC

-//

-// Licensed under the Apache License, Version 2.0 (the "License");

-// you may not use this file except in compliance with the License.

-// You may obtain a copy of the License at

-//

-// https://www.apache.org/licenses/LICENSE-2.0

-//

-// Unless required by applicable law or agreed to in writing, software

-// distributed under the License is distributed on an "AS IS" BASIS,

-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

-// See the License for the specific language governing permissions and

-// limitations under the License.

-//

-

-// solution: echo -ne 'The quick brown fox jumps over the lazy

-// dog\xbe\xba\xfe\xca\xbe\xba\xfe\xca\xde\xc0\xad\xde\xef\xbe' | ./compcovtest

-

-#include <cstdint>

-#include <cstdio>

-#include <cstdlib>

-#include <cstring>

-

-int main() {

-
-  char buffer[44] = {/* zero padding */};

-  fread(buffer, 1, sizeof(buffer) - 1, stdin);

-

-  if (memcmp(&buffer[0], "The quick brown fox ", 20) != 0 ||

-      strncmp(&buffer[20], "jumps over ", 11) != 0 ||

-      strcmp(&buffer[31], "the lazy dog") != 0) {

-
-    return 1;

-
-  }

-

-  uint64_t x = 0;

-  fread(&x, sizeof(x), 1, stdin);

-  if (x != 0xCAFEBABECAFEBABE) { return 2; }

-

-  uint32_t y = 0;

-  fread(&y, sizeof(y), 1, stdin);

-  if (y != 0xDEADC0DE) { return 3; }

-

-  uint16_t z = 0;

-  fread(&z, sizeof(z), 1, stdin);

-

-  switch (z) {

-
-    case 0xBEEF:

-      break;

-

-    default:

-      return 4;

-
-  }

-

-  printf("Puzzle solved, congrats!\n");

-  abort();

-  return 0;

-
-}

+/////////////////////////////////////////////////////////////////////////
+//
+// Author: Mateusz Jurczyk (mjurczyk@google.com)
+//
+// Copyright 2019-2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// solution: echo -ne 'The quick brown fox jumps over the lazy
+// dog\xbe\xba\xfe\xca\xbe\xba\xfe\xca\xde\xc0\xad\xde\xef\xbe' | ./compcovtest
+
+#include "../../include/config.h"
+
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+int main(int argc, char **argv) {
+
+  char buffer[44] = {/* zero padding */};
+
+  FILE *file = stdin;
+
+  if (argc > 1) {
+
+    if ((file = fopen(argv[1], "r")) == NULL) {
+
+      perror(argv[1]);
+      exit(-1);
+
+    }
+
+  }
+
+  fread(buffer, 1, sizeof(buffer) - 1, file);
+
+  if (memcmp(&buffer[0], "The quick brown fox ", 20) != 0 ||
+      strncmp(&buffer[20], "jumps over ", 11) != 0 ||
+      strcmp(&buffer[31], "the lazy dog") != 0) {
+
+    return 1;
+
+  }
+
+  uint64_t x = 0;
+  fread(&x, sizeof(x), 1, file);
+  if (x != 0xCAFEBABECAFEBABE) { return 2; }
+
+  uint32_t y = 0;
+  fread(&y, sizeof(y), 1, file);
+  if (y != 0xDEADC0DE) { return 3; }
+
+  uint16_t z = 0;
+  fread(&z, sizeof(z), 1, file);
+
+  switch (z) {
+
+    case 0xBEEF:
+      break;
+
+    default:
+      return 4;
+
+  }
+
+  printf("Puzzle solved, congrats!\n");
+  abort();
+  return 0;
+
+}
 
diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl
-Subproject ddc4a9748d59857753fb33c30a356f354595f36
+Subproject d73b0336b451fd034e5f469089fb7ee96c80adf
diff --git a/src/afl-analyze.c b/src/afl-analyze.c
index 8e5a1772..aabdbf1a 100644
--- a/src/afl-analyze.c
+++ b/src/afl-analyze.c
@@ -784,6 +784,18 @@ static void set_up_environment(char **argv) {
 
   }
 
+  x = get_afl_env("LSAN_OPTIONS");
+
+  if (x) {
+
+    if (!strstr(x, "symbolize=0")) {
+
+      FATAL("Custom LSAN_OPTIONS set without symbolize=0 - please fix!");
+
+    }
+
+  }
+
   setenv("ASAN_OPTIONS",
          "abort_on_error=1:"
          "detect_leaks=0:"
@@ -821,6 +833,13 @@ static void set_up_environment(char **argv) {
                          "handle_sigfpe=0:"
                          "handle_sigill=0", 0);
 
+  setenv("LSAN_OPTIONS",
+         "exitcode=" STRINGIFY(LSAN_ERROR) ":"
+         "fast_unwind_on_malloc=0:"
+         "symbolize=0:"
+         "print_suppressions=0",
+         0);
+
   if (get_afl_env("AFL_PRELOAD")) {
 
     if (qemu_mode) {
diff --git a/src/afl-as.c b/src/afl-as.c
index aebd0ac8..7119d630 100644
--- a/src/afl-as.c
+++ b/src/afl-as.c
@@ -517,11 +517,12 @@ static void add_instrumentation(void) {
     } else {
 
       char modeline[100];
-      snprintf(modeline, sizeof(modeline), "%s%s%s%s",
+      snprintf(modeline, sizeof(modeline), "%s%s%s%s%s",
                getenv("AFL_HARDEN") ? "hardened" : "non-hardened",
                getenv("AFL_USE_ASAN") ? ", ASAN" : "",
                getenv("AFL_USE_MSAN") ? ", MSAN" : "",
-               getenv("AFL_USE_UBSAN") ? ", UBSAN" : "");
+               getenv("AFL_USE_UBSAN") ? ", UBSAN" : "",
+               getenv("AFL_USE_LSAN") ? ", LSAN" : "");
 
       OKF("Instrumented %u locations (%s-bit, %s mode, ratio %u%%).", ins_lines,
           use_64bit ? "64" : "32", modeline, inst_ratio);
@@ -585,7 +586,7 @@ int main(int argc, char **argv) {
         "AFL_QUIET: suppress verbose output\n"
         "AFL_KEEP_ASSEMBLY: leave instrumented assembly files\n"
         "AFL_AS_FORCE_INSTRUMENT: force instrumentation for asm sources\n"
-        "AFL_HARDEN, AFL_USE_ASAN, AFL_USE_MSAN, AFL_USE_UBSAN:\n"
+        "AFL_HARDEN, AFL_USE_ASAN, AFL_USE_MSAN, AFL_USE_UBSAN, AFL_USE_LSAN:\n"
         "  used in the instrumentation summary message\n",
         argv[0]);
 
diff --git a/src/afl-cc.c b/src/afl-cc.c
index b354077e..1f89bac5 100644
--- a/src/afl-cc.c
+++ b/src/afl-cc.c
@@ -430,9 +430,6 @@ static void edit_params(u32 argc, char **argv, char **envp) {
 
     cc_params[cc_par_cnt++] = "-Wno-unused-command-line-argument";
 
-    if (lto_mode && plusplus_mode)
-      cc_params[cc_par_cnt++] = "-lc++";  // needed by fuzzbench, early
-
     if (lto_mode && have_instr_env) {
 
       cc_params[cc_par_cnt++] = "-Xclang";
@@ -819,6 +816,14 @@ static void edit_params(u32 argc, char **argv, char **envp) {
 
   }
 
+  if (getenv("AFL_USE_LSAN")) {
+
+    cc_params[cc_par_cnt++] = "-fsanitize=leak";
+    cc_params[cc_par_cnt++] = "-includesanitizer/lsan_interface.h";
+    cc_params[cc_par_cnt++] = "-D__AFL_LEAK_CHECK()=__lsan_do_leak_check()";
+
+  }
+
   if (getenv("AFL_USE_CFISAN")) {
 
     if (!lto_mode) {
@@ -1730,7 +1735,8 @@ int main(int argc, char **argv, char **envp) {
           "  AFL_USE_ASAN: activate address sanitizer\n"
           "  AFL_USE_CFISAN: activate control flow sanitizer\n"
           "  AFL_USE_MSAN: activate memory sanitizer\n"
-          "  AFL_USE_UBSAN: activate undefined behaviour sanitizer\n");
+          "  AFL_USE_UBSAN: activate undefined behaviour sanitizer\n"
+          "  AFL_USE_LSAN: activate leak-checker sanitizer\n");
 
       if (have_gcc_plugin)
         SAYF(
diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c
index 0037d2d5..727e7f8d 100644
--- a/src/afl-forkserver.c
+++ b/src/afl-forkserver.c
@@ -502,7 +502,7 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
 
     if (!getenv("LD_BIND_LAZY")) { setenv("LD_BIND_NOW", "1", 1); }
 
-    /* Set sane defaults for ASAN if nothing else specified. */
+    /* Set sane defaults for ASAN if nothing else is specified. */
 
     if (!getenv("ASAN_OPTIONS"))
       setenv("ASAN_OPTIONS",
@@ -519,7 +519,7 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
              "handle_sigill=0",
              1);
 
-    /* Set sane defaults for UBSAN if nothing else specified. */
+    /* Set sane defaults for UBSAN if nothing else is specified. */
 
     if (!getenv("UBSAN_OPTIONS"))
       setenv("UBSAN_OPTIONS",
@@ -557,6 +557,16 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
            "handle_sigill=0",
            1);
 
+    /* LSAN, too, does not support abort_on_error=1. */
+
+    if (!getenv("LSAN_OPTIONS"))
+      setenv("LSAN_OPTIONS",
+            "exitcode=" STRINGIFY(LSAN_ERROR) ":"
+            "fast_unwind_on_malloc=0:"
+            "symbolize=0:"
+            "print_suppressions=0",
+            1);
+
     fsrv->init_child_func(fsrv, argv);
 
     /* Use a distinctive bitmap signature to tell the parent about execv()
@@ -811,7 +821,9 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
 
   if (fsrv->last_run_timed_out) {
 
-    FATAL("Timeout while initializing fork server (adjusting -t may help)");
+    FATAL(
+        "Timeout while initializing fork server (setting "
+        "AFL_FORKSRV_INIT_TMOUT may help)");
 
   }
 
@@ -1303,8 +1315,10 @@ fsrv_run_result_t afl_fsrv_run_target(afl_forkserver_t *fsrv, u32 timeout,
   if (unlikely(
           /* A normal crash/abort */
           (WIFSIGNALED(fsrv->child_status)) ||
-          /* special handling for msan */
-          (fsrv->uses_asan && WEXITSTATUS(fsrv->child_status) == MSAN_ERROR) ||
+          /* special handling for msan and lsan */
+          (fsrv->uses_asan &&
+           (WEXITSTATUS(fsrv->child_status) == MSAN_ERROR ||
+            WEXITSTATUS(fsrv->child_status) == LSAN_ERROR)) ||
           /* the custom crash_exitcode was returned by the target */
           (fsrv->uses_crash_exitcode &&
            WEXITSTATUS(fsrv->child_status) == fsrv->crash_exitcode))) {
diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c
index cb0190a0..b6bfbc29 100644
--- a/src/afl-fuzz-init.c
+++ b/src/afl-fuzz-init.c
@@ -881,7 +881,7 @@ void perform_dry_run(afl_state_t *afl) {
 
       case FSRV_RUN_TMOUT:
 
-        if (afl->timeout_given) {
+        if (afl->timeout_given && !afl->afl_env.afl_exit_on_seed_issues) {
 
           /* if we have a timeout but a timeout value was given then always
              skip. The '+' meaning has been changed! */
@@ -1036,6 +1036,12 @@ void perform_dry_run(afl_state_t *afl) {
 
         }
 
+        if (afl->afl_env.afl_exit_on_seed_issues) {
+
+          FATAL("As AFL_EXIT_ON_SEED_ISSUES is set, afl-fuzz exits.");
+
+        }
+
         /* Remove from fuzzing queue but keep for splicing */
 
         struct queue_entry *p = afl->queue;
@@ -2490,6 +2496,18 @@ void check_asan_opts(afl_state_t *afl) {
 
   }
 
+  x = get_afl_env("LSAN_OPTIONS");
+
+  if (x) {
+
+    if (!strstr(x, "symbolize=0")) {
+
+      FATAL("Custom LSAN_OPTIONS set without symbolize=0 - please fix!");
+
+    }
+
+  }
+
 }
 
 /* Handle stop signal (Ctrl-C, etc). */
@@ -2735,7 +2753,8 @@ void check_binary(afl_state_t *afl, u8 *fname) {
   }
 
   if (memmem(f_data, f_len, "__asan_init", 11) ||
-      memmem(f_data, f_len, "__msan_init", 11)) {
+      memmem(f_data, f_len, "__msan_init", 11) ||
+      memmem(f_data, f_len, "__lsan_init", 11)) {
 
     afl->fsrv.uses_asan = 1;
 
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index e5f51a6c..811e805c 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -478,7 +478,11 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
         u8 *fname_orig = NULL;
 
         /* At the initialization stage, queue_cur is NULL */
-        if (afl->queue_cur) fname_orig = afl->queue_cur->fname;
+        if (afl->queue_cur && !afl->syncing_party) {
+
+          fname_orig = afl->queue_cur->fname;
+
+        }
 
         el->afl_custom_queue_new_entry(el->data, fname, fname_orig);
 
diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c
index 9bfbf95b..cf1e5ea5 100644
--- a/src/afl-fuzz-redqueen.c
+++ b/src/afl-fuzz-redqueen.c
@@ -437,7 +437,7 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len,
 
   if (taint) {
 
-    if (afl->colorize_success &&
+    if (afl->colorize_success && afl->cmplog_lvl < 3 &&
         (len / positions == 1 && positions > CMPLOG_POSITIONS_MAX &&
          afl->active_paths / afl->colorize_success > CMPLOG_CORPUS_PERCENT)) {
 
@@ -1749,6 +1749,12 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf,
 
 #endif
 
+#ifdef _DEBUG
+      if (o->v0 != orig_o->v0 || o->v1 != orig_o->v1)
+        fprintf(stderr, "key=%u idx=%u o0=%llu v0=%llu o1=%llu v1=%llu\n", key,
+                idx, orig_o->v0, o->v0, orig_o->v1, o->v1);
+#endif
+
       // even for u128 and _ExtInt we do cmp_extend_encoding() because
       // if we got here their own special trials failed and it might just be
       // a cast from e.g. u64 to u128 from the input data.
@@ -2365,6 +2371,24 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf,
 
       status = 0;
 
+#ifdef _DEBUG
+      int w;
+      fprintf(stderr, "key=%u idx=%u len=%u o0=", key, idx,
+              SHAPE_BYTES(h->shape));
+      for (w = 0; w < SHAPE_BYTES(h->shape); ++w)
+        fprintf(stderr, "%02x", orig_o->v0[w]);
+      fprintf(stderr, " v0=");
+      for (w = 0; w < SHAPE_BYTES(h->shape); ++w)
+        fprintf(stderr, "%02x", o->v0[w]);
+      fprintf(stderr, " o1=");
+      for (w = 0; w < SHAPE_BYTES(h->shape); ++w)
+        fprintf(stderr, "%02x", orig_o->v1[w]);
+      fprintf(stderr, " v1=");
+      for (w = 0; w < SHAPE_BYTES(h->shape); ++w)
+        fprintf(stderr, "%02x", o->v1[w]);
+      fprintf(stderr, "\n");
+#endif
+
       if (unlikely(rtn_extend_encoding(
               afl, o->v0, o->v1, orig_o->v0, orig_o->v1, SHAPE_BYTES(h->shape),
               idx, taint_len, orig_buf, buf, cbuf, len, lvl, &status))) {
diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c
index f65ff1bb..28d3339a 100644
--- a/src/afl-fuzz-state.c
+++ b/src/afl-fuzz-state.c
@@ -306,6 +306,13 @@ void read_afl_environment(afl_state_t *afl, char **envp) {
             afl->cycle_schedules = afl->afl_env.afl_cycle_schedules =
                 get_afl_env(afl_environment_variables[i]) ? 1 : 0;
 
+          } else if (!strncmp(env, "AFL_EXIT_ON_SEED_ISSUES",
+
+                              afl_environment_variable_len)) {
+
+            afl->afl_env.afl_exit_on_seed_issues =
+                get_afl_env(afl_environment_variables[i]) ? 1 : 0;
+
           } else if (!strncmp(env, "AFL_EXPAND_HAVOC_NOW",
 
                               afl_environment_variable_len)) {
diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
index 2c814d90..52d9de87 100644
--- a/src/afl-fuzz-stats.c
+++ b/src/afl-fuzz-stats.c
@@ -355,18 +355,18 @@ void write_stats_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
 void maybe_update_plot_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
                             double eps) {
 
-  if (unlikely(afl->stop_soon) ||
-      unlikely(afl->plot_prev_qp == afl->queued_paths &&
-               afl->plot_prev_pf == afl->pending_favored &&
-               afl->plot_prev_pnf == afl->pending_not_fuzzed &&
-               afl->plot_prev_ce == afl->current_entry &&
-               afl->plot_prev_qc == afl->queue_cycle &&
-               afl->plot_prev_uc == afl->unique_crashes &&
-               afl->plot_prev_uh == afl->unique_hangs &&
-               afl->plot_prev_md == afl->max_depth &&
-               afl->plot_prev_ed == afl->fsrv.total_execs) ||
-      unlikely(!afl->queue_cycle) ||
-      unlikely(get_cur_time() - afl->start_time <= 60)) {
+  if (unlikely(!afl->force_ui_update &&
+               (afl->stop_soon ||
+                (afl->plot_prev_qp == afl->queued_paths &&
+                 afl->plot_prev_pf == afl->pending_favored &&
+                 afl->plot_prev_pnf == afl->pending_not_fuzzed &&
+                 afl->plot_prev_ce == afl->current_entry &&
+                 afl->plot_prev_qc == afl->queue_cycle &&
+                 afl->plot_prev_uc == afl->unique_crashes &&
+                 afl->plot_prev_uh == afl->unique_hangs &&
+                 afl->plot_prev_md == afl->max_depth &&
+                 afl->plot_prev_ed == afl->fsrv.total_execs) ||
+                !afl->queue_cycle || get_cur_time() - afl->start_time <= 60))) {
 
     return;
 
@@ -427,7 +427,7 @@ void show_stats(afl_state_t *afl) {
   u32 t_bytes, t_bits;
 
   u32 banner_len, banner_pad;
-  u8  tmp[256];
+  u8  tmp[256], tmp2[256];
   u8  time_tmp[64];
 
   u8 val_buf[8][STRINGIFY_VAL_SIZE_MAX];
@@ -531,7 +531,8 @@ void show_stats(afl_state_t *afl) {
 
   /* Roughly every minute, update fuzzer stats and save auto tokens. */
 
-  if (cur_ms - afl->stats_last_stats_ms > STATS_UPDATE_SEC * 1000) {
+  if (unlikely(afl->force_ui_update ||
+               cur_ms - afl->stats_last_stats_ms > STATS_UPDATE_SEC * 1000)) {
 
     afl->stats_last_stats_ms = cur_ms;
     write_stats_file(afl, t_bytes, t_byte_ratio, stab_ratio,
@@ -543,7 +544,8 @@ void show_stats(afl_state_t *afl) {
 
   if (unlikely(afl->afl_env.afl_statsd)) {
 
-    if (cur_ms - afl->statsd_last_send_ms > STATSD_UPDATE_SEC * 1000) {
+    if (unlikely(afl->force_ui_update && cur_ms - afl->statsd_last_send_ms >
+                                             STATSD_UPDATE_SEC * 1000)) {
 
       /* reset counter, even if send failed. */
       afl->statsd_last_send_ms = cur_ms;
@@ -555,7 +557,8 @@ void show_stats(afl_state_t *afl) {
 
   /* Every now and then, write plot data. */
 
-  if (cur_ms - afl->stats_last_plot_ms > PLOT_UPDATE_SEC * 1000) {
+  if (unlikely(afl->force_ui_update ||
+               cur_ms - afl->stats_last_plot_ms > PLOT_UPDATE_SEC * 1000)) {
 
     afl->stats_last_plot_ms = cur_ms;
     maybe_update_plot_file(afl, t_bytes, t_byte_ratio, afl->stats_avg_exec);
@@ -564,14 +567,14 @@ void show_stats(afl_state_t *afl) {
 
   /* Honor AFL_EXIT_WHEN_DONE and AFL_BENCH_UNTIL_CRASH. */
 
-  if (!afl->non_instrumented_mode && afl->cycles_wo_finds > 100 &&
-      !afl->pending_not_fuzzed && afl->afl_env.afl_exit_when_done) {
+  if (unlikely(!afl->non_instrumented_mode && afl->cycles_wo_finds > 100 &&
+               !afl->pending_not_fuzzed && afl->afl_env.afl_exit_when_done)) {
 
     afl->stop_soon = 2;
 
   }
 
-  if (afl->total_crashes && afl->afl_env.afl_bench_until_crash) {
+  if (unlikely(afl->total_crashes && afl->afl_env.afl_bench_until_crash)) {
 
     afl->stop_soon = 2;
 
@@ -583,7 +586,7 @@ void show_stats(afl_state_t *afl) {
 
   /* If we haven't started doing things, bail out. */
 
-  if (!afl->queue_cur) { return; }
+  if (unlikely(!afl->queue_cur)) { return; }
 
   /* Compute some mildly useful bitmap stats. */
 
@@ -602,7 +605,7 @@ void show_stats(afl_state_t *afl) {
 
   SAYF(TERM_HOME);
 
-  if (afl->term_too_small) {
+  if (unlikely(afl->term_too_small)) {
 
     SAYF(cBRI
          "Your terminal is too small to display the UI.\n"
@@ -861,9 +864,13 @@ void show_stats(afl_state_t *afl) {
        " fuzzing strategy yields " bSTG bH10 bHT bH10 bH5 bHB bH bSTOP cCYA
        " path geometry " bSTG bH5 bH2 bVL "\n");
 
-  if (afl->skip_deterministic) {
+  if (unlikely(afl->custom_only)) {
 
-    strcpy(tmp, "n/a, n/a, n/a");
+    strcpy(tmp, "disabled (custom-mutator-only mode)");
+
+  } else if (likely(afl->skip_deterministic)) {
+
+    strcpy(tmp, "disabled (default, enable with -D)");
 
   } else {
 
@@ -881,7 +888,7 @@ void show_stats(afl_state_t *afl) {
                 "    levels : " cRST "%-10s" bSTG       bV "\n",
        tmp, u_stringify_int(IB(0), afl->max_depth));
 
-  if (!afl->skip_deterministic) {
+  if (unlikely(!afl->skip_deterministic)) {
 
     sprintf(tmp, "%s/%s, %s/%s, %s/%s",
             u_stringify_int(IB(0), afl->stage_finds[STAGE_FLIP8]),
@@ -897,7 +904,7 @@ void show_stats(afl_state_t *afl) {
                 "   pending : " cRST "%-10s" bSTG       bV "\n",
        tmp, u_stringify_int(IB(0), afl->pending_not_fuzzed));
 
-  if (!afl->skip_deterministic) {
+  if (unlikely(!afl->skip_deterministic)) {
 
     sprintf(tmp, "%s/%s, %s/%s, %s/%s",
             u_stringify_int(IB(0), afl->stage_finds[STAGE_ARITH8]),
@@ -913,7 +920,7 @@ void show_stats(afl_state_t *afl) {
                 "  pend fav : " cRST "%-10s" bSTG       bV "\n",
        tmp, u_stringify_int(IB(0), afl->pending_favored));
 
-  if (!afl->skip_deterministic) {
+  if (unlikely(!afl->skip_deterministic)) {
 
     sprintf(tmp, "%s/%s, %s/%s, %s/%s",
             u_stringify_int(IB(0), afl->stage_finds[STAGE_INTEREST8]),
@@ -929,7 +936,7 @@ void show_stats(afl_state_t *afl) {
                 " own finds : " cRST "%-10s" bSTG       bV "\n",
        tmp, u_stringify_int(IB(0), afl->queued_discovered));
 
-  if (!afl->skip_deterministic) {
+  if (unlikely(!afl->skip_deterministic)) {
 
     sprintf(tmp, "%s/%s, %s/%s, %s/%s",
             u_stringify_int(IB(0), afl->stage_finds[STAGE_EXTRAS_UO]),
@@ -939,6 +946,14 @@ void show_stats(afl_state_t *afl) {
             u_stringify_int(IB(4), afl->stage_finds[STAGE_EXTRAS_AO]),
             u_stringify_int(IB(5), afl->stage_cycles[STAGE_EXTRAS_AO]));
 
+  } else if (unlikely(!afl->extras_cnt || afl->custom_only)) {
+
+    strcpy(tmp, "n/a");
+
+  } else {
+
+    strcpy(tmp, "havoc mode");
+
   }
 
   SAYF(bV bSTOP "  dictionary : " cRST "%-36s " bSTG bV bSTOP
@@ -974,35 +989,52 @@ void show_stats(afl_state_t *afl) {
                   : cRST),
        tmp);
 
-  if (afl->shm.cmplog_mode) {
+  if (unlikely(afl->afl_env.afl_python_module)) {
 
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s, %s/%s",
+    sprintf(tmp, "%s/%s,",
             u_stringify_int(IB(0), afl->stage_finds[STAGE_PYTHON]),
-            u_stringify_int(IB(1), afl->stage_cycles[STAGE_PYTHON]),
-            u_stringify_int(IB(2), afl->stage_finds[STAGE_CUSTOM_MUTATOR]),
-            u_stringify_int(IB(3), afl->stage_cycles[STAGE_CUSTOM_MUTATOR]),
+            u_stringify_int(IB(1), afl->stage_cycles[STAGE_PYTHON]));
+
+  } else {
+
+    strcpy(tmp, "unused,");
+
+  }
+
+  if (unlikely(afl->afl_env.afl_custom_mutator_library)) {
+
+    sprintf(tmp2, "%s %s/%s,", tmp,
+            u_stringify_int(IB(2), afl->stage_finds[STAGE_PYTHON]),
+            u_stringify_int(IB(3), afl->stage_cycles[STAGE_PYTHON]));
+
+  } else {
+
+    sprintf(tmp2, "%s unused,", tmp);
+
+  }
+
+  if (unlikely(afl->shm.cmplog_mode)) {
+
+    sprintf(tmp, "%s %s/%s, %s/%s", tmp2,
             u_stringify_int(IB(4), afl->stage_finds[STAGE_COLORIZATION]),
             u_stringify_int(IB(5), afl->stage_cycles[STAGE_COLORIZATION]),
             u_stringify_int(IB(6), afl->stage_finds[STAGE_ITS]),
             u_stringify_int(IB(7), afl->stage_cycles[STAGE_ITS]));
 
-    SAYF(bV bSTOP "   custom/rq : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n",
-         tmp);
-
   } else {
 
-    sprintf(tmp, "%s/%s, %s/%s",
-            u_stringify_int(IB(0), afl->stage_finds[STAGE_PYTHON]),
-            u_stringify_int(IB(1), afl->stage_cycles[STAGE_PYTHON]),
-            u_stringify_int(IB(2), afl->stage_finds[STAGE_CUSTOM_MUTATOR]),
-            u_stringify_int(IB(3), afl->stage_cycles[STAGE_CUSTOM_MUTATOR]));
-
-    SAYF(bV bSTOP "   py/custom : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n",
-         tmp);
+    sprintf(tmp, "%s unused, unused", tmp2);
 
   }
 
-  if (!afl->bytes_trim_out) {
+  SAYF(bV bSTOP "py/custom/rq : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n",
+       tmp);
+
+  if (likely(afl->disable_trim)) {
+
+    sprintf(tmp, "disabled, ");
+
+  } else if (unlikely(!afl->bytes_trim_out)) {
 
     sprintf(tmp, "n/a, ");
 
@@ -1015,12 +1047,13 @@ void show_stats(afl_state_t *afl) {
 
   }
 
-  if (!afl->blocks_eff_total) {
+  if (likely(afl->skip_deterministic)) {
 
-    u8 tmp2[128];
+    strcat(tmp, "disabled");
 
-    sprintf(tmp2, "n/a");
-    strcat(tmp, tmp2);
+  } else if (unlikely(!afl->blocks_eff_total)) {
+
+    strcat(tmp, "n/a");
 
   } else {
 
@@ -1044,7 +1077,7 @@ void show_stats(afl_state_t *afl) {
   //
   //} else {
 
-  SAYF(bV bSTOP "        trim : " cRST "%-36s " bSTG bV RESET_G1, tmp);
+  SAYF(bV bSTOP "    trim/eff : " cRST "%-36s " bSTG bV RESET_G1, tmp);
 
   //}
 
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index 9688c84f..2b035a23 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -855,6 +855,14 @@ int main(int argc, char **argv_orig, char **envp) {
               break;
             case '3':
               afl->cmplog_lvl = 3;
+
+              if (!afl->disable_trim) {
+
+                ACTF("Deactivating trimming due CMPLOG level 3");
+                afl->disable_trim = 1;
+
+              }
+
               break;
             case 'a':
             case 'A':
@@ -2125,12 +2133,10 @@ int main(int argc, char **argv_orig, char **envp) {
   }
 
   write_bitmap(afl);
-  maybe_update_plot_file(afl, 0, 0, 0);
   save_auto(afl);
 
 stop_fuzzing:
 
-  write_stats_file(afl, 0, 0, 0, 0);
   afl->force_ui_update = 1;  // ensure the screen is reprinted
   show_stats(afl);           // print the screen one last time
 
diff --git a/src/afl-showmap.c b/src/afl-showmap.c
index 38d03d80..946b19cd 100644
--- a/src/afl-showmap.c
+++ b/src/afl-showmap.c
@@ -572,6 +572,13 @@ static void set_up_environment(afl_forkserver_t *fsrv, char **argv) {
          "handle_sigill=0",
          0);
 
+  setenv("LSAN_OPTIONS",
+         "exitcode=" STRINGIFY(LSAN_ERROR) ":"
+         "fast_unwind_on_malloc=0:"
+         "symbolize=0:"
+         "print_suppressions=0",
+          0);
+
   setenv("UBSAN_OPTIONS",
          "halt_on_error=1:"
          "abort_on_error=1:"
diff --git a/src/afl-tmin.c b/src/afl-tmin.c
index bad5d71b..6656712a 100644
--- a/src/afl-tmin.c
+++ b/src/afl-tmin.c
@@ -714,6 +714,18 @@ static void set_up_environment(afl_forkserver_t *fsrv, char **argv) {
 
   }
 
+  x = get_afl_env("LSAN_OPTIONS");
+
+  if (x) {
+
+    if (!strstr(x, "symbolize=0")) {
+
+      FATAL("Custom LSAN_OPTIONS set without symbolize=0 - please fix!");
+
+    }
+
+  }
+
   setenv("ASAN_OPTIONS",
          "abort_on_error=1:"
          "detect_leaks=0:"
@@ -751,6 +763,13 @@ static void set_up_environment(afl_forkserver_t *fsrv, char **argv) {
                          "handle_sigfpe=0:"
                          "handle_sigill=0", 0);
 
+  setenv("LSAN_OPTIONS",
+         "exitcode=" STRINGIFY(LSAN_ERROR) ":"
+         "fast_unwind_on_malloc=0:"
+         "symbolize=0:"
+         "print_suppressions=0",
+         0);
+
   if (get_afl_env("AFL_PRELOAD")) {
 
     if (fsrv->qemu_mode) {
diff --git a/test/test-pre.sh b/test/test-pre.sh
index 85ac320b..174f2f7f 100755
--- a/test/test-pre.sh
+++ b/test/test-pre.sh
@@ -71,6 +71,7 @@ unset AFL_HARDEN
 unset AFL_USE_ASAN
 unset AFL_USE_MSAN
 unset AFL_USE_UBSAN
+unset AFL_USE_LSAN
 unset AFL_TMPDIR
 unset AFL_CC
 unset AFL_PRELOAD
diff --git a/unicorn_mode/helper_scripts/ida_context_loader.py b/unicorn_mode/helper_scripts/ida_context_loader.py
index 31d47a90..d7984c77 100644
--- a/unicorn_mode/helper_scripts/ida_context_loader.py
+++ b/unicorn_mode/helper_scripts/ida_context_loader.py
@@ -34,13 +34,11 @@ import ida_segment
 
 
 class ContextLoaderError(Exception):
-    """Base "catch all" exception for this script
-    """
+    """Base "catch all" exception for this script"""
 
 
 class ArchNotSupportedError(ContextLoaderError):
-    """Exception raised if the input file CPU architecture isn't supported fully
-    """
+    """Exception raised if the input file CPU architecture isn't supported fully"""
 
 
 def parse_mapping_index(filepath: str):
@@ -51,13 +49,16 @@ def parse_mapping_index(filepath: str):
     """
 
     if filepath is None:
-        raise ContextLoaderError('_index.json file was not selected')
+        raise ContextLoaderError("_index.json file was not selected")
 
     try:
-        with open(filepath, 'rb') as _file:
+        with open(filepath, "rb") as _file:
             return json.load(_file)
     except Exception as ex:
-        raise ContextLoaderError('Failed to parse json file {}'.format(filepath)) from ex
+        raise ContextLoaderError(
+            "Failed to parse json file {}".format(filepath)
+        ) from ex
+
 
 def get_input_name():
     """Get the name of the input file
@@ -68,19 +69,21 @@ def get_input_name():
     input_filepath = ida_nalt.get_input_file_path()
     return Path(input_filepath).name
 
+
 def write_segment_bytes(start: int, filepath: str):
-    """"Read data from context file and write it to the IDA segment
+    """ "Read data from context file and write it to the IDA segment
 
     :param start: Start address
     :param filepath: Path to context file
     """
 
-    with open(filepath, 'rb') as _file:
+    with open(filepath, "rb") as _file:
         data = _file.read()
 
     decompressed_data = zlib.decompress(data)
     ida_bytes.put_bytes(start, decompressed_data)
 
+
 def create_segment(context_dir: str, segment: dict, is_be: bool):
     """Create segment in IDA and map in the data from the file
 
@@ -90,23 +93,30 @@ def create_segment(context_dir: str, segment: dict, is_be: bool):
     """
 
     input_name = get_input_name()
-    if Path(segment['name']).name != input_name:
+    if Path(segment["name"]).name != input_name:
         ida_seg = idaapi.segment_t()
-        ida_seg.start_ea = segment['start']
-        ida_seg.end_ea = segment['end']
+        ida_seg.start_ea = segment["start"]
+        ida_seg.end_ea = segment["end"]
         ida_seg.bitness = 1 if is_be else 0
-        if segment['permissions']['r']:
+        if segment["permissions"]["r"]:
             ida_seg.perm |= ida_segment.SEGPERM_READ
-        if segment['permissions']['w']:
+        if segment["permissions"]["w"]:
             ida_seg.perm |= ida_segment.SEGPERM_WRITE
-        if segment['permissions']['x']:
+        if segment["permissions"]["x"]:
             ida_seg.perm |= ida_segment.SEGPERM_EXEC
-            idaapi.add_segm_ex(ida_seg, Path(segment['name']).name, 'CODE', idaapi.ADDSEG_OR_DIE)
+            idaapi.add_segm_ex(
+                ida_seg, Path(segment["name"]).name, "CODE", idaapi.ADDSEG_OR_DIE
+            )
         else:
-            idaapi.add_segm_ex(ida_seg, Path(segment['name']).name, 'DATA', idaapi.ADDSEG_OR_DIE)
+            idaapi.add_segm_ex(
+                ida_seg, Path(segment["name"]).name, "DATA", idaapi.ADDSEG_OR_DIE
+            )
+
+    if segment["content_file"]:
+        write_segment_bytes(
+            segment["start"], PurePath(context_dir, segment["content_file"])
+        )
 
-    if segment['content_file']:
-        write_segment_bytes(segment['start'], PurePath(context_dir, segment['content_file']))
 
 def create_segments(index: dict, context_dir: str):
     """Iterate segments in index JSON, create the segment in IDA, and map in the data from the file
@@ -117,9 +127,10 @@ def create_segments(index: dict, context_dir: str):
 
     info = idaapi.get_inf_structure()
     is_be = info.is_be()
-    for segment in index['segments']:
+    for segment in index["segments"]:
         create_segment(context_dir, segment, is_be)
 
+
 def rebase_program(index: dict):
     """Rebase the program to the offset specified in the context _index.json
 
@@ -128,20 +139,21 @@ def rebase_program(index: dict):
 
     input_name = get_input_name()
     new_base = None
-    for segment in index['segments']:
-        if not segment['name']:
+    for segment in index["segments"]:
+        if not segment["name"]:
             continue
 
-        segment_name = Path(segment['name']).name
+        segment_name = Path(segment["name"]).name
         if input_name == segment_name:
-            new_base = segment['start']
+            new_base = segment["start"]
             break
 
     if not new_base:
-        raise ContextLoaderError('Input file is not in _index.json')
+        raise ContextLoaderError("Input file is not in _index.json")
 
     current_base = idaapi.get_imagebase()
-    ida_segment.rebase_program(new_base-current_base, 8)
+    ida_segment.rebase_program(new_base - current_base, 8)
+
 
 def get_pc_by_arch(index: dict) -> int:
     """Queries the input file CPU architecture and attempts to lookup the address of the program
@@ -153,13 +165,14 @@ def get_pc_by_arch(index: dict) -> int:
 
     progctr = None
     info = idaapi.get_inf_structure()
-    if info.procname == 'metapc':
+    if info.procname == "metapc":
         if info.is_64bit():
-            progctr = index['regs']['rax']
+            progctr = index["regs"]["rax"]
         elif info.is_32bit():
-            progctr = index['regs']['eax']
+            progctr = index["regs"]["eax"]
     return progctr
 
+
 def write_reg_info(index: dict):
     """Write register info as line comment at instruction pointed to by the program counter and
     change focus to that location
@@ -167,17 +180,19 @@ def write_reg_info(index: dict):
     :param index: _index.json JSON data
     """
 
-    cmt = ''
-    for reg, val in index['regs'].items():
+    cmt = ""
+    for reg, val in index["regs"].items():
         cmt += f"{reg.ljust(6)} : {hex(val)}\n"
 
     progctr = get_pc_by_arch(index)
     if progctr is None:
         raise ArchNotSupportedError(
-            'Architecture not fully supported, skipping register status comment')
+            "Architecture not fully supported, skipping register status comment"
+        )
     ida_bytes.set_cmt(progctr, cmt, 0)
     ida_kernwin.jumpto(progctr)
 
+
 def main(filepath):
     """Main - parse _index.json input and map context files into the database
 
@@ -193,5 +208,6 @@ def main(filepath):
     except ContextLoaderError as ex:
         print(ex)
 
-if __name__ == '__main__':
-    main(ida_kernwin.ask_file(1, '*.json', 'Import file name'))
+
+if __name__ == "__main__":
+    main(ida_kernwin.ask_file(1, "*.json", "Import file name"))
diff --git a/utils/afl_network_proxy/afl-network-server.c b/utils/afl_network_proxy/afl-network-server.c
index 0dfae658..60f174ee 100644
--- a/utils/afl_network_proxy/afl-network-server.c
+++ b/utils/afl_network_proxy/afl-network-server.c
@@ -45,7 +45,6 @@
 
 #include <sys/wait.h>
 #include <sys/time.h>
-#include <sys/shm.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/resource.h>
@@ -53,7 +52,9 @@
 #include <netinet/ip6.h>
 #include <arpa/inet.h>
 #include <sys/mman.h>
-#include <sys/shm.h>
+#ifndef USEMMAP
+  #include <sys/shm.h>
+#endif
 #include <sys/socket.h>
 #include <netdb.h>
 
diff --git a/utils/aflpp_driver/GNUmakefile b/utils/aflpp_driver/GNUmakefile
index c1a087d7..8ac054a6 100644
--- a/utils/aflpp_driver/GNUmakefile
+++ b/utils/aflpp_driver/GNUmakefile
@@ -26,17 +26,17 @@ debug:
 	ar ru libAFLDriver.a afl-performance.o aflpp_driver.o
 
 aflpp_qemu_driver.o:	aflpp_qemu_driver.c
-	$(LLVM_BINDIR)clang $(CFLAGS) -O0 -funroll-loops -c aflpp_qemu_driver.c
+	-$(LLVM_BINDIR)clang $(CFLAGS) -O0 -funroll-loops -c aflpp_qemu_driver.c
 
 libAFLQemuDriver.a:	aflpp_qemu_driver.o
-	ar ru libAFLQemuDriver.a aflpp_qemu_driver.o
-	cp -vf libAFLQemuDriver.a ../../
+	-ar ru libAFLQemuDriver.a aflpp_qemu_driver.o
+	-cp -vf libAFLQemuDriver.a ../../
 
 aflpp_qemu_driver_hook.so:	aflpp_qemu_driver_hook.o
-	$(LLVM_BINDIR)clang -shared aflpp_qemu_driver_hook.o -o aflpp_qemu_driver_hook.so
+	-$(LLVM_BINDIR)clang -shared aflpp_qemu_driver_hook.o -o aflpp_qemu_driver_hook.so
 
 aflpp_qemu_driver_hook.o:	aflpp_qemu_driver_hook.c
-	$(LLVM_BINDIR)clang -fPIC $(CFLAGS) -funroll-loops -c aflpp_qemu_driver_hook.c
+	-$(LLVM_BINDIR)clang -fPIC $(CFLAGS) -funroll-loops -c aflpp_qemu_driver_hook.c
 
 test:	debug
 	#clang -S -emit-llvm -D_DEBUG=\"1\" -I../../include -Wl,--allow-multiple-definition -funroll-loops -o aflpp_driver_test.ll aflpp_driver_test.c
diff --git a/utils/aflpp_driver/README.md b/utils/aflpp_driver/README.md
new file mode 100644
index 00000000..01bd10c0
--- /dev/null
+++ b/utils/aflpp_driver/README.md
@@ -0,0 +1,30 @@
+# afl++ drivers
+
+## aflpp_driver
+
+aflpp_driver is used to directly compile libfuzzer `LLVMFuzzerTestOneInput()`
+targets.
+
+Just do `afl-clang-fast++ -o fuzz fuzzer_harness.cc libAFLDriver.a [plus required linking]`.
+
+You can also sneakily do this little trick: 
+If this is the clang compile command to build for libfuzzer:
+  `clang++ -o fuzz -fsanitize=fuzzer fuzzer_harness.cc -lfoo`
+then just switch `clang++` with `afl-clang-fast++` and our compiler will
+magically insert libAFLDriver.a :)
+
+
+## aflpp_qemu_driver
+
+aflpp_qemu_driver is used for libfuzzer `LLVMFuzzerTestOneInput()` targets that
+are to be fuzzed in qemu_mode. So we compile them with clang/clang++, without
+-fsanitize=fuzzer or afl-clang-fast, and link in libAFLQemuDriver.a:
+
+`clang++ -o fuzz fuzzer_harness.cc libAFLQemuDriver.a [plus required linking]`.
+
+
+Then just do (where the name of the binary is `fuzz`):
+```
+AFL_QEMU_PERSISTENT_ADDR=0x$(nm fuzz | grep "T LLVMFuzzerTestOneInput" | awk '{print $1}')
+AFL_QEMU_PERSISTENT_HOOK=/path/to/aflpp_qemu_driver_hook.so afl-fuzz -Q ... -- ./fuzz
+```
diff --git a/utils/aflpp_driver/aflpp_qemu_driver_hook.c b/utils/aflpp_driver/aflpp_qemu_driver_hook.c
index 823cc42d..d3dd98b0 100644
--- a/utils/aflpp_driver/aflpp_qemu_driver_hook.c
+++ b/utils/aflpp_driver/aflpp_qemu_driver_hook.c
@@ -1,21 +1,30 @@
+#include "../../qemu_mode/qemuafl/qemuafl/api.h"
+
 #include <stdint.h>
 #include <string.h>
 
+void afl_persistent_hook(struct x86_64_regs *regs, uint64_t guest_base,
+                         uint8_t *input_buf, uint32_t input_buf_len) {
+
 #define g2h(x) ((void *)((unsigned long)(x) + guest_base))
+#define h2g(x) ((uint64_t)(x)-guest_base)
 
-#define REGS_RDI 7
-#define REGS_RSI 6
+  // In this example the register RDI is pointing to the memory location
+  // of the target buffer, and the length of the input is in RSI.
+  // This can be seen with a debugger, e.g. gdb (and "disass main")
 
-void afl_persistent_hook(uint64_t *regs, uint64_t guest_base,
-                         uint8_t *input_buf, uint32_t input_len) {
+  memcpy(g2h(regs->rdi), input_buf, input_buf_len);
+  regs->rsi = input_buf_len;
 
-  memcpy(g2h(regs[REGS_RDI]), input_buf, input_len);
-  regs[REGS_RSI] = input_len;
+#undef g2h
+#undef h2g
 
 }
 
 int afl_persistent_hook_init(void) {
 
+  // 1 for shared memory input (faster), 0 for normal input (you have to use
+  // read(), input_buf will be NULL)
   return 1;
 
 }
diff --git a/utils/autodict_ql/autodict-ql.py b/utils/autodict_ql/autodict-ql.py
index 0fe7eabf..f64e3fae 100644
--- a/utils/autodict_ql/autodict-ql.py
+++ b/utils/autodict_ql/autodict-ql.py
@@ -11,7 +11,7 @@
 
 import os
 import string
-import binascii 
+import binascii
 import codecs
 import errno
 import struct
@@ -21,6 +21,7 @@ import subprocess
 
 from binascii import unhexlify
 
+
 def ensure_dir(dir):
     try:
         os.makedirs(dir)
@@ -28,109 +29,118 @@ def ensure_dir(dir):
         if e.errno != errno.EEXIST:
             raise
 
+
 def parse_args():
-    parser = argparse.ArgumentParser(description=(
-        "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project ---------------------------------------------------------------------------  Example usage : python2 thisfile.py outdir str.txt"    ))
-    
-    #parser.add_argument("tokenpath",
-        #help="Destination directory for tokens")
-    parser.add_argument("cur",
-            help = "Current Path")
-    parser.add_argument("db",
-            help = "CodeQL database Path")
-    parser.add_argument("tokenpath",
-            help="Destination directory for tokens")
+    parser = argparse.ArgumentParser(
+        description=(
+            "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project ---------------------------------------------------------------------------  Example usage : python2 thisfile.py outdir str.txt"
+        )
+    )
+
+    # parser.add_argument("tokenpath",
+    # help="Destination directory for tokens")
+    parser.add_argument("cur", help="Current Path")
+    parser.add_argument("db", help="CodeQL database Path")
+    parser.add_argument("tokenpath", help="Destination directory for tokens")
 
     return parser.parse_args()
 
-def static_analysis(file,file2,cur,db) :
-    with open(cur+"/"+file, "w") as f:
-        print(cur+"/"+file)
-        stream = os.popen("codeql query run " + cur +"/"+ file2 +  " -d " + db )
+
+def static_analysis(file, file2, cur, db):
+    with open(cur + "/" + file, "w") as f:
+        print(cur + "/" + file)
+        stream = os.popen("codeql query run " + cur + "/" + file2 + " -d " + db)
         output = stream.read()
         f.write(output)
         f.close()
 
-def copy_tokens(cur, tokenpath) :
-    subprocess.call(["mv " + cur  + "/" + "strcmp-strs/*" + " " + cur + "/" + tokenpath + "/."] ,shell=True)
-    subprocess.call(["mv " + cur  + "/" + "strncmp-strs/*" + " " + cur + "/" + tokenpath + "/."] ,shell=True)
-    subprocess.call(["mv " + cur  + "/" + "memcmp-strs/*" + " " + cur + "/" + tokenpath + "/."] ,shell=True)
-    subprocess.call(["mv " + cur  + "/" + "lits/*" + " " + cur + "/" + tokenpath + "/."] ,shell=True)
-    subprocess.call(["mv " + cur  + "/" + "strtool-strs/*" + " " + cur + "/" + tokenpath + "/."] ,shell=True)
-    subprocess.call(["rm -rf strcmp-strs memcmp-strs strncmp-strs lits strtool-strs"],shell=True)
-    subprocess.call(["rm *.out"],shell=True)
-    subprocess.call(["find "+tokenpath+" -size 0 -delete"],shell=True)
-
-
-
-def codeql_analysis(cur, db) :
-    static_analysis("litout.out","litool.ql", cur, db)
-    static_analysis("strcmp-strings.out","strcmp-str.ql", cur, db)
-    static_analysis("strncmp-strings.out","strncmp-str.ql", cur, db)
-    static_analysis("memcmp-strings.out","memcmp-str.ql", cur, db)
-    static_analysis("strtool-strings.out","strtool.ql", cur, db)
-    start_autodict(0,cur)
 
+def copy_tokens(cur, tokenpath):
+    subprocess.call(
+        ["mv " + cur + "/" + "strcmp-strs/*" + " " + cur + "/" + tokenpath + "/."],
+        shell=True,
+    )
+    subprocess.call(
+        ["mv " + cur + "/" + "strncmp-strs/*" + " " + cur + "/" + tokenpath + "/."],
+        shell=True,
+    )
+    subprocess.call(
+        ["mv " + cur + "/" + "memcmp-strs/*" + " " + cur + "/" + tokenpath + "/."],
+        shell=True,
+    )
+    subprocess.call(
+        ["mv " + cur + "/" + "lits/*" + " " + cur + "/" + tokenpath + "/."], shell=True
+    )
+    subprocess.call(
+        ["mv " + cur + "/" + "strtool-strs/*" + " " + cur + "/" + tokenpath + "/."],
+        shell=True,
+    )
+    subprocess.call(
+        ["rm -rf strcmp-strs memcmp-strs strncmp-strs lits strtool-strs"], shell=True
+    )
+    subprocess.call(["rm *.out"], shell=True)
+    subprocess.call(["find " + tokenpath + " -size 0 -delete"], shell=True)
+
+
+def codeql_analysis(cur, db):
+    static_analysis("litout.out", "litool.ql", cur, db)
+    static_analysis("strcmp-strings.out", "strcmp-str.ql", cur, db)
+    static_analysis("strncmp-strings.out", "strncmp-str.ql", cur, db)
+    static_analysis("memcmp-strings.out", "memcmp-str.ql", cur, db)
+    static_analysis("strtool-strings.out", "strtool.ql", cur, db)
+    start_autodict(0, cur)
 
 
 def start_autodict(tokenpath, cur):
-    command = [
-           'python3',
-           cur + '/litan.py',
-           cur+'/lits/',
-           cur+'/litout.out'
-        ]
+    command = ["python3", cur + "/litan.py", cur + "/lits/", cur + "/litout.out"]
     worker1 = subprocess.Popen(command)
     print(worker1.communicate())
-    
+
     command1 = [
-           'python3',
-           cur + '/strcmp-strings.py',
-           cur + '/strcmp-strs/',
-           cur + '/strcmp-strings.out'
-        ]
+        "python3",
+        cur + "/strcmp-strings.py",
+        cur + "/strcmp-strs/",
+        cur + "/strcmp-strings.out",
+    ]
     worker2 = subprocess.Popen(command1)
     print(worker2.communicate())
 
     command2 = [
-           'python3',
-           cur + '/strncmp-strings.py',
-           cur + '/strncmp-strs/',
-           cur + '/strncmp-strings.out'
-        ]
+        "python3",
+        cur + "/strncmp-strings.py",
+        cur + "/strncmp-strs/",
+        cur + "/strncmp-strings.out",
+    ]
     worker3 = subprocess.Popen(command2)
     print(worker3.communicate())
 
-
-
     command5 = [
-           'python3',
-           cur + '/memcmp-strings.py',
-           cur + '/memcmp-strs/',
-           cur + '/memcmp-strings.out'
-        ]
+        "python3",
+        cur + "/memcmp-strings.py",
+        cur + "/memcmp-strs/",
+        cur + "/memcmp-strings.out",
+    ]
     worker6 = subprocess.Popen(command5)
     print(worker6.communicate())
 
-
-
     command8 = [
-           'python3',
-           cur + '/stan-strings.py',
-           cur + '/strtool-strs/',
-           cur + '/strtool-strings.out'
-        ]
+        "python3",
+        cur + "/stan-strings.py",
+        cur + "/strtool-strs/",
+        cur + "/strtool-strings.out",
+    ]
     worker9 = subprocess.Popen(command8)
     print(worker9.communicate())
 
 
-
 def main():
-    args = parse_args()    
+    args = parse_args()
     ensure_dir(args.tokenpath)
-    #copy_tokens(args.cur, args.tokenpath)
+    # copy_tokens(args.cur, args.tokenpath)
     codeql_analysis(args.cur, args.db)
     copy_tokens(args.cur, args.tokenpath)
-    #start_autodict(args.tokenpath, args.cur)
-if __name__ == '__main__':
-    main()
\ No newline at end of file
+    # start_autodict(args.tokenpath, args.cur)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/utils/autodict_ql/litan.py b/utils/autodict_ql/litan.py
index 18c04c34..7033d363 100644
--- a/utils/autodict_ql/litan.py
+++ b/utils/autodict_ql/litan.py
@@ -4,7 +4,7 @@
 # Author : Microsvuln - Arash.vre@gmail.com
 import string
 import os
-import binascii 
+import binascii
 import codecs
 import struct
 import errno
@@ -12,75 +12,101 @@ import argparse
 import re
 import base64
 from binascii import unhexlify
+
+
 def parse_args():
-    parser = argparse.ArgumentParser(description=(
-        "Helper - Specify input file to analysis and output folder to save corpdirus for constants in the overall project -------  Example usage : python2 thisfile.py outdir o.txt"))
-    parser.add_argument("corpdir",
-        help="The path to the corpus directory to generate files.")
-    parser.add_argument("infile",
-        help="Specify file output of codeql analysis - ex. ooo-hex.txt, analysis take place on this file, example : python2 thisfile.py outdir out.txt")
-    return parser.parse_args()    
+    parser = argparse.ArgumentParser(
+        description=(
+            "Helper - Specify input file to analysis and output folder to save corpdirus for constants in the overall project -------  Example usage : python2 thisfile.py outdir o.txt"
+        )
+    )
+    parser.add_argument(
+        "corpdir", help="The path to the corpus directory to generate files."
+    )
+    parser.add_argument(
+        "infile",
+        help="Specify file output of codeql analysis - ex. ooo-hex.txt, analysis take place on this file, example : python2 thisfile.py outdir out.txt",
+    )
+    return parser.parse_args()
+
+
 def ensure_dir(dir):
     try:
         os.makedirs(dir)
     except OSError as e:
         if e.errno == errno.EEXIST:
-            #print "[-] Directory exists, specify another directory"
+            # print "[-] Directory exists, specify another directory"
             exit(1)
+
+
 def do_analysis1(corpdir, infile):
-    with open(infile, "rb") as f:        
-        lines = f.readlines()[1:]       
-        f.close()       
+    with open(infile, "rb") as f:
+        lines = f.readlines()[1:]
+        f.close()
         new_lst = []
         n = 1
         for i, num in enumerate(lines):
             if i != 0:
-                new_lst.append(num)                               
+                new_lst.append(num)
                 str1 = str(num)
-                print ("num is " + str1)
-                str1 = str1.rstrip('\n\n')
-                #str1 = str1.replace("0x","");
-                str1 = str1.replace("|","")
-                str1 = str1.rstrip('\r\n')
-                str1 = str1.rstrip('\n')
-                str1 = str1.replace(" ","")
-                    #str1 = str1.translate(None, string.punctuation)
-                translator=str.maketrans('','',string.punctuation)
-                str1=str1.translate(translator)
+                print("num is " + str1)
+                str1 = str1.rstrip("\n\n")
+                # str1 = str1.replace("0x","");
+                str1 = str1.replace("|", "")
+                str1 = str1.rstrip("\r\n")
+                str1 = str1.rstrip("\n")
+                str1 = str1.replace(" ", "")
+                # str1 = str1.translate(None, string.punctuation)
+                translator = str.maketrans("", "", string.punctuation)
+                str1 = str1.translate(translator)
                 str1 = str1[1:]
                 str1 = str1[:-1]
                 print("After cleanup : " + str1)
-                if (str1 != '0') and (str1 != 'ffffffff') and (str1 != 'fffffffe') or (len(str1) == 4) or (len(str1) == 8):
-                    print ("first : "+str1)
-                    if len(str1) > 8 :
+                if (
+                    (str1 != "0")
+                    and (str1 != "ffffffff")
+                    and (str1 != "fffffffe")
+                    or (len(str1) == 4)
+                    or (len(str1) == 8)
+                ):
+                    print("first : " + str1)
+                    if len(str1) > 8:
                         str1 = str1[:-1]
-                    elif (len(str1) == 5) :
+                    elif len(str1) == 5:
                         str1 = str1 = "0"
                     try:
-                            #str1 = str1.decode("hex")
-                            with open(corpdir+'/lit-seed{0}'.format(n), 'w') as file:                    
-                                    str1 = str1.replace("0x","");
-                                    print (str1)                                    
-                                    str1 = int(str1,base=16)                                    
-                                    str1 = str1.to_bytes(4, byteorder='little')                                                          
-                                    file.write(str(str1))                                    
-                                    file.close()
-                                    with open (corpdir+'/lit-seed{0}'.format(n), 'r') as q :
-                                        a = q.readline()                                        
-                                        a = a[1:]
-                                        print ("AFL++ Autodict-QL by Microsvuln : Writing Token :" + str(a))
-                                        q.close()
-                                        with open (corpdir+'/lit-seed{0}'.format(n), 'w') as w1 :
-                                                w1.write(str(a))
-                                                print ("Done!")
-                                                w1.close()                                                                                
-                    except:                                 
-                            print("Error!") 
-                    n = n+1
+                        # str1 = str1.decode("hex")
+                        with open(corpdir + "/lit-seed{0}".format(n), "w") as file:
+                            str1 = str1.replace("0x", "")
+                            print(str1)
+                            str1 = int(str1, base=16)
+                            str1 = str1.to_bytes(4, byteorder="little")
+                            file.write(str(str1))
+                            file.close()
+                            with open(corpdir + "/lit-seed{0}".format(n), "r") as q:
+                                a = q.readline()
+                                a = a[1:]
+                                print(
+                                    "AFL++ Autodict-QL by Microsvuln : Writing Token :"
+                                    + str(a)
+                                )
+                                q.close()
+                                with open(
+                                    corpdir + "/lit-seed{0}".format(n), "w"
+                                ) as w1:
+                                    w1.write(str(a))
+                                    print("Done!")
+                                    w1.close()
+                    except:
+                        print("Error!")
+                    n = n + 1
+
 
 def main():
-    args = parse_args()    
+    args = parse_args()
     ensure_dir(args.corpdir)
     do_analysis1(args.corpdir, args.infile)
-if __name__ == '__main__':
-    main()
\ No newline at end of file
+
+
+if __name__ == "__main__":
+    main()
diff --git a/utils/autodict_ql/memcmp-strings.py b/utils/autodict_ql/memcmp-strings.py
index d1047caa..270a697c 100644
--- a/utils/autodict_ql/memcmp-strings.py
+++ b/utils/autodict_ql/memcmp-strings.py
@@ -5,7 +5,7 @@
 
 import os
 import string
-import binascii 
+import binascii
 import codecs
 import errno
 import struct
@@ -13,6 +13,7 @@ import argparse
 import re
 from binascii import unhexlify
 
+
 def ensure_dir(dir):
     try:
         os.makedirs(dir)
@@ -20,44 +21,63 @@ def ensure_dir(dir):
         if e.errno != errno.EEXIST:
             raise
 
+
 def parse_args():
-    parser = argparse.ArgumentParser(description=(
-        "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project ---------------------------------------------------------------------------  Example usage : python2 thisfile.py outdir str.txt"    ))
-    parser.add_argument("corpdir",
-        help="The path to the corpus directory to generate strings.")
-    parser.add_argument("infile",
-        help="Specify file output of codeql analysis - ex. ooo-atr.txt, analysis take place on this file, example : python2 thisfile.py outdir strings.txt")
+    parser = argparse.ArgumentParser(
+        description=(
+            "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project ---------------------------------------------------------------------------  Example usage : python2 thisfile.py outdir str.txt"
+        )
+    )
+    parser.add_argument(
+        "corpdir", help="The path to the corpus directory to generate strings."
+    )
+    parser.add_argument(
+        "infile",
+        help="Specify file output of codeql analysis - ex. ooo-atr.txt, analysis take place on this file, example : python2 thisfile.py outdir strings.txt",
+    )
 
     return parser.parse_args()
 
 
 def do_string_analysis(corpdir, infile1):
-    with open(infile1, "r") as f1:        
-        lines = f1.readlines()[1:]       
-        f1.close()       
+    with open(infile1, "r") as f1:
+        lines = f1.readlines()[1:]
+        f1.close()
         new_lst1 = []
         n = 1
         for i, num1 in enumerate(lines):
             if i != 0:
                 new_lst1.append(num1)
-                #print("num : %s" % num1)
+                # print("num : %s" % num1)
                 str11 = str(num1)
-                str11 = str11.replace("|","")
-                str11 = str11.replace("\n","")
+                str11 = str11.replace("|", "")
+                str11 = str11.replace("\n", "")
                 str11 = str11.lstrip()
                 str11 = str11.rstrip()
                 str11 = str(str11)
-                if ((" " in str11 ) or (")" in str11) or ("(" in str11) or ("<" in str11) or (">" in str11)) :
+                if (
+                    (" " in str11)
+                    or (")" in str11)
+                    or ("(" in str11)
+                    or ("<" in str11)
+                    or (">" in str11)
+                ):
                     print("Space / Paranthesis String : %s" % str11)
-                else :
-                    with open(corpdir+'/memcmp-str{0}'.format(n), 'w') as file:                    
-                            file.write(str11)
-                            print("AFL++ Autodict-QL by Microsvuln : Writing Token : %s" % str11)
-                            n=n+1
+                else:
+                    with open(corpdir + "/memcmp-str{0}".format(n), "w") as file:
+                        file.write(str11)
+                        print(
+                            "AFL++ Autodict-QL by Microsvuln : Writing Token : %s"
+                            % str11
+                        )
+                        n = n + 1
+
 
 def main():
-    args = parse_args()    
+    args = parse_args()
     ensure_dir(args.corpdir)
     do_string_analysis(args.corpdir, args.infile)
-if __name__ == '__main__':
-    main()
\ No newline at end of file
+
+
+if __name__ == "__main__":
+    main()
diff --git a/utils/autodict_ql/readme.md b/utils/autodict_ql/readme.md
index 3402a210..9170f552 100644
--- a/utils/autodict_ql/readme.md
+++ b/utils/autodict_ql/readme.md
@@ -2,13 +2,13 @@
 
 ## What is this?
 
-`Autodict-QL` is a plugin system that enables fast generation of Tokens/Dictionaries in a handy way that can be manipulated by the user (Unlike The LLVM Passes that are hard to modify). This means that autodict-ql is a scriptable feature which basically uses the CodeQL (A powerful semantic code analysis engine) to fetch information from a code base.
+`Autodict-QL` is a plugin system that enables fast generation of Tokens/Dictionaries in a handy way that can be manipulated by the user (unlike The LLVM Passes that are hard to modify). This means that autodict-ql is a scriptable feature which basically uses CodeQL (a powerful semantic code analysis engine) to fetch information from a code base.
 
-Tokens are useful when you perform fuzzing on different parsers. AFL++ `-x` switch enables the usage of dictionaries through your fuzzing campagin. if you are not familiar with Dictionaries in fuzzing, take a look [here](https://github.com/AFLplusplus/AFLplusplus/tree/stable/dictionaries) .
+Tokens are useful when you perform fuzzing on different parsers. The AFL++ `-x` switch enables the usage of dictionaries through your fuzzing campaign. If you are not familiar with Dictionaries in fuzzing, take a look [here](https://github.com/AFLplusplus/AFLplusplus/tree/stable/dictionaries) .
 
 
 ## Why CodeQL ?
-We basically developed this plugin on top of CodeQL engine because it gives the user scripting features, it's easier and it's independent of the LLVM system. This means that a user can write his CodeQL scripts or modify the current scripts to improve or change the token generation algorithms based on different program analysis concepts.
+We basically developed this plugin on top of the CodeQL engine because it gives the user scripting features, it's easier and it's independent of the LLVM system. This means that a user can write his CodeQL scripts or modify the current scripts to improve or change the token generation algorithms based on different program analysis concepts.
 
 
 ## CodeQL scripts
@@ -16,7 +16,7 @@ Currently, we pushed some scripts as defaults for Token generation. In addition,
 
 Currently, we provide the following CodeQL scripts:
 
-`strcmp-str.ql` is used to extract strings that are related to `strcmp` function.
+`strcmp-str.ql` is used to extract strings that are related to the `strcmp` function.
 
 `strncmp-str.ql` is used to extract the strings from the `strncmp` function.
 
@@ -24,18 +24,18 @@ Currently we provided the following CodeQL scripts :
 
 `litool.ql` extracts Magic numbers as Hexadecimal format.
 
-`strtool.ql` extracts strings with uses of a regex and dataflow concept to capture the string comparison functions. if strcmp is rewritten in a project as Mystrcmp or something like strmycmp, then this script can catch the arguments and these are valuable tokens.
+`strtool.ql` extracts strings with uses of a regex and dataflow concept to capture the string comparison functions. If `strcmp` is rewritten in a project as Mystrcmp or something like strmycmp, then this script can catch the arguments and these are valuable tokens.
 
 You can write other CodeQL scripts to extract possible effective tokens if you think they can be useful.
 
 
 ## Usage
 
-Before proceed to installation make sure that you have the following packages by installing them :
+Before you proceed to the installation, make sure that you have the following packages installed:
 ```shell
 sudo apt install build-essential libtool-bin python3-dev python3 automake git vim wget -y
 ```
-The usage of Autodict-QL is pretty easy. But let's describe it as :
+The usage of Autodict-QL is pretty easy. But let's describe it as:
 
 1. First of all, you need to have CodeQL installed on the system. We make this possible with the `build-codeql.sh` bash script. This script will install CodeQL completely and will set the required environment variables for your system.
 Do the following :
@@ -45,7 +45,7 @@ Do the following :
 # source ~/.bashrc
 # codeql 
 ```
-Then you should get :
+Then you should get:
 
 ```shell
 Usage: codeql <command> <argument>...
@@ -73,29 +73,28 @@ Commands:
   github    Commands useful for interacting with the GitHub API through CodeQL.
 ```
 
-2. Compile your project with CodeQL: For using the Autodict-QL plugin, you need to compile the source of the target you want to fuzz with CodeQL. This is not something hard .
-	- First you need to create a CodeQL database of the project codebase, suppose we want to compile the libxml with codeql. go to libxml and issue the following commands:
+2. Compile your project with CodeQL: For using the Autodict-QL plugin, you need to compile the source of the target you want to fuzz with CodeQL. This is not something hard.
+	- First you need to create a CodeQL database of the project codebase, suppose we want to compile `libxml` with codeql. Go to libxml and issue the following commands:
 		- `./configure --disable-shared`
 		- `codeql create database libxml-db --language=cpp --command=make`
 			- Now you have the CodeQL database of the project :-)
-3. The final step is to update the CodeQL database you created in the step 2 (Suppose we are in `aflplusplus/utils/autodict_ql/` directory) :
+3. The final step is to update the CodeQL database you created in step 2 (Suppose we are in `aflplusplus/utils/autodict_ql/` directory):
 	- `codeql database upgrade /home/user/libxml/libxml-db`
-4. Everything is set! Now you should issue the following to get the tokens :
+4. Everything is set! Now you should issue the following to get the tokens:
 	- `python3 autodict-ql.py [CURRENT_DIR] [CODEQL_DATABASE_PATH] [TOKEN_PATH]`
 		- example : `python3 /home/user/AFLplusplus/utils/autodict_ql/autodict-ql.py $PWD /home/user/libxml/libxml-db tokens`
-			- This will create the final `tokens` dir for you and you are done, then pass the tokens path to afl `-x` flag.
+			- This will create the final `tokens` dir for you and you are done, then pass the tokens path to AFL++'s `-x` flag.
 5. Done! 
 
 
 ## More on dictionaries and tokens
-Core developer of the AFL++ project Marc Heuse also developed a similar tool named `dict2file` which is a LLVM pass which can automatically extracts useful tokens, in addition with LTO instrumentation mode, this dict2file is automatically generating tokens. `Autodict-QL` plugin gives you scripting capability and you can do whatever you want to extract from the Codebase and it's up to you. in addition it's independent from LLVM system.
-On the other hand, you can also use Google dictionaries which have been made public in May 2020, but the problem of using Google dictionaries is that they are limited to specific file format and speicifications. for example, for testing binutils and ELF file format or AVI in FFMPEG, there are no prebuilt dictionary, so it is highly recommended to use `Autodict-QL` or `Dict2File` features to automatically generating dictionaries based on the target.
+Core developer of the AFL++ project Marc Heuse also developed a similar tool named `dict2file`, which is an LLVM pass that can automatically extract useful tokens; in addition, with LTO instrumentation mode, dict2file generates tokens automatically. The `Autodict-QL` plugin gives you scripting capability and you can do whatever you want to extract from the codebase and it's up to you. In addition, it's independent from the LLVM system.
+On the other hand, you can also use Google dictionaries which have been made public in May 2020, but the problem of using Google dictionaries is that they are limited to specific file formats and specifications. For example, for testing binutils and ELF file format or AVI in FFMPEG, there are no prebuilt dictionaries, so it is highly recommended to use `Autodict-QL` or `Dict2File` features to automatically generate dictionaries based on the target.
 
-I've personally prefer to use `Autodict-QL` or `dict2file` rather than Google dictionaries or any other manully generated dictionaries as `Autodict-QL` and `dict2file` are working based on the target.
+I personally prefer to use `Autodict-QL` or `dict2file` rather than Google dictionaries or any other manually generated dictionaries, as `Autodict-QL` and `dict2file` work based on the target.
 Overall, fuzzing with dictionaries and well-generated tokens will give better results.
 
 There are 2 important points to remember :
 
-- If you combine `Autodict-QL` with AFL++ cmplog, you will get much better code coverage and hence better chance to discover new bugs.
-- Do not remember to set the `AFL_MAX_DET_EXTRAS` to the number of generated dictionaries, if you forget to set this environment variable, then AFL++ use just 200 tokens and use the rest of them probablistically. So this will guarantees that your tokens will be used by AFL++.
-
+- If you combine `Autodict-QL` with AFL++ cmplog, you will get much better code coverage and hence better chances to discover new bugs.
+- Do not forget to set `AFL_MAX_DET_EXTRAS` at least to the number of generated dictionaries. If you forget to set this environment variable, then AFL++ uses just 200 tokens and use the rest of them only probabilistically. So this will guarantee that your tokens will be used by AFL++.
diff --git a/utils/autodict_ql/stan-strings.py b/utils/autodict_ql/stan-strings.py
index 65d08c97..81cb0b97 100644
--- a/utils/autodict_ql/stan-strings.py
+++ b/utils/autodict_ql/stan-strings.py
@@ -5,7 +5,7 @@
 
 import os
 import string
-import binascii 
+import binascii
 import codecs
 import errno
 import struct
@@ -13,6 +13,7 @@ import argparse
 import re
 from binascii import unhexlify
 
+
 def ensure_dir(dir):
     try:
         os.makedirs(dir)
@@ -20,44 +21,63 @@ def ensure_dir(dir):
         if e.errno != errno.EEXIST:
             raise
 
+
 def parse_args():
-    parser = argparse.ArgumentParser(description=(
-        "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project ---------------------------------------------------------------------------  Example usage : python2 thisfile.py outdir str.txt"    ))
-    parser.add_argument("corpdir",
-        help="The path to the corpus directory to generate strings.")
-    parser.add_argument("infile",
-        help="Specify file output of codeql analysis - ex. ooo-atr.txt, analysis take place on this file, example : python2 thisfile.py outdir strings.txt")
+    parser = argparse.ArgumentParser(
+        description=(
+            "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project ---------------------------------------------------------------------------  Example usage : python2 thisfile.py outdir str.txt"
+        )
+    )
+    parser.add_argument(
+        "corpdir", help="The path to the corpus directory to generate strings."
+    )
+    parser.add_argument(
+        "infile",
+        help="Specify file output of codeql analysis - ex. ooo-atr.txt, analysis take place on this file, example : python2 thisfile.py outdir strings.txt",
+    )
 
     return parser.parse_args()
 
 
 def do_string_analysis(corpdir, infile1):
-    with open(infile1, "r") as f1:        
-        lines = f1.readlines()[1:]       
-        f1.close()       
+    with open(infile1, "r") as f1:
+        lines = f1.readlines()[1:]
+        f1.close()
         new_lst1 = []
         n = 1
         for i, num1 in enumerate(lines):
             if i != 0:
                 new_lst1.append(num1)
-                #print("num : %s" % num1)
+                # print("num : %s" % num1)
                 str11 = str(num1)
-                str11 = str11.replace("|","")
-                str11 = str11.replace("\n","")
+                str11 = str11.replace("|", "")
+                str11 = str11.replace("\n", "")
                 str11 = str11.lstrip()
                 str11 = str11.rstrip()
                 str11 = str(str11)
-                if ((" " in str11 ) or (")" in str11) or ("(" in str11) or ("<" in str11) or (">" in str11)) :
+                if (
+                    (" " in str11)
+                    or (")" in str11)
+                    or ("(" in str11)
+                    or ("<" in str11)
+                    or (">" in str11)
+                ):
                     print("Space / Paranthesis String : %s" % str11)
-                else :
-                    with open(corpdir+'/seed-str{0}'.format(n), 'w') as file:                    
-                            file.write(str11)
-                            print("AFL++ Autodict-QL by Microsvuln : Writing Token : %s" % str11)
-                            n=n+1
+                else:
+                    with open(corpdir + "/seed-str{0}".format(n), "w") as file:
+                        file.write(str11)
+                        print(
+                            "AFL++ Autodict-QL by Microsvuln : Writing Token : %s"
+                            % str11
+                        )
+                        n = n + 1
+
 
 def main():
-    args = parse_args()    
+    args = parse_args()
     ensure_dir(args.corpdir)
     do_string_analysis(args.corpdir, args.infile)
-if __name__ == '__main__':
-    main()
\ No newline at end of file
+
+
+if __name__ == "__main__":
+    main()
diff --git a/utils/autodict_ql/strcmp-strings.py b/utils/autodict_ql/strcmp-strings.py
index 88128dbb..9c2520c9 100644
--- a/utils/autodict_ql/strcmp-strings.py
+++ b/utils/autodict_ql/strcmp-strings.py
@@ -5,7 +5,7 @@
 
 import os
 import string
-import binascii 
+import binascii
 import codecs
 import errno
 import struct
@@ -13,6 +13,7 @@ import argparse
 import re
 from binascii import unhexlify
 
+
 def ensure_dir(dir):
     try:
         os.makedirs(dir)
@@ -20,44 +21,63 @@ def ensure_dir(dir):
         if e.errno != errno.EEXIST:
             raise
 
+
 def parse_args():
-    parser = argparse.ArgumentParser(description=(
-        "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project ---------------------------------------------------------------------------  Example usage : python2 thisfile.py outdir str.txt"    ))
-    parser.add_argument("corpdir",
-        help="The path to the corpus directory to generate strings.")
-    parser.add_argument("infile",
-        help="Specify file output of codeql analysis - ex. ooo-atr.txt, analysis take place on this file, example : python2 thisfile.py outdir strings.txt")
+    parser = argparse.ArgumentParser(
+        description=(
+            "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project ---------------------------------------------------------------------------  Example usage : python2 thisfile.py outdir str.txt"
+        )
+    )
+    parser.add_argument(
+        "corpdir", help="The path to the corpus directory to generate strings."
+    )
+    parser.add_argument(
+        "infile",
+        help="Specify file output of codeql analysis - ex. ooo-atr.txt, analysis take place on this file, example : python2 thisfile.py outdir strings.txt",
+    )
 
     return parser.parse_args()
 
 
 def do_string_analysis(corpdir, infile1):
-    with open(infile1, "r") as f1:        
-        lines = f1.readlines()[1:]       
-        f1.close()       
+    with open(infile1, "r") as f1:
+        lines = f1.readlines()[1:]
+        f1.close()
         new_lst1 = []
         n = 1
         for i, num1 in enumerate(lines):
             if i != 0:
                 new_lst1.append(num1)
-                #print("num : %s" % num1)
+                # print("num : %s" % num1)
                 str11 = str(num1)
-                str11 = str11.replace("|","")
-                str11 = str11.replace("\n","")
+                str11 = str11.replace("|", "")
+                str11 = str11.replace("\n", "")
                 str11 = str11.lstrip()
                 str11 = str11.rstrip()
                 str11 = str(str11)
-                if ((" " in str11 ) or (")" in str11) or ("(" in str11) or ("<" in str11) or (">" in str11)) :
+                if (
+                    (" " in str11)
+                    or (")" in str11)
+                    or ("(" in str11)
+                    or ("<" in str11)
+                    or (">" in str11)
+                ):
                     print("Space / Paranthesis String : %s" % str11)
-                else :
-                    with open(corpdir+'/strcmp-str{0}'.format(n), 'w') as file:                    
-                            file.write(str11)
-                            print("AFL++ Autodict-QL by Microsvuln : Writing Token : %s" % str11)
-                            n=n+1
+                else:
+                    with open(corpdir + "/strcmp-str{0}".format(n), "w") as file:
+                        file.write(str11)
+                        print(
+                            "AFL++ Autodict-QL by Microsvuln : Writing Token : %s"
+                            % str11
+                        )
+                        n = n + 1
+
 
 def main():
-    args = parse_args()    
+    args = parse_args()
     ensure_dir(args.corpdir)
     do_string_analysis(args.corpdir, args.infile)
-if __name__ == '__main__':
-    main()
\ No newline at end of file
+
+
+if __name__ == "__main__":
+    main()
diff --git a/utils/autodict_ql/strncmp-strings.py b/utils/autodict_ql/strncmp-strings.py
index 0ad0e697..6206b4c4 100644
--- a/utils/autodict_ql/strncmp-strings.py
+++ b/utils/autodict_ql/strncmp-strings.py
@@ -5,7 +5,7 @@
 
 import os
 import string
-import binascii 
+import binascii
 import codecs
 import errno
 import struct
@@ -13,6 +13,7 @@ import argparse
 import re
 from binascii import unhexlify
 
+
 def ensure_dir(dir):
     try:
         os.makedirs(dir)
@@ -20,44 +21,63 @@ def ensure_dir(dir):
         if e.errno != errno.EEXIST:
             raise
 
+
 def parse_args():
-    parser = argparse.ArgumentParser(description=(
-        "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project ---------------------------------------------------------------------------  Example usage : python2 thisfile.py outdir str.txt"    ))
-    parser.add_argument("corpdir",
-        help="The path to the corpus directory to generate strings.")
-    parser.add_argument("infile",
-        help="Specify file output of codeql analysis - ex. ooo-atr.txt, analysis take place on this file, example : python2 thisfile.py outdir strings.txt")
+    parser = argparse.ArgumentParser(
+        description=(
+            "Helper - Specify input file analysis and output folder to save corpus for strings in the overall project ---------------------------------------------------------------------------  Example usage : python2 thisfile.py outdir str.txt"
+        )
+    )
+    parser.add_argument(
+        "corpdir", help="The path to the corpus directory to generate strings."
+    )
+    parser.add_argument(
+        "infile",
+        help="Specify file output of codeql analysis - ex. ooo-atr.txt, analysis take place on this file, example : python2 thisfile.py outdir strings.txt",
+    )
 
     return parser.parse_args()
 
 
 def do_string_analysis(corpdir, infile1):
-    with open(infile1, "r") as f1:        
-        lines = f1.readlines()[1:]       
-        f1.close()       
+    with open(infile1, "r") as f1:
+        lines = f1.readlines()[1:]
+        f1.close()
         new_lst1 = []
         n = 1
         for i, num1 in enumerate(lines):
             if i != 0:
                 new_lst1.append(num1)
-                #print("num : %s" % num1)
+                # print("num : %s" % num1)
                 str11 = str(num1)
-                str11 = str11.replace("|","")
-                str11 = str11.replace("\n","")
+                str11 = str11.replace("|", "")
+                str11 = str11.replace("\n", "")
                 str11 = str11.lstrip()
                 str11 = str11.rstrip()
                 str11 = str(str11)
-                if ((" " in str11 ) or (")" in str11) or ("(" in str11) or ("<" in str11) or (">" in str11)) :
+                if (
+                    (" " in str11)
+                    or (")" in str11)
+                    or ("(" in str11)
+                    or ("<" in str11)
+                    or (">" in str11)
+                ):
                     print("Space / Paranthesis String : %s" % str11)
-                else :
-                    with open(corpdir+'/strncmp-str{0}'.format(n), 'w') as file:                    
-                            file.write(str11)
-                            print("AFL++ Autodict-QL by Microsvuln : Writing Token : %s" % str11)
-                            n=n+1
+                else:
+                    with open(corpdir + "/strncmp-str{0}".format(n), "w") as file:
+                        file.write(str11)
+                        print(
+                            "AFL++ Autodict-QL by Microsvuln : Writing Token : %s"
+                            % str11
+                        )
+                        n = n + 1
+
 
 def main():
-    args = parse_args()    
+    args = parse_args()
     ensure_dir(args.corpdir)
     do_string_analysis(args.corpdir, args.infile)
-if __name__ == '__main__':
-    main()
\ No newline at end of file
+
+
+if __name__ == "__main__":
+    main()
diff --git a/utils/qemu_persistent_hook/read_into_rdi.c b/utils/qemu_persistent_hook/read_into_rdi.c
index f4a8ae59..c1c6642f 100644
--- a/utils/qemu_persistent_hook/read_into_rdi.c
+++ b/utils/qemu_persistent_hook/read_into_rdi.c
@@ -5,7 +5,7 @@
 
 void afl_persistent_hook(struct x86_64_regs *regs, uint64_t guest_base,
                          uint8_t *input_buf, uint32_t input_buf_len) {
-\
+
 #define g2h(x) ((void *)((unsigned long)(x) + guest_base))
 #define h2g(x) ((uint64_t)(x)-guest_base)