-rw-r--r--  Makefile                              |   4
-rw-r--r--  README.md                             |   7
-rw-r--r--  TODO                                  |  35
-rw-r--r--  afl-common.h                          |   1
-rw-r--r--  afl-fuzz.c                            | 694
-rw-r--r--  docs/ChangeLog                        |   3
-rw-r--r--  docs/env_variables.txt                |   8
-rw-r--r--  llvm_mode/Makefile                    |  12
-rw-r--r--  llvm_mode/MarkNodes.cc                |   2
-rw-r--r--  llvm_mode/README.llvm                 |   3
-rw-r--r--  llvm_mode/split-compares-pass.so.cc   |   6
-rw-r--r--  qemu_mode/README.qemu                 |  31
-rw-r--r--  qemu_mode/patches/syscall.diff        |   5
-rw-r--r--  test-instr.c                          |   7
14 files changed, 431 insertions, 387 deletions
diff --git a/Makefile b/Makefile
index 9e92de81..e6e3af85 100644
--- a/Makefile
+++ b/Makefile
@@ -158,10 +158,10 @@ ifndef AFL_NO_X86
 test_build: afl-gcc afl-as afl-showmap
 	@echo "[*] Testing the CC wrapper and instrumentation output..."
 	unset AFL_USE_ASAN AFL_USE_MSAN AFL_CC; AFL_QUIET=1 AFL_INST_RATIO=100 AFL_PATH=. ./$(TEST_CC) $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS)
-	./afl-showmap -m none -q -o .test-instr0 ./test-instr </dev/null
+	./afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null
 	echo 1 | ./afl-showmap -m none -q -o .test-instr1 ./test-instr
 	@rm -f test-instr
-	@cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please ping <lcamtuf@google.com> to troubleshoot the issue."; echo; exit 1; fi
+	@cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please post to https://github.com/vanhauser-thc/AFLplusplus/issues to troubleshoot the issue."; echo; exit 1; fi
 	@echo "[+] All right, the instrumentation seems to be working!"
 
 else
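
The check above hinges on test-instr taking different paths for an empty input (</dev/null) and for "1" on stdin, so the two afl-showmap dumps must differ; cmp -s exiting 0 (identical maps) therefore means the instrumentation failed to record the difference. A minimal C sketch of that shape (an assumed stand-in, not the actual test-instr.c):

    #include <stdio.h>

    int main(void) {

      int c = getchar();                          /* first byte of stdin        */

      if (c == EOF)       printf("no input\n");   /* edge hit by </dev/null     */
      else if (c == '1')  printf("a one\n");      /* edge hit by `echo 1 | ...` */
      else                printf("other\n");      /* neither test case hits it  */

      return 0;

    }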
diff --git a/README.md b/README.md
index 2124b862..dff6463b 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@
 # american fuzzy lop plus plus (afl++)
 
   Release Version: 2.53c 
+
   Github Version: 2.53d
 
 
@@ -17,7 +18,7 @@
   get any improvements since November 2017.
 
   Among other things, afl++ has a more performant llvm_mode, supporting
-  llvm up to version 8, Qemu 3.1, more speed and crashfixes for Qemu,
+  llvm up to version 9, Qemu 3.1, more speed and crash fixes for Qemu,
   laf-intel feature for Qemu (with libcompcov) and more.
 
   Additionally the following patches have been integrated:
@@ -120,7 +121,7 @@ superior to blind fuzzing or coverage-only tools.
 PLEASE NOTE: llvm_mode compilation with afl-clang-fast/afl-clang-fast++
 instead of afl-gcc/afl-g++ is much faster and has a few cool features.
 See llvm_mode/ - note however that some code does not compile with llvm.
-We support llvm versions 4.0 to 8.
+We support llvm versions 3.8.0 to 9.
 
 When source code is available, instrumentation can be injected by a companion
 tool that works as a drop-in replacement for gcc or clang in any standard build
@@ -143,7 +144,7 @@ For C++ programs, you'd also want to set `CXX=/path/to/afl/afl-g++`.
 The clang wrappers (afl-clang and afl-clang++) can be used in the same way;
 clang users may also opt to leverage a higher-performance instrumentation mode,
 as described in [llvm_mode/README.llvm](llvm_mode/README.llvm).
-Clang/LLVM has a much better performance and works with LLVM version 4.0 to 8.
+Clang/LLVM has much better performance and works with LLVM versions 3.8.0 to 9.
 
 Using the LAF Intel performance enhancements is also recommended, see
 [llvm_mode/README.laf-intel](llvm_mode/README.laf-intel)
diff --git a/TODO b/TODO
index 3d1e444d..42987cb9 100644
--- a/TODO
+++ b/TODO
@@ -1,6 +1,6 @@
 Roadmap 2.53d:
 ==============
- - indent all the code: clang-format -style=Google
+ - indent all the code: .clang-format
 
  - update docs/sister_projects.txt
 
@@ -18,16 +18,41 @@ gcc_plugin:
 
 qemu_mode:
  - deferred mode with AFL_DEFERRED_QEMU=0xaddress
+   @andrea - don't we have that already with AFL_ENTRYPOINT?
 
 unit testing / or large testcase campaign
 
 
 Roadmap 2.54d:
 ==============
- - expand MAP size to 256k (current L2 cache size on processors)
-   -> 18 bit map
- - llvm_mode: dynamic map size and collission free basic block IDs
-
+Problem: Average targets (tiff, jpeg, unrar) go through 1500 edges.
+         At afl's default map that means ~16 collisions and ~3 wrappings.
+  Solution #1: increase map size.
+    every +1 map-size bit decreases fuzzing speed by ~10% and halves the collisions
+    the birthday paradox predicts a first collision at about this # of edges (see the sketch after this hunk):
+	2^16 = 302
+	2^17 = 427
+	2^18 = 603
+	2^19 = 853
+	2^20 = 1207
+	2^21 = 1706
+	2^22 = 2412
+	2^23 = 3411
+	2^24 = 4823
+    It's an easy solution, but not a good one.
+  Solution #2: use a dynamic map size and collision-free basic block IDs
+    This only works in llvm_mode with llvm >= 9, though.
+    A potentially good future solution.
+  Solution #3: write instruction pointers to a big shared map
+    512kb/1MB shared map and the instrumented code writes the instruction
+    pointer into the map. Map must be big enough but could be command line
+    controlled.
+    Good: complete coverage information, nothing is lost. The choice of analysis
+          impacts speed, but this can be decided by user options.
+    Neutral: a little bit slower, but no loss of coverage.
+    Bad: completely changes how afl uses the map and the scheduling.
+    Overall another very good solution.
+    
 qemu_mode:
  - persistent mode patching the return address (WinAFL style)
  - instrument only comparison with immediate values by default when using compcov
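
The collision table in the TODO hunk above follows the 50% birthday bound: with m map slots, a first collision becomes more likely than not at roughly n = sqrt(2 * m * ln 2) edges. A standalone C check (a hypothetical helper, not part of the tree; link with -lm) that reproduces the quoted figures up to rounding:

    #include <math.h>
    #include <stdio.h>

    int main(void) {

      /* 50%-probability birthday bound: n ~ sqrt(2 * m * ln 2) */
      int p;
      for (p = 16; p <= 24; p++) {
        double m = (double)(1UL << p);
        printf("2^%d = %.0f\n", p, sqrt(2.0 * m * log(2.0)));
      }

      return 0;

    }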
diff --git a/afl-common.h b/afl-common.h
index 07afb75d..161caa39 100644
--- a/afl-common.h
+++ b/afl-common.h
@@ -1,5 +1,6 @@
 #ifndef __AFLCOMMON_H
 #define __AFLCOMMON_H
+#include "types.h"
 
 void detect_file_args(char **argv, u8 *prog_in);
 #endif
diff --git a/afl-fuzz.c b/afl-fuzz.c
index 9397d5be..e9fb8bf0 100644
--- a/afl-fuzz.c
+++ b/afl-fuzz.c
@@ -649,7 +649,7 @@ int select_algorithm(void) {
 
   double sele = ((double)(UR(10000))*0.0001);
   j_puppet = 0;
-  for (i_puppet = 0; i_puppet < operator_num; i_puppet++) {
+  for (i_puppet = 0; i_puppet < operator_num; ++i_puppet) {
       if (unlikely(i_puppet == 0)) {
           if (sele < probability_now[swarm_now][i_puppet])
             break;
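
For context, select_algorithm() above performs a roulette-wheel pick: sele is uniform in [0,1) and probability_now[swarm_now][] is read as a cumulative distribution, so the loop stops at the first operator whose cumulative probability exceeds sele. The same idea in isolation (hypothetical names, not the MOpt tables):

    /* Roulette-wheel selection over a cumulative distribution.
       cdf[] must be non-decreasing with cdf[n - 1] == 1.0. */
    static int roulette_pick(const double* cdf, int n, double u /* in [0,1) */) {

      int i;
      for (i = 0; i < n; i++)
        if (u < cdf[i]) return i;

      return n - 1;  /* guard against floating-point rounding at the top */

    }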
@@ -726,7 +726,7 @@ static void shuffle_ptrs(void** ptrs, u32 cnt) {
 
   u32 i;
 
-  for (i = 0; i < cnt - 2; i++) {
+  for (i = 0; i < cnt - 2; ++i) {
 
     u32 j = i + UR(cnt - i);
     void *s = ptrs[i];
@@ -825,7 +825,7 @@ static void bind_to_free_cpu(void) {
 
   closedir(d);
 
-  for (i = 0; i < cpu_core_count; i++) if (!cpu_used[i]) break;
+  for (i = 0; i < cpu_core_count; ++i) if (!cpu_used[i]) break;
 
   if (i == cpu_core_count) {
 
@@ -865,7 +865,7 @@ static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {
   s32 l_loc = -1;
   u32 pos;
 
-  for (pos = 0; pos < len; pos++) {
+  for (pos = 0; pos < len; ++pos) {
 
     if (*(ptr1++) != *(ptr2++)) {
 
@@ -1141,8 +1141,8 @@ static void add_to_queue(u8* fname, u32 len, u8 passed_det) {
 
   } else q_prev100 = queue = queue_top = q;
 
-  queued_paths++;
-  pending_not_fuzzed++;
+  ++queued_paths;
+  ++pending_not_fuzzed;
 
   cycles_wo_finds = 0;
 
@@ -1283,8 +1283,8 @@ static inline u8 has_new_bits(u8* virgin_map) {
 
     }
 
-    current++;
-    virgin++;
+    ++current;
+    ++virgin;
 
   }
 
@@ -1344,10 +1344,10 @@ static u32 count_bytes(u8* mem) {
     u32 v = *(ptr++);
 
     if (!v) continue;
-    if (v & FF(0)) ret++;
-    if (v & FF(1)) ret++;
-    if (v & FF(2)) ret++;
-    if (v & FF(3)) ret++;
+    if (v & FF(0)) ++ret;
+    if (v & FF(1)) ++ret;
+    if (v & FF(2)) ++ret;
+    if (v & FF(3)) ++ret;
 
   }
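
count_bytes() above walks the trace map one u32 word at a time and tests each byte lane with the FF() mask, skipping all-zero words early. Assuming FF's usual shape in afl (FF(b) == 0xff shifted into byte lane b), the idiom in isolation:

    #define FF(b) (0xffU << ((b) << 3))  /* mask selecting byte lane b of a u32 */

    /* Count the non-zero bytes in a buffer of n u32 words. */
    static unsigned count_set_bytes(const unsigned* mem, unsigned n) {

      unsigned ret = 0;

      while (n--) {

        unsigned v = *(mem++);

        if (!v) continue;            /* fast path: all four bytes are zero */
        if (v & FF(0)) ret++;
        if (v & FF(1)) ret++;
        if (v & FF(2)) ret++;
        if (v & FF(3)) ret++;

      }

      return ret;

    }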
 
@@ -1373,10 +1373,10 @@ static u32 count_non_255_bytes(u8* mem) {
        case. */
 
     if (v == 0xffffffff) continue;
-    if ((v & FF(0)) != FF(0)) ret++;
-    if ((v & FF(1)) != FF(1)) ret++;
-    if ((v & FF(2)) != FF(2)) ret++;
-    if ((v & FF(3)) != FF(3)) ret++;
+    if ((v & FF(0)) != FF(0)) ++ret;
+    if ((v & FF(1)) != FF(1)) ++ret;
+    if ((v & FF(2)) != FF(2)) ++ret;
+    if ((v & FF(3)) != FF(3)) ++ret;
 
   }
 
@@ -1422,7 +1422,7 @@ static void simplify_trace(u64* mem) {
 
     } else *mem = 0x0101010101010101ULL;
 
-    mem++;
+    ++mem;
 
   }
 
@@ -1449,7 +1449,7 @@ static void simplify_trace(u32* mem) {
 
     } else *mem = 0x01010101;
 
-    mem++;
+    ++mem;
   }
 
 }
@@ -1512,7 +1512,7 @@ static inline void classify_counts(u64* mem) {
 
     }
 
-    mem++;
+    ++mem;
 
   }
 
@@ -1537,7 +1537,7 @@ static inline void classify_counts(u32* mem) {
 
     }
 
-    mem++;
+    ++mem;
 
   }
 
@@ -1557,7 +1557,7 @@ static void minimize_bits(u8* dst, u8* src) {
   while (i < MAP_SIZE) {
 
     if (*(src++)) dst[i >> 3] |= 1 << (i & 7);
-    i++;
+    ++i;
 
   }
 
@@ -1597,7 +1597,7 @@ static void update_bitmap_score(struct queue_entry* q) {
   /* For every byte set in trace_bits[], see if there is a previous winner,
      and how it compares to us. */
 
-  for (i = 0; i < MAP_SIZE; i++)
+  for (i = 0; i < MAP_SIZE; ++i)
 
     if (trace_bits[i]) {
 
@@ -1629,7 +1629,7 @@ static void update_bitmap_score(struct queue_entry* q) {
        /* Insert ourselves as the new winner. */
 
        top_rated[i] = q;
-       q->tc_ref++;
+       ++q->tc_ref;
 
        if (!q->trace_mini) {
          q->trace_mini = ck_alloc(MAP_SIZE >> 3);
@@ -1674,7 +1674,7 @@ static void cull_queue(void) {
   /* Let's see if anything in the bitmap isn't captured in temp_v.
      If yes, and if it has a top_rated[] contender, let's use it. */
 
-  for (i = 0; i < MAP_SIZE; i++)
+  for (i = 0; i < MAP_SIZE; ++i)
     if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {
 
       u32 j = MAP_SIZE >> 3;
@@ -1686,9 +1686,9 @@ static void cull_queue(void) {
           temp_v[j] &= ~top_rated[i]->trace_mini[j];
 
       top_rated[i]->favored = 1;
-      queued_favored++;
+      ++queued_favored;
 
-      if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed) pending_favored++;
+      if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed) ++pending_favored;
 
     }
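
The culling loop above is a greedy set cover: temp_v starts with every map position marked uncovered, and each selected top_rated entry clears (through its trace_mini bitmap) the positions it already exercises, so later entries are only drafted for positions nobody covers yet. A compact sketch of the idea, with hypothetical flat types in place of afl's bit-packed trace_mini:

    struct entry { unsigned char* covers; int favored; }; /* covers[e] != 0 if entry hits edge e */

    static void cull_sketch(struct entry** top_rated, unsigned char* uncovered,
                            int n_edges) {

      int e, k;

      for (e = 0; e < n_edges; e++)
        if (top_rated[e] && uncovered[e]) {

          struct entry* q = top_rated[e];
          q->favored = 1;

          for (k = 0; k < n_edges; k++)    /* knock out everything q covers */
            if (q->covers[k]) uncovered[k] = 0;

        }

    }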
 
@@ -1793,7 +1793,7 @@ static void read_testcases(void) {
 
   }
 
-  for (i = 0; i < nl_cnt; i++) {
+  for (i = 0; i < nl_cnt; ++i) {
 
     struct stat st;
 
@@ -1889,15 +1889,15 @@ static void load_extras_file(u8* fname, u32* min_len, u32* max_len,
     u8 *rptr, *wptr;
     u32 klen = 0;
 
-    cur_line++;
+    ++cur_line;
 
     /* Trim on left and right. */
 
-    while (isspace(*lptr)) lptr++;
+    while (isspace(*lptr)) ++lptr;
 
     rptr = lptr + strlen(lptr) - 1;
-    while (rptr >= lptr && isspace(*rptr)) rptr--;
-    rptr++;
+    while (rptr >= lptr && isspace(*rptr)) --rptr;
+    ++rptr;
     *rptr = 0;
 
     /* Skip empty lines and comments. */
@@ -1906,7 +1906,7 @@ static void load_extras_file(u8* fname, u32* min_len, u32* max_len,
 
     /* All other lines must end with '"', which we can consume. */
 
-    rptr--;
+    --rptr;
 
     if (rptr < lptr || *rptr != '"')
       FATAL("Malformed name=\"value\" pair in line %u.", cur_line);
@@ -1915,28 +1915,28 @@ static void load_extras_file(u8* fname, u32* min_len, u32* max_len,
 
     /* Skip alphanumerics and dashes (label). */
 
-    while (isalnum(*lptr) || *lptr == '_') lptr++;
+    while (isalnum(*lptr) || *lptr == '_') ++lptr;
 
     /* If @number follows, parse that. */
 
     if (*lptr == '@') {
 
-      lptr++;
+      ++lptr;
       if (atoi(lptr) > dict_level) continue;
-      while (isdigit(*lptr)) lptr++;
+      while (isdigit(*lptr)) ++lptr;
 
     }
 
     /* Skip whitespace and = signs. */
 
-    while (isspace(*lptr) || *lptr == '=') lptr++;
+    while (isspace(*lptr) || *lptr == '=') ++lptr;
 
     /* Consume opening '"'. */
 
     if (*lptr != '"')
       FATAL("Malformed name=\"keyword\" pair in line %u.", cur_line);
 
-    lptr++;
+    ++lptr;
 
     if (!*lptr) FATAL("Empty keyword in line %u.", cur_line);
 
@@ -1960,7 +1960,7 @@ static void load_extras_file(u8* fname, u32* min_len, u32* max_len,
 
         case '\\':
 
-          lptr++;
+          ++lptr;
 
           if (*lptr == '\\' || *lptr == '"') {
             *(wptr++) = *(lptr++);
@@ -1976,14 +1976,14 @@ static void load_extras_file(u8* fname, u32* min_len, u32* max_len,
             (strchr(hexdigits, tolower(lptr[2])) - hexdigits);
 
           lptr += 3;
-          klen++;
+          ++klen;
 
           break;
 
         default:
 
           *(wptr++) = *(lptr++);
-          klen++;
+          ++klen;
 
       }
 
@@ -1998,7 +1998,7 @@ static void load_extras_file(u8* fname, u32* min_len, u32* max_len,
     if (*min_len > klen) *min_len = klen;
     if (*max_len < klen) *max_len = klen;
 
-    extras_cnt++;
+    ++extras_cnt;
 
   }
 
@@ -2081,7 +2081,7 @@ static void load_extras(u8* dir) {
     close(fd);
     ck_free(fn);
 
-    extras_cnt++;
+    ++extras_cnt;
 
   }
 
@@ -2131,7 +2131,7 @@ static void maybe_add_auto(u8* mem, u32 len) {
 
   /* Skip runs of identical bytes. */
 
-  for (i = 1; i < len; i++)
+  for (i = 1; i < len; ++i)
     if (mem[0] ^ mem[i]) break;
 
   if (i == len) return;
@@ -2162,10 +2162,10 @@ static void maybe_add_auto(u8* mem, u32 len) {
      match. We optimize by exploiting the fact that extras[] are sorted
      by size. */
 
-  for (i = 0; i < extras_cnt; i++)
+  for (i = 0; i < extras_cnt; ++i)
     if (extras[i].len >= len) break;
 
-  for (; i < extras_cnt && extras[i].len == len; i++)
+  for (; i < extras_cnt && extras[i].len == len; ++i)
     if (!memcmp_nocase(extras[i].data, mem, len)) return;
 
   /* Last but not least, check a_extras[] for matches. There are no
@@ -2173,7 +2173,7 @@ static void maybe_add_auto(u8* mem, u32 len) {
 
   auto_changed = 1;
 
-  for (i = 0; i < a_extras_cnt; i++) {
+  for (i = 0; i < a_extras_cnt; ++i) {
 
     if (a_extras[i].len == len && !memcmp_nocase(a_extras[i].data, mem, len)) {
 
@@ -2195,7 +2195,7 @@ static void maybe_add_auto(u8* mem, u32 len) {
 
     a_extras[a_extras_cnt].data = ck_memdup(mem, len);
     a_extras[a_extras_cnt].len  = len;
-    a_extras_cnt++;
+    ++a_extras_cnt;
 
   } else {
 
@@ -2234,7 +2234,7 @@ static void save_auto(void) {
   if (!auto_changed) return;
   auto_changed = 0;
 
-  for (i = 0; i < MIN(USE_AUTO_EXTRAS, a_extras_cnt); i++) {
+  for (i = 0; i < MIN(USE_AUTO_EXTRAS, a_extras_cnt); ++i) {
 
     u8* fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", out_dir, i);
     s32 fd;
@@ -2259,7 +2259,7 @@ static void load_auto(void) {
 
   u32 i;
 
-  for (i = 0; i < USE_AUTO_EXTRAS; i++) {
+  for (i = 0; i < USE_AUTO_EXTRAS; ++i) {
 
     u8  tmp[MAX_AUTO_EXTRA + 1];
     u8* fn = alloc_printf("%s/.state/auto_extras/auto_%06u", in_dir, i);
@@ -2302,12 +2302,12 @@ static void destroy_extras(void) {
 
   u32 i;
 
-  for (i = 0; i < extras_cnt; i++) 
+  for (i = 0; i < extras_cnt; ++i) 
     ck_free(extras[i].data);
 
   ck_free(extras);
 
-  for (i = 0; i < a_extras_cnt; i++) 
+  for (i = 0; i < a_extras_cnt; ++i) 
     ck_free(a_extras[i].data);
 
   ck_free(a_extras);
@@ -2775,7 +2775,7 @@ static u8 run_target(char** argv, u32 timeout) {
 
   setitimer(ITIMER_REAL, &it, NULL);
 
-  total_execs++;
+  ++total_execs;
 
   /* Any subsequent operations on trace_bits must not be moved by the
      compiler below this point. Past this location, trace_bits[] behave
@@ -2916,7 +2916,7 @@ static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
     use_tmout = MAX(exec_tmout + CAL_TMOUT_ADD,
                     exec_tmout * CAL_TMOUT_PERC / 100);
 
-  q->cal_failed++;
+  ++q->cal_failed;
 
   stage_name = "calibration";
   stage_max  = fast_cal ? 3 : CAL_CYCLES;
@@ -2931,7 +2931,7 @@ static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
 
   start_us = get_cur_time_us();
 
-  for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
     u32 cksum;
 
@@ -2962,7 +2962,7 @@ static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
 
         u32 i;
 
-        for (i = 0; i < MAP_SIZE; i++) {
+        for (i = 0; i < MAP_SIZE; ++i) {
 
           if (!var_bytes[i] && first_trace[i] != trace_bits[i]) {
 
@@ -3000,7 +3000,7 @@ static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
   q->cal_failed  = 0;
 
   total_bitmap_size += q->bitmap_size;
-  total_bitmap_entries++;
+  ++total_bitmap_entries;
 
   update_bitmap_score(q);
 
@@ -3014,7 +3014,7 @@ abort_calibration:
 
   if (new_bits == 2 && !q->has_new_cov) {
     q->has_new_cov = 1;
-    queued_with_cov++;
+    ++queued_with_cov;
   }
 
   /* Mark variable paths. */
@@ -3025,7 +3025,7 @@ abort_calibration:
 
     if (!q->var_behavior) {
       mark_as_variable(q);
-      queued_variable++;
+      ++queued_variable;
     }
 
   }
@@ -3049,7 +3049,7 @@ static void check_map_coverage(void) {
 
   if (count_bytes(trace_bits) < 100) return;
 
-  for (i = (1 << (MAP_SIZE_POW2 - 1)); i < MAP_SIZE; i++)
+  for (i = (1 << (MAP_SIZE_POW2 - 1)); i < MAP_SIZE; ++i)
     if (trace_bits[i]) return;
 
   WARNF("Recompile binary with newer version of afl to improve coverage!");
@@ -3116,7 +3116,7 @@ static void perform_dry_run(char** argv) {
           if (timeout_given > 1) {
             WARNF("Test case results in a timeout (skipping)");
             q->cal_failed = CAL_CHANCES;
-            cal_failures++;
+            ++cal_failures;
             break;
           }
 
@@ -3151,7 +3151,7 @@ static void perform_dry_run(char** argv) {
         if (skip_crashes) {
           WARNF("Test case results in a crash (skipping)");
           q->cal_failed = CAL_CHANCES;
-          cal_failures++;
+          ++cal_failures;
           break;
         }
 
@@ -3227,7 +3227,7 @@ static void perform_dry_run(char** argv) {
 
       case FAULT_NOBITS: 
 
-        useless_at_start++;
+        ++useless_at_start;
 
         if (!in_bitmap && !shuffle_queue)
           WARNF("No new instrumentation output, test case may be useless.");
@@ -3309,7 +3309,7 @@ static void pivot_inputs(void) {
     u8  *nfn, *rsl = strrchr(q->fname, '/');
     u32 orig_id;
 
-    if (!rsl) rsl = q->fname; else rsl++;
+    if (!rsl) rsl = q->fname; else ++rsl;
 
     /* If the original file name conforms to the syntax and the recorded
        ID matches the one we'd assign, just use the original file name.
@@ -3376,7 +3376,7 @@ static void pivot_inputs(void) {
     if (q->passed_det) mark_as_det_done(q);
 
     q = q->next;
-    id++;
+    ++id;
 
   }
 
@@ -3506,7 +3506,7 @@ static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
        future fuzzing, etc. */
 
     if (!(hnb = has_new_bits(virgin_bits))) {
-      if (crash_mode) total_crashes++;
+      if (crash_mode) ++total_crashes;
       return 0;
     }    
 
@@ -3525,7 +3525,7 @@ static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
 
     if (hnb == 2) {
       queue_top->has_new_cov = 1;
-      queued_with_cov++;
+      ++queued_with_cov;
     }
 
     queue_top->exec_cksum = cksum;
@@ -3556,7 +3556,7 @@ static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
          hang-specific bitmap as a signal of uniqueness. In "dumb" mode, we
          just keep everything. */
 
-      total_tmouts++;
+      ++total_tmouts;
 
       if (unique_hangs >= KEEP_UNIQUE_HANG) return keeping;
 
@@ -3572,7 +3572,7 @@ static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
 
       }
 
-      unique_tmouts++;
+      ++unique_tmouts;
 
       /* Before saving, we make sure that it's a genuine hang by re-running
          the target with a more generous timeout (unless the default timeout
@@ -3606,7 +3606,7 @@ static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
 
 #endif /* ^!SIMPLE_FILES */
 
-      unique_hangs++;
+      ++unique_hangs;
 
       last_hang_time = get_cur_time();
 
@@ -3620,7 +3620,7 @@ keep_as_crash:
          except for slightly different limits and no need to re-run test
          cases. */
 
-      total_crashes++;
+      ++total_crashes;
 
       if (unique_crashes >= KEEP_UNIQUE_CRASH) return keeping;
 
@@ -3650,7 +3650,7 @@ keep_as_crash:
 
 #endif /* ^!SIMPLE_FILES */
 
-      unique_crashes++;
+      ++unique_crashes;
 
       last_crash_time = get_cur_time();
       last_crash_execs = total_execs;
@@ -4880,7 +4880,7 @@ static u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) {
     write_to_testcase(retbuf, retlen);
 
     fault = run_target(argv, exec_tmout);
-    trim_execs++;
+    ++trim_execs;
 
     if (stop_soon || fault == FAULT_ERROR) goto abort_trimming;
 
@@ -5006,7 +5006,7 @@ static u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
       write_with_gap(in_buf, q->len, remove_pos, trim_avail);
 
       fault = run_target(argv, exec_tmout);
-      trim_execs++;
+      ++trim_execs;
 
       if (stop_soon || fault == FAULT_ERROR) goto abort_trimming;
 
@@ -5044,7 +5044,7 @@ static u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
       /* Since this can be slow, update the screen every now and then. */
 
       if (!(trim_exec++ % stats_update_freq)) show_stats();
-      stage_cur++;
+      ++stage_cur;
 
     }
 
@@ -5105,7 +5105,7 @@ EXP_ST u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) {
   if (fault == FAULT_TMOUT) {
 
     if (subseq_tmouts++ > TMOUT_LIMIT) {
-      cur_skipped_paths++;
+      ++cur_skipped_paths;
       return 1;
     }
 
@@ -5117,7 +5117,7 @@ EXP_ST u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) {
   if (skip_requested) {
 
      skip_requested = 0;
-     cur_skipped_paths++;
+     ++cur_skipped_paths;
      return 1;
 
   }
@@ -5221,7 +5221,7 @@ static u32 calculate_score(struct queue_entry* q) {
   } else if (q->handicap) {
 
     perf_score *= 2;
-    q->handicap--;
+    --q->handicap;
 
   }
 
@@ -5327,7 +5327,7 @@ static u8 could_be_bitflip(u32 xor_val) {
 
   /* Shift right until the lowest set bit reaches position 0. */
 
-  while (!(xor_val & 1)) { sh++; xor_val >>= 1; }
+  while (!(xor_val & 1)) { ++sh; xor_val >>= 1; }
 
   /* 1-, 2-, and 4-bit patterns are OK anywhere. */
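
The loop above normalizes xor_val by shifting the lowest set bit down to position 0, counting the shift in sh; the rest of could_be_bitflip() then only has to compare against a handful of canonical patterns. A sketch of the full check's shape, assuming upstream afl's logic (1-, 2- and 4-bit runs may start anywhere; 8-, 16- and 32-bit runs only at byte boundaries):

    static unsigned char could_be_bitflip_sketch(unsigned xor_val) {

      unsigned sh = 0;

      if (!xor_val) return 1;

      /* Normalize: shift right until the lowest set bit is at position 0. */
      while (!(xor_val & 1)) { sh++; xor_val >>= 1; }

      /* 1-, 2- and 4-bit flip patterns can start at any bit offset. */
      if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;

      /* 8-, 16- and 32-bit flips only start at byte boundaries. */
      if (sh & 7) return 0;

      if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff)
        return 1;

      return 0;

    }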
 
@@ -5357,12 +5357,12 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
 
   /* See if one-byte adjustments to any byte could produce this result. */
 
-  for (i = 0; i < blen; i++) {
+  for (i = 0; i < blen; ++i) {
 
     u8 a = old_val >> (8 * i),
        b = new_val >> (8 * i);
 
-    if (a != b) { diffs++; ov = a; nv = b; }
+    if (a != b) { ++diffs; ov = a; nv = b; }
 
   }
 
@@ -5381,12 +5381,12 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) {
 
   diffs = 0;
 
-  for (i = 0; i < blen / 2; i++) {
+  for (i = 0; i < blen / 2; ++i) {
 
     u16 a = old_val >> (16 * i),
         b = new_val >> (16 * i);
 
-    if (a != b) { diffs++; ov = a; nv = b; }
+    if (a != b) { ++diffs; ov = a; nv = b; }
 
   }
 
@@ -5439,9 +5439,9 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) {
   /* See if one-byte insertions from interesting_8 over old_val could
      produce new_val. */
 
-  for (i = 0; i < blen; i++) {
+  for (i = 0; i < blen; ++i) {
 
-    for (j = 0; j < sizeof(interesting_8); j++) {
+    for (j = 0; j < sizeof(interesting_8); ++j) {
 
       u32 tval = (old_val & ~(0xff << (i * 8))) |
                  (((u8)interesting_8[j]) << (i * 8));
@@ -5459,9 +5459,9 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) {
 
   /* See if two-byte insertions over old_val could give us new_val. */
 
-  for (i = 0; i < blen - 1; i++) {
+  for (i = 0; i < blen - 1; ++i) {
 
-    for (j = 0; j < sizeof(interesting_16) / 2; j++) {
+    for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
 
       u32 tval = (old_val & ~(0xffff << (i * 8))) |
                  (((u16)interesting_16[j]) << (i * 8));
@@ -5488,7 +5488,7 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) {
     /* See if four-byte insertions could produce the same result
        (LE only). */
 
-    for (j = 0; j < sizeof(interesting_32) / 4; j++)
+    for (j = 0; j < sizeof(interesting_32) / 4; ++j)
       if (new_val == (u32)interesting_32[j]) return 1;
 
   }
@@ -5600,7 +5600,7 @@ static u8 fuzz_one_original(char** argv) {
     }
 
     if (stop_soon || res != crash_mode) {
-      cur_skipped_paths++;
+      ++cur_skipped_paths;
       goto abandon_entry;
     }
 
@@ -5618,7 +5618,7 @@ static u8 fuzz_one_original(char** argv) {
       FATAL("Unable to execute target application");
 
     if (stop_soon) {
-      cur_skipped_paths++;
+      ++cur_skipped_paths;
       goto abandon_entry;
     }
 
@@ -5651,7 +5651,7 @@ static u8 fuzz_one_original(char** argv) {
 
     orig_hit_cnt = queued_paths + unique_crashes;
 
-    for (stage_cur = 0 ; stage_cur < stage_max ; stage_cur++) {
+    for (stage_cur = 0 ; stage_cur < stage_max ; ++stage_cur) {
       size_t orig_size = (size_t) len;
       size_t mutated_size = custom_mutator(out_buf, orig_size, mutated_buf, max_seed_size, UR(UINT32_MAX));
       if (mutated_size > 0) {
@@ -5724,7 +5724,7 @@ static u8 fuzz_one_original(char** argv) {
 
   prev_cksum = queue_cur->exec_cksum;
 
-  for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
     stage_cur_byte = stage_cur >> 3;
 
@@ -5771,7 +5771,7 @@ static u8 fuzz_one_original(char** argv) {
            final character and force output. */
 
         if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-        a_len++;
+        ++a_len;
 
         if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
           maybe_add_auto(a_collect, a_len);
@@ -5795,7 +5795,7 @@ static u8 fuzz_one_original(char** argv) {
       if (cksum != queue_cur->exec_cksum) {
 
         if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];        
-        a_len++;
+        ++a_len;
 
       }
 
@@ -5816,7 +5816,7 @@ static u8 fuzz_one_original(char** argv) {
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
     stage_cur_byte = stage_cur >> 3;
 
@@ -5843,7 +5843,7 @@ static u8 fuzz_one_original(char** argv) {
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
     stage_cur_byte = stage_cur >> 3;
 
@@ -5887,7 +5887,7 @@ static u8 fuzz_one_original(char** argv) {
 
   if (EFF_APOS(len - 1) != 0) {
     eff_map[EFF_APOS(len - 1)] = 1;
-    eff_cnt++;
+    ++eff_cnt;
   }
 
   /* Walking byte. */
@@ -5898,7 +5898,7 @@ static u8 fuzz_one_original(char** argv) {
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
     stage_cur_byte = stage_cur;
 
@@ -5925,7 +5925,7 @@ static u8 fuzz_one_original(char** argv) {
 
       if (cksum != queue_cur->exec_cksum) {
         eff_map[EFF_APOS(stage_cur)] = 1;
-        eff_cnt++;
+        ++eff_cnt;
       }
 
     }
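
The walking-byte stage above builds the effector map: if flipping byte stage_cur changes the execution checksum, the 8-byte block containing it is marked as worth mutating, and the later deterministic stages skip unmarked regions. Assuming afl's usual macro shapes (EFF_MAP_SCALE2 == 3), the indexing works as follows:

    #define EFF_MAP_SCALE2 3                        /* one eff_map byte per 8 input bytes */
    #define EFF_APOS(p)    ((p) >> EFF_MAP_SCALE2)  /* input offset -> eff_map index      */
    #define EFF_REM(x)     ((x) & ((1 << EFF_MAP_SCALE2) - 1))
    #define EFF_ALEN(l)    (EFF_APOS(l) + !!EFF_REM(l)) /* eff_map length for input len l */

    /* Example: a 20-byte input needs EFF_ALEN(20) == 3 effector-map entries,
       covering input offsets 0-7, 8-15 and 16-19. */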
@@ -5969,12 +5969,12 @@ static u8 fuzz_one_original(char** argv) {
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (i = 0; i < len - 1; i++) {
+  for (i = 0; i < len - 1; ++i) {
 
     /* Let's consult the effector map... */
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-      stage_max--;
+      --stage_max;
       continue;
     }
 
@@ -5983,7 +5983,7 @@ static u8 fuzz_one_original(char** argv) {
     *(u16*)(out_buf + i) ^= 0xFFFF;
 
     if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-    stage_cur++;
+    ++stage_cur;
 
     *(u16*)(out_buf + i) ^= 0xFFFF;
 
@@ -6006,12 +6006,12 @@ static u8 fuzz_one_original(char** argv) {
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (i = 0; i < len - 3; i++) {
+  for (i = 0; i < len - 3; ++i) {
 
     /* Let's consult the effector map... */
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-      stage_max--;
+      --stage_max;
       continue;
     }
 
@@ -6020,7 +6020,7 @@ static u8 fuzz_one_original(char** argv) {
     *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
     if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-    stage_cur++;
+    ++stage_cur;
 
     *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
@@ -6050,7 +6050,7 @@ skip_bitflip:
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (i = 0; i < len; i++) {
+  for (i = 0; i < len; ++i) {
 
     u8 orig = out_buf[i];
 
@@ -6063,7 +6063,7 @@ skip_bitflip:
 
     stage_cur_byte = i;
 
-    for (j = 1; j <= ARITH_MAX; j++) {
+    for (j = 1; j <= ARITH_MAX; ++j) {
 
       u8 r = orig ^ (orig + j);
 
@@ -6076,9 +6076,9 @@ skip_bitflip:
         out_buf[i] = orig + j;
 
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        stage_cur++;
+        ++stage_cur;
 
-      } else stage_max--;
+      } else --stage_max;
 
       r =  orig ^ (orig - j);
 
@@ -6088,9 +6088,9 @@ skip_bitflip:
         out_buf[i] = orig - j;
 
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        stage_cur++;
+        ++stage_cur;
 
-      } else stage_max--;
+      } else --stage_max;
 
       out_buf[i] = orig;
 
@@ -6114,7 +6114,7 @@ skip_bitflip:
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (i = 0; i < len - 1; i++) {
+  for (i = 0; i < len - 1; ++i) {
 
     u16 orig = *(u16*)(out_buf + i);
 
@@ -6127,7 +6127,7 @@ skip_bitflip:
 
     stage_cur_byte = i;
 
-    for (j = 1; j <= ARITH_MAX; j++) {
+    for (j = 1; j <= ARITH_MAX; ++j) {
 
       u16 r1 = orig ^ (orig + j),
           r2 = orig ^ (orig - j),
@@ -6147,9 +6147,9 @@ skip_bitflip:
         *(u16*)(out_buf + i) = orig + j;
 
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        stage_cur++;
+        ++stage_cur;
  
-      } else stage_max--;
+      } else --stage_max;
 
       if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
 
@@ -6157,9 +6157,9 @@ skip_bitflip:
         *(u16*)(out_buf + i) = orig - j;
 
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        stage_cur++;
+        ++stage_cur;
 
-      } else stage_max--;
+      } else --stage_max;
 
       /* Big endian comes next. Same deal. */
 
@@ -6172,9 +6172,9 @@ skip_bitflip:
         *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
 
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        stage_cur++;
+        ++stage_cur;
 
-      } else stage_max--;
+      } else --stage_max;
 
       if ((orig >> 8) < j && !could_be_bitflip(r4)) {
 
@@ -6182,9 +6182,9 @@ skip_bitflip:
         *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
 
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        stage_cur++;
+        ++stage_cur;
 
-      } else stage_max--;
+      } else --stage_max;
 
       *(u16*)(out_buf + i) = orig;
 
@@ -6208,7 +6208,7 @@ skip_bitflip:
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (i = 0; i < len - 3; i++) {
+  for (i = 0; i < len - 3; ++i) {
 
     u32 orig = *(u32*)(out_buf + i);
 
@@ -6222,7 +6222,7 @@ skip_bitflip:
 
     stage_cur_byte = i;
 
-    for (j = 1; j <= ARITH_MAX; j++) {
+    for (j = 1; j <= ARITH_MAX; ++j) {
 
       u32 r1 = orig ^ (orig + j),
           r2 = orig ^ (orig - j),
@@ -6240,9 +6240,9 @@ skip_bitflip:
         *(u32*)(out_buf + i) = orig + j;
 
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        stage_cur++;
+        ++stage_cur;
 
-      } else stage_max--;
+      } else --stage_max;
 
       if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
 
@@ -6250,9 +6250,9 @@ skip_bitflip:
         *(u32*)(out_buf + i) = orig - j;
 
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        stage_cur++;
+        ++stage_cur;
 
-      } else stage_max--;
+      } else --stage_max;
 
       /* Big endian next. */
 
@@ -6264,9 +6264,9 @@ skip_bitflip:
         *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
 
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        stage_cur++;
+        ++stage_cur;
 
-      } else stage_max--;
+      } else --stage_max;
 
       if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
 
@@ -6274,9 +6274,9 @@ skip_bitflip:
         *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
 
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        stage_cur++;
+        ++stage_cur;
 
-      } else stage_max--;
+      } else --stage_max;
 
       *(u32*)(out_buf + i) = orig;
 
@@ -6306,7 +6306,7 @@ skip_arith:
 
   /* Setting 8-bit integers. */
 
-  for (i = 0; i < len; i++) {
+  for (i = 0; i < len; ++i) {
 
     u8 orig = out_buf[i];
 
@@ -6319,13 +6319,13 @@ skip_arith:
 
     stage_cur_byte = i;
 
-    for (j = 0; j < sizeof(interesting_8); j++) {
+    for (j = 0; j < sizeof(interesting_8); ++j) {
 
       /* Skip if the value could be a product of bitflips or arithmetics. */
 
       if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
           could_be_arith(orig, (u8)interesting_8[j], 1)) {
-        stage_max--;
+        --stage_max;
         continue;
       }
 
@@ -6335,7 +6335,7 @@ skip_arith:
       if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
       out_buf[i] = orig;
-      stage_cur++;
+      ++stage_cur;
 
     }
 
@@ -6357,7 +6357,7 @@ skip_arith:
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (i = 0; i < len - 1; i++) {
+  for (i = 0; i < len - 1; ++i) {
 
     u16 orig = *(u16*)(out_buf + i);
 
@@ -6370,7 +6370,7 @@ skip_arith:
 
     stage_cur_byte = i;
 
-    for (j = 0; j < sizeof(interesting_16) / 2; j++) {
+    for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
 
       stage_cur_val = interesting_16[j];
 
@@ -6386,9 +6386,9 @@ skip_arith:
         *(u16*)(out_buf + i) = interesting_16[j];
 
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        stage_cur++;
+        ++stage_cur;
 
-      } else stage_max--;
+      } else --stage_max;
 
       if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
           !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
@@ -6399,9 +6399,9 @@ skip_arith:
 
         *(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        stage_cur++;
+        ++stage_cur;
 
-      } else stage_max--;
+      } else --stage_max;
 
     }
 
@@ -6439,7 +6439,7 @@ skip_arith:
 
     stage_cur_byte = i;
 
-    for (j = 0; j < sizeof(interesting_32) / 4; j++) {
+    for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
 
       stage_cur_val = interesting_32[j];
 
@@ -6455,9 +6455,9 @@ skip_arith:
         *(u32*)(out_buf + i) = interesting_32[j];
 
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        stage_cur++;
+        ++stage_cur;
 
-      } else stage_max--;
+      } else --stage_max;
 
       if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
           !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
@@ -6468,9 +6468,9 @@ skip_arith:
 
         *(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
         if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        stage_cur++;
+        ++stage_cur;
 
-      } else stage_max--;
+      } else --stage_max;
 
     }
 
@@ -6502,7 +6502,7 @@ skip_interest:
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (i = 0; i < len; i++) {
+  for (i = 0; i < len; ++i) {
 
     u32 last_len = 0;
 
@@ -6513,7 +6513,7 @@ skip_interest:
        between writes at a particular offset determined by the outer
        loop. */
 
-    for (j = 0; j < extras_cnt; j++) {
+    for (j = 0; j < extras_cnt; ++j) {
 
       /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
          skip them if there's no room to insert the payload, if the token
@@ -6525,7 +6525,7 @@ skip_interest:
           !memcmp(extras[j].data, out_buf + i, extras[j].len) ||
           !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
 
-        stage_max--;
+        --stage_max;
         continue;
 
       }
@@ -6535,7 +6535,7 @@ skip_interest:
 
       if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-      stage_cur++;
+      ++stage_cur;
 
     }
 
@@ -6560,14 +6560,14 @@ skip_interest:
 
   ex_tmp = ck_alloc(len + MAX_DICT_FILE);
 
-  for (i = 0; i <= len; i++) {
+  for (i = 0; i <= len; ++i) {
 
     stage_cur_byte = i;
 
-    for (j = 0; j < extras_cnt; j++) {
+    for (j = 0; j < extras_cnt; ++j) {
 
       if (len + extras[j].len > MAX_FILE) {
-        stage_max--; 
+        --stage_max; 
         continue;
       }
 
@@ -6582,7 +6582,7 @@ skip_interest:
         goto abandon_entry;
       }
 
-      stage_cur++;
+      ++stage_cur;
 
     }
 
@@ -6611,13 +6611,13 @@ skip_user_extras:
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (i = 0; i < len; i++) {
+  for (i = 0; i < len; ++i) {
 
     u32 last_len = 0;
 
     stage_cur_byte = i;
 
-    for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); j++) {
+    for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
 
       /* See the comment in the earlier code; extras are sorted by size. */
 
@@ -6625,7 +6625,7 @@ skip_user_extras:
           !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
           !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) {
 
-        stage_max--;
+        --stage_max;
         continue;
 
       }
@@ -6635,7 +6635,7 @@ skip_user_extras:
 
       if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-      stage_cur++;
+      ++stage_cur;
 
     }
 
@@ -6676,7 +6676,7 @@ python_stage:
   char* retbuf = NULL;
   size_t retlen = 0;
 
-  for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
     struct queue_entry* target;
     u32 tid;
     u8* new_buf;
@@ -6694,7 +6694,7 @@ retry_external_pick:
 
     while (target && (target->len < 2 || target == queue_cur) && queued_paths > 1) {
       target = target->next;
-      splicing_with++;
+      ++splicing_with;
     }
 
     if (!target) goto retry_external_pick;
@@ -6792,13 +6792,13 @@ havoc_stage:
   /* We essentially just do several thousand runs (depending on perf_score)
      where we take the input file and make random stacked tweaks. */
 
-  for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
     u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
 
     stage_cur_val = use_stacking;
  
-    for (i = 0; i < use_stacking; i++) {
+    for (i = 0; i < use_stacking; ++i) {
 
       switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) {
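
In the havoc stage above, use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)) stacks a power-of-two number of random tweaks on one buffer; with afl's default HAVOC_STACK_POW2 of 7 that is 2, 4, ..., 128, each equally likely. The draw in isolation (ur() assumed to return a uniform integer in [0, n)):

    #define HAVOC_STACK_POW2 7  /* afl's default */

    /* Returns 2, 4, 8, ..., 128 with equal probability: the number of
       stacked havoc tweaks to apply to a single buffer. */
    static unsigned stacking_count(unsigned (*ur)(unsigned)) {

      return 1U << (1 + ur(HAVOC_STACK_POW2));

    }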
 
@@ -7252,7 +7252,7 @@ retry_splicing:
 
     while (target && (target->len < 2 || target == queue_cur)) {
       target = target->next;
-      splicing_with++;
+      ++splicing_with;
     }
 
     if (!target) goto retry_splicing;
@@ -7314,12 +7314,12 @@ abandon_entry:
      cycle and have not seen this entry before. */
 
   if (!stop_soon && !queue_cur->cal_failed && (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) {
-    pending_not_fuzzed--;
+    --pending_not_fuzzed;
     queue_cur->was_fuzzed = 1;
-    if (queue_cur->favored) pending_favored--;
+    if (queue_cur->favored) --pending_favored;
   }
 
-  queue_cur->fuzz_level++;
+  ++queue_cur->fuzz_level;
 
   munmap(orig_in, queue_cur->len);
 
@@ -7434,7 +7434,7 @@ static u8 pilot_fuzzing(char** argv) {
 		}
 
 		if (stop_soon || res != crash_mode) {
-			cur_skipped_paths++;
+			++cur_skipped_paths;
 			goto abandon_entry;
 		}
 
@@ -7452,7 +7452,7 @@ static u8 pilot_fuzzing(char** argv) {
 			FATAL("Unable to execute target application");
 
 		if (stop_soon) {
-			cur_skipped_paths++;
+			++cur_skipped_paths;
 			goto abandon_entry;
 		}
 
@@ -7521,7 +7521,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 		prev_cksum = queue_cur->exec_cksum;
 
-		for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
 			stage_cur_byte = stage_cur >> 3;
 
@@ -7568,7 +7568,7 @@ static u8 pilot_fuzzing(char** argv) {
 					   final character and force output. */
 
 					if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-					a_len++;
+					++a_len;
 
 					if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
 						maybe_add_auto(a_collect, a_len);
@@ -7593,7 +7593,7 @@ static u8 pilot_fuzzing(char** argv) {
 				if (cksum != queue_cur->exec_cksum) {
 
 					if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-					a_len++;
+					++a_len;
 
 				}
 
@@ -7614,7 +7614,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
 			stage_cur_byte = stage_cur >> 3;
 
@@ -7647,7 +7647,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
 			stage_cur_byte = stage_cur >> 3;
 
@@ -7694,7 +7694,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 		if (EFF_APOS(len - 1) != 0) {
 			eff_map[EFF_APOS(len - 1)] = 1;
-			eff_cnt++;
+			++eff_cnt;
 		}
 
 		/* Walking byte. */
@@ -7707,7 +7707,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
 			stage_cur_byte = stage_cur;
 
@@ -7734,7 +7734,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 				if (cksum != queue_cur->exec_cksum) {
 					eff_map[EFF_APOS(stage_cur)] = 1;
-					eff_cnt++;
+					++eff_cnt;
 				}
 
 			}
@@ -7785,12 +7785,12 @@ static u8 pilot_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len - 1; i++) {
+		for (i = 0; i < len - 1; ++i) {
 
 			/* Let's consult the effector map... */
 
 			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				stage_max--;
+				--stage_max;
 				continue;
 			}
 
@@ -7799,7 +7799,7 @@ static u8 pilot_fuzzing(char** argv) {
 			*(u16*)(out_buf + i) ^= 0xFFFF;
 
 			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-			stage_cur++;
+			++stage_cur;
 
 			*(u16*)(out_buf + i) ^= 0xFFFF;
 
@@ -7827,12 +7827,12 @@ static u8 pilot_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len - 3; i++) {
+		for (i = 0; i < len - 3; ++i) {
 
 			/* Let's consult the effector map... */
 			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
 				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				stage_max--;
+				--stage_max;
 				continue;
 			}
 
@@ -7841,7 +7841,7 @@ static u8 pilot_fuzzing(char** argv) {
 			*(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
 			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-			stage_cur++;
+			++stage_cur;
 
 			*(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
@@ -7879,7 +7879,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len; i++) {
+		for (i = 0; i < len; ++i) {
 
 			u8 orig = out_buf[i];
 
@@ -7892,7 +7892,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 			stage_cur_byte = i;
 
-			for (j = 1; j <= ARITH_MAX; j++) {
+			for (j = 1; j <= ARITH_MAX; ++j) {
 
 				u8 r = orig ^ (orig + j);
 
@@ -7905,9 +7905,9 @@ static u8 pilot_fuzzing(char** argv) {
 					out_buf[i] = orig + j;
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				r = orig ^ (orig - j);
 
@@ -7917,9 +7917,9 @@ static u8 pilot_fuzzing(char** argv) {
 					out_buf[i] = orig - j;
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				out_buf[i] = orig;
 
@@ -7950,7 +7950,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len - 1; i++) {
+		for (i = 0; i < len - 1; ++i) {
 
 			u16 orig = *(u16*)(out_buf + i);
 
@@ -7963,7 +7963,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 			stage_cur_byte = i;
 
-			for (j = 1; j <= ARITH_MAX; j++) {
+			for (j = 1; j <= ARITH_MAX; ++j) {
 
 				u16 r1 = orig ^ (orig + j),
 					r2 = orig ^ (orig - j),
@@ -7983,9 +7983,9 @@ static u8 pilot_fuzzing(char** argv) {
 					*(u16*)(out_buf + i) = orig + j;
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
 
@@ -7993,9 +7993,9 @@ static u8 pilot_fuzzing(char** argv) {
 					*(u16*)(out_buf + i) = orig - j;
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				/* Big endian comes next. Same deal. */
 
@@ -8008,9 +8008,9 @@ static u8 pilot_fuzzing(char** argv) {
 					*(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				if ((orig >> 8) < j && !could_be_bitflip(r4)) {
 
@@ -8018,9 +8018,9 @@ static u8 pilot_fuzzing(char** argv) {
 					*(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				*(u16*)(out_buf + i) = orig;
 
@@ -8049,7 +8049,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len - 3; i++) {
+		for (i = 0; i < len - 3; ++i) {
 
 			u32 orig = *(u32*)(out_buf + i);
 
@@ -8063,7 +8063,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 			stage_cur_byte = i;
 
-			for (j = 1; j <= ARITH_MAX; j++) {
+			for (j = 1; j <= ARITH_MAX; ++j) {
 
 				u32 r1 = orig ^ (orig + j),
 					r2 = orig ^ (orig - j),
@@ -8081,9 +8081,9 @@ static u8 pilot_fuzzing(char** argv) {
 					*(u32*)(out_buf + i) = orig + j;
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
 
@@ -8093,7 +8093,7 @@ static u8 pilot_fuzzing(char** argv) {
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 					stage_cur++;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				/* Big endian next. */
 
@@ -8105,9 +8105,9 @@ static u8 pilot_fuzzing(char** argv) {
 					*(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
 
@@ -8115,9 +8115,9 @@ static u8 pilot_fuzzing(char** argv) {
 					*(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				*(u32*)(out_buf + i) = orig;
 
@@ -8152,7 +8152,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 		/* Setting 8-bit integers. */
 
-		for (i = 0; i < len; i++) {
+		for (i = 0; i < len; ++i) {
 
 			u8 orig = out_buf[i];
 
@@ -8165,13 +8165,13 @@ static u8 pilot_fuzzing(char** argv) {
 
 			stage_cur_byte = i;
 
-			for (j = 0; j < sizeof(interesting_8); j++) {
+			for (j = 0; j < sizeof(interesting_8); ++j) {
 
 				/* Skip if the value could be a product of bitflips or arithmetics. */
 
 				if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
 					could_be_arith(orig, (u8)interesting_8[j], 1)) {
-					stage_max--;
+					--stage_max;
 					continue;
 				}
 
@@ -8181,7 +8181,7 @@ static u8 pilot_fuzzing(char** argv) {
 				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
 				out_buf[i] = orig;
-				stage_cur++;
+				++stage_cur;
 
 			}
 
@@ -8208,7 +8208,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len - 1; i++) {
+		for (i = 0; i < len - 1; ++i) {
 
 			u16 orig = *(u16*)(out_buf + i);
 
@@ -8221,7 +8221,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 			stage_cur_byte = i;
 
-			for (j = 0; j < sizeof(interesting_16) / 2; j++) {
+			for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
 
 				stage_cur_val = interesting_16[j];
 
@@ -8237,9 +8237,9 @@ static u8 pilot_fuzzing(char** argv) {
 					*(u16*)(out_buf + i) = interesting_16[j];
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
 					!could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
@@ -8250,9 +8250,9 @@ static u8 pilot_fuzzing(char** argv) {
 
 					*(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 			}
 
@@ -8281,7 +8281,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len - 3; i++) {
+		for (i = 0; i < len - 3; ++i) {
 
 			u32 orig = *(u32*)(out_buf + i);
 
@@ -8295,7 +8295,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 			stage_cur_byte = i;
 
-			for (j = 0; j < sizeof(interesting_32) / 4; j++) {
+			for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
 
 				stage_cur_val = interesting_32[j];
 
@@ -8311,9 +8311,9 @@ static u8 pilot_fuzzing(char** argv) {
 					*(u32*)(out_buf + i) = interesting_32[j];
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
 					!could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
@@ -8324,9 +8324,9 @@ static u8 pilot_fuzzing(char** argv) {
 
 					*(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 			}
 
@@ -8365,7 +8365,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len; i++) {
+		for (i = 0; i < len; ++i) {
 
 			u32 last_len = 0;
 
@@ -8376,7 +8376,7 @@ static u8 pilot_fuzzing(char** argv) {
 			   between writes at a particular offset determined by the outer
 			   loop. */
 
-			for (j = 0; j < extras_cnt; j++) {
+			for (j = 0; j < extras_cnt; ++j) {
 
 				/* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
 				   skip them if there's no room to insert the payload, if the token
@@ -8388,7 +8388,7 @@ static u8 pilot_fuzzing(char** argv) {
 					!memcmp(extras[j].data, out_buf + i, extras[j].len) ||
 					!memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
 
-					stage_max--;
+					--stage_max;
 					continue;
 
 				}
@@ -8398,7 +8398,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-				stage_cur++;
+				++stage_cur;
 
 			}
 
@@ -8426,14 +8426,14 @@ static u8 pilot_fuzzing(char** argv) {
 
 		ex_tmp = ck_alloc(len + MAX_DICT_FILE);
 
-		for (i = 0; i <= len; i++) {
+		for (i = 0; i <= len; ++i) {
 
 			stage_cur_byte = i;
 
-			for (j = 0; j < extras_cnt; j++) {
+			for (j = 0; j < extras_cnt; ++j) {
 
 				if (len + extras[j].len > MAX_FILE) {
-					stage_max--;
+					--stage_max;
 					continue;
 				}
 
@@ -8448,7 +8448,7 @@ static u8 pilot_fuzzing(char** argv) {
 					goto abandon_entry;
 				}
 
-				stage_cur++;
+				++stage_cur;
 
 			}
 
@@ -8478,13 +8478,13 @@ static u8 pilot_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len; i++) {
+		for (i = 0; i < len; ++i) {
 
 			u32 last_len = 0;
 
 			stage_cur_byte = i;
 
-			for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); j++) {
+			for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
 
 				/* See the comment in the earlier code; extras are sorted by size. */
 
@@ -8492,7 +8492,7 @@ static u8 pilot_fuzzing(char** argv) {
 					!memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
 					!memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) {
 
-					stage_max--;
+					--stage_max;
 					continue;
 
 				}
@@ -8502,7 +8502,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-				stage_cur++;
+				++stage_cur;
 
 			}
 
@@ -8612,20 +8612,20 @@ static u8 pilot_fuzzing(char** argv) {
 
 
 
-				for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+				for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
 					u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
 
 					stage_cur_val = use_stacking;
 
 
-					for (i = 0; i < operator_num; i++)
+					for (i = 0; i < operator_num; ++i)
 					{
 						stage_cycles_puppet_v3[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i];
 					}
 
 
-					for (i = 0; i < use_stacking; i++) {
+					for (i = 0; i < use_stacking; ++i) {
 
 						switch (select_algorithm()) {
 
@@ -8933,7 +8933,7 @@ static u8 pilot_fuzzing(char** argv) {
 					{
 						u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found;
 						total_puppet_find = total_puppet_find + temp_temp_puppet;
-						for (i = 0; i < 16; i++)
+						for (i = 0; i < 16; ++i)
 						{
 							if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet_v3[swarm_now][i])
 								stage_finds_puppet_v2[swarm_now][i] += temp_temp_puppet;
@@ -8991,7 +8991,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 					while (target && (target->len < 2 || target == queue_cur)) {
 						target = target->next;
-						splicing_with++;
+						++splicing_with;
 					}
 
 					if (!target) goto retry_splicing_puppet;
@@ -9053,8 +9053,8 @@ static u8 pilot_fuzzing(char** argv) {
 
 				   // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) {
 				   //   queue_cur->was_fuzzed = 1;
-				   //   pending_not_fuzzed--;
-				   //   if (queue_cur->favored) pending_favored--;
+				   //   --pending_not_fuzzed;
+				   //   if (queue_cur->favored) --pending_favored;
 				   // }
 
 				munmap(orig_in, queue_cur->len);
@@ -9083,7 +9083,7 @@ static u8 pilot_fuzzing(char** argv) {
 					temp_puppet_find = total_puppet_find;
 
 					u64 temp_stage_finds_puppet = 0;
-					for (i = 0; i < operator_num; i++) {
+					for (i = 0; i < operator_num; ++i) {
 						double temp_eff = 0.0;
 
 						if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet[swarm_now][i])
@@ -9103,7 +9103,7 @@ static u8 pilot_fuzzing(char** argv) {
 					swarm_now = swarm_now + 1;
 						if (swarm_now == swarm_num) {
 							key_module = 1;
-							for (i = 0; i < operator_num; i++) {
+							for (i = 0; i < operator_num; ++i) {
 								core_operator_cycles_puppet_v2[i] = core_operator_cycles_puppet[i];
 								core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet[i];
 								core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i];
@@ -9111,7 +9111,7 @@ static u8 pilot_fuzzing(char** argv) {
 
 							double swarm_eff = 0.0;
 							swarm_now = 0;
-							for (i = 0; i < swarm_num; i++)	{
+							for (i = 0; i < swarm_num; ++i)	{
 								if (swarm_fitness[i] > swarm_eff) {
 									swarm_eff = swarm_fitness[i];
 									swarm_now = i;
@@ -9237,7 +9237,7 @@ static u8 core_fuzzing(char** argv) {
 			}
 
 			if (stop_soon || res != crash_mode) {
-				cur_skipped_paths++;
+				++cur_skipped_paths;
 				goto abandon_entry;
 			}
 
@@ -9255,7 +9255,7 @@ static u8 core_fuzzing(char** argv) {
 				FATAL("Unable to execute target application");
 
 			if (stop_soon) {
-				cur_skipped_paths++;
+				++cur_skipped_paths;
 				goto abandon_entry;
 			}
 
@@ -9321,7 +9321,7 @@ static u8 core_fuzzing(char** argv) {
 
 		prev_cksum = queue_cur->exec_cksum;
 
-		for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
 			stage_cur_byte = stage_cur >> 3;
 
@@ -9368,7 +9368,7 @@ static u8 core_fuzzing(char** argv) {
 					   final character and force output. */
 
 					if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-					a_len++;
+					++a_len;
 
 					if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
 						maybe_add_auto(a_collect, a_len);
@@ -9393,7 +9393,7 @@ static u8 core_fuzzing(char** argv) {
 				if (cksum != queue_cur->exec_cksum) {
 
 					if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
-					a_len++;
+					++a_len;
 
 				}
 
@@ -9416,7 +9416,7 @@ static u8 core_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
 			stage_cur_byte = stage_cur >> 3;
 
@@ -9445,7 +9445,7 @@ static u8 core_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
 			stage_cur_byte = stage_cur >> 3;
 
@@ -9490,7 +9490,7 @@ static u8 core_fuzzing(char** argv) {
 
 		if (EFF_APOS(len - 1) != 0) {
 			eff_map[EFF_APOS(len - 1)] = 1;
-			eff_cnt++;
+			++eff_cnt;
 		}
 
 		/* Walking byte. */
@@ -9502,7 +9502,7 @@ static u8 core_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+		for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
 			stage_cur_byte = stage_cur;
 
@@ -9529,7 +9529,7 @@ static u8 core_fuzzing(char** argv) {
 
 				if (cksum != queue_cur->exec_cksum) {
 					eff_map[EFF_APOS(stage_cur)] = 1;
-					eff_cnt++;
+					++eff_cnt;
 				}
 
 			}
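
The eff_map consulted throughout these stages is AFL's "effector map":
the input is divided into small aligned blocks, and a block is marked
effective only if flipping a byte inside it changed the execution path
checksum, so that later deterministic stages can skip bytes that
demonstrably do nothing. A minimal sketch of the addressing, assuming
the stock afl-fuzz.c definitions (EFF_MAP_SCALE2 == 3, i.e. one map
entry per 8 input bytes):

  #include <stdio.h>

  #define EFF_MAP_SCALE2 3                        /* 2^3 = 8 bytes/block */
  #define EFF_APOS(_p)   ((_p) >> EFF_MAP_SCALE2)       /* byte -> block */
  #define EFF_REM(_x)    ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
  #define EFF_ALEN(_l)   (EFF_APOS(_l) + !!EFF_REM(_l)) /* blocks to cover len */
  #define EFF_SPAN_ALEN(_p, _l) \
    (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)      /* blocks spanned */

  int main(void) {
    /* A 20-byte write at offset 5 spans blocks 0..3 of the map: */
    printf("%d\n", EFF_SPAN_ALEN(5, 20));               /* prints 4 */
    return 0;
  }
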
@@ -9577,12 +9577,12 @@ static u8 core_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len - 1; i++) {
+		for (i = 0; i < len - 1; ++i) {
 
 			/* Let's consult the effector map... */
 
 			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
-				stage_max--;
+				--stage_max;
 				continue;
 			}
 
@@ -9591,7 +9591,7 @@ static u8 core_fuzzing(char** argv) {
 			*(u16*)(out_buf + i) ^= 0xFFFF;
 
 			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-			stage_cur++;
+			++stage_cur;
 
 			*(u16*)(out_buf + i) ^= 0xFFFF;
 
@@ -9617,12 +9617,12 @@ static u8 core_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len - 3; i++) {
+		for (i = 0; i < len - 3; ++i) {
 
 			/* Let's consult the effector map... */
 			if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
 				!eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
-				stage_max--;
+				--stage_max;
 				continue;
 			}
 
@@ -9631,7 +9631,7 @@ static u8 core_fuzzing(char** argv) {
 			*(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
 			if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-			stage_cur++;
+			++stage_cur;
 
 			*(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
@@ -9665,7 +9665,7 @@ static u8 core_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len; i++) {
+		for (i = 0; i < len; ++i) {
 
 			u8 orig = out_buf[i];
 
@@ -9678,7 +9678,7 @@ static u8 core_fuzzing(char** argv) {
 
 			stage_cur_byte = i;
 
-			for (j = 1; j <= ARITH_MAX; j++) {
+			for (j = 1; j <= ARITH_MAX; ++j) {
 
 				u8 r = orig ^ (orig + j);
 
@@ -9691,9 +9691,9 @@ static u8 core_fuzzing(char** argv) {
 					out_buf[i] = orig + j;
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				r = orig ^ (orig - j);
 
@@ -9703,9 +9703,9 @@ static u8 core_fuzzing(char** argv) {
 					out_buf[i] = orig - j;
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				out_buf[i] = orig;
 
@@ -9733,7 +9733,7 @@ static u8 core_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len - 1; i++) {
+		for (i = 0; i < len - 1; ++i) {
 
 			u16 orig = *(u16*)(out_buf + i);
 
@@ -9746,7 +9746,7 @@ static u8 core_fuzzing(char** argv) {
 
 			stage_cur_byte = i;
 
-			for (j = 1; j <= ARITH_MAX; j++) {
+			for (j = 1; j <= ARITH_MAX; ++j) {
 
 				u16 r1 = orig ^ (orig + j),
 					r2 = orig ^ (orig - j),
@@ -9766,9 +9766,9 @@ static u8 core_fuzzing(char** argv) {
 					*(u16*)(out_buf + i) = orig + j;
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
 
@@ -9776,9 +9776,9 @@ static u8 core_fuzzing(char** argv) {
 					*(u16*)(out_buf + i) = orig - j;
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				/* Big endian comes next. Same deal. */
 
@@ -9791,9 +9791,9 @@ static u8 core_fuzzing(char** argv) {
 					*(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				if ((orig >> 8) < j && !could_be_bitflip(r4)) {
 
@@ -9801,9 +9801,9 @@ static u8 core_fuzzing(char** argv) {
 					*(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				*(u16*)(out_buf + i) = orig;
 
@@ -9829,7 +9829,7 @@ static u8 core_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len - 3; i++) {
+		for (i = 0; i < len - 3; ++i) {
 
 			u32 orig = *(u32*)(out_buf + i);
 
@@ -9843,7 +9843,7 @@ static u8 core_fuzzing(char** argv) {
 
 			stage_cur_byte = i;
 
-			for (j = 1; j <= ARITH_MAX; j++) {
+			for (j = 1; j <= ARITH_MAX; ++j) {
 
 				u32 r1 = orig ^ (orig + j),
 					r2 = orig ^ (orig - j),
@@ -9861,9 +9861,9 @@ static u8 core_fuzzing(char** argv) {
 					*(u32*)(out_buf + i) = orig + j;
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
 
@@ -9871,9 +9871,9 @@ static u8 core_fuzzing(char** argv) {
 					*(u32*)(out_buf + i) = orig - j;
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				/* Big endian next. */
 
@@ -9885,9 +9885,9 @@ static u8 core_fuzzing(char** argv) {
 					*(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
 
@@ -9895,9 +9895,9 @@ static u8 core_fuzzing(char** argv) {
 					*(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				*(u32*)(out_buf + i) = orig;
 
@@ -9931,7 +9931,7 @@ static u8 core_fuzzing(char** argv) {
 
 		/* Setting 8-bit integers. */
 
-		for (i = 0; i < len; i++) {
+		for (i = 0; i < len; ++i) {
 
 			u8 orig = out_buf[i];
 
@@ -9944,13 +9944,13 @@ static u8 core_fuzzing(char** argv) {
 
 			stage_cur_byte = i;
 
-			for (j = 0; j < sizeof(interesting_8); j++) {
+			for (j = 0; j < sizeof(interesting_8); ++j) {
 
 				/* Skip if the value could be a product of bitflips or arithmetics. */
 
 				if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
 					could_be_arith(orig, (u8)interesting_8[j], 1)) {
-					stage_max--;
+					--stage_max;
 					continue;
 				}
 
@@ -9960,7 +9960,7 @@ static u8 core_fuzzing(char** argv) {
 				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
 				out_buf[i] = orig;
-				stage_cur++;
+				++stage_cur;
 
 			}
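
The could_be_bitflip() / could_be_arith() guards in the arithmetic and
interesting-value stages above skip executions whose effect an earlier
deterministic stage already produced: the candidate write is XOR-ed
against the original value, and if the resulting bit pattern is one the
walking bitflip stages generate, stage_max is decremented instead of
running the target. A simplified sketch of the bitflip check, following
the shape of the stock routine in afl-fuzz.c:

  #include <stdio.h>

  /* Could XOR-ing xor_val into the data have been produced by an
     earlier deterministic bitflip stage? */
  static int could_be_bitflip_sketch(unsigned int xor_val) {

    unsigned int sh = 0;

    if (!xor_val) return 1;            /* no change: trivially covered */

    while (!(xor_val & 1)) { ++sh; xor_val >>= 1; }  /* strip low zeros */

    /* 1-, 2- and 4-bit runs can start at any bit position, because the
       walking bitflip stages step one bit at a time. */
    if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;

    /* 8-, 16- and 32-bit runs occur only byte-aligned, because those
       stages step one byte at a time. */
    if (sh & 7) return 0;

    if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff)
      return 1;

    return 0;
  }

  int main(void) {
    printf("%d\n", could_be_bitflip_sketch(0x30));  /* 2-bit run -> 1 */
    printf("%d\n", could_be_bitflip_sketch(0x7f));  /* 7-bit run -> 0 */
    return 0;
  }
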
 
@@ -9985,7 +9985,7 @@ static u8 core_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len - 1; i++) {
+		for (i = 0; i < len - 1; ++i) {
 
 			u16 orig = *(u16*)(out_buf + i);
 
@@ -9998,7 +9998,7 @@ static u8 core_fuzzing(char** argv) {
 
 			stage_cur_byte = i;
 
-			for (j = 0; j < sizeof(interesting_16) / 2; j++) {
+			for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
 
 				stage_cur_val = interesting_16[j];
 
@@ -10014,9 +10014,9 @@ static u8 core_fuzzing(char** argv) {
 					*(u16*)(out_buf + i) = interesting_16[j];
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
 					!could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
@@ -10027,9 +10027,9 @@ static u8 core_fuzzing(char** argv) {
 
 					*(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 			}
 
@@ -10057,7 +10057,7 @@ static u8 core_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len - 3; i++) {
+		for (i = 0; i < len - 3; ++i) {
 
 			u32 orig = *(u32*)(out_buf + i);
 
@@ -10071,7 +10071,7 @@ static u8 core_fuzzing(char** argv) {
 
 			stage_cur_byte = i;
 
-			for (j = 0; j < sizeof(interesting_32) / 4; j++) {
+			for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
 
 				stage_cur_val = interesting_32[j];
 
@@ -10087,9 +10087,9 @@ static u8 core_fuzzing(char** argv) {
 					*(u32*)(out_buf + i) = interesting_32[j];
 
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 				if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
 					!could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
@@ -10100,9 +10100,9 @@ static u8 core_fuzzing(char** argv) {
 
 					*(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
 					if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-					stage_cur++;
+					++stage_cur;
 
-				} else stage_max--;
+				} else --stage_max;
 
 			}
 
@@ -10137,7 +10137,7 @@ static u8 core_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len; i++) {
+		for (i = 0; i < len; ++i) {
 
 			u32 last_len = 0;
 
@@ -10148,7 +10148,7 @@ static u8 core_fuzzing(char** argv) {
 			   between writes at a particular offset determined by the outer
 			   loop. */
 
-			for (j = 0; j < extras_cnt; j++) {
+			for (j = 0; j < extras_cnt; ++j) {
 
 				/* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
 				   skip them if there's no room to insert the payload, if the token
@@ -10160,7 +10160,7 @@ static u8 core_fuzzing(char** argv) {
 					!memcmp(extras[j].data, out_buf + i, extras[j].len) ||
 					!memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
 
-					stage_max--;
+					--stage_max;
 					continue;
 
 				}
@@ -10170,7 +10170,7 @@ static u8 core_fuzzing(char** argv) {
 
 				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-				stage_cur++;
+				++stage_cur;
 
 			}
 
@@ -10198,14 +10198,14 @@ static u8 core_fuzzing(char** argv) {
 
 		ex_tmp = ck_alloc(len + MAX_DICT_FILE);
 
-		for (i = 0; i <= len; i++) {
+		for (i = 0; i <= len; ++i) {
 
 			stage_cur_byte = i;
 
-			for (j = 0; j < extras_cnt; j++) {
+			for (j = 0; j < extras_cnt; ++j) {
 
 				if (len + extras[j].len > MAX_FILE) {
-					stage_max--;
+					--stage_max;
 					continue;
 				}
 
@@ -10220,7 +10220,7 @@ static u8 core_fuzzing(char** argv) {
 					goto abandon_entry;
 				}
 
-				stage_cur++;
+				++stage_cur;
 
 			}
 
@@ -10250,13 +10250,13 @@ static u8 core_fuzzing(char** argv) {
 
 		orig_hit_cnt = new_hit_cnt;
 
-		for (i = 0; i < len; i++) {
+		for (i = 0; i < len; ++i) {
 
 			u32 last_len = 0;
 
 			stage_cur_byte = i;
 
-			for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); j++) {
+			for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
 
 				/* See the comment in the earlier code; extras are sorted by size. */
 
@@ -10264,7 +10264,7 @@ static u8 core_fuzzing(char** argv) {
 					!memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
 					!memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) {
 
-					stage_max--;
+					--stage_max;
 					continue;
 
 				}
@@ -10274,7 +10274,7 @@ static u8 core_fuzzing(char** argv) {
 
 				if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
 
-				stage_cur++;
+				++stage_cur;
 
 			}
 
@@ -10332,7 +10332,7 @@ static u8 core_fuzzing(char** argv) {
 		s32 temp_len_puppet;
 		cur_ms_lv = get_cur_time();
 
-		//for (; swarm_now < swarm_num; swarm_now++)
+		//for (; swarm_now < swarm_num; ++swarm_now)
 		{
 			if (key_puppet == 1) {
 				if (unlikely(orig_hit_cnt_puppet == 0)) {
@@ -10368,16 +10368,16 @@ static u8 core_fuzzing(char** argv) {
 				orig_hit_cnt = queued_paths + unique_crashes;
 				havoc_queued = queued_paths;
 
-				for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
+				for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
 
 					u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
 					stage_cur_val = use_stacking;
 
-					for (i = 0; i < operator_num; i++) {
+					for (i = 0; i < operator_num; ++i) {
 						core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet_v2[i];
 					}
 
-					for (i = 0; i < use_stacking; i++) {
+					for (i = 0; i < use_stacking; ++i) {
 
 						switch (select_algorithm()) {
 
@@ -10669,7 +10669,7 @@ static u8 core_fuzzing(char** argv) {
 					{
 						u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found;
 						total_puppet_find = total_puppet_find + temp_temp_puppet;
-						for (i = 0; i < 16; i++)
+						for (i = 0; i < 16; ++i)
 						{
 							if (core_operator_cycles_puppet_v2[i] > core_operator_cycles_puppet_v3[i])
 								core_operator_finds_puppet_v2[i] += temp_temp_puppet;
@@ -10723,7 +10723,7 @@ static u8 core_fuzzing(char** argv) {
 
 					while (target && (target->len < 2 || target == queue_cur)) {
 						target = target->next;
-						splicing_with++;
+						++splicing_with;
 					}
 
 					if (!target) goto retry_splicing_puppet;
@@ -10809,7 +10809,7 @@ static u8 core_fuzzing(char** argv) {
 					new_hit_cnt = queued_paths + unique_crashes;
 
 					u64 temp_stage_finds_puppet = 0;
-					for (i = 0; i < operator_num; i++)
+					for (i = 0; i < operator_num; ++i)
 					{
 
 						core_operator_finds_puppet[i] = core_operator_finds_puppet_v2[i];
@@ -10838,27 +10838,27 @@ void pso_updating(void) {
 	w_now = (w_init - w_end)*(g_max - g_now) / (g_max)+w_end;
 	int tmp_swarm, i, j;
 	u64 temp_operator_finds_puppet = 0;
-	for (i = 0; i < operator_num; i++)
+	for (i = 0; i < operator_num; ++i)
 	{
 		operator_finds_puppet[i] = core_operator_finds_puppet[i];
 
-		for (j = 0; j < swarm_num; j++)
+		for (j = 0; j < swarm_num; ++j)
 		{
 			operator_finds_puppet[i] = operator_finds_puppet[i] + stage_finds_puppet[j][i];
 		}
 		temp_operator_finds_puppet = temp_operator_finds_puppet + operator_finds_puppet[i];
 	}
 
-	for (i = 0; i < operator_num; i++)
+	for (i = 0; i < operator_num; ++i)
 	{
 		if (operator_finds_puppet[i])
 			G_best[i] = (double)((double)(operator_finds_puppet[i]) / (double)(temp_operator_finds_puppet));
 	}
 
-	for (tmp_swarm = 0; tmp_swarm < swarm_num; tmp_swarm++)
+	for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm)
 	{
 		double x_temp = 0.0;
-		for (i = 0; i < operator_num; i++)
+		for (i = 0; i < operator_num; ++i)
 		{
 			probability_now[tmp_swarm][i] = 0.0;
 			v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
@@ -10870,7 +10870,7 @@ void pso_updating(void) {
 			x_temp += x_now[tmp_swarm][i];
 		}
 
-		for (i = 0; i < operator_num; i++)
+		for (i = 0; i < operator_num; ++i)
 		{
 			x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
 			if (likely(i != 0))
@@ -11114,7 +11114,7 @@ EXP_ST void check_binary(u8* fname) {
 
         cur_elem = ck_alloc(delim - env_path + 1);
         memcpy(cur_elem, env_path, delim - env_path);
-        delim++;
+        ++delim;
 
       } else cur_elem = ck_strdup(env_path);
 
@@ -11518,7 +11518,7 @@ static void setup_cmdline_file(char** argv) {
 
   while (argv[i]) {
     fprintf(cmdline_file, "%s\n", argv[i]);
-    i++;
+    ++i;
   }
 
   fclose(cmdline_file);
@@ -11722,7 +11722,7 @@ static void get_core_count(void) {
   if (!f) return;
 
   while (fgets(tmp, sizeof(tmp), f))
-    if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) cpu_core_count++;
+    if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) ++cpu_core_count;
 
   fclose(f);
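
A standalone version of the /proc/stat scan above (Linux-specific; the
aggregate "cpu " line is not counted because its fourth character is a
space rather than a digit):

  #include <ctype.h>
  #include <stdio.h>
  #include <string.h>

  int main(void) {
    char tmp[128];
    int cpu_core_count = 0;
    FILE* f = fopen("/proc/stat", "r");
    if (!f) return 1;
    while (fgets(tmp, sizeof(tmp), f))
      if (!strncmp(tmp, "cpu", 3) && isdigit((unsigned char)tmp[3]))
        ++cpu_core_count;
    fclose(f);
    printf("%d cores\n", cpu_core_count);
    return 0;
  }
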
 
@@ -11738,7 +11738,7 @@ static void get_core_count(void) {
 
     /* Add ourselves, since the 1-minute average doesn't include that yet. */
 
-    cur_runnable++;
+    ++cur_runnable;
 
 #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */
 
@@ -11793,7 +11793,7 @@ static void fix_up_sync(void) {
     if (!isalnum(*x) && *x != '_' && *x != '-')
       FATAL("Non-alphanumeric fuzzer ID specified via -S or -M");
 
-    x++;
+    ++x;
 
   }
 
@@ -11970,12 +11970,12 @@ static void save_cmdline(u32 argc, char** argv) {
   u32 len = 1, i;
   u8* buf;
 
-  for (i = 0; i < argc; i++)
+  for (i = 0; i < argc; ++i)
     len += strlen(argv[i]) + 1;
   
   buf = orig_cmdline = ck_alloc(len);
 
-  for (i = 0; i < argc; i++) {
+  for (i = 0; i < argc; ++i) {
 
     u32 l = strlen(argv[i]);
 
@@ -11992,7 +11992,7 @@ static void save_cmdline(u32 argc, char** argv) {
 
 int stricmp(char const *a, char const *b) {
   int d;
-  for (;; a++, b++) {
+  for (;; ++a, ++b) {
     d = tolower(*a) - tolower(*b);
     if (d != 0 || !*a)
       return d;
@@ -12274,11 +12274,11 @@ int main(int argc, char** argv) {
 			if (g_now > g_max) g_now = 0;
 			w_now = (w_init - w_end)*(g_max - g_now) / (g_max)+w_end;
 
-			for (tmp_swarm = 0; tmp_swarm < swarm_num; tmp_swarm++) {
+			for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) {
 				double total_puppet_temp = 0.0;
 				swarm_fitness[tmp_swarm] = 0.0;
 
-				for (i = 0; i < operator_num; i++) {
+				for (i = 0; i < operator_num; ++i) {
 					stage_finds_puppet[tmp_swarm][i] = 0;
 					probability_now[tmp_swarm][i] = 0.0;
 					x_now[tmp_swarm][i] = ((double)(random() % 7000)*0.0001 + 0.1);
@@ -12290,7 +12290,7 @@ int main(int argc, char** argv) {
 
 				}
 
-				for (i = 0; i < operator_num; i++) {
+				for (i = 0; i < operator_num; ++i) {
 					stage_cycles_puppet_v2[tmp_swarm][i] = stage_cycles_puppet[tmp_swarm][i];
 					stage_finds_puppet_v2[tmp_swarm][i] = stage_finds_puppet[tmp_swarm][i];
 					x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / total_puppet_temp;
@@ -12298,7 +12298,7 @@ int main(int argc, char** argv) {
 
 				double x_temp = 0.0;
 
-				for (i = 0; i < operator_num; i++) {
+				for (i = 0; i < operator_num; ++i) {
 					probability_now[tmp_swarm][i] = 0.0;
 					v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
 
@@ -12312,7 +12312,7 @@ int main(int argc, char** argv) {
 					x_temp += x_now[tmp_swarm][i];
 				}
 
-				for (i = 0; i < operator_num; i++) {
+				for (i = 0; i < operator_num; ++i) {
 					x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
 					if (likely(i != 0))
 						probability_now[tmp_swarm][i] = probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
@@ -12323,7 +12323,7 @@ int main(int argc, char** argv) {
                                     FATAL("ERROR probability");
 			}
 
-			for (i = 0; i < operator_num; i++) {
+			for (i = 0; i < operator_num; ++i) {
 				core_operator_finds_puppet[i] = 0;
 				core_operator_finds_puppet_v2[i] = 0;
 				core_operator_cycles_puppet[i] = 0;
@@ -12496,7 +12496,7 @@ int main(int argc, char** argv) {
 	break;
       }
 
-      i++;
+      ++i;
 
     }
   }
@@ -12544,14 +12544,14 @@ int main(int argc, char** argv) {
 
     if (!queue_cur) {
 
-      queue_cycle++;
+      ++queue_cycle;
       current_entry     = 0;
       cur_skipped_paths = 0;
       queue_cur         = queue;
 
       while (seek_to) {
-        current_entry++;
-        seek_to--;
+        ++current_entry;
+        --seek_to;
         queue_cur = queue_cur->next;
       }
 
@@ -12567,7 +12567,7 @@ int main(int argc, char** argv) {
 
       if (queued_paths == prev_queued) {
 
-        if (use_splicing) cycles_wo_finds++; else use_splicing = 1;
+        if (use_splicing) ++cycles_wo_finds; else use_splicing = 1;
 
       } else cycles_wo_finds = 0;
 
@@ -12592,7 +12592,7 @@ int main(int argc, char** argv) {
     if (stop_soon) break;
 
     queue_cur = queue_cur->next;
-    current_entry++;
+    ++current_entry;
 
     if (most_time_key == 1) {
       u64 cur_ms_lv = get_cur_time();
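
Most of the MOpt hunks above feed the particle swarm optimizer seen in
pso_updating() and main(): each swarm keeps, per mutation operator, a
position x_now (its selection weight), a velocity v_now, a local best
L_best and a global best G_best, and after every update the positions
are normalized into the cumulative probability_now table that
select_algorithm() samples. A self-contained sketch of one update round;
the operator count, RAND_C value, inertia weight and clamping bounds
here are illustrative assumptions, not values taken from this diff:

  #include <stdio.h>
  #include <stdlib.h>

  #define OPERATOR_NUM 16    /* assumed number of mutation operators */
  #define RAND_C       0.36  /* assumed stand-in for the random factor */

  int main(void) {

    double w_now = 0.7;                        /* inertia weight       */
    double v[OPERATOR_NUM], x[OPERATOR_NUM];   /* velocity / position  */
    double l_best[OPERATOR_NUM];               /* per-swarm best       */
    double g_best[OPERATOR_NUM];               /* global best          */
    double prob[OPERATOR_NUM];                 /* cumulative selection */
    double x_sum = 0.0;
    int i;

    for (i = 0; i < OPERATOR_NUM; ++i) {       /* arbitrary start state */
      v[i] = 0.1;
      l_best[i] = g_best[i] = 0.5;
      x[i] = ((double)(random() % 7000)) * 0.0001 + 0.1;
    }

    for (i = 0; i < OPERATOR_NUM; ++i) {
      /* v = w*v + c*(L_best - x) + c*(G_best - x), then move and clamp. */
      v[i] = w_now * v[i] + RAND_C * (l_best[i] - x[i])
                          + RAND_C * (g_best[i] - x[i]);
      x[i] += v[i];
      if (x[i] > 0.98) x[i] = 0.98;            /* assumed bounds */
      if (x[i] < 0.02) x[i] = 0.02;
      x_sum += x[i];
    }

    /* Normalize into a cumulative distribution: select_algorithm() can
       then pick an operator with a single uniform random draw. */
    for (i = 0; i < OPERATOR_NUM; ++i) {
      x[i] /= x_sum;
      prob[i] = (i ? prob[i - 1] : 0.0) + x[i];
    }

    printf("last cumulative probability: %f (should be ~1.0)\n",
           prob[OPERATOR_NUM - 1]);
    return 0;
  }
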
diff --git a/docs/ChangeLog b/docs/ChangeLog
index 5e78610e..dfebb68a 100644
--- a/docs/ChangeLog
+++ b/docs/ChangeLog
@@ -17,6 +17,8 @@ sending a mail to <afl-users+subscribe@googlegroups.com>.
 Version ++2.53d (dev):
 ----------------------
 
+  - llvm 9 is now supported (still needs testing)
+  - fix building qemu on some Ubuntu versions (thanks to floyd!)
   - custom mutator by a loaded library is now supported (thanks to kyakdan!)
   - fix for a few features to support map sizes other than 2^16
   - afl-showmap: new option -r now shows the real values in the buckets (stock
@@ -26,7 +28,6 @@ Version ++2.53d (dev):
   - ... your patch? :)
 
 
-
 --------------------------
 Version ++2.53c (release):
 --------------------------
diff --git a/docs/env_variables.txt b/docs/env_variables.txt
index 36fdc369..93066dbc 100644
--- a/docs/env_variables.txt
+++ b/docs/env_variables.txt
@@ -257,10 +257,14 @@ The QEMU wrapper used to instrument binary-only code supports several settings:
     Use this if you are unsure if the entrypoint might be wrong - but
     use it directly, e.g. afl-qemu-trace ./program 
 
-  - If you want to specify a specific entrypoint into the binary (this can
-    be very good for the performance!), use AFL_ENTRYPOINT for this.
+  - AFL_ENTRYPOINT allows you to specify a specific entrypoint into the
+    binary (this can be very good for performance!).
     The entrypoint is specified as a hex address, e.g. 0x4004110
 
+  - AFL_QEMU_COMPCOV enables the ./libcompcov sub-project in qemu_mode,
+    which implements laf-intel for qemu. It also requires AFL_PRELOAD;
+    more information can be found in qemu_mode/libcompcov/README.compcov
+
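
To make the laf-intel idea above concrete: a multi-byte comparison that
coverage-guided fuzzing can only pass by guessing every byte at once is
decomposed into per-byte branches, each contributing its own coverage
feedback, so afl-fuzz can solve the condition incrementally. A hedged
illustration of the transformation (names invented; this shows the
idea, not libcompcov's actual code):

  #include <stdint.h>
  #include <string.h>

  /* Hard for the fuzzer: a single all-or-nothing branch. */
  static int check_magic_opaque(const uint8_t* p) {
    return memcmp(p, "MAGI", 4) == 0;
  }

  /* After laf-intel style splitting: four branches, so each correctly
     guessed byte yields new coverage and gets kept in the queue. */
  static int check_magic_split(const uint8_t* p) {
    if (p[0] != 'M') return 0;
    if (p[1] != 'A') return 0;
    if (p[2] != 'G') return 0;
    if (p[3] != 'I') return 0;
    return 1;
  }

  int main(void) {
    const uint8_t buf[4] = "MAGI";  /* exactly 4 bytes, no NUL */
    return check_magic_opaque(buf) == check_magic_split(buf) ? 0 : 1;
  }

In qemu_mode this happens at run time by preloading the library: build
libcompcov, point AFL_PRELOAD at the resulting shared object, and set
AFL_QEMU_COMPCOV as described in the entry above.
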
 5) Settings for afl-cmin
 ------------------------
 
diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile
index d01fbbdf..160a8fe6 100644
--- a/llvm_mode/Makefile
+++ b/llvm_mode/Makefile
@@ -27,11 +27,11 @@ VERSION     = $(shell grep '^\#define VERSION ' ../config.h | cut -d '"' -f2)
 
 LLVM_CONFIG ?= llvm-config
 LLVMVER  = $(shell $(LLVM_CONFIG) --version)
-LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version | egrep -q '^9|3.0' && echo 1 || echo 0 )
+LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version | egrep -q '^[12]|^3\.0|^1[0-9]' && echo 1 || echo 0 )
 LLVM_MAJOR = $(shell $(LLVM_CONFIG) --version | sed 's/\..*//')
 
 ifeq "$(LLVM_UNSUPPORTED)" "1"
-  $(warn llvm_mode only supports versions 3.8.0 up to 8.x )
+  $(warning llvm_mode only supports versions 3.8.0 up to 9 )
 endif
 
 # this is not visible yet:
@@ -48,7 +48,7 @@ ifdef AFL_TRACE_PC
 endif
 
 CXXFLAGS    ?= -O3 -funroll-loops
-CXXFLAGS    += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign \
+CXXFLAGS    += -Wall -D_FORTIFY_SOURCE=2 -g \
                -DVERSION=\"$(VERSION)\" -Wno-variadic-macros
 
 CLANG_CFL    = `$(LLVM_CONFIG) --cxxflags` -Wl,-znodelete -fno-rtti -fpic $(CXXFLAGS)
@@ -75,7 +75,7 @@ endif
 
 # sanity check.
 # Are versions of clang --version and llvm-config --version equal?
-CLANGVER = $(shell $(CC) --version | sed -E -ne '/^.*([0-9]\.[0-9]\.[0-9]).*/s//\1/p')
+CLANGVER = $(shell $(CC) --version | sed -E -ne '/^.*version\ ([0-9]\.[0-9]\.[0-9]).*/s//\1/p')
 
 
 ifeq "$(shell echo '\#include <sys/ipc.h>@\#include <sys/shm.h>@int main() { int _id = shmget(IPC_PRIVATE, 65536, IPC_CREAT | IPC_EXCL | 0600); shmctl(_id, IPC_RMID, 0); return 0;}' | tr @ '\n' | $(CC) -x c - -o .test2 2>/dev/null && echo 1 || echo 0 )" "1"
@@ -174,10 +174,10 @@ endif
 test_build: $(PROGS)
 	@echo "[*] Testing the CC wrapper and instrumentation output..."
 	unset AFL_USE_ASAN AFL_USE_MSAN AFL_INST_RATIO; AFL_QUIET=1 AFL_PATH=. AFL_CC=$(CC) AFL_LLVM_LAF_SPLIT_SWITCHES=1 AFL_LLVM_LAF_TRANSFORM_COMPARES=1 AFL_LLVM_LAF_SPLIT_COMPARES=1 ../afl-clang-fast $(CFLAGS) ../test-instr.c -o test-instr $(LDFLAGS)
-	../afl-showmap -m none -q -o .test-instr0 ./test-instr </dev/null
+	../afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null
 	echo 1 | ../afl-showmap -m none -q -o .test-instr1 ./test-instr
 	@rm -f test-instr
-	@cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please ping <lcamtuf@google.com> to troubleshoot the issue."; echo; exit 1; fi
+	@cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please post to https://github.com/vanhauser-thc/AFLplusplus/issues to troubleshoot the issue."; echo; exit 1; fi
 	@echo "[+] All right, the instrumentation seems to be working!"
 
 all_done: test_build
diff --git a/llvm_mode/MarkNodes.cc b/llvm_mode/MarkNodes.cc
index a156fccb..348dc264 100644
--- a/llvm_mode/MarkNodes.cc
+++ b/llvm_mode/MarkNodes.cc
@@ -193,7 +193,7 @@ namespace DominatorTree{
         idom[now] = idom[idom[now]];
     }
   }
-}; // End of DominatorTree
+} // End of DominatorTree
 
 std::vector<uint32_t> Visited, InStack;
 std::vector<uint32_t> TopoOrder, InDeg;
diff --git a/llvm_mode/README.llvm b/llvm_mode/README.llvm
index a0c40211..9bb091ac 100644
--- a/llvm_mode/README.llvm
+++ b/llvm_mode/README.llvm
@@ -8,8 +8,7 @@ Fast LLVM-based instrumentation for afl-fuzz
 1) Introduction
 ---------------
 
-! llvm_mode works with llvm version 3.8.1 up to 8.x !
-! llvm version 9 does not work yet !
+! llvm_mode works with llvm versions 3.8.0 up to 9 !
 
 The code in this directory allows you to instrument programs for AFL using
 true compiler-level instrumentation, instead of the more crude
diff --git a/llvm_mode/split-compares-pass.so.cc b/llvm_mode/split-compares-pass.so.cc
index c025628f..a74b60fa 100644
--- a/llvm_mode/split-compares-pass.so.cc
+++ b/llvm_mode/split-compares-pass.so.cc
@@ -495,14 +495,12 @@ bool SplitComparesTransform::runOnModule(Module &M) {
       errs() << "Running split-compare-pass " << 64 << "\n"; 
       splitCompares(M, 64);
 
-      [[clang::fallthrough]];
-      /* fallthrough */
+      [[clang::fallthrough]]; /*FALLTHRU*/ /* FALLTHROUGH */
     case 32:
       errs() << "Running split-compare-pass " << 32 << "\n"; 
       splitCompares(M, 32);
 
-      [[clang::fallthrough]];
-      /* fallthrough */
+      [[clang::fallthrough]]; /*FALLTHRU*/ /* FALLTHROUGH */
     case 16:
       errs() << "Running split-compare-pass " << 16 << "\n"; 
       splitCompares(M, 16);
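
The annotations merged above serve two audiences at once: compilers
that understand fallthrough attributes, and lint tools that scan for
FALLTHRU / FALLTHROUGH comments. The same pattern in plain C, using the
GNU attribute spelling as an assumption (the pass itself, being C++,
uses [[clang::fallthrough]]):

  #include <stdio.h>

  /* Deliberate cascade, as in SplitComparesTransform::runOnModule()
     above: requesting width N also runs every smaller width. */
  static void run_split_passes(int width) {
    switch (width) {
      case 64:
        printf("splitting 64-bit compares\n");
        __attribute__((fallthrough)); /*FALLTHRU*/
      case 32:
        printf("splitting 32-bit compares\n");
        __attribute__((fallthrough)); /*FALLTHRU*/
      case 16:
        printf("splitting 16-bit compares\n");
        break;
    }
  }

  int main(void) { run_split_passes(64); return 0; }
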
diff --git a/qemu_mode/README.qemu b/qemu_mode/README.qemu
index 124fce12..754c0259 100644
--- a/qemu_mode/README.qemu
+++ b/qemu_mode/README.qemu
@@ -46,7 +46,19 @@ Note: if you want the QEMU helper to be installed on your system for all
 users, you need to build it before issuing 'make install' in the parent
 directory.
 
-3) Notes on linking
+3) Options
+----------
+
+There is ./libcompcov/ which implements laf-intel (splitting memcmp,
+strncmp, etc. into byte-wise comparisons to make these conditions
+easier for afl-fuzz to solve). Highly recommended.
+
+Another option is the environment variable AFL_ENTRYPOINT, which allows
+moving the forkserver to a different point, e.g. just before the input
+file is opened (i.e. well after command line parsing and config file
+loading), which can be a huge speed improvement.
+
+4) Notes on linking
 -------------------
 
 The feature is supported only on Linux. Supporting BSD may amount to porting
@@ -68,7 +80,7 @@ practice, this means two things:
 Setting AFL_INST_LIBS=1 can be used to circumvent the .text detection logic
 and instrument every basic block encountered.
 
-4) Benchmarking
+5) Benchmarking
 ---------------
 
 If you want to compare the performance of the QEMU instrumentation with that of
@@ -84,7 +96,7 @@ Comparative measurements of execution speed or instrumentation coverage will be
 fairly meaningless if the optimization levels or instrumentation scopes don't
 match.
 
-5) Gotchas, feedback, bugs
+6) Gotchas, feedback, bugs
 --------------------------
 
 If you need to fix up checksums or do other cleanup on mutated test cases, see
@@ -106,7 +118,7 @@ with -march=core2, can help.
 Beyond that, this is an early-stage mechanism, so field reports are welcome.
 You can send them to <afl-users@googlegroups.com>.
 
-6) Alternatives: static rewriting
+7) Alternatives: static rewriting
 ---------------------------------
 
 Statically rewriting binaries just once, instead of attempting to translate
@@ -114,12 +126,11 @@ them at run time, can be a faster alternative. That said, static rewriting is
 fraught with peril, because it depends on being able to properly and fully model
 program control flow without actually executing each and every code path.
 
-If you want to experiment with this mode of operation, there is a module
-contributed by Aleksandar Nikolich:
+The best implementation is this one:
 
   https://github.com/vanhauser-thc/afl-dyninst
-  https://groups.google.com/forum/#!topic/afl-users/HlSQdbOTlpg
 
-At this point, the author reports the possibility of hiccups with stripped
-binaries. That said, if we can get it to be comparably reliable to QEMU, we may
-decide to switch to this mode, but I had no time to play with it yet.
+The issue, however, is that Dyninst does not rewrite the binaries in a
+way that keeps them stable: a lot of crashes happen, especially in C++
+programs that use throw/catch. Try it first, and if it works for you,
+be happy, as it is 2-3x as fast as qemu_mode.
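
The AFL_ENTRYPOINT speedup described above is qemu_mode's version of a
deferred forkserver: everything before the chosen address runs once,
and every fuzzed execution forks from that point onward. In
source-instrumented (llvm_mode) builds the same effect is available via
__AFL_INIT(); a hedged sketch, where heavy_setup() is a hypothetical
stand-in for the one-time work:

  #include <unistd.h>

  /* hypothetical: config parsing, table building - work that should
     not be repeated for every test case */
  static void heavy_setup(void) { }

  int main(void) {

    heavy_setup();          /* paid once, before the fork point */

  #ifdef __AFL_HAVE_MANUAL_CONTROL
    __AFL_INIT();           /* llvm_mode: the forkserver starts here */
  #endif

    /* everything below runs once per test case */
    char buf[64];
    if (read(0, buf, sizeof(buf)) < 1) return 1;
    return 0;
  }

Under qemu_mode there is no source to edit, so the equivalent is
exporting AFL_ENTRYPOINT with the hex address of the first instruction
after setup.
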
diff --git a/qemu_mode/patches/syscall.diff b/qemu_mode/patches/syscall.diff
index cb2acfcd..60b5905e 100644
--- a/qemu_mode/patches/syscall.diff
+++ b/qemu_mode/patches/syscall.diff
@@ -2,9 +2,10 @@ diff --git a/linux-user/syscall.c b/linux-user/syscall.c
 index 280137da..8c0e749f 100644
 --- a/linux-user/syscall.c
 +++ b/linux-user/syscall.c
-@@ -112,6 +112,8 @@
+@@ -112,6 +112,9 @@
  #include "qemu.h"
  #include "fd-trans.h"
++#include <linux/sockios.h>
  
 +extern unsigned int afl_forksrv_pid;
 +
@@ -32,4 +33,4 @@ index 280137da..8c0e749f 100644
 +        }
  
  #ifdef TARGET_NR_set_robust_list
-     case TARGET_NR_set_robust_list:
+     case TARGET_NR_set_robust_list:
\ No newline at end of file
diff --git a/test-instr.c b/test-instr.c
index 1b978c55..9107f15e 100644
--- a/test-instr.c
+++ b/test-instr.c
@@ -20,9 +20,12 @@
 
 int main(int argc, char** argv) {
 
-  char buf[8];
+  char buff[8];
+  char *buf = buff;
 
-  if (read(0, buf, sizeof(buf)) < 1) {
+  if (argc > 1)
+    buf = argv[1];
+  else if (read(0, buf, sizeof(buff)) < 1) {
     printf("Hum?\n");
     exit(1);
   }
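
With this change the instrumentation self-test can take its input from
argv[1] as well as from stdin. (Note the read size: since buf is now a
pointer, sizeof(buf) would be the pointer size, hence the sizeof(buff)
fix above.) For reference, a sketch of main() as it reads after the
diff; the final branch on buf[0] is assumed from the original
test-instr.c:

  #include <stdio.h>
  #include <stdlib.h>
  #include <unistd.h>

  int main(int argc, char** argv) {

    char buff[8];
    char *buf = buff;

    if (argc > 1)
      buf = argv[1];                             /* input via argv  */
    else if (read(0, buf, sizeof(buff)) < 1) {   /* input via stdin */
      printf("Hum?\n");
      exit(1);
    }

    if (buf[0] == '0')
      printf("Looks like a zero to me!\n");
    else
      printf("A non-zero value? How quaint!\n");

    exit(0);
  }
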