-rw-r--r--  custom_mutators/README.md                      |   10
-rw-r--r--  custom_mutators/autotokens/Makefile            |   16
-rw-r--r--  custom_mutators/autotokens/README              |   24
-rw-r--r--  custom_mutators/autotokens/TODO                |    6
-rw-r--r--  custom_mutators/autotokens/autotokens.cpp      | 1143
-rw-r--r--  custom_mutators/examples/custom_send.c         |    9
-rw-r--r--  custom_mutators/examples/example.c             |    4
-rw-r--r--  custom_mutators/examples/post_library_gif.so.c |    4
-rw-r--r--  docs/custom_mutators.md                        |   12
-rw-r--r--  docs/env_variables.md                          |    3
-rw-r--r--  include/afl-fuzz.h                             |   27
-rw-r--r--  include/config.h                               |    6
-rw-r--r--  include/envs.h                                 |    1
m---------  qemu_mode/qemuafl                              |    0
-rw-r--r--  src/afl-fuzz-bitmap.c                          |    6
-rw-r--r--  src/afl-fuzz-mutators.c                        |   13
-rw-r--r--  src/afl-fuzz-one.c                             |   52
-rw-r--r--  src/afl-fuzz-python.c                          |   16
-rw-r--r--  src/afl-fuzz-state.c                           |    7
-rw-r--r--  src/afl-fuzz.c                                 |   26
20 files changed, 1350 insertions, 35 deletions
diff --git a/custom_mutators/README.md b/custom_mutators/README.md
index 0289e150..8d01856f 100644
--- a/custom_mutators/README.md
+++ b/custom_mutators/README.md
@@ -11,6 +11,16 @@ The `./examples` folder contains examples for custom mutators in python and C.
 
 In `./rust`, you will find rust bindings, including a simple example in `./rust/example` and an example for structured fuzzing, based on lain, in `./rust/example_lain`.
 
+## The AFL++ grammar agnostic grammar mutator
+
+In `./autotokens` you will find a token-level fuzzer that does not need to
+know anything about the grammar of an input, as long as the input is ASCII
+text and the grammar tolerates whitespace.
+It is very fast and effective.
+
+If you are looking for an example of how to effectively create a custom
+mutator, take a look at this one.
+
 ## The AFL++ Grammar Mutator
 
 If you use git to clone AFL++, then the following will incorporate our
diff --git a/custom_mutators/autotokens/Makefile b/custom_mutators/autotokens/Makefile
new file mode 100644
index 00000000..ab1da4b6
--- /dev/null
+++ b/custom_mutators/autotokens/Makefile
@@ -0,0 +1,16 @@
+ifdef debug
+	CFLAGS += -fsanitize=address -Wall
+	CXX := clang++
+endif
+ifdef DEBUG
+	CFLAGS += -fsanitize=address -Wall
+	CXX := clang++
+endif
+
+all:	autotokens.so
+
+autotokens.so:	autotokens.cpp
+	$(CXX) -g -O3 $(CFLAGS) -shared -fPIC -o autotokens.so -I../../include autotokens.cpp ../../src/afl-performance.o
+
+clean:
+	rm -f autotokens.so *~ core
diff --git a/custom_mutators/autotokens/README b/custom_mutators/autotokens/README
new file mode 100644
index 00000000..f82dcd98
--- /dev/null
+++ b/custom_mutators/autotokens/README
@@ -0,0 +1,24 @@
+# autotokens
+
+This implements an improved version of the grammar fuzzing idea presented in
+[Token-Level Fuzzing](https://www.usenix.org/system/files/sec21-salls.pdf).
+It is a grammar fuzzer that does not actually need to know the grammar.
+
+It is recommended to run it together with `CMPLOG` in the same instance.
+
+Providing a dictionary (`-x`) further improves this custom grammar mutator.
+
+If you are **not** running with `CMPLOG`, you can set
+`AFL_CUSTOM_MUTATOR_ONLY` to concentrate on grammar bug classes.
+
+Do **not** set `AFL_DISABLE_TRIM` with this custom mutator!
+
+## Configuration via environment variables
+
+`AUTOTOKENS_ONLY_FAV` - only use this mutator on favorite queue items
+`AUTOTOKENS_COMMENT` - what character or string starts a comment which will be
+                       removed. Default: `/* ... */`
+`AUTOTOKENS_ALTERNATIVE_TOKENIZE` - use an alternative tokenize implementation
+                                   (experimental)
+`AUTOTOKENS_WHITESPACE` - whitespace string to use for ALTERNATIVE_TOKENIZE,
+                          default is " "
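+
+## Usage
+
+A short sketch of a typical invocation (paths are relative to the AFL++
+checkout, adjust as needed): build `autotokens.so` via `make`, then start
+afl-fuzz with
+`AFL_CUSTOM_MUTATOR_LIBRARY=custom_mutators/autotokens/autotokens.so`.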
diff --git a/custom_mutators/autotokens/TODO b/custom_mutators/autotokens/TODO
new file mode 100644
index 00000000..2e39511c
--- /dev/null
+++ b/custom_mutators/autotokens/TODO
@@ -0,0 +1,6 @@
+cmplog: only add tokens that were found to fit?
+
+create from thin air if no good seed after a cycle and dict large enough?
+(static u32 no_of_struct_inputs;) 
+
+splicing -> check if whitespace/token is needed
diff --git a/custom_mutators/autotokens/autotokens.cpp b/custom_mutators/autotokens/autotokens.cpp
new file mode 100644
index 00000000..f9b5bd2e
--- /dev/null
+++ b/custom_mutators/autotokens/autotokens.cpp
@@ -0,0 +1,1143 @@
+extern "C" {
+
+#include "afl-fuzz.h"
+
+}
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include <iostream>
+#include <fstream>
+#include <unordered_map>
+#include <vector>
+#include <regex>
+
+#define AUTOTOKENS_DEBUG 0
+#define AUTOTOKENS_ONLY_FAV 0
+#define AUTOTOKENS_ALTERNATIVE_TOKENIZE 0
+#define AUTOTOKENS_CHANGE_MIN 8
+#define AUTOTOKENS_WHITESPACE " "
+#define AUTOTOKENS_SIZE_MIN 8
+#define AUTOTOKENS_SPLICE_MIN 4
+#define AUTOTOKENS_SPLICE_MAX 64
+#ifndef AUTOTOKENS_SPLICE_DISABLE
+  #define AUTOTOKENS_SPLICE_DISABLE 0
+#endif
+
+#if AUTOTOKENS_SPLICE_MIN >= AUTOTOKENS_SIZE_MIN
+  #error SPLICE_MIN must be lower than SIZE_MIN
+#endif
+
+using namespace std;
+
+typedef struct my_mutator {
+
+  afl_state *afl;
+
+} my_mutator_t;
+
+#undef DEBUGF
+#define DEBUGF \
+  if (unlikely(debug)) fprintf
+#define IFDEBUG if (unlikely(debug))
+
+static afl_state *afl_ptr;
+static int        debug = AUTOTOKENS_DEBUG;
+static int        only_fav = AUTOTOKENS_ONLY_FAV;
+static int        alternative_tokenize = AUTOTOKENS_ALTERNATIVE_TOKENIZE;
+static u32        current_id;
+static u32        valid_structures;
+static u32        whitespace_ids;
+static u32        extras_cnt, a_extras_cnt;
+static u64        all_spaces, all_tabs, all_lf, all_ws;
+static u64        all_structure_items;
+static u64        fuzz_count;
+static unordered_map<string, vector<u32> *> file_mapping;
+static unordered_map<u32, vector<u32> *>    id_mapping;
+static unordered_map<string, u32>           token_to_id;
+static unordered_map<u32, string>           id_to_token;
+static string                               whitespace = AUTOTOKENS_WHITESPACE;
+static string                               output;
+static regex                               *regex_comment_custom;
+static regex        regex_comment_star("/\\*([[:print:]]|\n)*?\\*/",
+                                       regex::multiline | regex::optimize);
+static regex        regex_word("[A-Za-z0-9_$.-]+", regex::optimize);
+static regex        regex_whitespace(R"([ \t]+)", regex::optimize);
+static vector<u32> *s;  // the structure of the currently selected input
+
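+/* Pick a replacement/filler token id: either a random single-character
+   token, or one of the space/tab/linefeed tokens, weighted by how often
+   each whitespace type occurred in the inputs seen so far. */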
+u32 good_whitespace_or_singleval() {
+
+  u32 i = rand_below(afl_ptr, current_id);
+  if (id_to_token[i].size() == 1) { return i; }
+  i = rand_below(afl_ptr, all_ws);
+  if (i < all_spaces) {
+
+    return 0;
+
+  } else if (i < all_tabs) {
+
+    return 1;
+
+  } else
+
+    return 2;  // linefeed
+
+}
+
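+/* The main mutation function: take the token structure of the current
+   queue entry, apply several rounds of token-level mutations (change,
+   insert, splice, erase) and serialize the result back into a byte
+   buffer. */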
+extern "C" size_t afl_custom_fuzz(my_mutator_t *data, u8 *buf, size_t buf_size,
+                                  u8 **out_buf, u8 *add_buf,
+                                  size_t add_buf_size, size_t max_size) {
+
+  (void)(data);
+
+  if (s == NULL) {
+
+    *out_buf = NULL;
+    return 0;
+
+  }
+
+  vector<u32> m = *s;  // copy of the structure we will modify
+  u32         i, m_size = (u32)m.size();
+
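+  // number of rounds: at least AUTOTOKENS_CHANGE_MIN, at most the smaller
+  // of one eighth of the structure size and the havoc-scaled budget for
+  // this queue entry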
+  u32 rounds =
+      MAX(AUTOTOKENS_CHANGE_MIN,
+          MIN(m_size >> 3, HAVOC_CYCLES * afl_ptr->queue_cur->perf_score *
+                               afl_ptr->havoc_div / 256));
+  // DEBUGF(stderr, "structure size: %lu, rounds: %u \n", m.size(), rounds);
+
+#if AUTOTOKENS_SPLICE_DISABLE == 1
+  #define AUTOTOKENS_MUT_MAX 18
+#else
+  #define AUTOTOKENS_MUT_MAX 27
+#endif
+
+  u32 max_rand = AUTOTOKENS_MUT_MAX, new_item, pos;
+
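+  // mutation selection: cases 0-9 change a token, 10-13 insert one,
+  // 14-22 splice from another structure (if enabled), the rest erase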
+  for (i = 0; i < rounds; ++i) {
+
+    switch (rand_below(afl_ptr, max_rand)) {
+
+      /* CHANGE/MUTATE single item */
+      case 0 ... 9: {
+
+        pos = rand_below(afl_ptr, m_size);
+        u32 cur_item = m[pos];
+        do {
+
+          new_item = rand_below(afl_ptr, current_id);
+
+        } while (unlikely(
+
+            new_item == cur_item ||
+            (!alternative_tokenize &&
+             ((whitespace_ids < new_item && whitespace_ids >= cur_item) ||
+              (whitespace_ids >= new_item && whitespace_ids < cur_item)))));
+
+        DEBUGF(stderr, "MUT: %u -> %u\n", cur_item, new_item);
+        m[pos] = new_item;
+        break;
+
+      }
+
+      /* INSERT (m_size +1 so we insert also after last place) */
+      case 10 ... 13: {
+
+        do {
+
+          new_item = rand_below(afl_ptr, current_id);
+
+        } while (unlikely(!alternative_tokenize && new_item >= whitespace_ids));
+
+        u32 pos = rand_below(afl_ptr, m_size + 1);
+        m.insert(m.begin() + pos, new_item);
+        ++m_size;
+        DEBUGF(stderr, "INS: %u at %u\n", new_item, pos);
+
+        if (likely(!alternative_tokenize)) {
+
+          // if we insert an identifier or string we might need whitespace
+          if (id_to_token[new_item].size() > 1) {
+
+            // need to insert before?
+
+            if (pos && m[pos - 1] >= whitespace_ids &&
+                id_to_token[m[pos - 1]].size() > 1) {
+
+              m.insert(m.begin() + pos, good_whitespace_or_singleval());
+              ++m_size;
+
+            }
+
+            if (pos + 1 < m_size && m[pos + 1] >= whitespace_ids &&
+                id_to_token[m[pos + 1]].size() > 1) {
+
+              // need to insert after?
+
+              m.insert(m.begin() + pos + 1, good_whitespace_or_singleval());
+              ++m_size;
+
+            }
+
+          }
+
+        }
+
+        break;
+
+      }
+
+#if AUTOTOKENS_SPLICE_DISABLE != 1
+      /* SPLICING */
+      case 14 ... 22: {
+
+        u32  strategy = rand_below(afl_ptr, 4), dst_off, n;
+        auto src = id_mapping[rand_below(afl_ptr, valid_structures)];
+        u32  src_size = src->size();
+        u32  src_off = rand_below(afl_ptr, src_size - AUTOTOKENS_SPLICE_MIN);
+        u32  rand_r = 1 + MAX(AUTOTOKENS_SPLICE_MIN,
+                              MIN(AUTOTOKENS_SPLICE_MAX, src_size - src_off));
+
+        switch (strategy) {
+
+          // insert
+          case 0: {
+
+            dst_off = rand_below(afl_ptr, m_size);
+            n = AUTOTOKENS_SPLICE_MIN +
+                rand_below(afl_ptr, MIN(AUTOTOKENS_SPLICE_MAX,
+                                        rand_r - AUTOTOKENS_SPLICE_MIN));
+            m.insert(m.begin() + dst_off, src->begin() + src_off,
+                     src->begin() + src_off + n);
+            m_size += n;
+            DEBUGF(stderr, "SPLICE-INS: %u at %u\n", n, dst_off);
+
+            break;
+
+          }
+
+          // overwrite
+          default: {
+
+            dst_off = rand_below(afl_ptr, m_size - AUTOTOKENS_SPLICE_MIN);
+            n = AUTOTOKENS_SPLICE_MIN +
+                rand_below(
+                    afl_ptr,
+                    MIN(AUTOTOKENS_SPLICE_MAX - AUTOTOKENS_SPLICE_MIN,
+                        MIN(m_size - dst_off - AUTOTOKENS_SPLICE_MIN,
+                            src_size - src_off - AUTOTOKENS_SPLICE_MIN)));
+
+            copy(src->begin() + src_off, src->begin() + src_off + n,
+                 m.begin() + dst_off);
+
+            DEBUGF(stderr, "SPLICE-MUT: %u at %u\n", n, dst_off);
+            break;
+
+          }
+
+        }
+
+        if (likely(!alternative_tokenize)) {
+
+          // do we need a whitespace/token at the beginning?
+          if (dst_off && id_to_token[m[dst_off - 1]].size() > 1 &&
+              id_to_token[m[dst_off]].size() > 1) {
+
+            m.insert(m.begin() + dst_off, good_whitespace_or_singleval());
+            ++m_size;
+
+          }
+
+          // do we need a whitespace/token at the end?
+          if (dst_off + n < m_size &&
+              id_to_token[m[dst_off + n - 1]].size() > 1 &&
+              id_to_token[m[dst_off + n]].size() > 1) {
+
+            m.insert(m.begin() + dst_off + n, good_whitespace_or_singleval());
+            ++m_size;
+
+          }
+
+        }
+
+        break;
+
+      }
+
+#endif
+
+      /* ERASE - only if large enough */
+      default: {
+
+        if (m_size > 8) {
+
+          do {
+
+            pos = rand_below(afl_ptr, m_size);
+
+          } while (unlikely(m[pos] < whitespace_ids));
+
+          // if what we delete will result in a missing whitespace/token,
+          // instead of deleting we switch the item to a whitespace or token.
+          if (likely(!alternative_tokenize) && pos && pos + 1 < m_size &&
+              id_to_token[m[pos - 1]].size() > 1 &&
+              id_to_token[m[pos + 1]].size() > 1) {
+
+            m[pos] = good_whitespace_or_singleval();
+
+          } else {
+
+            m.erase(m.begin() + pos);
+            --m_size;
+
+          }
+
+        } else {
+
+          // if the data is already too small do not try to make it smaller
+          // again this run.
+
+          max_rand -= 4;
+
+        }
+
+        break;
+
+      }
+
+    }
+
+  }
+
+  u32 m_size_1 = m_size - 1;
+  output = "";
+
+  for (i = 0; i < m_size; ++i) {
+
+    output += id_to_token[m[i]];
+    if (unlikely(alternative_tokenize && i < m_size_1)) {
+
+      output += whitespace;
+
+    }
+
+  }
+
+  u32 mutated_size = (u32)output.size();
+  u8 *mutated_out = (u8 *)output.data();
+
+  if (unlikely(mutated_size > max_size)) { mutated_size = max_size; }
+
+  IFDEBUG {
+
+    DEBUGF(stderr, "MUTATED to %u bytes:\n", mutated_size);
+    fwrite(output.data(), 1, mutated_size, stderr);
+    DEBUGF(stderr, "\n---\n");
+
+  }
+
+  *out_buf = mutated_out;
+  ++fuzz_count;
+  return mutated_size;
+
+}
+
+/* I get f*cking stack overflow using C++ regex with a regex of
+   "\"[[:print:]]*?\"" if this matches a long string even with regex::optimize
+   enabled :-( */
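+/* Scan [cur, ende) for the next complete single- or double-quoted string,
+   honoring backslash escapes; a newline or unprintable byte aborts the
+   current candidate. On success sets *match_begin/*match_end and
+   returns 1. */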
+u8 my_search_string(string::const_iterator cur, string::const_iterator ende,
+                    string::const_iterator *match_begin,
+                    string::const_iterator *match_end) {
+
+  string::const_iterator start = cur, found_begin;
+  u8                     quote_type = 0;
+
+  while (cur < ende) {
+
+    switch (*cur) {
+
+      case '"': {
+
+        if (cur == start || *(cur - 1) != '\\') {
+
+          if (!quote_type) {
+
+            found_begin = cur;
+            quote_type = 1;
+
+          } else if (quote_type == 1) {
+
+            *match_begin = found_begin;
+            *match_end = cur + 1;
+            return 1;
+
+          }
+
+        }
+
+        break;
+
+      }
+
+      case '\'': {
+
+        if (cur == start || *(cur - 1) != '\\') {
+
+          if (!quote_type) {
+
+            found_begin = cur;
+            quote_type = 2;
+
+          } else if (quote_type == 2) {
+
+            *match_begin = found_begin;
+            *match_end = cur + 1;
+            return 1;
+
+          }
+
+        }
+
+        break;
+
+      }
+
+      case '\n':
+      case '\r':
+      case 0: {
+
+        quote_type = 0;
+        break;
+
+      }
+
+      default:
+        if (unlikely(quote_type && !isprint(*cur))) { quote_type = 0; }
+        break;
+
+    }
+
+    ++cur;
+
+  }
+
+  return 0;
+
+}
+
+/* We are not using afl_custom_queue_new_entry() because not every corpus
+   entry will necessarily be fuzzed, so we use afl_custom_queue_get() instead */
+
+extern "C" unsigned char afl_custom_queue_get(void                *data,
+                                              const unsigned char *filename) {
+
+  (void)(data);
+
+  if (likely(!debug)) {
+
+    if (unlikely(!afl_ptr->custom_only) &&
+        ((afl_ptr->shm.cmplog_mode && !afl_ptr->queue_cur->is_ascii) ||
+         (only_fav && !afl_ptr->queue_cur->favored))) {
+
+      s = NULL;
+      DEBUGF(stderr, "cmplog not ascii or only_fav and not favorite\n");
+      return 0;
+
+    }
+
+  }
+
+  // check if there are new dictionary entries and add them to the tokens
+  if (valid_structures) {
+
+    while (extras_cnt < afl_ptr->extras_cnt) {
+
+      u32 ok = 1, l = afl_ptr->extras[extras_cnt].len;
+      u8 *ptr = afl_ptr->extras[extras_cnt].data;
+
+      for (u32 i = 0; i < l; ++i) {
+
+        if (!isascii((int)ptr[i]) && !isprint((int)ptr[i])) {
+
+          ok = 0;
+          break;
+
+        }
+
+      }
+
+      if (ok) {
+
+        token_to_id[(char *)ptr] = current_id;
+        id_to_token[current_id] = (char *)ptr;
+        ++current_id;
+        DEBUGF(stderr, "Added from dictionary: \"%s\"\n", ptr);
+
+      }
+
+      ++extras_cnt;
+
+    }
+
+    while (a_extras_cnt < afl_ptr->a_extras_cnt) {
+
+      u32 ok = 1, l = afl_ptr->a_extras[a_extras_cnt].len;
+      u8 *ptr = afl_ptr->a_extras[a_extras_cnt].data;
+
+      for (u32 i = 0; i < l; ++i) {
+
+        if (!isascii((int)ptr[i]) && !isprint((int)ptr[i])) {
+
+          ok = 0;
+          break;
+
+        }
+
+      }
+
+      if (ok) {
+
+        token_to_id[(char *)ptr] = current_id;
+        id_to_token[current_id] = (char *)ptr;
+        ++current_id;
+        DEBUGF(stderr, "Added from auto dictionary: \"%s\"\n", ptr);
+
+      }
+
+      ++a_extras_cnt;
+
+    }
+
+  }
+
+  vector<u32> *structure = NULL;
+  string       fn = (char *)filename;
+  auto         entry = file_mapping.find(fn);
+
+  if (entry == file_mapping.end()) {
+
+    // this input file was not analyzed for tokens yet, so let's do it!
+
+    FILE *fp = fopen((char *)filename, "rb");
+    if (!fp) {
+
+      s = NULL;
+      return 0;
+
+    }  // should not happen
+
+    fseek(fp, 0, SEEK_END);
+    size_t len = (size_t)ftell(fp);
+
+    if (len < AFL_TXT_MIN_LEN) {
+
+      fclose(fp);
+      file_mapping[fn] = structure;  // NULL ptr so we don't read the file again
+      s = NULL;
+      DEBUGF(stderr, "Too short (%lu) %s\n", len, filename);
+      return 0;
+
+    }
+
+    string input;
+    input.resize(len);
+    rewind(fp);
+
+    if (fread((void *)input.data(), 1, len, fp) != len) {
+
+      fclose(fp);
+      s = NULL;
+      DEBUGF(stderr, "Too short read (%lu) %s\n", len, filename);
+      return 0;
+
+    }
+
+    fclose(fp);
+
+    if (!afl_ptr->shm.cmplog_mode) {
+
+      // not running with CMPLOG? bad choice, but whatever ...
+      // we only want text inputs, so we have to check it ourselves.
+
+      u32 valid_chars = 0;
+      for (u32 i = 0; i < len; ++i) {
+
+        if (isascii((int)input[i]) || isprint((int)input[i])) { ++valid_chars; }
+
+      }
+
+      // we want at least 95% of text characters ...
+      if (((len * AFL_TXT_MIN_PERCENT) / 100) > valid_chars) {
+
+        file_mapping[fn] = NULL;
+        s = NULL;
+        DEBUGF(stderr, "Not text (%lu) %s\n", len, filename);
+        return 0;
+
+      }
+
+    }
+
+    // DEBUGF(stderr, "Read %lu bytes for %s\nBefore comment trim:\n%s\n",
+    // input.size(), filename, input.c_str());
+
+    if (regex_comment_custom) {
+
+      input = regex_replace(input, *regex_comment_custom, "$2");
+
+    } else {
+
+      input = regex_replace(input, regex_comment_star, "");
+
+    }
+
+    DEBUGF(stderr, "After replace %lu bytes for %s\n%s\n", input.size(),
+           filename, input.c_str());
+
+    u32  spaces = count(input.begin(), input.end(), ' ');
+    u32  tabs = count(input.begin(), input.end(), '\t');
+    u32  linefeeds = count(input.begin(), input.end(), '\n');
+    bool ends_with_linefeed = input[input.length() - 1] == '\n';
+    DEBUGF(stderr, "spaces=%u tabs=%u linefeeds=%u ends=%u\n", spaces, tabs,
+           linefeeds, ends_with_linefeed);
+    all_spaces += spaces;
+    all_tabs += tabs;
+    all_lf += linefeeds;
+    all_ws = all_spaces + all_tabs + all_lf;
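+    // these corpus-wide whitespace counters weight the random choice in
+    // good_whitespace_or_singleval()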
+
+    // now extract all tokens
+    vector<string>         tokens;
+    string::const_iterator cur = input.begin(), ende = input.end(), found, prev,
+                           match_begin, match_end;
+
+    DEBUGF(stderr, "START!\n");
+
+    if (likely(!alternative_tokenize)) {
+
+      while (my_search_string(cur, ende, &match_begin, &match_end)) {
+
+        prev = cur;
+        found = match_begin;
+        cur = match_end;
+
+        IFDEBUG {
+
+          string foo(match_begin, match_end);
+          DEBUGF(stderr,
+                 "string %s found at start %lu offset %lu continue at %lu\n",
+                 foo.c_str(), prev - input.begin(), found - prev,
+                 cur - input.begin());
+
+        }
+
+        if (prev < found) {  // there are items between search start and find
+          while (prev < found) {
+
+            if (isspace(*prev)) {
+
+              auto start = prev;
+              while (isspace(*prev)) {
+
+                ++prev;
+
+              }
+
+              tokens.push_back(std::string(start, prev));
+              DEBUGF(stderr, "WHITESPACE %ld \"%s\"\n", prev - start,
+                     tokens[tokens.size() - 1].c_str());
+
+            } else if (isalnum(*prev) || *prev == '$' || *prev == '_') {
+
+              auto start = prev;
+              while (isalnum(*prev) || *prev == '$' || *prev == '_' ||
+                     *prev == '.' || *prev == '/') {
+
+                ++prev;
+
+              }
+
+              tokens.push_back(string(start, prev));
+              DEBUGF(stderr, "IDENTIFIER %ld \"%s\"\n", prev - start,
+                     tokens[tokens.size() - 1].c_str());
+
+            } else {
+
+              tokens.push_back(string(prev, prev + 1));
+              DEBUGF(stderr, "OTHER \"%c\"\n", *prev);
+              ++prev;
+
+            }
+
+          }
+
+        }
+
+        tokens.push_back(string(match_begin, match_end));
+        DEBUGF(stderr, "TOK: %s\n", tokens[tokens.size() - 1].c_str());
+
+      }
+
+      DEBUGF(stderr, "AFTER all strings\n");
+
+      if (cur < ende) {
+
+        while (cur < ende) {
+
+          if (isspace(*cur)) {
+
+            auto start = cur;
+            while (isspace(*cur)) {
+
+              ++cur;
+
+            }
+
+            tokens.push_back(std::string(start, cur));
+            DEBUGF(stderr, "WHITESPACE %ld \"%s\"\n", cur - start,
+                   tokens[tokens.size() - 1].c_str());
+
+          } else if (isalnum(*cur) || *cur == '$' || *cur == '_') {
+
+            auto start = cur;
+            while (isalnum(*cur) || *cur == '$' || *cur == '_' || *cur == '.' ||
+                   *cur == '/') {
+
+              ++cur;
+
+            }
+
+            tokens.push_back(std::string(start, cur));
+            DEBUGF(stderr, "IDENTIFIER %ld \"%s\"\n", cur - start,
+                   tokens[tokens.size() - 1].c_str());
+
+          } else {
+
+            tokens.push_back(std::string(cur, cur + 1));
+            DEBUGF(stderr, "OTHER \"%c\"\n", *cur);
+            ++cur;
+
+          }
+
+        }
+
+      }
+
+    } else {
+
+      // alternative tokenize
+      while (my_search_string(cur, ende, &match_begin, &match_end)) {
+
+        prev = cur;
+        found = match_begin;
+        cur = match_end;
+        IFDEBUG {
+
+          string foo(match_begin, match_end);
+          DEBUGF(stderr,
+                 "string %s found at start %lu offset %lu continue at %lu\n",
+                 foo.c_str(), prev - input.begin(), found - prev,
+                 cur - input.begin());
+
+        }
+
+        if (prev < found) {  // there are items between search start and find
+
+          sregex_token_iterator it{prev, found, regex_whitespace, -1};
+          vector<std::string>   tokenized{it, {}};
+          tokenized.erase(std::remove_if(tokenized.begin(), tokenized.end(),
+                                         [](std::string const &s) {
+
+                                           return s.size() == 0;
+
+                                         }),
+
+                          tokenized.end());
+          tokens.reserve(tokens.size() + tokenized.size() * 2 + 1);
+
+          IFDEBUG {
+
+            DEBUGF(stderr, "tokens1: %lu   input size: %lu\n", tokenized.size(),
+                   input.size());
+            for (auto x : tokenized) {
+
+              cerr << x << endl;
+
+            }
+
+          }
+
+          for (auto token : tokenized) {
+
+            string::const_iterator c = token.begin(), e = token.end(), f, p;
+            smatch                 m;
+
+            while (regex_search(c, e, m, regex_word)) {
+
+              p = c;
+              f = m[0].first;
+              c = m[0].second;
+              if (p < f) {
+
+                // there are items between search start and find
+                while (p < f) {
+
+                  IFDEBUG {
+
+                    string foo(p, p + 1);
+                    DEBUGF(stderr, "before string: \"%s\"\n", foo.c_str());
+
+                  }
+
+                  tokens.push_back(std::string(p, p + 1));
+                  ++p;
+
+                }
+
+                IFDEBUG {
+
+                  string foo(p, f);
+                  DEBUGF(stderr, "before string: \"%s\"\n", foo.c_str());
+
+                }
+
+                tokens.push_back(std::string(p, f));
+
+              }
+
+              DEBUGF(stderr,
+                     "SUBstring \"%s\" found at start %lu offset %lu continue "
+                     "at %lu\n",
+                     m[0].str().c_str(), p - input.begin(), m.position(),
+                     c - token.begin());
+              tokens.push_back(m[0].str());
+
+            }
+
+            if (c < e) {
+
+              while (c < e) {
+
+                IFDEBUG {
+
+                  string foo(c, c + 1);
+                  DEBUGF(stderr, "after string: \"%s\"\n", foo.c_str());
+
+                }
+
+                tokens.push_back(std::string(c, c + 1));
+                ++c;
+
+              }
+
+              IFDEBUG {
+
+                string foo(c, e);
+                DEBUGF(stderr, "after string: \"%s\"\n", foo.c_str());
+
+              }
+
+              tokens.push_back(std::string(c, e));
+
+            }
+
+          }
+
+        }
+
+        tokens.push_back(string(match_begin, match_end));
+
+      }
+
+      if (cur < ende) {
+
+        sregex_token_iterator it{cur, ende, regex_whitespace, -1};
+        vector<std::string>   tokenized{it, {}};
+        tokenized.erase(
+            std::remove_if(tokenized.begin(), tokenized.end(),
+                           [](std::string const &s) { return s.size() == 0; }),
+            tokenized.end());
+        tokens.reserve(tokens.size() + tokenized.size() * 2 + 1);
+
+        IFDEBUG {
+
+          DEBUGF(stderr, "tokens2: %lu   input size: %lu\n", tokenized.size(),
+                 input.size());
+          for (auto x : tokenized) {
+
+            cerr << x << endl;
+
+          }
+
+        }
+
+        for (auto token : tokenized) {
+
+          string::const_iterator c = token.begin(), e = token.end(), f, p;
+          smatch                 m;
+
+          while (regex_search(c, e, m, regex_word)) {
+
+            p = c;
+            f = m[0].first;
+            c = m[0].second;
+            if (p < f) {
+
+              // there are items between search start and find
+              while (p < f) {
+
+                IFDEBUG {
+
+                  string foo(p, p + 1);
+                  DEBUGF(stderr, "before string: \"%s\"\n", foo.c_str());
+
+                }
+
+                tokens.push_back(std::string(p, p + 1));
+                ++p;
+
+              }
+
+              IFDEBUG {
+
+                string foo(p, f);
+                DEBUGF(stderr, "before string: \"%s\"\n", foo.c_str());
+
+              }
+
+              tokens.push_back(std::string(p, f));
+
+            }
+
+            DEBUGF(stderr,
+                   "SUB2string \"%s\" found at start %lu offset %lu continue "
+                   "at %lu\n",
+                   m[0].str().c_str(), p - input.begin(), m.position(),
+                   c - token.begin());
+            tokens.push_back(m[0].str());
+
+          }
+
+          if (c < e) {
+
+            while (c < e) {
+
+              IFDEBUG {
+
+                string foo(c, c + 1);
+                DEBUGF(stderr, "after string: \"%s\"\n", foo.c_str());
+
+              }
+
+              tokens.push_back(std::string(c, c + 1));
+              ++c;
+
+            }
+
+            IFDEBUG {
+
+              string foo(c, e);
+              DEBUGF(stderr, "after string: \"%s\"\n", foo.c_str());
+
+            }
+
+            tokens.push_back(std::string(c, e));
+
+          }
+
+        }
+
+      }
+
+    }
+
+    IFDEBUG {
+
+      DEBUGF(stderr, "DUMPING TOKENS:\n");
+      u32 size_1 = tokens.size() - 1;
+      for (u32 i = 0; i < tokens.size(); ++i) {
+
+        DEBUGF(stderr, "%s", tokens[i].c_str());
+        if (unlikely(alternative_tokenize && i < size_1)) {
+
+          DEBUGF(stderr, "%s", whitespace.c_str());
+
+        }
+
+      }
+
+      DEBUGF(stderr, "---------------------------\n");
+
+    }
+
+    if (tokens.size() < AUTOTOKENS_SIZE_MIN) {
+
+      file_mapping[fn] = NULL;
+      s = NULL;
+      DEBUGF(stderr, "too few tokens\n");
+      return 0;
+
+    }
+
+    /* Now we transform the tokens into an ID list and save that */
+
+    structure = new vector<u32>();
+    u32 id;
+
+    for (u32 i = 0; i < tokens.size(); ++i) {
+
+      if ((id = token_to_id[tokens[i]]) == 0) {
+
+        // First time we see this token, add it to the list
+        token_to_id[tokens[i]] = current_id;
+        id_to_token[current_id] = tokens[i];
+        structure->push_back(current_id);
+        ++current_id;
+
+      } else {
+
+        structure->push_back(id);
+
+      }
+
+    }
+
+    // save the token structure to the file mapping
+    file_mapping[fn] = structure;
+    id_mapping[valid_structures] = structure;
+    ++valid_structures;
+    s = structure;
+    all_structure_items += structure->size();
+
+    // we are done!
+    DEBUGF(stderr, "DONE! We have %lu tokens in the structure\n",
+           structure->size());
+
+  }
+
+  else {
+
+    if (entry->second == NULL) {
+
+      DEBUGF(stderr, "Skipping %s\n", filename);
+      s = NULL;
+      return 0;
+
+    }
+
+    s = entry->second;
+    DEBUGF(stderr, "OK %s\n", filename);
+
+  }
+
+  return 1;  // we always fuzz unless non-ascii or too small
+
+}
+
+extern "C" my_mutator_t *afl_custom_init(afl_state *afl, unsigned int seed) {
+
+  (void)(seed);
+  my_mutator_t *data = (my_mutator_t *)calloc(1, sizeof(my_mutator_t));
+  if (!data) {
+
+    perror("afl_custom_init alloc");
+    return NULL;
+
+  }
+
+  if (getenv("AUTOTOKENS_DEBUG")) { debug = 1; }
+  if (getenv("AUTOTOKENS_ONLY_FAV")) { only_fav = 1; }
+  if (getenv("AUTOTOKENS_ALTERNATIVE_TOKENIZE")) { alternative_tokenize = 1; }
+  if (getenv("AUTOTOKENS_WHITESPACE")) {
+
+    whitespace = getenv("AUTOTOKENS_WHITESPACE");
+
+  }
+
+  if (getenv("AUTOTOKENS_COMMENT")) {
+
+    char buf[256];
+    snprintf(buf, sizeof(buf), "(%s.*)([\r\n]?)", getenv("AUTOTOKENS_COMMENT"));
+    regex_comment_custom = new regex(buf, regex::optimize);
+
+  }
+
+  data->afl = afl_ptr = afl;
+
+  // set common whitespace tokens
+  // we deliberately do not put uncommon ones here so these will count as
+  // identifier tokens.
+  if (!alternative_tokenize) {
+
+    static const char *whitespace_tokens[] = {
+
+        " ",  "\t",   "\n",     "\r\n",     "  \n",
+        "  ", "\t\t", "\n\n",   "\r\n\r\n", "    ",
+        "\t\t\t\t",   "\n\n\n\n"};
+
+    for (auto tok : whitespace_tokens) {
+
+      token_to_id[tok] = current_id;
+      id_to_token[current_id] = tok;
+      ++current_id;
+
+    }
+
+    // all ids below whitespace_ids are whitespace, the quote tokens are not
+    whitespace_ids = current_id;
+
+    for (auto tok : {"\"", "'"}) {
+
+      token_to_id[tok] = current_id;
+      id_to_token[current_id] = tok;
+      ++current_id;
+
+    }
+
+  }
+
+  return data;
+
+}
+
+extern "C" void afl_custom_splice_optout(my_mutator_t *data) {
+
+  (void)(data);
+
+}
+
+extern "C" void afl_custom_deinit(my_mutator_t *data) {
+
+  /* we use this to print statistics at exit :-)
+     needs to be stderr as stdout is filtered */
+
+  fprintf(stderr,
+          "\n\nAutotoken mutator statistics:\n"
+          "  Number of all seen tokens:  %u\n"
+          "  Number of input structures: %u\n"
+          "  Number of all items in structures: %llu\n"
+          "  Number of total fuzzes: %llu\n\n",
+          current_id - 1, valid_structures, all_structure_items, fuzz_count);
+
+  free(data);
+
+}
+
diff --git a/custom_mutators/examples/custom_send.c b/custom_mutators/examples/custom_send.c
index ffea927e..7de72819 100644
--- a/custom_mutators/examples/custom_send.c
+++ b/custom_mutators/examples/custom_send.c
@@ -1,7 +1,14 @@
+//
+// This is an example on how to use afl_custom_send
+// It writes each mutated data set to /tmp/foo
+// You can modify this to send to IPC, shared memory, etc.
+//
 // cc -O3 -fPIC -shared -g -o custom_send.so -I../../include custom_send.c
 // cd ../..
 // afl-cc -o test-instr test-instr.c
-// afl-fuzz -i in -o out -- ./test-instr -f /tmp/foo
+// AFL_CUSTOM_MUTATOR_LIBRARY=custom_mutators/examples/custom_send.so \
+//   afl-fuzz -i in -o out -- ./test-instr -f /tmp/foo
+//
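+// For reference, the send callback implemented below has the signature
+//   void afl_custom_fuzz_send(void *data, const unsigned char *buf,
+//                             size_t buf_size);
+// and is called with each mutated test case instead of afl-fuzz writing
+// it to the target's input file itself.
+//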
 
 #include "custom_mutator_helpers.h"
 
diff --git a/custom_mutators/examples/example.c b/custom_mutators/examples/example.c
index 3f299508..e680ec8e 100644
--- a/custom_mutators/examples/example.c
+++ b/custom_mutators/examples/example.c
@@ -6,7 +6,7 @@
              Dominik Maier <mail@dmnk.co>
 */
 
-// You need to use -I /path/to/AFLplusplus/include
+// You need to use -I/path/to/AFLplusplus/include -I.
 #include "custom_mutator_helpers.h"
 
 #include <stdint.h>
@@ -118,6 +118,8 @@ size_t afl_custom_fuzz(my_mutator_t *data, uint8_t *buf, size_t buf_size,
 
   }
 
+  /* never report more data than max_size allows */
+  if (mutated_size > max_size) { mutated_size = max_size; }
+
   *out_buf = mutated_out;
   return mutated_size;
 
diff --git a/custom_mutators/examples/post_library_gif.so.c b/custom_mutators/examples/post_library_gif.so.c
index 9cd224f4..3cb018a6 100644
--- a/custom_mutators/examples/post_library_gif.so.c
+++ b/custom_mutators/examples/post_library_gif.so.c
@@ -129,8 +129,8 @@ size_t afl_custom_post_process(post_state_t *data, unsigned char *in_buf,
 
   /* Allocate memory for new buffer, reusing previous allocation if
      possible. Note we have to use afl-fuzz's own realloc!
-     Note that you should only do this if you need to grow the buffer,
-     otherwise work with in_buf, and assign it to *out_buf instead. */
+     We use afl_realloc because it reuses the buffer if it is already
+     large enough. You can also work within in_buf and assign it to *out_buf. */
 
   *out_buf = afl_realloc(out_buf, len);
 
diff --git a/docs/custom_mutators.md b/docs/custom_mutators.md
index 4ffeda7a..82131c92 100644
--- a/docs/custom_mutators.md
+++ b/docs/custom_mutators.md
@@ -48,6 +48,7 @@ C/C++:
 ```c
 void *afl_custom_init(afl_state_t *afl, unsigned int seed);
 unsigned int afl_custom_fuzz_count(void *data, const unsigned char *buf, size_t buf_size);
+void afl_custom_splice_optout(void *data);
 size_t afl_custom_fuzz(void *data, unsigned char *buf, size_t buf_size, unsigned char **out_buf, unsigned char *add_buf, size_t add_buf_size, size_t max_size);
 const char *afl_custom_describe(void *data, size_t max_description_len);
 size_t afl_custom_post_process(void *data, unsigned char *buf, size_t buf_size, unsigned char **out_buf);
@@ -72,6 +73,9 @@ def init(seed):
 def fuzz_count(buf):
     return cnt
 
+def splice_optout():
+    pass
+
 def fuzz(buf, add_buf, max_size):
     return mutated_out
 
@@ -132,6 +136,13 @@ def deinit():  # optional for Python
     for a specific queue entry, use this function. This function is most useful
     if `AFL_CUSTOM_MUTATOR_ONLY` is **not** used.
 
+- `splice_optout` (optional):
+
+    If this function is present, no splicing target is passed to the `fuzz`
+    function. This saves time if splicing data is not needed by the custom
+    fuzzing function.
+    This function is never called; it just needs to be present to activate
+    this feature.
+
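+    A minimal C sketch of a mutator that opts out of splicing might look
+    like this (`my_data_t` is an illustrative name, not part of the API):
+
+    ```c
+    #include <stdlib.h>
+
+    #include "afl-fuzz.h"
+
+    typedef struct my_data {
+
+      afl_state_t *afl;
+
+    } my_data_t;
+
+    void *afl_custom_init(afl_state_t *afl, unsigned int seed) {
+
+      (void)seed;
+      my_data_t *data = calloc(1, sizeof(my_data_t));
+      if (data) { data->afl = afl; }
+      return data;
+
+    }
+
+    /* Never called - its presence alone tells afl-fuzz to skip splicing. */
+    void afl_custom_splice_optout(void *data) {
+
+      (void)data;
+
+    }
+    ```
+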
 - `fuzz` (optional):
 
     This method performs custom mutations on a given input. It also accepts an
@@ -139,6 +150,7 @@ def deinit():  # optional for Python
     sense to use it. You would only skip this if `post_process` is used to fix
     checksums etc. so if you are using it, e.g., as a post processing library.
     Note that a length > 0 *must* be returned!
+    The returned output buffer is under **your** memory management!
 
 - `describe` (optional):
 
diff --git a/docs/env_variables.md b/docs/env_variables.md
index 22a5c386..0a57d190 100644
--- a/docs/env_variables.md
+++ b/docs/env_variables.md
@@ -354,6 +354,9 @@ checks or alter some of the more exotic semantics of the tool:
   - Setting `AFL_KEEP_TIMEOUTS` will keep longer running inputs if they reach
     new coverage
 
+  - Conversely, if you are not interested in timeouts at all, you can set
+    `AFL_IGNORE_TIMEOUTS` to gain a bit of speed instead.
+
   - `AFL_EXIT_ON_SEED_ISSUES` will restore the vanilla afl-fuzz behavior which
     does not allow crashes or timeout seeds in the initial -i corpus.
 
diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h
index edef9207..229bc025 100644
--- a/include/afl-fuzz.h
+++ b/include/afl-fuzz.h
@@ -344,6 +344,7 @@ enum {
   /* 12 */ PY_FUNC_INTROSPECTION,
   /* 13 */ PY_FUNC_DESCRIBE,
   /* 14 */ PY_FUNC_FUZZ_SEND,
+  /* 15 */ PY_FUNC_SPLICE_OPTOUT,
   PY_FUNC_COUNT
 
 };
@@ -398,7 +399,7 @@ typedef struct afl_env_vars {
       afl_cycle_schedules, afl_expand_havoc, afl_statsd, afl_cmplog_only_new,
       afl_exit_on_seed_issues, afl_try_affinity, afl_ignore_problems,
       afl_keep_timeouts, afl_pizza_mode, afl_no_crash_readme,
-      afl_no_startup_calibration;
+      afl_ignore_timeouts, afl_no_startup_calibration;
 
   u8 *afl_tmpdir, *afl_custom_mutator_library, *afl_python_module, *afl_path,
       *afl_hang_tmout, *afl_forksrv_init_tmout, *afl_preload,
@@ -495,6 +496,7 @@ typedef struct afl_state {
       no_unlink,                        /* do not unlink cur_input          */
       debug,                            /* Debug mode                       */
       custom_only,                      /* Custom mutator only mode         */
+      custom_splice_optout,             /* Custom mutator no splice buffer  */
       is_main_node,                     /* if this is the main node         */
       is_secondary_node,                /* if this is a secondary instance  */
       pizza_is_served;                  /* pizza mode                       */
@@ -829,17 +831,29 @@ struct custom_mutator {
   u32 (*afl_custom_fuzz_count)(void *data, const u8 *buf, size_t buf_size);
 
   /**
-   * Perform custom mutations on a given input
+   * Opt-out of a splicing input for the fuzz mutator
    *
-   * (Optional for now. Required in the future)
+   * Empty dummy function. Its presence tells afl-fuzz not to pass a
+   * splice data pointer and length.
    *
    * @param data pointer returned in afl_custom_init by this custom mutator
+   */
+  void (*afl_custom_splice_optout)(void *data);
+
+  /**
+   * Perform custom mutations on a given input
+   *
+   * (Optional)
+   *
+   * Getting an add_buf can be skipped by using afl_custom_splice_optout().
+   *
+   * @param[in] data Pointer returned in afl_custom_init by this custom mutator
    * @param[in] buf Pointer to the input data to be mutated and the mutated
    *     output
    * @param[in] buf_size Size of the input/output data
-   * @param[out] out_buf the new buffer. We may reuse *buf if large enough.
-   *             *out_buf = NULL is treated as FATAL.
-   * @param[in] add_buf Buffer containing the additional test case
+   * @param[out] out_buf The new buffer, under your memory management.
+   * @param[in] add_buf Buffer containing an additional test case (splicing)
    * @param[in] add_buf_size Size of the additional test case
    * @param[in] max_size Maximum size of the mutated output. The mutation must
    * not produce data larger than max_size.
@@ -1057,6 +1071,7 @@ u8          havoc_mutation_probability_py(void *);
 u8          queue_get_py(void *, const u8 *);
 const char *introspection_py(void *);
 u8          queue_new_entry_py(void *, const u8 *, const u8 *);
+void        splice_optout_py(void *);
 void        deinit_py(void *);
 
 #endif
diff --git a/include/config.h b/include/config.h
index a5a4c473..f8a742f2 100644
--- a/include/config.h
+++ b/include/config.h
@@ -364,9 +364,9 @@
  *                                                         *
  ***********************************************************/
 
-/* Call count interval between reseeding the libc PRNG from /dev/urandom: */
+/* Call count interval between reseeding the PRNG from /dev/urandom: */
 
-#define RESEED_RNG 100000
+#define RESEED_RNG 2500000
 
 /* The default maximum testcase cache size in MB, 0 = disable.
    A value between 50 and 250 is a good default value. Note that the
@@ -494,7 +494,7 @@
 /* What is the minimum percentage of ascii characters present to be classified
    as "is_ascii"? */
 
-#define AFL_TXT_MIN_PERCENT 94
+#define AFL_TXT_MIN_PERCENT 95
 
 /* How often to perform ASCII mutations 0 = disable, 1-8 are good values */
 
diff --git a/include/envs.h b/include/envs.h
index f4cdf390..0770f94d 100644
--- a/include/envs.h
+++ b/include/envs.h
@@ -103,6 +103,7 @@ static char *afl_environment_variables[] = {
     "AFL_HARDEN",
     "AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES",
     "AFL_IGNORE_PROBLEMS",
+    "AFL_IGNORE_TIMEOUTS",
     "AFL_IGNORE_UNKNOWN_ENVS",
     "AFL_IMPORT_FIRST",
     "AFL_INPUT_LEN_MIN",
diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl
-Subproject a8af9cbde71e333ce72a46f15e655d0b82ed093
+Subproject a120c3feb573d4cade292cdeb7c1f6b1ce109ef
diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c
index 485b82db..b4e9537e 100644
--- a/src/afl-fuzz-bitmap.c
+++ b/src/afl-fuzz-bitmap.c
@@ -457,6 +457,12 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
 
   if (unlikely(len == 0)) { return 0; }
 
+  if (unlikely(fault == FSRV_RUN_TMOUT && afl->afl_env.afl_ignore_timeouts)) {
+
+    return 0;
+
+  }
+
   u8  fn[PATH_MAX];
   u8 *queue_fn = "";
   u8  new_bits = 0, keeping = 0, res, classified = 0, is_timeout = 0;
diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c
index 22e5262e..ce43064a 100644
--- a/src/afl-fuzz-mutators.c
+++ b/src/afl-fuzz-mutators.c
@@ -358,6 +358,19 @@ struct custom_mutator *load_custom_mutator(afl_state_t *afl, const char *fn) {
 
   }
 
+  /* "afl_custom_splice_optout", optional, never called */
+  mutator->afl_custom_splice_optout = dlsym(dh, "afl_custom_splice_optout");
+  if (!mutator->afl_custom_splice_optout) {
+
+    ACTF("optional symbol 'afl_custom_splice_optout' not found.");
+
+  } else {
+
+    OKF("Found 'afl_custom_splice_optout'.");
+    afl->custom_splice_optout = 1;
+
+  }
+
   /* "afl_custom_fuzz_send", optional */
   mutator->afl_custom_fuzz_send = dlsym(dh, "afl_custom_fuzz_send");
   if (!mutator->afl_custom_fuzz_send) {
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index 97855607..b25398c4 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -446,9 +446,12 @@ u8 fuzz_one_original(afl_state_t *afl) {
 
     ACTF(
         "Fuzzing test case #%u (%u total, %llu crashes saved, "
-        "perf_score=%0.0f, exec_us=%llu, hits=%u, map=%u, ascii=%u)...",
+        "perf_score=%0.0f, weight=%0.0f, favorite=%u, was_fuzzed=%u, "
+        "exec_us=%llu, hits=%u, map=%u, ascii=%u)...",
         afl->current_entry, afl->queued_items, afl->saved_crashes,
-        afl->queue_cur->perf_score, afl->queue_cur->exec_us,
+        afl->queue_cur->perf_score, afl->queue_cur->weight,
+        afl->queue_cur->favored, afl->queue_cur->was_fuzzed,
+        afl->queue_cur->exec_us,
         likely(afl->n_fuzz) ? afl->n_fuzz[afl->queue_cur->n_fuzz_entry] : 0,
         afl->queue_cur->bitmap_size, afl->queue_cur->is_ascii);
     fflush(stdout);
@@ -561,11 +564,11 @@ u8 fuzz_one_original(afl_state_t *afl) {
 
     } else {
 
-      if (afl->cmplog_lvl == 3 ||
-          (afl->cmplog_lvl == 2 && afl->queue_cur->tc_ref) ||
-          afl->queue_cur->favored ||
-          !(afl->fsrv.total_execs % afl->queued_items) ||
-          get_cur_time() - afl->last_find_time > 300000) {  // 300 seconds
+      if (afl->queue_cur->favored || afl->cmplog_lvl == 3 ||
+          (afl->cmplog_lvl == 2 &&
+           (afl->queue_cur->tc_ref ||
+            afl->fsrv.total_execs % afl->queued_items <= 10)) ||
+          get_cur_time() - afl->last_find_time > 250000) {  // 250 seconds
 
         if (input_to_state_stage(afl, in_buf, out_buf, len)) {
 
@@ -584,7 +587,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
      if it has gone through deterministic testing in earlier, resumed runs
      (passed_det). */
 
-  if (likely(afl->queue_cur->passed_det) || likely(afl->skip_deterministic) ||
+  if (likely(afl->skip_deterministic) || likely(afl->queue_cur->passed_det) ||
       likely(perf_score <
              (afl->queue_cur->depth * 30 <= afl->havoc_max_mult * 100
                   ? afl->queue_cur->depth * 30
@@ -1908,9 +1911,10 @@ custom_mutator_stage:
 
   afl->stage_name = "custom mutator";
   afl->stage_short = "custom";
-  afl->stage_max = HAVOC_CYCLES * perf_score / afl->havoc_div / 100;
   afl->stage_val_type = STAGE_VAL_NONE;
   bool has_custom_fuzz = false;
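+  // in custom-only mode the custom mutator stage gets twice the budget
+  // (>> 7 instead of >> 8)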
+  u32  shift = unlikely(afl->custom_only) ? 7 : 8;
+  afl->stage_max = (HAVOC_CYCLES * perf_score / afl->havoc_div) >> shift;
 
   if (afl->stage_max < HAVOC_MIN) { afl->stage_max = HAVOC_MIN; }
 
@@ -1953,7 +1957,8 @@ custom_mutator_stage:
           u32                 target_len = 0;
 
           /* check if splicing makes sense yet (enough entries) */
-          if (likely(afl->ready_for_splicing_count > 1)) {
+          if (likely(!afl->custom_splice_optout &&
+                     afl->ready_for_splicing_count > 1)) {
 
             /* Pick a random other queue entry for passing to external API
                that has the necessary length */
@@ -2063,8 +2068,9 @@ havoc_stage:
 
     afl->stage_name = "havoc";
     afl->stage_short = "havoc";
-    afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-                     perf_score / afl->havoc_div / 100;
+    afl->stage_max = ((doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+                      perf_score / afl->havoc_div) >>
+                     7;
 
   } else {
 
@@ -2073,7 +2079,7 @@ havoc_stage:
     snprintf(afl->stage_name_buf, STAGE_BUF_SIZE, "splice %u", splice_cycle);
     afl->stage_name = afl->stage_name_buf;
     afl->stage_short = "splice";
-    afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100;
+    afl->stage_max = (SPLICE_HAVOC * perf_score / afl->havoc_div) >> 7;
 
   }
 
@@ -4621,8 +4627,9 @@ pacemaker_fuzzing:
 
     afl->stage_name = MOpt_globals.havoc_stagename;
     afl->stage_short = MOpt_globals.havoc_stagenameshort;
-    afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-                     perf_score / afl->havoc_div / 100;
+    afl->stage_max = ((doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+                      perf_score / afl->havoc_div) >>
+                     7;
 
   } else {
 
@@ -4632,7 +4639,7 @@ pacemaker_fuzzing:
              MOpt_globals.splice_stageformat, splice_cycle);
     afl->stage_name = afl->stage_name_buf;
     afl->stage_short = MOpt_globals.splice_stagenameshort;
-    afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100;
+    afl->stage_max = (SPLICE_HAVOC * perf_score / afl->havoc_div) >> 7;
 
   }
 
@@ -5792,10 +5799,8 @@ void pso_updating(afl_state_t *afl) {
 
 }
 
-/* larger change for MOpt implementation: the original fuzz_one was renamed
-   to fuzz_one_original. All documentation references to fuzz_one therefore
-   mean fuzz_one_original */
-
+/* The entry point for the mutator, choosing the default mutator, and/or MOpt
+   depending on the configuration. */
 u8 fuzz_one(afl_state_t *afl) {
 
   int key_val_lv_1 = 0, key_val_lv_2 = 0;
@@ -5818,7 +5823,12 @@ u8 fuzz_one(afl_state_t *afl) {
 
 #endif
 
-  // if limit_time_sig == -1 then both are run after each other
+  /*
+     -L command line parameter => limit_time_sig value
+       limit_time_sig == 0 then run the default mutator
+       limit_time_sig  > 0 then run MOpt
+       limit_time_sig  < 0 both are run
+  */
 
   if (afl->limit_time_sig <= 0) { key_val_lv_1 = fuzz_one_original(afl); }
 
diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c
index b509b936..69c305f7 100644
--- a/src/afl-fuzz-python.c
+++ b/src/afl-fuzz-python.c
@@ -248,6 +248,8 @@ static py_mutator_t *init_py_module(afl_state_t *afl, u8 *module_name) {
         PyObject_GetAttrString(py_module, "queue_get");
     py_functions[PY_FUNC_FUZZ_SEND] =
         PyObject_GetAttrString(py_module, "fuzz_send");
+    py_functions[PY_FUNC_SPLICE_OPTOUT] =
+        PyObject_GetAttrString(py_module, "splice_optout");
     py_functions[PY_FUNC_QUEUE_NEW_ENTRY] =
         PyObject_GetAttrString(py_module, "queue_new_entry");
     py_functions[PY_FUNC_INTROSPECTION] =
@@ -394,6 +396,13 @@ void deinit_py(void *py_mutator) {
 
 }
 
+void splice_optout_py(void *py_mutator) {
+
+  // this is never called
+  (void)(py_mutator);
+
+}
+
 struct custom_mutator *load_custom_mutator_py(afl_state_t *afl,
                                               char        *module_name) {
 
@@ -474,6 +483,13 @@ struct custom_mutator *load_custom_mutator_py(afl_state_t *afl,
 
   }
 
+  if (py_functions[PY_FUNC_SPLICE_OPTOUT]) {
+
+    mutator->afl_custom_splice_optout = splice_optout_py;
+    afl->custom_splice_optout = 1;
+
+  }
+
   if (py_functions[PY_FUNC_QUEUE_NEW_ENTRY]) {
 
     mutator->afl_custom_queue_new_entry = queue_new_entry_py;
diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c
index 896b5f71..104b1e4b 100644
--- a/src/afl-fuzz-state.c
+++ b/src/afl-fuzz-state.c
@@ -292,6 +292,13 @@ void read_afl_environment(afl_state_t *afl, char **envp) {
             afl->afl_env.afl_ignore_problems =
                 get_afl_env(afl_environment_variables[i]) ? 1 : 0;
 
+          } else if (!strncmp(env, "AFL_IGNORE_TIMEOUTS",
+
+                              afl_environment_variable_len)) {
+
+            afl->afl_env.afl_ignore_timeouts =
+                get_afl_env(afl_environment_variables[i]) ? 1 : 0;
+
           } else if (!strncmp(env, "AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES",
 
                               afl_environment_variable_len)) {
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index 20c655cf..4de2baf6 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -258,8 +258,9 @@ static void usage(u8 *argv0, int more_help) {
       "AFL_FORKSRV_INIT_TMOUT: time spent waiting for forkserver during startup (in ms)\n"
       "AFL_HANG_TMOUT: override timeout value (in milliseconds)\n"
       "AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES: don't warn about core dump handlers\n"
-      "AFL_IGNORE_UNKNOWN_ENVS: don't warn on unknown env vars\n"
       "AFL_IGNORE_PROBLEMS: do not abort fuzzing if an incorrect setup is detected\n"
+      "AFL_IGNORE_TIMEOUTS: do not process or save any timeouts\n"
+      "AFL_IGNORE_UNKNOWN_ENVS: don't warn on unknown env vars\n"
       "AFL_IMPORT_FIRST: sync and import test cases from other fuzzer instances first\n"
       "AFL_INPUT_LEN_MIN/AFL_INPUT_LEN_MAX: like -g/-G set min/max fuzz length produced\n"
       "AFL_PIZZA_MODE: 1 - enforce pizza mode, 0 - disable for April 1st\n"
@@ -1580,6 +1581,29 @@ int main(int argc, char **argv_orig, char **envp) {
 
   }
 
+  if (afl->limit_time_sig > 0 && afl->custom_mutators_count) {
+
+    if (afl->custom_only) {
+
+      FATAL("Custom mutators are incompatible with MOpt (-L)");
+
+    }
+
+    u32 custom_fuzz = 0;
+    LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {
+
+      if (el->afl_custom_fuzz) { custom_fuzz = 1; }
+
+    });
+
+    if (custom_fuzz) {
+
+      WARNF("afl_custom_fuzz is incompatible with MOpt (-L)");
+
+    }
+
+  }
+
   if (afl->afl_env.afl_max_det_extras) {
 
     s32 max_det_extras = atoi(afl->afl_env.afl_max_det_extras);