Diffstat (limited to 'src')
-rw-r--r--  src/afl-cc.c             |  177
-rw-r--r--  src/afl-forkserver.c     |   26
-rw-r--r--  src/afl-fuzz-bitmap.c    |  156
-rw-r--r--  src/afl-fuzz-init.c      |  221
-rw-r--r--  src/afl-fuzz-mutators.c  |   10
-rw-r--r--  src/afl-fuzz-one.c       |  369
-rw-r--r--  src/afl-fuzz-python.c    |    3
-rw-r--r--  src/afl-fuzz-queue.c     |  238
-rw-r--r--  src/afl-fuzz-run.c       |  164
-rw-r--r--  src/afl-fuzz-stats.c     |  216
-rw-r--r--  src/afl-fuzz.c           |  233
-rw-r--r--  src/afl-sharedmem.c      |   89
-rw-r--r--  src/afl-showmap.c        |  181
-rw-r--r--  src/aflrun-cc.c          |   30
-rw-r--r--  src/aflrun.cpp           | 4039
15 files changed, 5941 insertions, 211 deletions
diff --git a/src/afl-cc.c b/src/afl-cc.c
index 803e784e..a4776730 100644
--- a/src/afl-cc.c
+++ b/src/afl-cc.c
@@ -31,6 +31,8 @@
 #include <strings.h>
 #include <limits.h>
 #include <assert.h>
+#include <sys/stat.h>
+#include <sys/types.h>
 
 #if (LLVM_MAJOR - 0 == 0)
   #undef LLVM_MAJOR
@@ -48,6 +50,8 @@
 static u8  *obj_path;                  /* Path to runtime libraries         */
 static u8 **cc_params;                 /* Parameters passed to the real CC  */
 static u32  cc_par_cnt = 1;            /* Param count, including argv0      */
+static u8*  pwd;
+static u8*  lib_fuzz;
 static u8   clang_mode;                /* Invoked as afl-clang*?            */
 static u8   llvm_fullpath[PATH_MAX];
 static u8   instrument_mode, instrument_opt_mode, ngram_size, ctx_k, lto_mode;
@@ -375,13 +379,96 @@ void parse_fsanitize(char *string) {
 
 }
 
+static const char b64_tab[64] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_";
+static u8* create_temp_dir(const char* target_name) {
+
+  // Generate random directory name
+  FILE* fd = fopen("/dev/urandom", "rb");
+  if (fd == NULL)
+    FATAL("Cannot open urandom");
+  char dir_name[13];
+  u8 tmp;
+  for (size_t i = 0; i < sizeof(dir_name) - 1; ++i) {
+    if (fread(&tmp, 1, 1, fd) != 1)
+      FATAL("fread() failed");
+    dir_name[i] = b64_tab[tmp % sizeof(b64_tab)];
+  }
+  dir_name[sizeof(dir_name) - 1] = 0;
+  fclose(fd);
+
+  // Create the temp directory (under AFLRUN_TMP, or /tmp by default)
+  const char* tmp_dir = getenv("AFLRUN_TMP");
+  if (tmp_dir && tmp_dir[0] != '/')
+    FATAL("Please use absolute path for AFLRUN_TMP");
+  u8* ret = alloc_printf("%s/%s.%s",
+    tmp_dir ? tmp_dir : "/tmp", target_name, dir_name);
+  if (mkdir(ret, 0700) < 0) FATAL("mkdir() failed");
+  return ret;
+}
+
+static void parse_out(const char* out, u8** dir, u8** name) {
+  if (out == NULL)
+    FATAL("No out file path");
+
+  char* cp = strdup(out);
+
+  u8* pos = strrchr(cp, '/');
+  if (pos == NULL) {
+    *name = cp;
+    *dir = pwd;
+  }
+  else {
+    *pos = 0;
+    *name = pos + 1;
+    if (out[0] == '/')
+      *dir = alloc_printf("/%s", cp);
+    else
+      *dir = alloc_printf("%s/%s", pwd, cp);
+  }
+}
+
+static u8 is_fuzzer(u8* target_name) {
+
+  size_t len = strlen(target_name);
+  // strlen("_fuzzer") == 7 && strlen("fuzz_") == 5
+  return
+    (len >= 7 && memcmp(target_name + len - 7, "_fuzzer", 7) == 0) ||
+    (len >= 5 && memcmp(target_name, "fuzz_", 5) == 0);
+}
+
+static u8 is_target(u8* target_name, u8* targets) {
+
+  // "::" means we want to treat every binary as a target
+  if (strcmp(targets, "::") == 0)
+    return 1;
+
+  u8* iter = ck_strdup(targets);
+
+  while (1) {
+
+    u8* p = strchr(iter, ':');
+    if (p == NULL)
+      break;
+
+    *p = 0;
+    if (strcmp(target_name, iter) == 0)
+      return 1;
+
+    iter = p + 1;
+
+  }
+
+  return strcmp(target_name, iter) == 0;
+}
+
+
 /* Copy argv to cc_params, making the necessary edits. */
 
 static void edit_params(u32 argc, char **argv, char **envp) {
 
   u8 fortify_set = 0, asan_set = 0, x_set = 0, bit_mode = 0, shared_linking = 0,
      preprocessor_only = 0, have_unroll = 0, have_o = 0, have_pic = 0,
-     have_c = 0, partial_linking = 0;
+     have_c = 0, partial_linking = 0, lib_fuzz_bin = 0;
 
   cc_params = ck_alloc((argc + 128) * sizeof(u8 *));
 
@@ -585,25 +672,6 @@ static void edit_params(u32 argc, char **argv, char **envp) {
 
     }
 
-    if (getenv("LAF_SPLIT_COMPARES") || getenv("AFL_LLVM_LAF_SPLIT_COMPARES") ||
-        getenv("AFL_LLVM_LAF_SPLIT_FLOATS")) {
-
-#if LLVM_MAJOR >= 11                                /* use new pass manager */
-  #if LLVM_MAJOR < 16
-      cc_params[cc_par_cnt++] = "-fexperimental-new-pass-manager";
-  #endif
-      cc_params[cc_par_cnt++] =
-          alloc_printf("-fpass-plugin=%s/split-compares-pass.so", obj_path);
-#else
-      cc_params[cc_par_cnt++] = "-Xclang";
-      cc_params[cc_par_cnt++] = "-load";
-      cc_params[cc_par_cnt++] = "-Xclang";
-      cc_params[cc_par_cnt++] =
-          alloc_printf("%s/split-compares-pass.so", obj_path);
-#endif
-
-    }
-
     // /laf
 
     unsetenv("AFL_LD");
@@ -646,6 +714,22 @@ static void edit_params(u32 argc, char **argv, char **envp) {
     //    use. cc_params[cc_par_cnt++] = "-flegacy-pass-manager";
     //#endif
 
+    if (lto_mode) {
+
+#if LLVM_MAJOR >= 11                                /* use new pass manager */
+      cc_params[cc_par_cnt++] = "-fexperimental-new-pass-manager";
+      cc_params[cc_par_cnt++] =
+          alloc_printf("-fpass-plugin=%s/lto-marker.so", obj_path);
+#else
+      cc_params[cc_par_cnt++] = "-Xclang";
+      cc_params[cc_par_cnt++] = "-load";
+      cc_params[cc_par_cnt++] = "-Xclang";
+      cc_params[cc_par_cnt++] =
+          alloc_printf("%s/lto-marker.so", obj_path);
+#endif
+
+    }
+
     if (lto_mode && !have_c) {
 
       u8 *ld_path = NULL;
@@ -825,6 +909,7 @@ static void edit_params(u32 argc, char **argv, char **envp) {
   /* Detect stray -v calls from ./configure scripts. */
 
   u8 skip_next = 0, non_dash = 0;
+  u8 *target_path = NULL, *target_name = NULL;
   while (--argc) {
 
     u8 *cur = *(++argv);
@@ -836,6 +921,13 @@ static void edit_params(u32 argc, char **argv, char **envp) {
 
     }
 
+    if (!strcmp(cur, "-o")) parse_out(argv[1], &target_path, &target_name);
+
+    // If the libFuzzer engine environment variable is set, check the
+    // command line to see if this invocation compiles the fuzzed binary
+    if (lib_fuzz && !strcmp(cur, lib_fuzz))
+      lib_fuzz_bin = 1;
+
     if (cur[0] != '-') { non_dash = 1; }
     if (!strncmp(cur, "--afl", 5)) continue;
     if (lto_mode && !strncmp(cur, "-fuse-ld=", 9)) continue;
@@ -851,6 +943,9 @@ static void edit_params(u32 argc, char **argv, char **envp) {
 
     if (compiler_mode == GCC_PLUGIN && !strcmp(cur, "-pipe")) { continue; }
 
+    // Disable `-Werror` flag
+    if (!strcmp(cur, "-Werror")) continue;
+
     if (!strcmp(cur, "-z") || !strcmp(cur, "-Wl,-z")) {
 
       u8 *param = *(argv + 1);
@@ -970,6 +1065,14 @@ static void edit_params(u32 argc, char **argv, char **envp) {
 
   }
 
+  if (target_path == NULL) {
+    target_path = pwd;
+    target_name = "a.out";
+  }
+
+  cc_params[cc_par_cnt++] = "-g";
+  cc_params[cc_par_cnt++] = "-fno-inline-functions";
+
   // in case LLVM is installed not via a package manager or "make install"
   // e.g. compiled download or compiled from github then its ./lib directory
   // might not be in the search path. Add it if so.
@@ -1066,8 +1169,7 @@ static void edit_params(u32 argc, char **argv, char **envp) {
 
   if (!getenv("AFL_DONT_OPTIMIZE")) {
 
-    cc_params[cc_par_cnt++] = "-g";
-    if (!have_o) cc_params[cc_par_cnt++] = "-O3";
+    if (!have_o) cc_params[cc_par_cnt++] = "-O1";
     if (!have_unroll) cc_params[cc_par_cnt++] = "-funroll-loops";
     // if (strlen(march_opt) > 1 && march_opt[0] == '-')
     //  cc_params[cc_par_cnt++] = march_opt;
@@ -1221,6 +1323,20 @@ static void edit_params(u32 argc, char **argv, char **envp) {
 
   if (compiler_mode != GCC && compiler_mode != CLANG) {
 
+    char* targets = getenv("AFLRUN_TARGETS");
+    if ((targets != NULL && is_target(target_name, targets)) ||
+      lib_fuzz_bin || (lib_fuzz && is_fuzzer(target_name))) {
+      // we use aflrun for all fuzzed binary
+
+      char* bb_targets = getenv("AFLRUN_BB_TARGETS");
+      if (bb_targets == NULL)
+        FATAL("Please set env var 'AFLRUN_BB_TARGETS'");
+      if (getenv("AFLRUN_SAVE_TEMPS"))
+        cc_params[cc_par_cnt++] = "-Wl,-plugin-opt=save-temps";
+      u8* temp_dir = create_temp_dir(target_name);
+      setenv("AFLRUN_TEMP_DIR", temp_dir, 1);
+    }
+
     switch (bit_mode) {
 
       case 0:
@@ -1313,6 +1429,12 @@ int main(int argc, char **argv, char **envp) {
   int   i;
   char *callname = argv[0], *ptr = NULL;
 
+  pwd = getenv("PWD");
+  if (pwd == NULL)
+    FATAL("$PWD is not set");
+
+  lib_fuzz = getenv("AFLRUN_NO_OSS") ? NULL : getenv("LIB_FUZZING_ENGINE");
+
   if (getenv("AFL_DEBUG")) {
 
     debug = 1;
@@ -1988,7 +2110,7 @@ int main(int argc, char **argv, char **envp) {
           "  AFL_CC: path to the C compiler to use\n"
           "  AFL_CXX: path to the C++ compiler to use\n"
           "  AFL_DEBUG: enable developer debugging output\n"
-          "  AFL_DONT_OPTIMIZE: disable optimization instead of -O3\n"
+          "  AFL_DONT_OPTIMIZE: disable optimization instead of -O1\n"
           "  AFL_NO_BUILTIN: no builtins for string compare functions (for "
           "libtokencap.so)\n"
           "  AFL_NOOP: behave like a normal compiler (to pass configure "
@@ -2319,6 +2441,15 @@ int main(int argc, char **argv, char **envp) {
 
   } else {
 
+    if (getenv("AFLRUN_SHOW_CLANG_ARGS")) {
+      for (int i = 0; i < argc; ++i)
+        fprintf(stderr, "%s ", argv[i]);
+      fprintf(stderr, "\n");
+      for (u8** i = cc_params; *i; ++i)
+        fprintf(stderr, "%s ", *i);
+      fprintf(stderr, "\n");
+    }
+
     execvp(cc_params[0], (char **)cc_params);
 
   }
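
Taken together, the afl-cc.c changes decide which binaries receive aflrun instrumentation: the output name parsed from -o by parse_out() is matched against the colon-separated AFLRUN_TARGETS list ("::" selects everything), and libFuzzer harnesses are picked up via LIB_FUZZING_ENGINE together with the "*_fuzzer"/"fuzz_*" naming convention. The standalone sketch below (not part of the patch; name_in_list and looks_like_fuzzer are illustrative names) reproduces that matching with plain libc so it can be sanity-checked in isolation:

    /* Standalone sketch of the AFLRUN_TARGETS / fuzzer-name matching above. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int name_in_list(const char *name, const char *targets) {
      if (strcmp(targets, "::") == 0) return 1;      /* "::" selects every binary */
      char *copy = strdup(targets), *iter = copy, *p;
      int found = 0;
      while ((p = strchr(iter, ':')) != NULL) {      /* colon-separated entries */
        *p = 0;
        if (strcmp(name, iter) == 0) { found = 1; break; }
        iter = p + 1;
      }
      if (!found) found = (strcmp(name, iter) == 0); /* last (or only) entry */
      free(copy);
      return found;
    }

    static int looks_like_fuzzer(const char *name) {
      size_t len = strlen(name);                     /* "*_fuzzer" or "fuzz_*" */
      return (len >= 7 && memcmp(name + len - 7, "_fuzzer", 7) == 0) ||
             (len >= 5 && memcmp(name, "fuzz_", 5) == 0);
    }

    int main(void) {
      printf("%d %d %d\n",
             name_in_list("xmllint", "xmllint:xmlcatalog"), /* 1 */
             name_in_list("a.out", "::"),                   /* 1 */
             looks_like_fuzzer("tiff_read_rgba_fuzzer"));   /* 1 */
      return 0;
    }
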
diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c
index 9b8660ce..67105dda 100644
--- a/src/afl-forkserver.c
+++ b/src/afl-forkserver.c
@@ -1395,6 +1395,26 @@ afl_fsrv_write_to_testcase(afl_forkserver_t *fsrv, u8 *buf, size_t len) {
 
 }
 
+/* Reset shared memory before each run */
+void afl_fsrv_clear(afl_forkserver_t *fsrv) {
+  memset(fsrv->trace_bits, 0, fsrv->map_size);
+
+  if (fsrv->num_reachables != 0) {
+
+    memset(fsrv->trace_reachables, 0, MAP_RBB_SIZE(fsrv->num_reachables));
+    memset(fsrv->trace_freachables, 0, MAP_RF_SIZE(fsrv->num_freachables));
+    memset(fsrv->trace_ctx, 0, MAP_TR_SIZE(fsrv->num_reachables));
+    fsrv->trace_virgin->num = 0;
+    fsrv->trace_targets->num = 0;
+
+    // If we want to count frequency, set last bit of block bitmap
+    if (fsrv->testing)
+      fsrv->trace_reachables[fsrv->num_reachables / 8] |=
+        1 << (fsrv->num_reachables % 8);
+
+  }
+}
+
 /* Execute target application, monitoring for timeouts. Return status
    information. The called program will update afl->fsrv->trace_bits. */
 
@@ -1470,14 +1490,12 @@ afl_fsrv_run_target(afl_forkserver_t *fsrv, u32 timeout,
 
 #ifdef __linux__
   if (!fsrv->nyx_mode) {
-
-    memset(fsrv->trace_bits, 0, fsrv->map_size);
+    afl_fsrv_clear(fsrv);
     MEM_BARRIER();
-
   }
 
 #else
-  memset(fsrv->trace_bits, 0, fsrv->map_size);
+  afl_fsrv_clear(fsrv);
   MEM_BARRIER();
 #endif
 
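One detail of afl_fsrv_clear() worth spelling out: when fsrv->testing is set, the sentinel it flips is bit number num_reachables, i.e. byte num_reachables / 8, bit num_reachables % 8, one position past the bits used for the reachable blocks themselves. This assumes MAP_RBB_SIZE() rounds up with at least that one spare bit; the macro below is an assumption for illustration only, not the definition used by the patch:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define MAP_RBB_SIZE(n) (((n) / 8) + 1)   /* assumed shape, for the sketch */

    static void set_bit(uint8_t *map, uint32_t idx) {
      map[idx / 8] |= (uint8_t)(1u << (idx % 8));
    }

    static int get_bit(const uint8_t *map, uint32_t idx) {
      return (map[idx / 8] >> (idx % 8)) & 1;
    }

    int main(void) {
      enum { NUM_REACHABLES = 13 };
      uint8_t map[MAP_RBB_SIZE(NUM_REACHABLES)];
      memset(map, 0, sizeof(map));
      set_bit(map, NUM_REACHABLES);          /* the "count frequency" sentinel */
      assert(get_bit(map, NUM_REACHABLES) == 1);
      assert(get_bit(map, 0) == 0);          /* real block bits stay clear */
      return 0;
    }
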
diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c
index 485b82db..f05bb7db 100644
--- a/src/afl-fuzz-bitmap.c
+++ b/src/afl-fuzz-bitmap.c
@@ -24,6 +24,7 @@
  */
 
 #include "afl-fuzz.h"
+#include "aflrun.h"
 #include <limits.h>
 #if !defined NAME_MAX
   #define NAME_MAX _XOPEN_NAME_MAX
@@ -237,6 +238,35 @@ inline u8 has_new_bits(afl_state_t *afl, u8 *virgin_map) {
 
 }
 
+inline u8 has_new_bits_mul(afl_state_t *afl,
+  u8* const *virgin_maps, u8** p_new_bits, size_t num, u8 modify) {
+
+  u8* new_bits = *p_new_bits = afl_realloc((void **)p_new_bits, sizeof(u8) * num);
+  memset(new_bits, 0, sizeof(u8) * num);
+
+  // TODO: 32-bit
+  u64 *current = (u64 *)afl->fsrv.trace_bits;
+  u64* const *virgins = (u64* const *)virgin_maps;
+
+  u32 len = ((afl->fsrv.real_map_size + 7) >> 3);
+
+  for (u32 i = 0; i < len; ++i, ++current) {
+
+    if (unlikely(*current))
+      discover_word_mul(new_bits, current, virgins, num, i, modify);
+
+  }
+
+  u8 primary = new_bits[0], diversity = 0;
+  for (size_t i = 1; i < num; ++i) // Get max level of new edge from all div maps
+    diversity = MAX(diversity, new_bits[i]);
+
+  // The lowest 2 bits are the result from the primary map,
+  // and bits 2-3 are the result from the diversity maps
+  return primary | (diversity << 2);
+
+}
+
 /* A combination of classify_counts and has_new_bits. If 0 is returned, then the
  * trace bits are kept as-is. Otherwise, the trace bits are overwritten with
  * classified values.
@@ -247,25 +277,29 @@ inline u8 has_new_bits(afl_state_t *afl, u8 *virgin_map) {
  * on rare cases it fall backs to the slow path: classify_counts() first, then
  * return has_new_bits(). */
 
-inline u8 has_new_bits_unclassified(afl_state_t *afl, u8 *virgin_map) {
+static u8 has_new_bits_unclassified(
+  afl_state_t *afl, u8 *const * virgin_maps, size_t num) {
 
   /* Handle the hot path first: no new coverage */
   u8 *end = afl->fsrv.trace_bits + afl->fsrv.map_size;
 
 #ifdef WORD_SIZE_64
 
-  if (!skim((u64 *)virgin_map, (u64 *)afl->fsrv.trace_bits, (u64 *)end))
+  if (!skim((const u64* const*)virgin_maps, num,
+    (u64 *)afl->fsrv.trace_bits, (u64 *)end))
     return 0;
 
 #else
 
+  #error "TODO: 32-bit"
   if (!skim((u32 *)virgin_map, (u32 *)afl->fsrv.trace_bits, (u32 *)end))
     return 0;
 
 #endif                                                     /* ^WORD_SIZE_64 */
-  classify_counts(&afl->fsrv);
-  return has_new_bits(afl, virgin_map);
 
+  // We don't classify counts or call `has_new_bits_mul` here,
+  // because some virgin maps may be missed due to an incomplete fringe
+  return 1;
 }
 
 /* Compact trace bytes into a smaller bitmap. We effectively just drop the
@@ -290,7 +324,8 @@ void minimize_bits(afl_state_t *afl, u8 *dst, u8 *src) {
 /* Construct a file name for a new test case, capturing the operation
    that led to its discovery. Returns a ptr to afl->describe_op_buf_256. */
 
-u8 *describe_op(afl_state_t *afl, u8 new_bits, size_t max_description_len) {
+u8 *describe_op(afl_state_t *afl,
+  u8 new_bits, u8 new_paths, size_t max_description_len) {
 
   u8 is_timeout = 0;
 
@@ -301,6 +336,9 @@ u8 *describe_op(afl_state_t *afl, u8 new_bits, size_t max_description_len) {
 
   }
 
+  u8 new_div = new_bits >> 2;
+  new_bits &= 3;
+
   size_t real_max_len =
       MIN(max_description_len, sizeof(afl->describe_op_buf_256));
   u8 *ret = afl->describe_op_buf_256;
@@ -333,7 +371,8 @@ u8 *describe_op(afl_state_t *afl, u8 new_bits, size_t max_description_len) {
       ret[len_current++] = ',';
       ret[len_current] = '\0';
 
-      ssize_t size_left = real_max_len - len_current - strlen(",+cov") - 2;
+      ssize_t size_left = real_max_len - len_current -
+        strlen(",+cov2") - strlen(",+div2") - strlen(",+path") - 2;
       if (is_timeout) { size_left -= strlen(",+tout"); }
       if (unlikely(size_left <= 0)) FATAL("filename got too long");
 
@@ -382,7 +421,15 @@ u8 *describe_op(afl_state_t *afl, u8 new_bits, size_t max_description_len) {
 
   if (is_timeout) { strcat(ret, ",+tout"); }
 
-  if (new_bits == 2) { strcat(ret, ",+cov"); }
+  if (new_bits) {
+    strcat(ret, ",+cov");
+    if (new_bits == 2) { strcat(ret, "2"); }
+  }
+  if (new_div) {
+    strcat(ret, ",+div");
+    if (new_div == 2) { strcat(ret, "2"); }
+  }
+  if (new_paths) { strcat(ret, ",+path"); }
 
   if (unlikely(strlen(ret) >= max_description_len))
     FATAL("describe string is too long");
@@ -453,13 +500,17 @@ void write_crash_readme(afl_state_t *afl) {
    entry is saved, 0 otherwise. */
 
 u8 __attribute__((hot))
-save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
+save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault, u8 inc) {
 
-  if (unlikely(len == 0)) { return 0; }
+  if (unlikely(len == 0)) {
+    aflrun_recover_virgin(afl);
+    return 0;
+  }
 
   u8  fn[PATH_MAX];
   u8 *queue_fn = "";
-  u8  new_bits = 0, keeping = 0, res, classified = 0, is_timeout = 0;
+  u8  new_bits = 0, new_paths = 0,
+    keeping = 0, res, classified = 0, is_timeout = 0;
   s32 fd;
   u64 cksum = 0;
 
@@ -467,7 +518,8 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
 
   /* Generating a hash on every input is super expensive. Bad idea and should
      only be used for special schedules */
-  if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE)) {
+  if (unlikely(!afl->is_aflrun &&
+    afl->schedule >= FAST && afl->schedule <= RARE)) {
 
     cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
 
@@ -482,16 +534,63 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
     /* Keep only if there are new bits in the map, add to queue for
        future fuzzing, etc. */
 
-    new_bits = has_new_bits_unclassified(afl, afl->virgin_bits);
+    size_t n = afl->fsrv.trace_targets->num;
+    afl->virgins = afl_realloc((void **)&afl->virgins, sizeof(u8*) * (n+1));
+    afl->clusters = afl_realloc((void **)&afl->clusters, sizeof(size_t) * (n+1));
+    afl->virgins[0] = afl->virgin_bits;
+    afl->clusters[0] = 0; // primary map is always cluster 0
+    afl->num_maps = aflrun_get_virgins(afl->fsrv.trace_targets->trace, n,
+      afl->virgins + 1, afl->clusters + 1) + 1;
+    new_bits = has_new_bits_unclassified(afl, afl->virgins, afl->num_maps);
+    if (new_bits) {
+      classify_counts(&afl->fsrv);
+      classified = 1;
+      has_new_bits_mul(afl, afl->virgins, &afl->new_bits, afl->num_maps, 0);
+    }
+    new_paths = aflrun_has_new_path(afl->fsrv.trace_freachables,
+      afl->fsrv.trace_reachables, afl->fsrv.trace_ctx,
+      afl->fsrv.trace_virgin->trace, afl->fsrv.trace_virgin->num,
+      inc, afl->queued_items,
+      new_bits ? afl->new_bits : NULL, afl->clusters, afl->num_maps);
 
-    if (likely(!new_bits)) {
+    if (likely(!new_bits && !new_paths)) {
 
       if (unlikely(afl->crash_mode)) { ++afl->total_crashes; }
       return 0;
 
     }
 
-    classified = new_bits;
+    /*/ DEBUG
+    printf("Targets(Interesting): ");
+    for (size_t i = 0; i < n; ++i) {
+      printf("%u ", afl->fsrv.trace_targets->trace[i].block);
+    }
+    printf("\n");
+    printf("Clusters(Interesting): ");
+    for (size_t i = 0; i < afl->num_maps; ++i) {
+      printf("%u ", afl->clusters[i]);
+    }
+    printf("\n");
+    // DEBUG*/
+
+    // We classify and update bits after the related fringes are updated,
+    // but before that we may need to update `virgin_maps`
+    // because there might be new fringes.
+
+    n = aflrun_max_clusters(afl->queued_items);
+    afl->virgins = afl_realloc((void **)&afl->virgins, sizeof(u8*) * n);
+    afl->clusters = afl_realloc((void **)&afl->clusters, sizeof(size_t) * n);
+    afl->virgins[0] = afl->virgin_bits;
+    afl->clusters[0] = 0;
+    afl->num_maps = aflrun_get_seed_virgins(
+      afl->queued_items, afl->virgins + 1, afl->clusters + 1) + 1;
+
+    if (!classified) {
+      classify_counts(&afl->fsrv);
+      classified = 1;
+    }
+    new_bits = has_new_bits_mul(
+      afl, afl->virgins, &afl->new_bits, afl->num_maps, 1);
 
   save_to_queue:
 
@@ -499,7 +598,7 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
 
     queue_fn =
         alloc_printf("%s/queue/id:%06u,%s", afl->out_dir, afl->queued_items,
-                     describe_op(afl, new_bits + is_timeout,
+                     describe_op(afl, new_bits + is_timeout, new_paths,
                                  NAME_MAX - strlen("id:000000,")));
 
 #else
@@ -513,6 +612,15 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
     ck_write(fd, mem, len, queue_fn);
     close(fd);
     add_to_queue(afl, queue_fn, len, 0);
+    afl->queue_top->tested = 1;
+    afl->queue_top->path_cksum = hash64(
+      afl->fsrv.trace_ctx, MAP_TR_SIZE(afl->fsrv.num_reachables), HASH_CONST);
+
+    // If the new seed only comes from diversity or path, mark it as extra
+    if ((new_bits & 3) == 0 && ((new_bits >> 2) || new_paths)) {
+      ++afl->queued_extra;
+      afl->queue_top->aflrun_extra = 1;
+    }
 
 #ifdef INTROSPECTION
     if (afl->custom_mutators_count && afl->current_custom_fuzz) {
@@ -543,7 +651,7 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
 
 #endif
 
-    if (new_bits == 2) {
+    if ((new_bits & 3) == 2) {
 
       afl->queue_top->has_new_cov = 1;
       ++afl->queued_with_cov;
@@ -565,7 +673,7 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
     /* Try to calibrate inline; this also calls update_bitmap_score() when
        successful. */
 
-    res = calibrate_case(afl, afl->queue_top, mem, afl->queue_cycle - 1, 0);
+    res = calibrate_case(afl, afl->queue_top, mem, aflrun_queue_cycle(), 0);
 
     if (unlikely(res == FSRV_RUN_ERROR)) {
 
@@ -582,6 +690,9 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
     keeping = 1;
 
   }
+  else {
+    aflrun_recover_virgin(afl);
+  }
 
   switch (fault) {
 
@@ -678,6 +789,13 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
           if (afl->afl_env.afl_keep_timeouts) {
 
             ++afl->saved_tmouts;
+            // For the saved timeout case, we don't update it with aflrun,
+            // so we don't call `aflrun_has_new_path` on it, i.e. `tested = 1`.
+            // Also, we need to set virgin map array to have only primary map.
+            afl->virgins = afl_realloc((void **)&afl->virgins, sizeof(u8*));
+            afl->virgins[0] = afl->virgin_bits;
+            afl->clusters[0] = 0;
+            afl->num_maps = 1;
             goto save_to_queue;
 
           } else {
@@ -694,7 +812,7 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
 
       snprintf(fn, PATH_MAX, "%s/hangs/id:%06llu,%s", afl->out_dir,
                afl->saved_hangs,
-               describe_op(afl, 0, NAME_MAX - strlen("id:000000,")));
+               describe_op(afl, 0, 0, NAME_MAX - strlen("id:000000,")));
 
 #else
 
@@ -742,7 +860,7 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
 
       snprintf(fn, PATH_MAX, "%s/crashes/id:%06llu,sig:%02u,%s", afl->out_dir,
                afl->saved_crashes, afl->fsrv.last_kill_signal,
-               describe_op(afl, 0, NAME_MAX - strlen("id:000000,sig:00,")));
+               describe_op(afl, 0, 0, NAME_MAX - strlen("id:000000,sig:00,")));
 
 #else
 
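The encoding returned by has_new_bits_mul() is reused throughout this file: the two low bits carry the primary-map result (0 = nothing, 1 = new hit count, 2 = new edge) and bits 2-3 carry the maximum result over the diversity maps, which is exactly how describe_op() and save_if_interesting() unpack it. A minimal sketch of the packing:

    #include <assert.h>
    #include <stdint.h>

    static uint8_t pack_new_bits(uint8_t primary, uint8_t diversity) {
      return (uint8_t)(primary | (diversity << 2));   /* same as has_new_bits_mul */
    }

    int main(void) {
      uint8_t r = pack_new_bits(1, 2);
      assert((r & 3) == 1);                   /* ",+cov"  : new hit count, primary map */
      assert((r >> 2) == 2);                  /* ",+div2" : new edge on a diversity map */
      assert((pack_new_bits(2, 0) & 3) == 2); /* the queue_top->has_new_cov case */
      return 0;
    }
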
diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c
index adfc55ad..3fe308b8 100644
--- a/src/afl-fuzz-init.c
+++ b/src/afl-fuzz-init.c
@@ -24,8 +24,12 @@
  */
 
 #include "afl-fuzz.h"
-#include <limits.h>
+#include "aflrun.h"
 #include "cmplog.h"
+#include <limits.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
 
 #ifdef HAVE_AFFINITY
 
@@ -624,7 +628,7 @@ void read_foreign_testcases(afl_state_t *afl, int first) {
         u32 len = write_to_testcase(afl, (void **)&mem, st.st_size, 1);
         fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);
         afl->syncing_party = foreign_name;
-        afl->queued_imported += save_if_interesting(afl, mem, len, fault);
+        afl->queued_imported += save_if_interesting(afl, mem, len, fault, 0);
         afl->syncing_party = 0;
         munmap(mem, st.st_size);
         close(fd);
@@ -655,8 +659,7 @@ void read_foreign_testcases(afl_state_t *afl, int first) {
 void read_testcases(afl_state_t *afl, u8 *directory) {
 
   struct dirent **nl;
-  s32             nl_cnt, subdirs = 1;
-  u32             i;
+  s32             i, nl_cnt, subdirs = 1;
   u8             *fn1, *dir = directory;
   u8              val_buf[2][STRINGIFY_VAL_SIZE_MAX];
 
@@ -716,10 +719,7 @@ void read_testcases(afl_state_t *afl, u8 *directory) {
 
   if (nl_cnt) {
 
-    i = nl_cnt;
-    do {
-
-      --i;
+    for (i = 0; i < nl_cnt; ++i) {
 
       struct stat st;
       u8          dfn[PATH_MAX];
@@ -810,7 +810,7 @@ void read_testcases(afl_state_t *afl, u8 *directory) {
 
       */
 
-    } while (i > 0);
+    }
 
   }
 
@@ -1163,7 +1163,7 @@ void perform_dry_run(afl_state_t *afl) {
       struct queue_entry *p = afl->queue_buf[i];
       if (p->disabled || p->cal_failed || !p->exec_cksum) { continue; }
 
-      if (p->exec_cksum == q->exec_cksum) {
+      if (p->exec_cksum == q->exec_cksum && p->path_cksum == q->path_cksum) {
 
         duplicates = 1;
 
@@ -1179,6 +1179,7 @@ void perform_dry_run(afl_state_t *afl) {
           }
 
           p->disabled = 1;
+          aflrun_remove_seed(p->id);
           p->perf_score = 0;
 
         } else {
@@ -1192,6 +1193,7 @@ void perform_dry_run(afl_state_t *afl) {
           }
 
           q->disabled = 1;
+          aflrun_remove_seed(q->id);
           q->perf_score = 0;
 
           done = 1;
@@ -1222,6 +1224,9 @@ void perform_dry_run(afl_state_t *afl) {
 
   OKF("All test cases processed.");
 
+  if (getenv("AFLRUN_TRACE_CORPUS"))
+    exit(0);
+
 }
 
 /* Helper function: link() if possible, copy otherwise. */
@@ -1813,6 +1818,10 @@ static void handle_existing_out_dir(afl_state_t *afl) {
   if (delete_files(fn, CASE_PREFIX)) { goto dir_cleanup_failed; }
   ck_free(fn);
 
+  fn = alloc_printf("%s/cvx", afl->out_dir);
+  if (delete_files(fn, NULL)) { goto dir_cleanup_failed; }
+  ck_free(fn);
+
   /* And now, for some finishing touches. */
 
   if (afl->file_extension) {
@@ -2027,6 +2036,12 @@ void setup_dirs_fds(afl_state_t *afl) {
   if (mkdir(tmp, 0700)) { PFATAL("Unable to create '%s'", tmp); }
   ck_free(tmp);
 
+  /* All convex optimization information */
+
+  tmp = alloc_printf("%s/cvx", afl->out_dir);
+  if (mkdir(tmp, 0700)) { PFATAL("Unable to create '%s'", tmp); }
+  ck_free(tmp);
+
   /* All recorded hangs. */
 
   tmp = alloc_printf("%s/hangs", afl->out_dir);
@@ -2863,6 +2878,13 @@ void check_binary(afl_state_t *afl, u8 *fname) {
 
   }
 
+  // Load path to AFLRun temporary directory
+  const char* temp_str = memmem(
+    f_data, f_len, AFLRUN_TEMP_SIG, strlen(AFLRUN_TEMP_SIG));
+  if (temp_str == NULL)
+    FATAL("Binary was not compiled with the AFLRun compiler.");
+  afl->temp_dir = strdup(temp_str + strlen(AFLRUN_TEMP_SIG));
+
   if (munmap(f_data, f_len)) { PFATAL("unmap() failed"); }
 
 }
@@ -2968,3 +2990,182 @@ void save_cmdline(afl_state_t *afl, u32 argc, char **argv) {
 
 }
 
+/* Initialize AFLRun with the temp dir */
+
+void aflrun_temp_dir_init(afl_state_t* afl, const char* temp_dir) {
+
+  u8* bbr_path = alloc_printf("%s/BBreachable.txt", temp_dir);
+  FILE* fd = fopen(bbr_path, "r");
+  ck_free(bbr_path);
+  if (fd == NULL)
+    FATAL("Failed to open BBreachable.txt");
+  char* line = NULL; size_t len = 0;
+  ssize_t res = getline(&line, &len, fd);
+  if (res == -1 || line == NULL)
+    FATAL("Failed to read BBreachable.txt");
+
+  char* endptr;
+  afl->fsrv.num_targets = strtoul(line, &endptr, 10);
+  if (*endptr != ',')
+    FATAL("Wrong format for BBreachable.txt");
+  *endptr = 0;
+  afl->fsrv.num_reachables = strtoul(endptr + 1, &endptr, 10);
+  if (*endptr != 0 && *endptr != '\n')
+    FATAL("Wrong format for BBreachable.txt");
+
+  if (afl->fsrv.num_targets == 0 ||
+    afl->fsrv.num_targets > afl->fsrv.num_reachables)
+    FATAL("Wrong number of targets and reachables");
+
+  afl->reachable_names = malloc(
+    afl->fsrv.num_reachables * sizeof(char*));
+
+  free(line);
+
+  afl->reachable_to_targets =
+    malloc(afl->fsrv.num_reachables * sizeof(reach_t*));
+  afl->reachable_to_size =
+    malloc(afl->fsrv.num_reachables * sizeof(reach_t));
+  afl->virgin_reachables =
+    malloc(MAP_RBB_SIZE(afl->fsrv.num_reachables));
+  afl->target_weights =
+    malloc(afl->fsrv.num_targets * sizeof(double));
+
+  reach_t next_idx = 0;
+  while (1) {
+    line = NULL; len = 0;
+    res = getline(&line, &len, fd);
+    if (res == -1) {
+      free(line);
+      break;
+    }
+
+    // Parse the line into targets
+    reach_t* buf = malloc(afl->fsrv.num_targets * sizeof(reach_t));
+    reach_t idx = 0;
+    char* iter = strchr(line, ',');
+    if (iter == NULL)
+      FATAL("Wrong format for BBreachable.txt");
+    *iter = 0;
+    afl->reachable_names[next_idx] = strdup(line);
+    ++iter;
+    char* endptr;
+    while (1) {
+      if (idx >= afl->fsrv.num_targets)
+        FATAL("Too many elements in line of BBreachable.txt");
+      reach_t t = strtoul(iter, &endptr, 10);
+      if (t >= afl->fsrv.num_targets)
+        FATAL("Invalid target in line of BBreachable.txt");
+      buf[idx++] = t;
+      if (*endptr != ',')
+        break;
+      iter = endptr + 1;
+    }
+    if (next_idx < afl->fsrv.num_targets) {
+      if (*endptr != '|')
+        FATAL("Need weight for each target");
+      afl->target_weights[next_idx] = strtod(endptr + 1, NULL);
+    }
+
+    if (next_idx >= afl->fsrv.num_reachables)
+      FATAL("Header and content of BBreachable.txt do not match");
+    afl->reachable_to_size[next_idx] = idx;
+    afl->reachable_to_targets[next_idx++] =
+      realloc(buf, idx * sizeof(reach_t));
+    free(line);
+  }
+  if (next_idx != afl->fsrv.num_reachables)
+    FATAL("Header and content of BBreachable.txt do not match");
+
+  fclose(fd);
+
+  aflrun_load_freachables(
+    temp_dir, &afl->fsrv.num_ftargets, &afl->fsrv.num_freachables);
+  if (afl->fsrv.num_ftargets == 0 ||
+    afl->fsrv.num_ftargets > afl->fsrv.num_freachables)
+      FATAL("Parsing Freachable.txt failed");
+  afl->virgin_freachables = malloc(
+    MAP_RF_SIZE(afl->fsrv.num_freachables));
+
+  ACTF("Loading edges...");
+  aflrun_load_edges(temp_dir, afl->fsrv.num_reachables);
+  ACTF("Loading distances...");
+  aflrun_load_dists(
+    temp_dir, afl->fsrv.num_targets,
+    afl->fsrv.num_reachables, afl->reachable_names);
+  aflrun_init_groups(afl->fsrv.num_targets);
+
+}
+
+/* Search for a directory named `temp_dir` under `dir_path`; return its
+   full path if one is found. The returned buffer must be free()d. */
+
+static char* search_temp_dir(const char* dir_path, const char* temp_dir) {
+
+  DIR *dir = opendir(dir_path);
+  if (dir == NULL)
+    return NULL;
+
+  struct dirent *entry;
+  while ((entry = readdir(dir)) != NULL) {
+    if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0)
+      continue;
+
+    char* sub_dir = malloc(PATH_MAX);
+    snprintf(sub_dir, PATH_MAX, "%s/%s", dir_path, entry->d_name);
+
+    struct stat path_stat;
+    if (lstat(sub_dir, &path_stat) != 0) {
+      free(sub_dir);
+      continue;
+    }
+
+    if (S_ISDIR(path_stat.st_mode)) {
+      if (strcmp(entry->d_name, temp_dir) == 0) {
+        closedir(dir);                 // don't leak the directory handle
+        return sub_dir;
+      }
+      char* res = search_temp_dir(sub_dir, temp_dir);
+      if (res) {
+        free(sub_dir);
+        closedir(dir);
+        return res;
+      }
+    }
+
+    free(sub_dir);
+
+  }
+
+  closedir(dir);
+  return NULL;
+}
+
+/* Given the temp dir path stored in the binary, find its real path.
+   Returns an allocated real path if one is found, NULL otherwise. */
+
+char* aflrun_find_temp(char* temp_dir) {
+
+  size_t len = strlen(temp_dir);
+  if (len == 0) {
+    return NULL;
+  }
+
+  // If the path in the binary is correct, simply use the directory.
+  struct stat path_stat;
+  if (lstat(temp_dir, &path_stat) == 0 && S_ISDIR(path_stat.st_mode))
+    return strdup(temp_dir);
+
+  // Remove the trailing slashes
+  for (char* p = temp_dir + len - 1; *p == '/' || *p == '\\'; --p) *p = 0;
+
+  char* dir_name = strrchr(temp_dir, '/') + 1;
+
+  char pwd[PATH_MAX];
+  if (getcwd(pwd, PATH_MAX) == NULL)
+    FATAL("getcwd() failed");
+
+  // As a fallback, we search for a directory whose name matches the stored name.
+  // Since the temp dir name is randomly generated, collisions are unlikely,
+  // so this approach should work properly.
+
+  return search_temp_dir(pwd, dir_name);
+
+}
\ No newline at end of file
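
For reference, the BBreachable.txt layout implied by the parser in aflrun_temp_dir_init(): a header line "<num_targets>,<num_reachables>", then one line per reachable block of the form "<name>,<target idx>,...", where the first <num_targets> lines additionally end in "|<weight>". The file itself is produced by the AFLRun compilation pipeline outside this diff, so the names and weights below are purely illustrative:

    2,4
    vuln_free,0|1.0
    vuln_memcpy,1|2.5
    caller_bb,0,1
    entry_bb,0,1
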
diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c
index 22e5262e..18f315f4 100644
--- a/src/afl-fuzz-mutators.c
+++ b/src/afl-fuzz-mutators.c
@@ -25,6 +25,7 @@
  */
 
 #include "afl-fuzz.h"
+#include "aflrun.h"
 
 struct custom_mutator *load_custom_mutator(afl_state_t *, const char *);
 #ifdef USE_PYTHON
@@ -209,7 +210,7 @@ struct custom_mutator *load_custom_mutator(afl_state_t *afl, const char *fn) {
     mutator->afl_custom_fuzz = dlsym(dh, "afl_custom_mutator");
     if (!mutator->afl_custom_fuzz) {
 
-      WARNF("Symbol 'afl_custom_mutator' not found.");
+      FATAL("Neither symbol 'afl_custom_mutator' nor 'afl_custom_fuzz' found.");
 
     } else {
 
@@ -452,7 +453,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf,
     sprintf(afl->stage_name_buf, "ptrim %s",
             u_stringify_int(val_buf, trim_exec));
 
-    u64 cksum;
+    u64 cksum, pcksum;
 
     size_t retlen = mutator->afl_custom_trim(mutator->data, &retbuf);
 
@@ -507,12 +508,14 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf,
 
         classify_counts(&afl->fsrv);
         cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+        pcksum = hash64(
+          afl->fsrv.trace_ctx, MAP_TR_SIZE(afl->fsrv.num_reachables), HASH_CONST);
 
       }
 
     }
 
-    if (likely(retlen && cksum == q->exec_cksum)) {
+    if (likely(retlen && cksum == q->exec_cksum && pcksum == q->path_cksum)) {
 
       /* Let's save a clean trace, which will be needed by
          update_bitmap_score once we're done with the trimming stuff.
@@ -598,6 +601,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf,
 
     memcpy(afl->fsrv.trace_bits, afl->clean_trace_custom, afl->fsrv.map_size);
     update_bitmap_score(afl, q);
+    aflrun_update_fringe_score(q->id);
 
   }
 
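The acceptance check that trim_case_custom() now applies can be read as a single predicate: a trimmed input replaces the original only if it preserves both the classified edge-coverage hash and the aflrun context-path hash over trace_ctx. A sketch against the headers used in this patch (not code added by it):

    #include "afl-fuzz.h"
    #include "aflrun.h"

    /* Sketch: a trimmed input is equivalent to seed q only if both checksums hold. */
    static int trim_still_equivalent(afl_state_t *afl, struct queue_entry *q) {

      u64 cksum  = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
      u64 pcksum = hash64(afl->fsrv.trace_ctx,
                          MAP_TR_SIZE(afl->fsrv.num_reachables), HASH_CONST);
      return cksum == q->exec_cksum && pcksum == q->path_cksum;

    }
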
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index 97855607..40db2df4 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -24,6 +24,7 @@
  */
 
 #include "afl-fuzz.h"
+#include "aflrun.h"
 #include <string.h>
 #include <limits.h>
 #include "cmplog.h"
@@ -76,7 +77,7 @@ static int select_algorithm(afl_state_t *afl, u32 max_algorithm) {
 static inline u32 choose_block_len(afl_state_t *afl, u32 limit) {
 
   u32 min_value, max_value;
-  u32 rlim = MIN(afl->queue_cycle, (u32)3);
+  u32 rlim = MIN(aflrun_queue_cycle() + 1, (u32)3);
 
   if (unlikely(!afl->run_over10m)) { rlim = 1; }
 
@@ -363,6 +364,72 @@ static void locate_diffs(u8 *ptr1, u8 *ptr2, u32 len, s32 *first, s32 *last) {
 
 #endif                                                     /* !IGNORE_FINDS */
 
+static void log_when_no_tty(afl_state_t *afl) {
+
+  if (unlikely(afl->not_on_tty)) {
+
+    u32 idx = afl->is_aflrun ? afl->aflrun_idx :
+      (afl->old_seed_selection ? afl->current_entry : afl->runs_in_current_cycle);
+
+    ACTF(
+        "Fuzzing test case #%u,%u (%u total, %u disabled, %llu crashes saved, "
+        "perf_score=%0.0f, quant_score=%0.0f, "
+        "exec_us=%llu, hits=%u, map=%u, ascii=%u)...",
+        afl->queue_cur->id, idx,
+        afl->queued_items, afl->queued_extra_disabled, afl->saved_crashes,
+        afl->queue_cur->perf_score, afl->queue_cur->quant_score,
+        afl->queue_cur->exec_us,
+        likely(afl->n_fuzz) ? afl->n_fuzz[afl->queue_cur->n_fuzz_entry] : 0,
+        afl->queue_cur->bitmap_size, afl->queue_cur->is_ascii);
+    static const char mode_c[] = {'C', 'N', 'P', 'T', 'U'};
+    u8 mode = aflrun_get_mode();
+    u64 t = get_cur_time();
+    reach_t num_reached, num_freached;
+    reach_t num_reached_targets, num_freached_targets;
+    size_t div_num_invalid, div_num_fringes;
+    int cycle_count; u32 cov_quant;
+    u64 last_reachable, last_fringe, last_pro_fringe, last_target;
+    u64 last_ctx_reachable, last_ctx_fringe, last_ctx_pro_fringe, last_ctx_target;
+    aflrun_get_reached(
+      &num_reached, &num_freached, &num_reached_targets, &num_freached_targets);
+    aflrun_get_state(
+      &cycle_count, &cov_quant, &div_num_invalid, &div_num_fringes);
+    aflrun_get_time(&last_reachable, &last_fringe, &last_pro_fringe,
+      &last_target, &last_ctx_reachable, &last_ctx_fringe,
+      &last_ctx_pro_fringe, &last_ctx_target);
+    ACTF("%c,%d,%llu,%u %lu/%lu %llu/%llu/%llu/%llu %llu/%llu/%llu/%llu "
+        "%llu/%llu(%llu%%) %llu/%llu(%llu%%) "
+#ifdef AFLRUN_OVERHEAD
+        "%0.02f%%, "
+#endif // AFLRUN_OVERHEAD
+        "%0.02f%%, %0.02f%%, %0.02f%%",
+      mode_c[mode], cycle_count, aflrun_queue_cycle(), cov_quant,
+      div_num_invalid, div_num_fringes,
+      (t - last_reachable) / 1000, (t - last_fringe) / 1000,
+      (t - last_pro_fringe) / 1000, (t - last_target) / 1000,
+      (t - last_ctx_reachable) / 1000, (t - last_ctx_fringe) / 1000,
+      (t - last_ctx_pro_fringe) / 1000, (t - last_ctx_target) / 1000,
+      (u64)num_reached, (u64)afl->fsrv.num_reachables,
+      100uLL * num_reached / afl->fsrv.num_reachables,
+      (u64)num_reached_targets, (u64)afl->fsrv.num_targets,
+      100uLL * num_reached_targets / afl->fsrv.num_targets,
+#ifdef AFLRUN_OVERHEAD
+      (double)afl->fsrv.trace_virgin->overhead * 100 /
+      (afl->exec_time + afl->fuzz_time),
+#endif // AFLRUN_OVERHEAD
+      (double)afl->exec_time * 100 /
+        (afl->exec_time + afl->fuzz_time),
+      (double)afl->exec_time_short * 100 /
+        (afl->exec_time_short + afl->fuzz_time_short),
+        afl->quantum_ratio * 100);
+
+
+    fflush(stdout);
+
+  }
+
+}
+
 /* Take the current entry from the queue, fuzz it for a while. This
    function is a tad too long... returns 0 if fuzzed successfully, 1 if
    skipped or bailed out. */
@@ -374,7 +441,8 @@ u8 fuzz_one_original(afl_state_t *afl) {
   u32 i;
   u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
   u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt = 0, prev_cksum, _prev_cksum;
-  u32 splice_cycle = 0, perf_score = 100, orig_perf, eff_cnt = 1;
+  u32 splice_cycle = 0, retry_splicing_times, eff_cnt = 1, num_execs = UINT_MAX;
+  double perf_score = 100, orig_perf;
 
   u8 ret_val = 1, doing_det = 0;
 
@@ -407,6 +475,13 @@ u8 fuzz_one_original(afl_state_t *afl) {
 
   }
 
+  if (!afl->is_aflrun) {
+
+    // For extra seed from aflrun, we don't fuzz it in coverage mode
+    // unless it is a favored seed
+    if (!afl->queue_cur->favored && afl->queue_cur->aflrun_extra)
+      return 1;
+
   if (likely(afl->pending_favored)) {
 
     /* If we have any favored, non-fuzzed new arrivals in the queue,
@@ -428,7 +503,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
        The odds of skipping stuff are higher for already-fuzzed inputs and
        lower for never-fuzzed entries. */
 
-    if (afl->queue_cycle > 1 && !afl->queue_cur->fuzz_level) {
+      if (aflrun_queue_cycle() > 0 && !afl->queue_cur->fuzz_level) {
 
       if (likely(rand_below(afl, 100) < SKIP_NFAV_NEW_PROB)) { return 1; }
 
@@ -440,20 +515,11 @@ u8 fuzz_one_original(afl_state_t *afl) {
 
   }
 
-#endif                                                     /* ^IGNORE_FINDS */
-
-  if (unlikely(afl->not_on_tty)) {
+  }
 
-    ACTF(
-        "Fuzzing test case #%u (%u total, %llu crashes saved, "
-        "perf_score=%0.0f, exec_us=%llu, hits=%u, map=%u, ascii=%u)...",
-        afl->current_entry, afl->queued_items, afl->saved_crashes,
-        afl->queue_cur->perf_score, afl->queue_cur->exec_us,
-        likely(afl->n_fuzz) ? afl->n_fuzz[afl->queue_cur->n_fuzz_entry] : 0,
-        afl->queue_cur->bitmap_size, afl->queue_cur->is_ascii);
-    fflush(stdout);
+#endif                                                     /* ^IGNORE_FINDS */
 
-  }
+  log_when_no_tty(afl);
 
   orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur);
   len = afl->queue_cur->len;
@@ -478,7 +544,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
       afl->queue_cur->exec_cksum = 0;
 
       res =
-          calibrate_case(afl, afl->queue_cur, in_buf, afl->queue_cycle - 1, 0);
+          calibrate_case(afl, afl->queue_cur, in_buf, aflrun_queue_cycle(), 0);
 
       if (unlikely(res == FSRV_RUN_ERROR)) {
 
@@ -502,7 +568,9 @@ u8 fuzz_one_original(afl_state_t *afl) {
    ************/
 
   if (unlikely(!afl->non_instrumented_mode && !afl->queue_cur->trim_done &&
-               !afl->disable_trim)) {
+               !afl->disable_trim && (!afl->is_aflrun ||
+                 // In aflrun mode, we only trim when quant is large enough
+                 afl->queue_cur->quant_score > afl->trim_thr))) {
 
     u32 old_len = afl->queue_cur->len;
 
@@ -538,8 +606,25 @@ u8 fuzz_one_original(afl_state_t *afl) {
   /*********************
    * PERFORMANCE SCORE *
    *********************/
+  if (likely(afl->is_aflrun)) {
+    orig_perf = perf_score = afl->queue_cur->quant_score;
+    // num_execs = 0.1s * (1 / exec_us) * quantum
+    num_execs = QUANTUM_TIME * perf_score / afl->queue_cur->exec_us;
+
+    // If num_execs is smaller than the minimum threshold:
+    if (num_execs < afl->min_num_exec) {
+
+      // With probability num_execs/afl->min_num_exec, we execute min_num_exec times.
+      if (rand_below(afl, afl->min_num_exec) < num_execs)
+        num_execs = afl->min_num_exec;
+      else // otherwise, we skip
+        goto abandon_entry;
+      // In this way, the expected number of executions is still num_execs.
+
+    }
 
-  if (likely(!afl->old_seed_selection))
+  }
+  else if (likely(!afl->old_seed_selection))
     orig_perf = perf_score = afl->queue_cur->perf_score;
   else
     afl->queue_cur->perf_score = orig_perf = perf_score =
@@ -1906,15 +1991,37 @@ custom_mutator_stage:
 
   if (likely(!afl->custom_mutators_count)) { goto havoc_stage; }
 
+  if (afl->is_aflrun) {
+
+    afl->stage_name = "run-custom";
+    afl->stage_short = "run-custom";
+    if (afl->custom_only) {
+      afl->stage_max = num_execs / afl->custom_mutators_count;
+      num_execs = 0;
+    }
+    else {
+      afl->stage_max = AFLRUN_CUSTOM_TIMES(num_execs, afl->custom_mutators_count);
+      // Leave remaining counts to havoc and splice
+      num_execs -= afl->stage_max * afl->custom_mutators_count;
+    }
+
+  }
+  else {
+
   afl->stage_name = "custom mutator";
   afl->stage_short = "custom";
   afl->stage_max = HAVOC_CYCLES * perf_score / afl->havoc_div / 100;
+
+  }
   afl->stage_val_type = STAGE_VAL_NONE;
   bool has_custom_fuzz = false;
 
-  if (afl->stage_max < HAVOC_MIN) { afl->stage_max = HAVOC_MIN; }
+  if (afl->stage_max < HAVOC_MIN && !afl->is_aflrun) {
+    afl->stage_max = HAVOC_MIN;
+  }
 
-  const u32 max_seed_size = MAX_FILE, saved_max = afl->stage_max;
+  const u32 max_seed_size = MAX_FILE;
+  u32 saved_max = afl->stage_max;
 
   orig_hit_cnt = afl->queued_items + afl->saved_crashes;
 
@@ -1924,13 +2031,17 @@ custom_mutator_stage:
 
   LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {
 
-    if (el->afl_custom_fuzz) {
-
       afl->current_custom_fuzz = el;
 
       if (el->afl_custom_fuzz_count) {
 
-        afl->stage_max = el->afl_custom_fuzz_count(el->data, out_buf, len);
+      // Some custom mutators do initialization in afl_custom_fuzz_count,
+      // so we always call it, but in aflrun mode we ignore its return value.
+      u32 tmp_max = el->afl_custom_fuzz_count(el->data, out_buf, len, saved_max);
+      if (afl->is_aflrun)
+        afl->stage_max = saved_max;
+      else
+        afl->stage_max = tmp_max;
 
       } else {
 
@@ -1963,7 +2074,7 @@ custom_mutator_stage:
               tid = rand_below(afl, afl->queued_items);
 
             } while (unlikely(tid == afl->current_entry ||
-
+                   afl->queue_buf[tid]->aflrun_extra ||
                               afl->queue_buf[tid]->len < 4));
 
             target = afl->queue_buf[tid];
@@ -1995,14 +2106,14 @@ custom_mutator_stage:
 
             }
 
-            if (!el->afl_custom_fuzz_count) {
+          if (afl->is_aflrun || !el->afl_custom_fuzz_count) {
 
               /* If we're finding new stuff, let's run for a bit longer, limits
                 permitting. */
 
               if (afl->queued_items != havoc_queued) {
 
-                if (perf_score <= afl->havoc_max_mult * 100) {
+              if (!afl->is_aflrun && perf_score <= afl->havoc_max_mult * 100) {
 
                   afl->stage_max *= 2;
                   perf_score *= 2;
@@ -2015,6 +2126,13 @@ custom_mutator_stage:
 
             }
 
+          if (aflrun_end_cycle()) {
+            afl->stage_max = afl->stage_cur;
+            saved_max = 0; num_execs = 0;
+            afl->force_cycle_end = 1;
+            orig_perf = perf_score = 0;
+          }
+
           }
 
           /* out_buf may have been changed by the call to custom_fuzz */
@@ -2024,8 +2142,6 @@ custom_mutator_stage:
 
       }
 
-    }
-
   });
 
   afl->current_custom_fuzz = NULL;
@@ -2059,6 +2175,33 @@ havoc_stage:
   /* The havoc stage mutation code is also invoked when splicing files; if the
      splice_cycle variable is set, generate different descriptions and such. */
 
+  if (afl->is_aflrun) {
+
+    if (!splice_cycle) {
+
+      afl->stage_name = "run-havoc";
+      afl->stage_short = "run-havoc";
+      if (afl->use_splicing && afl->ready_for_splicing_count > 1 &&
+          afl->queue_cur->len >= 4) {
+        afl->stage_max = AFLRUN_HAVOC_TIMES(num_execs, SPLICE_CYCLES);
+      }
+      else { // If splice is not enabled, assign all executions to havoc
+        afl->stage_max = num_execs;
+      }
+
+    }
+    else {
+
+      snprintf(afl->stage_name_buf, STAGE_BUF_SIZE, "run-splice %u", splice_cycle);
+      afl->stage_name = afl->stage_name_buf;
+      afl->stage_short = "run-splice";
+
+      afl->stage_max = AFLRUN_SPLICE_TIMES(num_execs, SPLICE_CYCLES);
+
+    }
+  }
+  else {
+
   if (!splice_cycle) {
 
     afl->stage_name = "havoc";
@@ -2076,8 +2219,11 @@ havoc_stage:
     afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100;
 
   }
+  }
 
-  if (afl->stage_max < HAVOC_MIN) { afl->stage_max = HAVOC_MIN; }
+  if (afl->stage_max < HAVOC_MIN && !afl->is_aflrun) {
+    afl->stage_max = HAVOC_MIN;
+  }
 
   temp_len = len;
 
@@ -2957,13 +3103,23 @@ havoc_stage:
 
     if (afl->queued_items != havoc_queued) {
 
-      if (perf_score <= afl->havoc_max_mult * 100) {
+      // Don't increase perf_score in aflrun mode
+      if (!afl->is_aflrun && perf_score <= afl->havoc_max_mult * 100) {
 
         afl->stage_max *= 2;
         perf_score *= 2;
 
       }
 
+      // If reset, we need to skip the current cycle;
+      // note that afl->is_aflrun is not updated after this, which is intended
+      if (aflrun_end_cycle()) {
+        afl->stage_max = afl->stage_cur;
+        num_execs = 0;
+        afl->force_cycle_end = 1;
+        orig_perf = perf_score = 0;
+      }
+
       havoc_queued = afl->queued_items;
 
     }
@@ -3001,9 +3157,10 @@ havoc_stage:
      splices them together at some offset, then relies on the havoc
      code to mutate that blob. */
 
+  retry_splicing_times = 0;
 retry_splicing:
 
-  if (afl->use_splicing && splice_cycle++ < SPLICE_CYCLES &&
+  if (afl->use_splicing && splice_cycle < SPLICE_CYCLES &&
       afl->ready_for_splicing_count > 1 && afl->queue_cur->len >= 4) {
 
     struct queue_entry *target;
@@ -3027,7 +3184,8 @@ retry_splicing:
 
       tid = rand_below(afl, afl->queued_items);
 
-    } while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4);
+    } while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4 ||
+        afl->queue_buf[tid]->aflrun_extra);
 
     /* Get the testcase */
     afl->splicing_with = tid;
@@ -3040,7 +3198,12 @@ retry_splicing:
 
     locate_diffs(in_buf, new_buf, MIN(len, (s64)target->len), &f_diff, &l_diff);
 
-    if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { goto retry_splicing; }
+    if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
+      // If retry happens for `SPLICE_CYCLES` times, we force splice
+      if (retry_splicing_times++ < SPLICE_CYCLES) { goto retry_splicing; }
+      else { f_diff = 0; l_diff = 1; } // Force splice by using fake diff value
+    }
+    ++splice_cycle;
 
     /* Split somewhere between the first and last differing byte. */
 
@@ -3059,7 +3222,7 @@ retry_splicing:
     if (unlikely(!out_buf)) { PFATAL("alloc"); }
     memcpy(out_buf, in_buf, len);
 
-    goto custom_mutator_stage;
+    goto havoc_stage; // We don't want to do custom mutation after splice
 
   }
 
@@ -3113,7 +3276,9 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
   u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
   u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt = 0, cur_ms_lv, prev_cksum,
       _prev_cksum;
-  u32 splice_cycle = 0, perf_score = 100, orig_perf, eff_cnt = 1;
+  u32 splice_cycle = 0, eff_cnt = 1, retry_splicing_times, num_execs = UINT_MAX;
+  double perf_score = 100, orig_perf;
+  s32 temp_len_puppet;
 
   u8 ret_val = 1, doing_det = 0;
 
@@ -3129,6 +3294,13 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
 
 #else
 
+  if (!afl->is_aflrun) {
+
+    // For extra seed from aflrun, we don't fuzz it in coverage mode
+    // unless it is a favored seed
+    if (!afl->queue_cur->favored && afl->queue_cur->aflrun_extra)
+      return 1;
+
   if (likely(afl->pending_favored)) {
 
     /* If we have any favored, non-fuzzed new arrivals in the queue,
@@ -3150,7 +3322,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
        The odds of skipping stuff are higher for already-fuzzed inputs and
        lower for never-fuzzed entries. */
 
-    if (afl->queue_cycle > 1 && !afl->queue_cur->fuzz_level) {
+      if (aflrun_queue_cycle() > 0 && !afl->queue_cur->fuzz_level) {
 
       if (likely(rand_below(afl, 100) < SKIP_NFAV_NEW_PROB)) { return 1; }
 
@@ -3162,15 +3334,11 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
 
   }
 
-#endif                                                     /* ^IGNORE_FINDS */
-
-  if (afl->not_on_tty) {
+  }
 
-    ACTF("Fuzzing test case #%u (%u total, %llu crashes saved)...",
-         afl->current_entry, afl->queued_items, afl->saved_crashes);
-    fflush(stdout);
+#endif                                                     /* ^IGNORE_FINDS */
 
-  }
+  log_when_no_tty(afl);
 
   /* Map the test case into memory. */
   orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur);
@@ -3196,7 +3364,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
       afl->queue_cur->exec_cksum = 0;
 
       res =
-          calibrate_case(afl, afl->queue_cur, in_buf, afl->queue_cycle - 1, 0);
+          calibrate_case(afl, afl->queue_cur, in_buf, aflrun_queue_cycle(), 0);
 
       if (res == FSRV_RUN_ERROR) {
 
@@ -3220,7 +3388,9 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
    ************/
 
   if (unlikely(!afl->non_instrumented_mode && !afl->queue_cur->trim_done &&
-               !afl->disable_trim)) {
+               !afl->disable_trim && (!afl->is_aflrun ||
+                 // In aflrun mode, we only trim when quant is large enough
+                 afl->queue_cur->quant_score > afl->trim_thr))) {
 
     u32 old_len = afl->queue_cur->len;
 
@@ -3257,7 +3427,25 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
    * PERFORMANCE SCORE *
    *********************/
 
-  if (likely(!afl->old_seed_selection))
+  if (likely(afl->is_aflrun)) {
+    orig_perf = perf_score = afl->queue_cur->quant_score;
+    // num_execs = 0.1s * (1 / exec_us) * quantum
+    num_execs = QUANTUM_TIME * perf_score / afl->queue_cur->exec_us;
+
+    // If num_execs is smaller than the minimum threshold:
+    if (num_execs < afl->min_num_exec) {
+
+      // With probability num_execs/afl->min_num_exec, we execute min_num_exec times.
+      if (rand_below(afl, afl->min_num_exec) < num_execs)
+        num_execs = afl->min_num_exec;
+      else // otherwise, we skip
+        goto abandon_entry;
+      // In this way, the expected number of executions is still num_execs.
+
+    }
+
+  }
+  else if (likely(!afl->old_seed_selection))
     orig_perf = perf_score = afl->queue_cur->perf_score;
   else
     orig_perf = perf_score = calculate_score(afl, afl->queue_cur);
@@ -4612,32 +4800,6 @@ skip_extras:
 havoc_stage:
 pacemaker_fuzzing:
 
-  afl->stage_cur_byte = -1;
-
-  /* The havoc stage mutation code is also invoked when splicing files; if the
-     splice_cycle variable is set, generate different descriptions and such. */
-
-  if (!splice_cycle) {
-
-    afl->stage_name = MOpt_globals.havoc_stagename;
-    afl->stage_short = MOpt_globals.havoc_stagenameshort;
-    afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-                     perf_score / afl->havoc_div / 100;
-
-  } else {
-
-    perf_score = orig_perf;
-
-    snprintf(afl->stage_name_buf, STAGE_BUF_SIZE,
-             MOpt_globals.splice_stageformat, splice_cycle);
-    afl->stage_name = afl->stage_name_buf;
-    afl->stage_short = MOpt_globals.splice_stagenameshort;
-    afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100;
-
-  }
-
-  s32 temp_len_puppet;
-
   // for (; afl->swarm_now < swarm_num; ++afl->swarm_now)
   {
 
@@ -4668,6 +4830,39 @@ pacemaker_fuzzing:
          the splice_cycle variable is set, generate different descriptions and
          such. */
 
+      if (afl->is_aflrun) {
+
+        if (!splice_cycle) {
+
+          snprintf(afl->stage_name_buf, STAGE_BUF_SIZE,
+            "run-%s", MOpt_globals.havoc_stagename);
+          afl->stage_name = afl->stage_name_buf;
+          afl->stage_short = "run-mopt-havoc";
+          if (afl->use_splicing && afl->ready_for_splicing_count > 1 &&
+              afl->queue_cur->len >= 4) {
+            afl->stage_max = AFLRUN_HAVOC_TIMES(num_execs, afl->SPLICE_CYCLES_puppet);
+          }
+          else { // If splice is not enabled, assign all executions to havoc
+            afl->stage_max = num_execs;
+          }
+
+        }
+        else {
+
+          char tmp_buf[STAGE_BUF_SIZE];
+          snprintf(tmp_buf, STAGE_BUF_SIZE,
+                  MOpt_globals.splice_stageformat, splice_cycle);
+          snprintf(afl->stage_name_buf, STAGE_BUF_SIZE, "run-%s", tmp_buf);
+          afl->stage_name = afl->stage_name_buf;
+          afl->stage_short = "run-mopt-splice";
+
+          afl->stage_max = AFLRUN_SPLICE_TIMES(
+            num_execs, afl->SPLICE_CYCLES_puppet);
+
+        }
+      }
+      else {
+
       if (!splice_cycle) {
 
         afl->stage_name = MOpt_globals.havoc_stagename;
@@ -4686,7 +4881,11 @@ pacemaker_fuzzing:
 
       }
 
-      if (afl->stage_max < HAVOC_MIN) { afl->stage_max = HAVOC_MIN; }
+      }
+
+      if (afl->stage_max < HAVOC_MIN && !afl->is_aflrun) {
+        afl->stage_max = HAVOC_MIN;
+      }
 
       temp_len = len;
 
@@ -5364,13 +5563,21 @@ pacemaker_fuzzing:
 
         if (afl->queued_items != havoc_queued) {
 
-          if (perf_score <= afl->havoc_max_mult * 100) {
+          // Don't increase perf_score in aflrun mode
+          if (!afl->is_aflrun && perf_score <= afl->havoc_max_mult * 100) {
 
             afl->stage_max *= 2;
             perf_score *= 2;
 
           }
 
+          if (aflrun_end_cycle()) {
+            afl->stage_max = afl->stage_cur;
+            num_execs = 0;
+            afl->force_cycle_end = 1;
+            orig_perf = perf_score = 0;
+          }
+
           havoc_queued = afl->queued_items;
 
         }
@@ -5443,10 +5650,11 @@ pacemaker_fuzzing:
        * SPLICING *
        ************/
 
+    retry_splicing_times = 0;
     retry_splicing_puppet:
 
       if (afl->use_splicing &&
-          splice_cycle++ < (u32)afl->SPLICE_CYCLES_puppet &&
+          splice_cycle < (u32)afl->SPLICE_CYCLES_puppet &&
           afl->ready_for_splicing_count > 1 && afl->queue_cur->len >= 4) {
 
         struct queue_entry *target;
@@ -5471,7 +5679,8 @@ pacemaker_fuzzing:
 
           tid = rand_below(afl, afl->queued_items);
 
-        } while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4);
+        } while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4 ||
+                  afl->queue_buf[tid]->aflrun_extra);
 
         afl->splicing_with = tid;
         target = afl->queue_buf[tid];
@@ -5487,9 +5696,13 @@ pacemaker_fuzzing:
 
         if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
 
+          if (retry_splicing_times++ < SPLICE_CYCLES) {
           goto retry_splicing_puppet;
+          }
+          else { f_diff = 0; l_diff = 1; }
 
         }
+        ++splice_cycle;
 
         /* Split somewhere between the first and last differing byte. */
 
@@ -5819,6 +6032,8 @@ u8 fuzz_one(afl_state_t *afl) {
 #endif
 
   // if limit_time_sig == -1 then both are run after each other
+  // therefore we reduce the quant by half so the sum is unchanged
+  if (afl->limit_time_sig < 0) { afl->queue_cur->quant_score /= 2; }
 
   if (afl->limit_time_sig <= 0) { key_val_lv_1 = fuzz_one_original(afl); }
 
@@ -5835,6 +6050,8 @@ u8 fuzz_one(afl_state_t *afl) {
     } else if (afl->key_module == 2) {
 
       pso_updating(afl);
+      // We should not skip this seed after updating PSO.
+      key_val_lv_2 = pilot_fuzzing(afl);
 
     }
 
diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c
index b509b936..22f66aab 100644
--- a/src/afl-fuzz-python.c
+++ b/src/afl-fuzz-python.c
@@ -416,6 +416,7 @@ struct custom_mutator *load_custom_mutator_py(afl_state_t *afl,
   if (py_functions[PY_FUNC_DEINIT]) { mutator->afl_custom_deinit = deinit_py; }
 
   if (py_functions[PY_FUNC_FUZZ]) { mutator->afl_custom_fuzz = fuzz_py; }
+  else FATAL("'afl_custom_fuzz' is required");
 
   if (py_functions[PY_FUNC_DESCRIBE]) {
 
@@ -437,7 +438,7 @@ struct custom_mutator *load_custom_mutator_py(afl_state_t *afl,
 
   if (py_functions[PY_FUNC_FUZZ_COUNT]) {
 
-    mutator->afl_custom_fuzz_count = fuzz_count_py;
+    WARNF("AFLRun will ignore afl_custom_fuzz_count");
 
   }
 
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index e3faa392..0c5dfee1 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -23,6 +23,7 @@
  */
 
 #include "afl-fuzz.h"
+#include "aflrun.h"
 #include <limits.h>
 #include <ctype.h>
 #include <math.h>
@@ -102,7 +103,7 @@ void create_alias_table(afl_state_t *afl) {
       struct queue_entry *q = afl->queue_buf[i];
 
       // disabled entries might have timings and bitmap values
-      if (likely(!q->disabled)) {
+      if (likely(!q->disabled && (!q->aflrun_extra || q->favored))) {
 
         avg_exec_us += q->exec_us;
         avg_bitmap_size += log(q->bitmap_size);
@@ -121,7 +122,7 @@ void create_alias_table(afl_state_t *afl) {
 
       struct queue_entry *q = afl->queue_buf[i];
 
-      if (likely(!q->disabled)) {
+      if (likely(!q->disabled && (!q->aflrun_extra || q->favored))) {
 
         q->weight =
             compute_weight(afl, q, avg_exec_us, avg_bitmap_size, avg_top_size);
@@ -145,7 +146,8 @@ void create_alias_table(afl_state_t *afl) {
 
       struct queue_entry *q = afl->queue_buf[i];
 
-      if (likely(!q->disabled)) { q->perf_score = calculate_score(afl, q); }
+      if (likely(!q->disabled && (!q->aflrun_extra || q->favored)))
+        { q->perf_score = calculate_score(afl, q); }
 
       sum += q->perf_score;
 
@@ -597,6 +599,53 @@ void destroy_queue(afl_state_t *afl) {
 
 }
 
+u64 get_seed_fav_factor(void* afl, u32 seed) {
+
+  struct queue_entry *q = ((afl_state_t*)afl)->queue_buf[seed];
+
+  if (q->id != seed)
+    FATAL("ID Error");
+
+  return q->exec_us * q->len;
+}
+
+double get_seed_perf_score(void* afl, u32 seed) {
+
+  struct queue_entry *q = ((afl_state_t*)afl)->queue_buf[seed];
+
+  if (q->id != seed)
+    FATAL("ID Error");
+
+  return q->perf_score;
+}
+
+bool get_seed_div_favored(void* afl, u32 seed) {
+
+  struct queue_entry *q = ((afl_state_t*)afl)->queue_buf[seed];
+
+  if (q->id != seed)
+    FATAL("ID Error");
+
+  return q->div_favored;
+
+}
+
+u8 get_seed_cov_favored(void* afl, u32 seed) {
+
+  struct queue_entry *q = ((afl_state_t*)afl)->queue_buf[seed];
+
+  if (q->id != seed)
+    FATAL("ID Error");
+
+  if (q->favored)
+    return 2; // 2 for favored seed
+  if (q->aflrun_extra)
+    return 0; // 0 for extra seed
+  else
+    return 1; // 1 for non-favored seed
+
+}
+
 /* When we bump into a new path, we call this to see if the path appears
    more "favorable" than any of the existing ones. The purpose of the
    "favorables" is to have a minimal set of paths that trigger all the bits
@@ -608,7 +657,8 @@ void destroy_queue(afl_state_t *afl) {
    previous contender, or if the contender has a more favorable speed x size
    factor. */
 
-void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
+static void update_bitmap_score_original(u8 primary,
+  afl_state_t *afl, struct queue_entry *q, struct queue_entry **top_rated) {
 
   u32 i;
   u64 fav_factor;
@@ -637,25 +687,25 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
 
     if (afl->fsrv.trace_bits[i]) {
 
-      if (afl->top_rated[i]) {
+      if (top_rated[i]) {
 
         /* Faster-executing or smaller test cases are favored. */
         u64 top_rated_fav_factor;
         u64 top_rated_fuzz_p2;
         if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE))
           top_rated_fuzz_p2 =
-              next_pow2(afl->n_fuzz[afl->top_rated[i]->n_fuzz_entry]);
+              next_pow2(afl->n_fuzz[top_rated[i]->n_fuzz_entry]);
         else
-          top_rated_fuzz_p2 = afl->top_rated[i]->fuzz_level;
+          top_rated_fuzz_p2 = top_rated[i]->fuzz_level;
 
         if (unlikely(afl->schedule >= RARE) || unlikely(afl->fixed_seed)) {
 
-          top_rated_fav_factor = afl->top_rated[i]->len << 2;
+          top_rated_fav_factor = top_rated[i]->len << 2;
 
         } else {
 
           top_rated_fav_factor =
-              afl->top_rated[i]->exec_us * afl->top_rated[i]->len;
+              top_rated[i]->exec_us * top_rated[i]->len;
 
         }
 
@@ -671,12 +721,12 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
 
         if (unlikely(afl->schedule >= RARE) || unlikely(afl->fixed_seed)) {
 
-          if (fav_factor > afl->top_rated[i]->len << 2) { continue; }
+          if (fav_factor > top_rated[i]->len << 2) { continue; }
 
         } else {
 
           if (fav_factor >
-              afl->top_rated[i]->exec_us * afl->top_rated[i]->len) {
+              top_rated[i]->exec_us * top_rated[i]->len) {
 
             continue;
 
@@ -687,10 +737,10 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
         /* Looks like we're going to win. Decrease ref count for the
            previous winner, discard its afl->fsrv.trace_bits[] if necessary. */
 
-        if (!--afl->top_rated[i]->tc_ref) {
+        if (!--top_rated[i]->tc_ref) {
 
-          ck_free(afl->top_rated[i]->trace_mini);
-          afl->top_rated[i]->trace_mini = 0;
+          ck_free(top_rated[i]->trace_mini);
+          top_rated[i]->trace_mini = 0;
 
         }
 
@@ -698,7 +748,7 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
 
       /* Insert ourselves as the new winner. */
 
-      afl->top_rated[i] = q;
+      top_rated[i] = q;
       ++q->tc_ref;
 
       if (!q->trace_mini) {
@@ -709,7 +759,10 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
 
       }
 
+      if (primary)
       afl->score_changed = 1;
+      else
+        afl->div_score_changed = 1;
 
     }
 
@@ -717,6 +770,18 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
 
 }
 
+void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
+
+  size_t n = aflrun_max_clusters(q->id);
+  afl->tops = afl_realloc((void **)&afl->tops, sizeof(struct queue_entry**) * n);
+  afl->tops[0] = afl->top_rated;
+  size_t num_tops = aflrun_get_seed_tops(q->id, (void***)(afl->tops + 1)) + 1;
+
+  for (size_t i = 0; i < num_tops; ++i)
+    update_bitmap_score_original(i == 0, afl, q, afl->tops[i]);
+
+}
+
 /* The second part of the mechanism discussed above is a routine that
    goes over afl->top_rated[] entries, and then sequentially grabs winners for
    previously-unseen bytes (temp_v) and marks them as favored, at least
@@ -790,6 +855,61 @@ void cull_queue(afl_state_t *afl) {
 
 }
 
+static void clear_div_favored(afl_state_t *afl) {
+
+  for (u32 i = 0; i < afl->queued_items; i++) {
+
+    afl->queue_buf[i]->div_favored = 0;
+
+  }
+
+}
+
+static void cull_queue_div(afl_state_t *afl, u8 mode) {
+
+  if (likely(!afl->div_score_changed || afl->non_instrumented_mode)) { return; }
+
+  u32 len = (afl->fsrv.map_size >> 3);
+  u8 *temp_v = afl->map_tmp_buf;
+
+  afl->div_score_changed = 0;
+
+  clear_div_favored(afl);
+
+  size_t n = aflrun_get_num_clusters();
+  afl->tops = afl_realloc((void**)&afl->tops, sizeof(struct queue_entry**) * n);
+  n = aflrun_get_all_tops((void***)afl->tops, mode);
+
+  for (size_t k = 0; k < n; ++k) {
+
+    struct queue_entry** top_rated = afl->tops[k];
+    memset(temp_v, 255, len);
+
+    for (u32 i = 0; i < afl->fsrv.map_size; ++i) {
+
+      if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {
+
+        u32 j = len;
+        while (j--) {
+
+          if (top_rated[i]->trace_mini[j]) {
+
+            temp_v[j] &= ~top_rated[i]->trace_mini[j];
+
+          }
+
+        }
+
+        top_rated[i]->div_favored = 1;
+
+      }
+
+    }
+
+  }
+
+}
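
The per-cluster selection in cull_queue_div above is the classic AFL greedy cover: temp_v starts with every bit set (edge not yet accounted for), and each chosen winner clears the bits of every edge in its trace_mini. A minimal standalone sketch of that loop, using a simplified seed struct and toy sizes that are not AFL++ types:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MAP_SIZE  64                    /* toy edge map: 64 edges           */
#define MINI_SIZE (MAP_SIZE >> 3)       /* 1 bit per edge -> 8 bytes        */

struct seed { uint8_t mini[MINI_SIZE]; int favored; };

/* Greedy cover: walk the edge map; whenever an edge is still uncovered and
   has a winner, mark that winner favored and clear every edge it covers. */
static void cull(struct seed *top_rated[MAP_SIZE]) {

  uint8_t temp_v[MINI_SIZE];
  memset(temp_v, 255, MINI_SIZE);       /* all edges still "pending"        */

  for (int i = 0; i < MAP_SIZE; i++) {

    if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {

      for (int j = 0; j < MINI_SIZE; j++)
        temp_v[j] &= ~top_rated[i]->mini[j];

      top_rated[i]->favored = 1;

    }

  }

}

int main(void) {
  struct seed a = {{0}}, b = {{0}};
  struct seed *top[MAP_SIZE] = {0};
  a.mini[0] = 0x0F; b.mini[0] = 0xF0;   /* a covers edges 0-3, b covers 4-7 */
  for (int i = 0; i < 4; i++) top[i] = &a;
  for (int i = 4; i < 8; i++) top[i] = &b;
  cull(top);
  printf("a favored=%d b favored=%d\n", a.favored, b.favored);   /* both 1  */
  return 0;
}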
+
 /* Calculate case desirability score to adjust the length of havoc fuzzing.
    A helper function for fuzz_one(). Maybe some of these constants should
    go into config.h. */
@@ -883,6 +1003,9 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
      in the game we learned about this path. Latecomers are allowed to run
      for a bit longer until they catch up with the rest. */
 
+  // We only want to change `handicap` in coverage mode
+  if (!afl->is_aflrun) {
+
   if (q->handicap >= 4) {
 
     perf_score *= 4;
@@ -895,6 +1018,8 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
 
   }
 
+  }
+
   /* Final adjustment based on input depth, under the assumption that fuzzing
      deeper test cases is more likely to reveal stuff that can't be
      discovered with traditional fuzzers. */
@@ -1359,3 +1484,86 @@ inline void queue_testcase_store_mem(afl_state_t *afl, struct queue_entry *q,
 
 }
 
+/* Select seeds for aflrun to fuzz in this cycle */
+
+u32 select_aflrun_seeds(afl_state_t *afl) {
+
+  afl->aflrun_seeds = afl_realloc(
+    (void**)&afl->aflrun_seeds, afl->queued_items * sizeof(u32));
+
+  u32 idx = 0;
+  for (u32 i = 0; i < afl->queued_items; i++) {
+
+    struct queue_entry *q = afl->queue_buf[i];
+
+    if (!q->disabled) {
+      afl->aflrun_seeds[idx++] = q->id;
+
+      // This is needed for energy allocation among identical seeds
+      q->perf_score = calculate_score(afl, q);
+    }
+
+    q->quant_score = 0;
+
+  }
+
+  if (aflrun_is_uni()) {
+
+    // For unite mode, we select favored seeds for each of 3 modes
+    for (u8 mode = 1; mode <= 3; ++mode) {
+      cull_queue_div(afl, mode);
+      aflrun_set_favored_seeds(afl->aflrun_seeds, idx, mode);
+    }
+
+  } else {
+
+    cull_queue_div(afl, aflrun_get_mode());
+
+  }
+
+  return aflrun_cull_queue(afl->aflrun_seeds, idx);
+
+}
+
+int cmp_quant_score(const void* a, const void* b) {
+
+  const struct queue_entry* entry_a = *(const struct queue_entry* const*)a;
+  const struct queue_entry* entry_b = *(const struct queue_entry* const*)b;
+
+  // We want to sort in descending order
+  if (entry_b->quant_score > entry_a->quant_score)
+    return 1;
+  else if (entry_b->quant_score == entry_a->quant_score)
+    return 0;
+  else
+    return -1;
+
+}
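
As a standalone check of the descending order produced by cmp_quant_score above, the same comparator shape can be exercised with a toy struct; the `item` type and the sample values are illustrative, not AFL++ internals:

#include <stdio.h>
#include <stdlib.h>

struct item { double quant_score; };

/* qsort hands us pointers to the array elements, which here are themselves
   pointers to struct item, hence the double indirection. */
static int cmp_desc(const void *a, const void *b) {
  const struct item *ia = *(const struct item *const *)a;
  const struct item *ib = *(const struct item *const *)b;
  if (ib->quant_score > ia->quant_score) return 1;
  if (ib->quant_score == ia->quant_score) return 0;
  return -1;
}

int main(void) {
  struct item x = {0.5}, y = {2.0}, z = {1.25};
  struct item *q[] = {&x, &y, &z};
  qsort(q, 3, sizeof(struct item *), cmp_desc);
  for (int i = 0; i < 3; i++) printf("%.2f ", q[i]->quant_score);
  printf("\n");                          /* 2.00 1.25 0.50 */
  return 0;
}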
+
+void disable_aflrun_extra(void* afl_void, u32 seed) {
+
+  afl_state_t* afl = (afl_state_t *)afl_void;
+  struct queue_entry* s = afl->queue_buf[seed];
+
+  if (s->aflrun_extra) {
+
+    if (s->disabled)
+      FATAL("Disabling same seed twice");
+
+    // If this is not correctly decremented, the coverage fuzzer cannot work properly.
+    if (!s->was_fuzzed) {
+
+      --afl->pending_not_fuzzed;
+      if (s->favored) { --afl->pending_favored; }
+
+    }
+
+    if (s->favored) { --afl->queued_favored; }
+
+    s->favored = false;
+    s->disabled = true;
+    ++afl->queued_extra_disabled;
+
+  }
+
+}
\ No newline at end of file
diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c
index 7dd83150..fbbe4169 100644
--- a/src/afl-fuzz-run.c
+++ b/src/afl-fuzz-run.c
@@ -25,6 +25,7 @@
  */
 
 #include "afl-fuzz.h"
+#include "aflrun.h"
 #include <sys/time.h>
 #include <signal.h>
 #include <limits.h>
@@ -58,8 +59,24 @@ fuzz_run_target(afl_state_t *afl, afl_forkserver_t *fsrv, u32 timeout) {
 
 #endif
 
+  u64 t1 = get_cur_time_us();
+  u64 fuzz_time_cur = t1 - afl->last_exec_time;
+
   fsrv_run_result_t res = afl_fsrv_run_target(fsrv, timeout, &afl->stop_soon);
 
+  u64 t2 = get_cur_time_us();
+  u64 exec_time_cur = t2 - t1;
+  afl->last_exec_time = t2;
+
+  afl->exec_time += exec_time_cur;
+  afl->fuzz_time += fuzz_time_cur;
+  if (unlikely(afl->exec_time_short + afl->fuzz_time_short >
+      5 * 1000 * 1000)) {
+    afl->exec_time_short = afl->fuzz_time_short = 0;
+  }
+  afl->exec_time_short += exec_time_cur;
+  afl->fuzz_time_short += fuzz_time_cur;
+
 #ifdef PROFILING
   clock_gettime(CLOCK_REALTIME, &spec);
   time_spent_start = (spec.tv_sec * 1000000000) + spec.tv_nsec;
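
The sums added in the hunk above feed the `exec ratio` line in show_stats_normal further down: exec_time accumulates time spent inside the target, fuzz_time the time the fuzzer spends between executions, and the *_short pair does the same over a window that is reset once it grows past five seconds. With assumed numbers, if a window has accumulated exec_time_short = 3.2 s and fuzz_time_short = 1.8 s, the short-term ratio displayed is 3.2 / (3.2 + 1.8) = 64%.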
@@ -199,7 +216,7 @@ write_to_testcase(afl_state_t *afl, void **mem, u32 len, u32 fix) {
   char fn[PATH_MAX];
   snprintf(fn, PATH_MAX, "%s/mutations/%09u:%s", afl->out_dir,
            afl->document_counter++,
-           describe_op(afl, 0, NAME_MAX - strlen("000000000:")));
+           describe_op(afl, 0, 0, NAME_MAX - strlen("000000000:")));
 
   if ((doc_fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, DEFAULT_PERMISSION)) >=
       0) {
@@ -373,6 +390,46 @@ static void write_with_gap(afl_state_t *afl, u8 *mem, u32 len, u32 skip_at,
 
 }
 
+static void aflrun_new_bits(afl_state_t* afl, struct queue_entry* q) {
+
+  // Skip the follow-up calibration runs of a seed we have already processed
+  if (q->tested == 2)
+    return;
+
+  if (q->tested == 0) {
+    // For imported case, we need to get its path for first calibration
+    aflrun_has_new_path(afl->fsrv.trace_freachables,
+      afl->fsrv.trace_reachables, afl->fsrv.trace_ctx,
+      afl->fsrv.trace_virgin->trace, afl->fsrv.trace_virgin->num,
+      0, q->id, NULL, NULL, 0); // For imported case, we don't do seed isolation.
+    q->path_cksum = hash64(afl->fsrv.trace_ctx,
+      MAP_TR_SIZE(afl->fsrv.num_reachables), HASH_CONST);
+    // xargs -I{} cp -u {} /tmp/in/
+    if (getenv("AFLRUN_TRACE_CORPUS")) {
+      u8* fn; int r;
+      aflrun_write_to_log(afl);
+
+      fn = alloc_printf("cp '%s/aflrun_fringe.txt' '%s/aflrun_fringe_%u.txt'",
+        afl->out_dir, afl->out_dir, q->id);
+      r = system(fn);
+      if (r) FATAL("cp failed");
+      ck_free(fn);
+      fn = alloc_printf("cp %s/aflrun_pro_fringe.txt %s/aflrun_pro_fringe_%u.txt",
+        afl->out_dir, afl->out_dir, q->id);
+      r = system(fn);
+      if (r) FATAL("cp failed");
+      ck_free(fn);
+      fn = alloc_printf("cp %s/aflrun_targets.txt %s/aflrun_targets_%u.txt",
+        afl->out_dir, afl->out_dir, q->id);
+      r = system(fn);
+      if (r) FATAL("cp failed");
+      ck_free(fn);
+    }
+  }
+
+  q->tested = 2;
+}
+
 /* Calibrate a new test case. This is done when processing the input directory
    to warn about flaky or otherwise problematic test cases early on; and when
    new paths are discovered to detect variable behavior and so on. */
@@ -464,6 +521,18 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
     hnb = has_new_bits(afl, afl->virgin_bits);
     if (hnb > new_bits) { new_bits = hnb; }
 
+    // If `exec_cksum` is non-zero, `calibrate_case` must have been called from
+    // `save_if_interesting`, which means we can use `afl->virgins` directly.
+    // If it comes from timeout seeds, the seed is not supposed to be updated
+    // to aflrun, so there should be no diversity maps and `has_new_bits`
+    // should behave the same as `has_new_bits_mul`; if it comes from
+    // interesting seeds, `afl->virgin_bits` has already been updated from
+    // `afl->fsrv.trace_bits`, so `has_new_bits` here does not do anything.
+
+    // Otherwise, the call can come from either a calibration failure or a
+    // seed import, which we handle after the first run of calibration,
+    // i.e. we make sure the seed gets updated to aflrun.
+
   }
 
   start_us = get_cur_time_us();
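
The comment in this hunk contrasts has_new_bits with has_new_bits_mul; the essential difference is that the latter checks one trace against several virgin maps at once, index 0 being the global map and the rest per-cluster maps. A simplified analogue of that idea follows; it is not the real AFL++ routine, which also distinguishes new edges from new hit counts:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Check one trace against several virgin maps; any byte seen for the first
   time in a map is cleared there. Returns 1 if anything was new anywhere. */
static int has_new_bits_multi(const uint8_t *trace, uint8_t **virgins,
                              size_t num_maps, size_t map_size) {
  int ret = 0;
  for (size_t m = 0; m < num_maps; m++)
    for (size_t i = 0; i < map_size; i++)
      if (trace[i] && (virgins[m][i] & trace[i])) {
        virgins[m][i] &= (uint8_t)~trace[i];
        ret = 1;
      }
  return ret;
}

int main(void) {
  uint8_t global[4], cluster[4], *maps[2] = {global, cluster};
  memset(global, 255, 4); memset(cluster, 255, 4);
  uint8_t trace[4] = {1, 0, 0, 4};
  printf("%d\n", has_new_bits_multi(trace, maps, 2, 4));   /* 1: new bytes   */
  printf("%d\n", has_new_bits_multi(trace, maps, 2, 4));   /* 0: seen before */
  return 0;
}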
@@ -480,17 +549,24 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
 
     (void)write_to_testcase(afl, (void **)&use_mem, q->len, 1);
 
+    if (q->tested == 0)
+      afl->fsrv.testing = 1;
     fault = fuzz_run_target(afl, &afl->fsrv, use_tmout);
+    afl->fsrv.testing = 0;
 
     /* afl->stop_soon is set by the handler for Ctrl+C. When it's pressed,
        we want to bail out quickly. */
 
-    if (afl->stop_soon || fault != afl->crash_mode) { goto abort_calibration; }
+    if (afl->stop_soon || fault != afl->crash_mode) {
+      aflrun_recover_virgin(afl);
+      goto abort_calibration;
+    }
 
     if (!afl->non_instrumented_mode && !afl->stage_cur &&
         !count_bytes(afl, afl->fsrv.trace_bits)) {
 
       fault = FSRV_RUN_NOINST;
+      aflrun_recover_virgin(afl);
       goto abort_calibration;
 
     }
@@ -501,10 +577,34 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
 
     classify_counts(&afl->fsrv);
     cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+
+    aflrun_new_bits(afl, q);
+
+    // At this point the new seed must have been updated to aflrun,
+    // so we can get its virgin maps from its target reaching information
+    if (!afl->stage_cur && first_run) {
+
+      size_t n = aflrun_max_clusters(q->id);
+      afl->virgins = afl_realloc((void **)&afl->virgins, sizeof(u8*) * n);
+      afl->clusters = afl_realloc((void **)&afl->clusters, sizeof(size_t) * n);
+      afl->virgins[0] = afl->virgin_bits;
+      afl->clusters[0] = 0;
+      afl->num_maps = aflrun_get_seed_virgins(
+        q->id, afl->virgins + 1, afl->clusters + 1) + 1;
+
+      /*/ DEBUG
+      printf("Clusters(calibration): ");
+      for (size_t i = 0; i < afl->num_maps; ++i) {
+        printf("%u ", afl->clusters[i]);
+      }
+      printf("\n");
+      // DEBUG*/
+    }
+
     if (q->exec_cksum != cksum) {
 
-      hnb = has_new_bits(afl, afl->virgin_bits);
-      if (hnb > new_bits) { new_bits = hnb; }
+      hnb = has_new_bits_mul(afl, afl->virgins, &afl->new_bits, afl->num_maps, 1);
+      if ((hnb & 3) > new_bits) { new_bits = hnb & 3; }
 
       if (q->exec_cksum) {
 
@@ -517,7 +617,8 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
 
             afl->var_bytes[i] = 1;
             // ignore the variable edge by setting it to fully discovered
-            afl->virgin_bits[i] = 0;
+            for (size_t j = 0; j < afl->num_maps; ++j)
+              afl->virgins[j][i] = 0;
 
           }
 
@@ -587,6 +688,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
   ++afl->total_bitmap_entries;
 
   update_bitmap_score(afl, q);
+  aflrun_update_fringe_score(q->id);
 
   /* If this case didn't result in new output from the instrumentation, tell
      parent. This is a non-critical problem, but something to warn the user
@@ -628,6 +730,18 @@ abort_calibration:
 
   if (!first_run) { show_stats(afl); }
 
+  // This call can commit bit sequences from both `save_if_interesting` and
+  // `calibrate_case`, even if calibration fails; the sequences can be
+  // empty here for `+path`-only seeds, so that case is handled inside it.
+  /*/ DEBUG
+  printf("Commit: ");
+  for (size_t i = 0; i < afl->num_maps; ++i) {
+    printf("%u ", afl->clusters[i]);
+  }
+  printf("\n");
+  // DEBUG*/
+  aflrun_commit_bit_seqs(afl->clusters, afl->num_maps);
+
   return fault;
 
 }
@@ -791,13 +905,18 @@ void sync_fuzzers(afl_state_t *afl) {
 
         (void)write_to_testcase(afl, (void **)&mem, st.st_size, 1);
 
+        afl->fsrv.testing = 1;
         fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);
+        afl->fsrv.testing = 0;
 
-        if (afl->stop_soon) { goto close_sync; }
+        if (afl->stop_soon) {
+          aflrun_recover_virgin(afl);
+          goto close_sync;
+        }
 
         afl->syncing_party = sd_ent->d_name;
         afl->queued_imported +=
-            save_if_interesting(afl, mem, st.st_size, fault);
+            save_if_interesting(afl, mem, st.st_size, fault, 0);
         afl->syncing_party = 0;
 
         munmap(mem, st.st_size);
@@ -918,7 +1037,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
     while (remove_pos < q->len) {
 
       u32 trim_avail = MIN(remove_len, q->len - remove_pos);
-      u64 cksum;
+      u64 cksum, pcksum;
 
       write_with_gap(afl, in_buf, q->len, remove_pos, trim_avail);
 
@@ -932,13 +1051,15 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
       ++afl->trim_execs;
       classify_counts(&afl->fsrv);
       cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+      pcksum = hash64(
+        afl->fsrv.trace_ctx, MAP_TR_SIZE(afl->fsrv.num_reachables), HASH_CONST);
 
       /* If the deletion had no impact on the trace, make it permanent. This
          isn't perfect for variable-path inputs, but we're just making a
          best-effort pass, so it's not a big deal if we end up with false
          negatives every now and then. */
 
-      if (cksum == q->exec_cksum) {
+      if (cksum == q->exec_cksum && pcksum == q->path_cksum) {
 
         u32 move_tail = q->len - remove_pos - trim_avail;
 
@@ -1013,6 +1134,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
 
     memcpy(afl->fsrv.trace_bits, afl->clean_trace, afl->fsrv.map_size);
     update_bitmap_score(afl, q);
+    aflrun_update_fringe_score(q->id);
 
   }
 
@@ -1038,15 +1160,24 @@ common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u32 len) {
 
   }
 
+  ++afl->fuzzed_times;
+  ++afl->queue_cur->fuzzed_times;
+  afl->fsrv.testing = 1;
   fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);
+  afl->fsrv.testing = 0;
+  ++afl->total_perf_score;
 
-  if (afl->stop_soon) { return 1; }
+  if (afl->stop_soon) {
+    aflrun_recover_virgin(afl);
+    return 1;
+  }
 
   if (fault == FSRV_RUN_TMOUT) {
 
     if (afl->subseq_tmouts++ > TMOUT_LIMIT) {
 
       ++afl->cur_skipped_items;
+      aflrun_recover_virgin(afl);
       return 1;
 
     }
@@ -1064,13 +1195,14 @@ common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u32 len) {
 
     afl->skip_requested = 0;
     ++afl->cur_skipped_items;
+    aflrun_recover_virgin(afl);
     return 1;
 
   }
 
   /* This handles FAULT_ERROR for us: */
 
-  afl->queued_discovered += save_if_interesting(afl, out_buf, len, fault);
+  afl->queued_discovered += save_if_interesting(afl, out_buf, len, fault, 1);
 
   if (!(afl->stage_cur % afl->stats_update_freq) ||
       afl->stage_cur + 1 == afl->stage_max) {
@@ -1083,3 +1215,13 @@ common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u32 len) {
 
 }
 
+void aflrun_recover_virgin(afl_state_t* afl) {
+  u8* virgin_ctx = afl->virgin_ctx;
+  const ctx_t* new_paths = afl->fsrv.trace_virgin->trace;
+  size_t len = afl->fsrv.trace_virgin->num;
+  // Reset the virgin bits
+  for (size_t i = 0; i < len; ++i) {
+    size_t idx = CTX_IDX(new_paths[i].block, new_paths[i].call_ctx);
+    virgin_ctx[idx / 8] |= 1 << (idx % 8);
+  }
+}
\ No newline at end of file
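
aflrun_recover_virgin above uses the usual one-bit-per-entry map idiom: CTX_IDX flattens a (block, call_ctx) pair into a bit index, and setting that bit marks the entry as virgin again after a discarded execution. A tiny standalone sketch of the set/test idiom, with a plain index standing in for the aflrun-internal CTX_IDX:

#include <stdint.h>
#include <stdio.h>

static void bit_set(uint8_t *map, size_t idx)  { map[idx / 8] |= 1u << (idx % 8); }
static int  bit_test(const uint8_t *map, size_t idx) {
  return (map[idx / 8] >> (idx % 8)) & 1u;
}

int main(void) {
  uint8_t virgin[4] = {0};
  bit_set(virgin, 10);                       /* restore entry 10 to "virgin" */
  printf("%d %d\n", bit_test(virgin, 10), bit_test(virgin, 11));   /* 1 0 */
  return 0;
}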
diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
index bfd30845..0b81d8a9 100644
--- a/src/afl-fuzz-stats.c
+++ b/src/afl-fuzz-stats.c
@@ -24,6 +24,7 @@
  */
 
 #include "afl-fuzz.h"
+#include "aflrun.h"
 #include "envs.h"
 #include <limits.h>
 
@@ -482,6 +483,56 @@ void show_stats(afl_state_t *afl) {
 
 }
 
+void aflrun_write_to_log(afl_state_t* afl) {
+
+  u8* fn = alloc_printf("%s/aflrun_log.txt", afl->out_dir);
+
+  FILE* fd = fopen(fn, "w");
+
+  ck_free(fn);
+
+  if (fd == NULL) {
+    WARNF("Error in opening log file");
+    return;
+  }
+
+  fprintf(fd, "Reached Blocks\n");
+
+  for (reach_t i = afl->fsrv.num_targets; i < afl->fsrv.num_reachables; ++i) {
+    if (!IS_SET(afl->virgin_reachables, i))
+      fprintf(fd, "%s\n", afl->reachable_names[i]);
+  }
+
+  fprintf(fd, "\nSeed Factor and Quant\n");
+
+  double sum = 0;
+  for (u32 i = 0; i < afl->queued_items; i++) {
+
+    struct queue_entry *q = afl->queue_buf[i];
+    if (q->quant_score != 0) {
+      fprintf(fd, "%u: k=%lf q=%lf\n",
+        q->id, q->quant_score, aflrun_get_seed_quant(q->id));
+      sum += q->quant_score;
+    }
+
+  }
+  fprintf(fd, "sum = %lf", sum);
+
+  fclose(fd);
+
+  fn = alloc_printf("%s/aflrun_fringe.txt", afl->out_dir);
+  aflrun_log_fringes(fn, 0);
+  ck_free(fn);
+
+  fn = alloc_printf("%s/aflrun_pro_fringe.txt", afl->out_dir);
+  aflrun_log_fringes(fn, 1);
+  ck_free(fn);
+
+  fn = alloc_printf("%s/aflrun_targets.txt", afl->out_dir);
+  aflrun_log_fringes(fn, 2);
+  ck_free(fn);
+}
+
 void show_stats_normal(afl_state_t *afl) {
 
   double t_byte_ratio, stab_ratio;
@@ -683,6 +734,14 @@ void show_stats_normal(afl_state_t *afl) {
 
   }
 
+  static u64 last_ms_log = 0;
+
+  if (get_cur_time() - last_ms_log > afl->log_check_interval) {
+    aflrun_write_to_log(afl);
+    aflrun_check_state();
+    last_ms_log = get_cur_time();
+  }
+
   /* If we're not on TTY, bail out. */
 
   if (afl->not_on_tty) { return; }
@@ -890,9 +949,21 @@ void show_stats_normal(afl_state_t *afl) {
      together, but then cram them into a fixed-width field - so we need to
      put them in a temporary buffer first. */
 
+  if (likely(afl->is_aflrun)) {
+    sprintf(tmp, "%s%s%u (%0.01f%%)", u_stringify_int(IB(0), afl->aflrun_idx),
+            afl->queue_cur->favored ? "." : "*", afl->queue_cur->fuzz_level,
+            ((double)afl->aflrun_idx * 100) / afl->queued_aflrun);
+  } else if (likely(!afl->old_seed_selection)) {
+    sprintf(tmp, "%s%s%u (%0.01f%%)",
+            u_stringify_int(IB(0), afl->runs_in_current_cycle),
+            afl->queue_cur->favored ? "." : "*", afl->queue_cur->fuzz_level,
+            ((double)afl->runs_in_current_cycle * 100) /
+              (afl->queued_items - afl->queued_extra));
+  } else {
   sprintf(tmp, "%s%s%u (%0.01f%%)", u_stringify_int(IB(0), afl->current_entry),
           afl->queue_cur->favored ? "." : "*", afl->queue_cur->fuzz_level,
           ((double)afl->current_entry * 100) / afl->queued_items);
+  }
 
   SAYF(bV bSTOP "  now processing : " cRST "%-18s " bSTG bV bSTOP, tmp);
 
@@ -919,8 +990,10 @@ void show_stats_normal(afl_state_t *afl) {
        " stage progress " bSTG bH10 bH5 bH2 bH2 bH2 bX bH bSTOP cCYA
        " findings in depth " bSTG bH10 bH5 bH2                  bVL "\n");
 
-  sprintf(tmp, "%s (%0.02f%%)", u_stringify_int(IB(0), afl->queued_favored),
-          ((double)afl->queued_favored) * 100 / afl->queued_items);
+  u32 favored = afl->is_aflrun ? afl->aflrun_favored : afl->queued_favored;
+  sprintf(tmp, "%s (%0.02f%%) %u", u_stringify_int(IB(0), favored),
+          ((double)favored) * 100 / afl->queued_items,
+          afl->queued_extra_disabled);
 
   /* Yeah... it's still going on... halp? */
 
@@ -953,14 +1026,16 @@ void show_stats_normal(afl_state_t *afl) {
 
   if (afl->crash_mode) {
 
-    SAYF(bV bSTOP " total execs : " cRST "%-22s " bSTG bV bSTOP
+    SAYF(bV bSTOP " total execs : " cRST "%-10s/%-11s " bSTG bV bSTOP
                   "   new crashes : %s%-20s" bSTG         bV "\n",
+         u_stringify_int(IB(1), afl->total_perf_score),
          u_stringify_int(IB(0), afl->fsrv.total_execs), crash_color, tmp);
 
   } else {
 
-    SAYF(bV bSTOP " total execs : " cRST "%-22s " bSTG bV bSTOP
+    SAYF(bV bSTOP " total execs : " cRST "%-10s/%-11s " bSTG bV bSTOP
                   " total crashes : %s%-20s" bSTG         bV "\n",
+         u_stringify_int(IB(1), afl->total_perf_score),
          u_stringify_int(IB(0), afl->fsrv.total_execs), crash_color, tmp);
 
   }
@@ -992,13 +1067,29 @@ void show_stats_normal(afl_state_t *afl) {
   SAYF(bVR bH cCYA bSTOP " fuzzing strategy yields " bSTG bH10 bH2 bHT bH10 bH2
            bH bHB bH bSTOP cCYA " item geometry " bSTG bH5 bH2 bVL "\n");
 
-  if (unlikely(afl->custom_only)) {
-
-    strcpy(tmp, "disabled (custom-mutator-only mode)");
+  if (likely(afl->skip_deterministic)) {
 
-  } else if (likely(afl->skip_deterministic)) {
+    static const char mode_c[] = {'N', 'P', 'T', 'U'};
+    u8 mode = aflrun_get_mode();
+    int cycle_count; u32 cov_quant; u64 whole_count = aflrun_queue_cycle();
+    size_t div_num_invalid, div_num_fringes;
+    aflrun_get_state(
+      &cycle_count, &cov_quant, &div_num_invalid, &div_num_fringes);
+    if (mode) {
+      sprintf(tmp, "id=%u %.2lf,%.4lf %c,%d,%llu %lu/%lu", afl->queue_cur->id,
+        (double)QUANTUM_TIME / afl->queue_cur->exec_us,
+        afl->queue_cur->quant_score, mode_c[mode-1], cycle_count, whole_count,
+        div_num_invalid, div_num_fringes);
+    } else {
+      sprintf(tmp, "id=%u %.2lf,%.4lf C,%d,%llu,%u %lu/%lu", afl->queue_cur->id,
+        (double)QUANTUM_TIME / afl->queue_cur->exec_us,
+        afl->queue_cur->quant_score, cycle_count, whole_count, cov_quant,
+        div_num_invalid, div_num_fringes);
+    }
 
-    strcpy(tmp, "disabled (default, enable with -D)");
+    SAYF(bV bSTOP "  cur states : " cRST "%-36s " bSTG bV bSTOP
+                  "    levels : " cRST "%-10s" bSTG       bV "\n",
+         tmp, u_stringify_int(IB(0), afl->max_depth));
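
As a reading aid for the `cur states` line built above, an illustrative value such as `id=42 200.00,1.2500 P,3,17 5/128` (numbers made up) breaks down as: the current seed id, QUANTUM_TIME / exec_us (roughly how many executions of this seed fit into one quantum), the quant_score assigned for this cycle, the mode character from {N, P, T, U}, cycle_count, the whole-queue cycle count, and the div_num_invalid/div_num_fringes pair; the coverage-mode variant replaces the mode character with `C` and additionally appends cov_quant.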
 
   } else {
 
@@ -1010,13 +1101,32 @@ void show_stats_normal(afl_state_t *afl) {
             u_stringify_int(IB(4), afl->stage_finds[STAGE_FLIP4]),
             u_stringify_int(IB(5), afl->stage_cycles[STAGE_FLIP4]));
 
-  }
-
   SAYF(bV bSTOP "   bit flips : " cRST "%-36s " bSTG bV bSTOP
                 "    levels : " cRST "%-10s" bSTG       bV "\n",
        tmp, u_stringify_int(IB(0), afl->max_depth));
+  }
 
-  if (unlikely(!afl->skip_deterministic)) {
+  if (likely(afl->skip_deterministic)) {
+
+    u64 t = get_cur_time();
+    u64 last_reachable, last_fringe, last_pro_fringe, last_target;
+    u64 last_ctx_reachable, last_ctx_fringe, last_ctx_pro_fringe, last_ctx_target;
+    aflrun_get_time(&last_reachable, &last_fringe, &last_pro_fringe,
+      &last_target, &last_ctx_reachable, &last_ctx_fringe,
+      &last_ctx_pro_fringe, &last_ctx_target);
+    sprintf(tmp, "%s/%s/%s %s/%s/%s",
+      u_stringify_int(IB(0), (t - last_fringe) / 1000),
+      u_stringify_int(IB(1), (t - last_pro_fringe) / 1000),
+      u_stringify_int(IB(2), (t - last_target) / 1000),
+      u_stringify_int(IB(3), (t - last_ctx_fringe) / 1000),
+      u_stringify_int(IB(4), (t - last_ctx_pro_fringe) / 1000),
+      u_stringify_int(IB(5), (t - last_ctx_target) / 1000));
+
+    SAYF(bV bSTOP " last update : " cRST "%-36s " bSTG bV bSTOP
+                  "   pending : " cRST "%-10s" bSTG       bV "\n",
+         tmp, u_stringify_int(IB(0), afl->pending_not_fuzzed));
+
+  } else {
 
     sprintf(tmp, "%s/%s, %s/%s, %s/%s",
             u_stringify_int(IB(0), afl->stage_finds[STAGE_FLIP8]),
@@ -1026,13 +1136,30 @@ void show_stats_normal(afl_state_t *afl) {
             u_stringify_int(IB(4), afl->stage_finds[STAGE_FLIP32]),
             u_stringify_int(IB(5), afl->stage_cycles[STAGE_FLIP32]));
 
-  }
-
   SAYF(bV bSTOP "  byte flips : " cRST "%-36s " bSTG bV bSTOP
                 "   pending : " cRST "%-10s" bSTG       bV "\n",
        tmp, u_stringify_int(IB(0), afl->pending_not_fuzzed));
+  }
 
-  if (unlikely(!afl->skip_deterministic)) {
+  reach_t num_reached, num_freached;
+  reach_t num_reached_targets, num_freached_targets;
+  aflrun_get_reached(
+    &num_reached, &num_freached, &num_reached_targets, &num_freached_targets);
+
+  if (likely(afl->skip_deterministic)) {
+
+
+    sprintf(tmp, "%llu/%llu(%llu%%) %llu/%llu(%llu%%)",
+      (u64)num_reached, (u64)afl->fsrv.num_reachables,
+      100uLL * num_reached / afl->fsrv.num_reachables,
+      (u64)num_freached, (u64)afl->fsrv.num_freachables,
+      100uLL * num_freached / afl->fsrv.num_freachables);
+
+    SAYF(bV bSTOP "  reachables : " cRST "%-36s " bSTG bV bSTOP
+                  "  pend fav : " cRST "%-10s" bSTG       bV "\n",
+         tmp, u_stringify_int(IB(0), afl->pending_favored));
+
+  } else {
 
     sprintf(tmp, "%s/%s, %s/%s, %s/%s",
             u_stringify_int(IB(0), afl->stage_finds[STAGE_ARITH8]),
@@ -1042,13 +1169,25 @@ void show_stats_normal(afl_state_t *afl) {
             u_stringify_int(IB(4), afl->stage_finds[STAGE_ARITH32]),
             u_stringify_int(IB(5), afl->stage_cycles[STAGE_ARITH32]));
 
-  }
-
   SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP
                 "  pend fav : " cRST "%-10s" bSTG       bV "\n",
        tmp, u_stringify_int(IB(0), afl->pending_favored));
+  }
 
-  if (unlikely(!afl->skip_deterministic)) {
+
+  if (likely(afl->skip_deterministic)) {
+
+    sprintf(tmp, "%llu/%llu(%llu%%) %llu/%llu(%llu%%)",
+      (u64)num_reached_targets, (u64)afl->fsrv.num_targets,
+      100uLL * num_reached_targets / afl->fsrv.num_targets,
+      (u64)num_freached_targets, (u64)afl->fsrv.num_ftargets,
+      100uLL * num_freached_targets / afl->fsrv.num_ftargets);
+
+    SAYF(bV bSTOP "     targets : " cRST "%-36s " bSTG bV bSTOP
+                  " own finds : " cRST "%-10s" bSTG       bV "\n",
+         tmp, u_stringify_int(IB(0), afl->queued_discovered));
+
+  } else {
 
     sprintf(tmp, "%s/%s, %s/%s, %s/%s",
             u_stringify_int(IB(0), afl->stage_finds[STAGE_INTEREST8]),
@@ -1058,13 +1197,36 @@ void show_stats_normal(afl_state_t *afl) {
             u_stringify_int(IB(4), afl->stage_finds[STAGE_INTEREST32]),
             u_stringify_int(IB(5), afl->stage_cycles[STAGE_INTEREST32]));
 
-  }
-
   SAYF(bV bSTOP "  known ints : " cRST "%-36s " bSTG bV bSTOP
                 " own finds : " cRST "%-10s" bSTG       bV "\n",
        tmp, u_stringify_int(IB(0), afl->queued_discovered));
+  }
 
-  if (unlikely(!afl->skip_deterministic)) {
+
+  if (likely(afl->skip_deterministic)) {
+
+    sprintf(tmp,
+#ifdef AFLRUN_OVERHEAD
+      "%0.02f%%, "
+#endif // AFLRUN_OVERHEAD
+      "%0.02f%%, %0.02f%%, %0.02f%%",
+#ifdef AFLRUN_OVERHEAD
+      (double)afl->fsrv.trace_virgin->overhead * 100 /
+      (afl->exec_time + afl->fuzz_time),
+#endif // AFLRUN_OVERHEAD
+      (double)afl->exec_time * 100 /
+        (afl->exec_time + afl->fuzz_time),
+      (double)afl->exec_time_short * 100 /
+        (afl->exec_time_short + afl->fuzz_time_short),
+        afl->quantum_ratio * 100);
+
+    SAYF(bV bSTOP "  exec ratio : " cRST "%-36s " bSTG bV bSTOP
+                  "  imported : " cRST "%-10s" bSTG       bV "\n",
+         tmp,
+         afl->sync_id ? u_stringify_int(IB(0), afl->queued_imported)
+                      : (u8 *)"n/a");
+
+  } else {
 
     sprintf(tmp, "%s/%s, %s/%s, %s/%s, %s/%s",
             u_stringify_int(IB(0), afl->stage_finds[STAGE_EXTRAS_UO]),
@@ -1076,21 +1238,13 @@ void show_stats_normal(afl_state_t *afl) {
             u_stringify_int(IB(6), afl->stage_finds[STAGE_EXTRAS_AI]),
             u_stringify_int(IB(7), afl->stage_cycles[STAGE_EXTRAS_AI]));
 
-  } else if (unlikely(!afl->extras_cnt || afl->custom_only)) {
-
-    strcpy(tmp, "n/a");
-
-  } else {
-
-    strcpy(tmp, "havoc mode");
-
-  }
-
   SAYF(bV bSTOP "  dictionary : " cRST "%-36s " bSTG bV bSTOP
                 "  imported : " cRST "%-10s" bSTG       bV "\n",
        tmp,
        afl->sync_id ? u_stringify_int(IB(0), afl->queued_imported)
                     : (u8 *)"n/a");
+  }
+
 
   sprintf(tmp, "%s/%s, %s/%s",
           u_stringify_int(IB(0), afl->stage_finds[STAGE_HAVOC]),
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index 138df26c..7fc1b769 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -24,10 +24,12 @@
  */
 
 #include "afl-fuzz.h"
+#include "aflrun.h"
 #include "cmplog.h"
 #include "common.h"
 #include <limits.h>
 #include <stdlib.h>
+#include <math.h>
 #ifndef USEMMAP
   #include <sys/mman.h>
   #include <sys/stat.h>
@@ -48,7 +50,9 @@ extern u64 time_spent_working;
 static void at_exit() {
 
   s32   i, pid1 = 0, pid2 = 0, pgrp = -1;
-  char *list[4] = {SHM_ENV_VAR, SHM_FUZZ_ENV_VAR, CMPLOG_SHM_ENV_VAR, NULL};
+  char *list[] = {SHM_ENV_VAR, SHM_FUZZ_ENV_VAR, CMPLOG_SHM_ENV_VAR,
+    SHM_RBB_ENV_VAR, SHM_RF_ENV_VAR, SHM_TR_ENV_VAR, SHM_VIR_ENV_VAR,
+    SHM_VTR_ENV_VAR, NULL};
   char *ptr;
 
   ptr = getenv("__AFL_TARGET_PID2");
@@ -506,9 +510,11 @@ int main(int argc, char **argv_orig, char **envp) {
   u8 *extras_dir[4];
   u8  mem_limit_given = 0, exit_1 = 0, debug = 0,
      extras_dir_cnt = 0 /*, have_p = 0*/;
+  char* aflrun_d = NULL;
   char  *afl_preload;
   char  *frida_afl_preload = NULL;
   char **use_argv;
+  FILE* fd;
 
   struct timeval  tv;
   struct timezone tz;
@@ -532,6 +538,7 @@ int main(int argc, char **argv_orig, char **envp) {
   if (get_afl_env("AFL_DEBUG")) { debug = afl->debug = 1; }
 
   afl_state_init(afl, map_size);
+  afl->last_exec_time = get_cur_time_us();
   afl->debug = debug;
   afl_fsrv_init(&afl->fsrv);
   if (debug) { afl->fsrv.debug = true; }
@@ -549,11 +556,21 @@ int main(int argc, char **argv_orig, char **envp) {
 
   afl->shmem_testcase_mode = 1;  // we always try to perform shmem fuzzing
 
+#define AFLRUN_ARG_DIR 0x101
+#define AFLRUN_ARG_CONF 0x102
+
+  struct option long_options[] = {
+    {"dir", required_argument, NULL, AFLRUN_ARG_DIR},
+    {"config", required_argument, NULL, AFLRUN_ARG_CONF},
+    {0}
+  };
+  const char* config = "";
+
   while (
-      (opt = getopt(
+      (opt = getopt_long(
            argc, argv,
-           "+Ab:B:c:CdDe:E:hi:I:f:F:g:G:l:L:m:M:nNOo:p:RQs:S:t:T:UV:WXx:YZ")) >
-      0) {
+           "+Ab:B:c:CdDe:E:hi:I:f:F:g:G:l:L:m:M:nNOo:p:RQs:S:t:T:UV:WXx:YZ",
+           long_options, NULL)) > 0) {
 
     switch (opt) {
 
@@ -814,6 +831,7 @@ int main(int argc, char **argv_orig, char **envp) {
         }
 
         extras_dir[extras_dir_cnt++] = optarg;
+        setenv("AFL_DICT", optarg, 1); // TODO: let custom mutators parse cmdlines
         break;
 
       case 't': {                                                /* timeout */
@@ -1290,6 +1308,16 @@ int main(int argc, char **argv_orig, char **envp) {
 
         break;
 
+        case AFLRUN_ARG_CONF:
+          config = strdup(optarg);
+          break;
+
+        case AFLRUN_ARG_DIR: /* Argument to receive temp directory */
+          if (aflrun_d)
+            FATAL("Please only provide one --dir argument");
+          aflrun_d = strdup(optarg);
+          break;
+
       default:
         if (!show_help) { show_help = 1; }
 
@@ -1297,12 +1325,22 @@ int main(int argc, char **argv_orig, char **envp) {
 
   }
 
+#undef AFLRUN_ARG_DIR
+#undef AFLRUN_ARG_CONF
+
   if (optind == argc || !afl->in_dir || !afl->out_dir || show_help) {
 
     usage(argv[0], show_help);
 
   }
 
+  if (!afl->skip_deterministic)
+    FATAL("Please use -d for AFLRUN to skip deterministic fuzzing");
+
+  if (afl->old_seed_selection)
+    WARNF("Old seed selection is enabled; "
+      "this is supported but might cause bias. TODO: fix");
+
   if (unlikely(afl->afl_env.afl_persistent_record)) {
 
   #ifdef AFL_PERSISTENT_RECORD
@@ -1958,6 +1996,19 @@ int main(int argc, char **argv_orig, char **envp) {
 
   check_binary(afl, argv[optind]);
 
+  aflrun_d = aflrun_d != NULL ? aflrun_d : aflrun_find_temp(afl->temp_dir);
+  if (aflrun_d == NULL)
+    FATAL("Cannot find temp dir, please provide '--dir' manually");
+  OKF("Path to AFLRun directory: %s", aflrun_d);
+  aflrun_temp_dir_init(afl, aflrun_d);
+  free(aflrun_d);
+
+  aflrun_load_config(config,
+    &afl->check_at_begin, &afl->log_at_begin, &afl->log_check_interval,
+    &afl->trim_thr, &afl->queue_quant_thr, &afl->min_num_exec);
+  aflrun_init_fringes(
+    afl->fsrv.num_reachables, afl->fsrv.num_targets);
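
Putting the new options together with the -d requirement enforced earlier, an illustrative invocation looks like the following; every path is a placeholder, and --config can be omitted since it defaults to an empty string:

  afl-fuzz -i seeds -o out -d --dir /tmp/aflrun_tmp -- ./fuzz_target @@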
+
   #ifdef AFL_PERSISTENT_RECORD
   if (unlikely(afl->fsrv.persistent_record)) {
 
@@ -2023,6 +2074,14 @@ int main(int argc, char **argv_orig, char **envp) {
   afl->argv = use_argv;
   afl->fsrv.trace_bits =
       afl_shm_init(&afl->shm, afl->fsrv.map_size, afl->non_instrumented_mode);
+  aflrun_shm_init(&afl->shm_run, afl->fsrv.num_reachables,
+    afl->fsrv.num_freachables, afl->non_instrumented_mode);
+  afl->fsrv.trace_reachables = afl->shm_run.map_reachables;
+  afl->fsrv.trace_freachables = afl->shm_run.map_freachables;
+  afl->fsrv.trace_ctx = afl->shm_run.map_ctx;
+  afl->virgin_ctx = afl->shm_run.map_virgin_ctx;
+  afl->fsrv.trace_virgin = afl->shm_run.map_new_blocks;
+  afl->fsrv.trace_targets = afl->shm_run.map_targets;
 
   if (!afl->non_instrumented_mode && !afl->fsrv.qemu_mode &&
       !afl->unicorn_mode && !afl->fsrv.frida_mode && !afl->fsrv.cs_mode &&
@@ -2184,6 +2243,16 @@ int main(int argc, char **argv_orig, char **envp) {
 
   memset(afl->virgin_tmout, 255, map_size);
   memset(afl->virgin_crash, 255, map_size);
+  memset(afl->virgin_reachables, 255, MAP_RBB_SIZE(afl->fsrv.num_reachables));
+  memset(afl->virgin_freachables, 255, MAP_RF_SIZE(afl->fsrv.num_freachables));
+
+  aflrun_init_globals(afl,
+    afl->fsrv.num_targets, afl->fsrv.num_reachables,
+    afl->fsrv.num_ftargets, afl->fsrv.num_freachables, afl->virgin_reachables,
+    afl->virgin_freachables, afl->virgin_ctx, afl->reachable_names,
+    afl->reachable_to_targets, afl->reachable_to_size, afl->out_dir,
+    afl->target_weights, afl->fsrv.map_size, afl->shm_run.div_switch,
+    getenv("AFLRUN_CYCLE_TIME"));
 
   if (likely(!afl->afl_env.afl_no_startup_calibration)) {
 
@@ -2282,7 +2351,8 @@ int main(int argc, char **argv_orig, char **envp) {
   #ifdef INTROSPECTION
   u32 prev_saved_crashes = 0, prev_saved_tmouts = 0;
   #endif
-  u32 prev_queued_items = 0, runs_in_current_cycle = (u32)-1;
+  afl->runs_in_current_cycle = (u32)-1;
+  u32 prev_queued_items = 0;
   u8  skipped_fuzz;
 
   #ifdef INTROSPECTION
@@ -2298,13 +2368,43 @@ int main(int argc, char **argv_orig, char **envp) {
   OKF("Writing mutation introspection to '%s'", ifn);
   #endif
 
+  /* AFLRun fuzzing loop:
+  while (!stop)
+    cycle_end() // end of each cycle, its first call is a pseudo cycle end
+    if (aflrun)
+      energy = assign_energy_each_seed()
+    else
+      // AFL++ cycle begin process
+    for (seed in corpus)
+      fuzz_one(seed, energy[seed])
+      update_prev()
+      if (resetted) // begin next cycle directly if mode is resetted
+        break
+  */
+
   while (likely(!afl->stop_soon)) {
 
     cull_queue(afl);
+    afl->is_aflrun = aflrun_get_mode();
+    u8 is_cycle_end = afl->old_seed_selection || afl->is_aflrun ?
+      !afl->queue_cur :
+      afl->runs_in_current_cycle > (afl->queued_items - afl->queued_extra);
+
+    if (unlikely(is_cycle_end || afl->force_cycle_end)) {
+
+      afl->force_cycle_end = 0;
+      u8 whole_end;
+      afl->is_aflrun = aflrun_cycle_end(&whole_end);
+      // afl->is_aflrun may be updated because cycle end may change the mode
 
-    if (unlikely((!afl->old_seed_selection &&
-                  runs_in_current_cycle > afl->queued_items) ||
-                 (afl->old_seed_selection && !afl->queue_cur))) {
+      /* Now it's the beginning of a new cycle */
+
+      // We need to re-calculate perf_score at beginning of each coverage cycle.
+      // Also we need to cull queue before each coverage cycle.
+      if (!afl->is_aflrun) {
+        afl->reinit_table = 1;
+        afl->score_changed = 1;
+      }
 
       if (unlikely((afl->last_sync_cycle < afl->queue_cycle ||
                     (!afl->queue_cycle && afl->afl_env.afl_import_first)) &&
@@ -2317,11 +2417,16 @@ int main(int argc, char **argv_orig, char **envp) {
         }
 
         sync_fuzzers(afl);
+        // If sync causes some reset, we need to re-start the cycle
+        if (aflrun_end_cycle()) {
+          afl->force_cycle_end = 1;
+          continue;
+        }
 
       }
 
       ++afl->queue_cycle;
-      runs_in_current_cycle = (u32)-1;
+      afl->runs_in_current_cycle = (u32)-1;
       afl->cur_skipped_items = 0;
 
       // 1st april fool joke - enable pizza mode
@@ -2343,7 +2448,7 @@ int main(int argc, char **argv_orig, char **envp) {
 
       }
 
-      if (unlikely(afl->old_seed_selection)) {
+      if (unlikely(afl->old_seed_selection && !afl->is_aflrun)) {
 
         afl->current_entry = 0;
         while (unlikely(afl->current_entry < afl->queued_items &&
@@ -2372,6 +2477,50 @@ int main(int argc, char **argv_orig, char **envp) {
 
         }
 
+      } else if (likely(afl->is_aflrun)) {
+
+        // In aflrun mode, at the beginning of each cycle,
+        // we need to allocate energy to each seed and sort the queue
+
+        u32 n = select_aflrun_seeds(afl);
+        afl->aflrun_favored = n;
+        u32* seeds = afl->aflrun_seeds;
+
+        afl->perf_scores = afl_realloc(
+          (void**)&afl->perf_scores, n * sizeof(double));
+        aflrun_assign_energy(n, seeds, afl->perf_scores);
+
+        afl->aflrun_queue = afl_realloc(
+          (void**)&afl->aflrun_queue, (n+1) * sizeof(struct queue_entry *));
+
+        u32 idx = 0;
+        for (u32 i = 0; i < n; i++) {
+
+          struct queue_entry *q = afl->queue_buf[seeds[i]];
+
+          if (q->id != seeds[i])
+            FATAL("ID does not match");
+
+          q->quant_score = afl->perf_scores[i];
+          if (q->quant_score > afl->queue_quant_thr)
+            afl->aflrun_queue[idx++] = q;
+
+        }
+        afl->aflrun_queue[idx] = NULL; // set last to NULL to detect end of cycle
+        afl->queued_aflrun = idx;
+
+        qsort(afl->aflrun_queue, idx, sizeof(struct queue_entry*), cmp_quant_score);
+
+        afl->queue_cur = afl->aflrun_queue[(afl->aflrun_idx = 0)];
+        if (afl->queue_cur == NULL)
+          continue; // if no seed, we skip the cycle
+        afl->current_entry = afl->queue_cur->id;
+
+        if (afl->log_at_begin)
+          aflrun_write_to_log(afl);
+        if (afl->check_at_begin)
+          aflrun_check_state();
+        aflrun_set_num_active_seeds(idx);
       }
 
       if (unlikely(afl->not_on_tty)) {
@@ -2381,6 +2530,10 @@ int main(int argc, char **argv_orig, char **envp) {
 
       }
 
+      if (unlikely(whole_end || afl->queue_cycle == 1)) {
+      // Only do this AFL++ cycle-end work at whole_end or at the very beginning.
+      // In other words, we regard an aflrun whole cycle as one original AFL++ cycle.
+
       /* If we had a full queue cycle with no new finds, try
          recombination strategies next. */
 
@@ -2465,7 +2618,7 @@ int main(int argc, char **argv_orig, char **envp) {
               afl->queued_items);
   #endif
 
-      if (afl->cycle_schedules) {
+      if (afl->cycle_schedules && !afl->is_aflrun) {
 
         /* we cannot mix non-AFLfast schedules with others */
 
@@ -2518,11 +2671,13 @@ int main(int argc, char **argv_orig, char **envp) {
 
     }
 
-    ++runs_in_current_cycle;
+    }
+
+    ++afl->runs_in_current_cycle;
 
     do {
 
-      if (likely(!afl->old_seed_selection)) {
+      if (unlikely(!afl->old_seed_selection && !afl->is_aflrun)) {
 
         if (unlikely(prev_queued_items < afl->queued_items ||
                      afl->reinit_table)) {
@@ -2543,6 +2698,8 @@ int main(int argc, char **argv_orig, char **envp) {
 
       }
 
+      // count the number of `common_fuzz_stuff` calls made by `fuzz_one`
+      afl->fuzzed_times = 0;
       skipped_fuzz = fuzz_one(afl);
   #ifdef INTROSPECTION
       ++afl->queue_cur->stats_selected;
@@ -2578,9 +2735,17 @@ int main(int argc, char **argv_orig, char **envp) {
 
   #endif
 
+      // Commit the fuzzed quantum,
+      // we need floating point here to prevent always adding zero
+      double fuzzed_quantum =
+        afl->fuzzed_times * afl->queue_cur->exec_us / (double)QUANTUM_TIME;
+      aflrun_update_fuzzed_quant(afl->queue_cur->id, fuzzed_quantum);
+      afl->quantum_ratio = afl->is_aflrun ?
+        fuzzed_quantum / afl->queue_cur->quant_score : -1;
+
       if (unlikely(!afl->stop_soon && exit_1)) { afl->stop_soon = 2; }
 
-      if (unlikely(afl->old_seed_selection)) {
+      if (unlikely(afl->old_seed_selection && !afl->is_aflrun)) {
 
         while (++afl->current_entry < afl->queued_items &&
                afl->queue_buf[afl->current_entry]->disabled) {};
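
The quantum bookkeeping added in the hunk above can be read with concrete numbers; QUANTUM_TIME itself is not shown in this diff, so assume it is 1,000,000 us for the arithmetic. If fuzz_one called common_fuzz_stuff 4,000 times on a seed with exec_us = 500, then fuzzed_quantum = 4000 * 500 / 1,000,000 = 2.0 quanta are committed for that seed; if the seed had been assigned quant_score = 2.5 for the cycle, the quantum_ratio shown on the stats screen is 2.0 / 2.5 = 0.8, and it is fixed to -1 outside aflrun mode.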
@@ -2596,9 +2761,17 @@ int main(int argc, char **argv_orig, char **envp) {
 
         }
 
+      } else if (afl->is_aflrun) {
+
+        afl->queue_cur = afl->aflrun_queue[++afl->aflrun_idx];
+        afl->current_entry = afl->queue_cur == NULL ?
+          afl->queued_items : afl->queue_cur->id;
+        // TODO: skip disabled seeds just like above
+
       }
 
-    } while (skipped_fuzz && afl->queue_cur && !afl->stop_soon);
+    } while (skipped_fuzz && afl->queue_cur && !afl->stop_soon
+        && !afl->force_cycle_end);
 
     if (likely(!afl->stop_soon && afl->sync_id)) {
 
@@ -2633,6 +2806,8 @@ int main(int argc, char **argv_orig, char **envp) {
 
       }
 
+      if (aflrun_end_cycle()) { afl->force_cycle_end = 1; }
+
     }
 
   }
@@ -2700,8 +2875,36 @@ stop_fuzzing:
             "Profiling information: %llu ms total work, %llu ns/run\n",
        time_spent_working / 1000000,
        time_spent_working / afl->fsrv.total_execs);
+
+  u8* profile_file = alloc_printf("%s/profiling.txt", afl->out_dir);
+  fd = fopen(profile_file, "w");
+  ck_free(profile_file);
+  if (fd == NULL)
+    FATAL("Cannot open profiling file");
+  fprintf(fd, "%llu %llu %lu "
+  #ifdef AFLRUN_OVERHEAD
+        "%0.02f%% "
+  #endif // AFLRUN_OVERHEAD
+        "%0.02f%%\n",
+        time_spent_working, afl->fsrv.total_execs, aflrun_get_num_clusters(),
+  #ifdef AFLRUN_OVERHEAD
+      (double)afl->fsrv.trace_virgin->overhead * 100 /
+      (afl->exec_time + afl->fuzz_time),
+  #endif // AFLRUN_OVERHEAD
+      (double)afl->exec_time * 100 /
+        (afl->exec_time + afl->fuzz_time));
+  fclose(fd);
   #endif
 
+  u8* times_file = alloc_printf("%s/fuzzed_times.txt", afl->out_dir);
+  fd = fopen(times_file, "w");
+  ck_free(times_file);
+  if (fd == NULL)
+    FATAL("Cannot open file to record number of fuzzed times for each seed");
+  for (u32 i = 0; i < afl->queued_items; i++)
+    fprintf(fd, "%llu\n", afl->queue_buf[i]->fuzzed_times);
+  fclose(fd);
+
   if (afl->is_main_node) {
 
     u8 path[PATH_MAX];
diff --git a/src/afl-sharedmem.c b/src/afl-sharedmem.c
index a2c81586..5d891ace 100644
--- a/src/afl-sharedmem.c
+++ b/src/afl-sharedmem.c
@@ -365,3 +365,92 @@ u8 *afl_shm_init(sharedmem_t *shm, size_t map_size,
 
 }
 
+void aflrun_shm_init(aflrun_shm_t *shm, reach_t num_reachables,
+  reach_t num_freachables, unsigned char non_instrumented_mode) {
+
+  u8 *shm_rbb_str, *shm_rf_str, *shm_tr_str,
+    *shm_vir_str, *shm_vtr_str, *shm_tt_str, *shm_div_str;
+
+  shm->shm_rbb_id = shmget(IPC_PRIVATE, MAP_RBB_SIZE(num_reachables),
+    IPC_CREAT | IPC_EXCL | DEFAULT_PERMISSION);
+  shm->shm_rf_id = shmget(IPC_PRIVATE, MAP_RF_SIZE(num_freachables),
+    IPC_CREAT | IPC_EXCL | DEFAULT_PERMISSION);
+  shm->shm_tr_id = shmget(IPC_PRIVATE, MAP_TR_SIZE(num_reachables),
+      IPC_CREAT | IPC_EXCL | DEFAULT_PERMISSION);
+  shm->shm_vir_id = shmget(IPC_PRIVATE, MAP_TR_SIZE(num_reachables),
+      IPC_CREAT | IPC_EXCL | DEFAULT_PERMISSION);
+  shm->shm_vtr_id = shmget(IPC_PRIVATE, MAP_VTR_SIZE(num_reachables),
+      IPC_CREAT | IPC_EXCL | DEFAULT_PERMISSION);
+  shm->shm_tt_id = shmget(IPC_PRIVATE, MAP_VTR_SIZE(num_reachables),
+      IPC_CREAT | IPC_EXCL | DEFAULT_PERMISSION);
+  shm->shm_div_id = shmget(IPC_PRIVATE, MAP_RBB_SIZE(num_reachables),
+    IPC_CREAT | IPC_EXCL | DEFAULT_PERMISSION);
+
+  if (shm->shm_rbb_id < 0 || shm->shm_rf_id < 0 || shm->shm_tr_id < 0 ||
+    shm->shm_vir_id < 0 || shm->shm_vtr_id < 0 || shm->shm_tt_id < 0 ||
+    shm->shm_div_id < 0)
+    PFATAL("shmget() failed");
+
+  if (!non_instrumented_mode) {
+
+    shm_rbb_str = alloc_printf("%d", shm->shm_rbb_id);
+    shm_rf_str = alloc_printf("%d", shm->shm_rf_id);
+    shm_tr_str = alloc_printf("%d", shm->shm_tr_id);
+    shm_vir_str = alloc_printf("%d", shm->shm_vir_id);
+    shm_vtr_str = alloc_printf("%d", shm->shm_vtr_id);
+    shm_tt_str = alloc_printf("%d", shm->shm_tt_id);
+    shm_div_str = alloc_printf("%d", shm->shm_div_id);
+
+    setenv(SHM_RBB_ENV_VAR, shm_rbb_str, 1);
+    setenv(SHM_RF_ENV_VAR, shm_rf_str, 1);
+    setenv(SHM_TR_ENV_VAR, shm_tr_str, 1);
+    setenv(SHM_VIR_ENV_VAR, shm_vir_str, 1);
+    setenv(SHM_VTR_ENV_VAR, shm_vtr_str, 1);
+    setenv(SHM_TT_ENV_VAR, shm_tt_str, 1);
+    setenv(SHM_DIV_ENV_VAR, shm_div_str, 1);
+
+    ck_free(shm_rbb_str);
+    ck_free(shm_rf_str);
+    ck_free(shm_tr_str);
+    ck_free(shm_vir_str);
+    ck_free(shm_vtr_str);
+    ck_free(shm_tt_str);
+    ck_free(shm_div_str);
+
+  }
+
+  shm->map_reachables = shmat(shm->shm_rbb_id, NULL, 0);
+  shm->map_freachables = shmat(shm->shm_rf_id, NULL, 0);
+  shm->map_ctx = shmat(shm->shm_tr_id, NULL, 0);
+  shm->map_virgin_ctx = shmat(shm->shm_vir_id, NULL, 0);
+  shm->map_new_blocks = shmat(shm->shm_vtr_id, NULL, 0);
+  shm->map_targets = shmat(shm->shm_tt_id, NULL, 0);
+  shm->div_switch = shmat(shm->shm_div_id, NULL, 0);
+
+#define ERROR_SHM(addr) ((addr) == ((void *)-1) || !(addr))
+  if (ERROR_SHM(shm->map_reachables) || ERROR_SHM(shm->map_freachables) ||
+      ERROR_SHM(shm->map_ctx) || ERROR_SHM(shm->map_virgin_ctx) ||
+      ERROR_SHM(shm->map_new_blocks) || ERROR_SHM(shm->map_targets) ||
+      ERROR_SHM(shm->div_switch)) {
+
+    aflrun_shm_deinit(shm);
+    PFATAL("shmat() failed");
+
+  }
+#undef ERROR_SHM
+
+  memset(shm->map_virgin_ctx, 255, MAP_TR_SIZE(num_reachables));
+
+}
+
+void aflrun_shm_deinit(aflrun_shm_t *shm) {
+
+  shmctl(shm->shm_rbb_id, IPC_RMID, NULL);
+  shmctl(shm->shm_rf_id, IPC_RMID, NULL);
+  shmctl(shm->shm_tr_id, IPC_RMID, NULL);
+  shmctl(shm->shm_vir_id, IPC_RMID, NULL);
+  shmctl(shm->shm_vtr_id, IPC_RMID, NULL);
+  shmctl(shm->shm_tt_id, IPC_RMID, NULL);
+  shmctl(shm->shm_div_id, IPC_RMID, NULL);
+
+}
\ No newline at end of file
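
aflrun_shm_init above repeats one System V pattern per map: shmget an anonymous segment, publish its id through an environment variable so the instrumented target can attach, shmat it locally, and finally release it with IPC_RMID in aflrun_shm_deinit. A minimal single-map sketch of that pattern; the env var name, size and permission bits are placeholders:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void) {

  const size_t size = 1 << 16;

  int id = shmget(IPC_PRIVATE, size, IPC_CREAT | IPC_EXCL | 0600);
  if (id < 0) { perror("shmget"); return 1; }

  char buf[32];
  snprintf(buf, sizeof(buf), "%d", id);
  setenv("EXAMPLE_SHM_ENV_VAR", buf, 1);   /* target reads this and shmat()s */

  unsigned char *map = shmat(id, NULL, 0);
  if (map == (void *)-1) { shmctl(id, IPC_RMID, NULL); perror("shmat"); return 1; }

  memset(map, 255, size);                  /* e.g. initialise a virgin map   */

  shmdt(map);
  shmctl(id, IPC_RMID, NULL);              /* mirrors aflrun_shm_deinit      */
  return 0;
}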
diff --git a/src/afl-showmap.c b/src/afl-showmap.c
index 4e019794..f785adb3 100644
--- a/src/afl-showmap.c
+++ b/src/afl-showmap.c
@@ -72,6 +72,11 @@ static u8 outfile[PATH_MAX];
 static u8 *in_data,                    /* Input data                        */
     *coverage_map;                     /* Coverage map                      */
 
+static u8 **target_coverage_maps;      /* Coverage map for each target      */
+static u32* target_coverage_cnts; /* Number of covered edges for each target;
+target_coverage_cnts[t] equals the number of set entries in target_coverage_maps[t]. */
+static u32* target_cnts;          /* Number of seeds that cover each target */
+
 static u64 total;                      /* tuple content information         */
 static u32 tcnt, highest;              /* tuple content information         */
 
@@ -91,12 +96,15 @@ static bool quiet_mode,                /* Hide non-essential messages?      */
     no_classify,                       /* do not classify counts            */
     debug,                             /* debug mode                        */
     print_filenames,                   /* print the current filename        */
-    wait_for_gdb;
+    wait_for_gdb, aflrun_d;
+
+FILE* aflrun_log, *aflrun_cnt, *aflrun_tcov;
 
 static volatile u8 stop_soon,          /* Ctrl-C pressed?                   */
     child_crashed;                     /* Child crashed?                    */
 
 static sharedmem_t       shm;
+static aflrun_shm_t      shm_run;
 static afl_forkserver_t *fsrv;
 static sharedmem_t      *shm_fuzz;
 
@@ -187,6 +195,7 @@ static void at_exit_handler(void) {
 
     if (shm.map) afl_shm_deinit(&shm);
     if (fsrv->use_shmem_fuzz) deinit_shmem(fsrv, shm_fuzz);
+    if (aflrun_d) aflrun_shm_deinit(&shm_run);
 
   }
 
@@ -213,6 +222,60 @@ static void analyze_results(afl_forkserver_t *fsrv) {
 
 }
 
+static void aflrun_analyze_results(afl_forkserver_t *fsrv, u8* fn) {
+
+  const ctx_t* beg = fsrv->trace_targets->trace;
+  const ctx_t* end = beg + fsrv->trace_targets->num;
+  u8* visited = calloc(fsrv->num_targets, sizeof(u8));
+
+  // Iterate over each target this test case covers
+  for (const ctx_t* it = beg; it < end; it++) {
+
+    reach_t t = it->block;
+
+    if (visited[t]) continue;
+    visited[t] = 1;
+
+    u8* map = target_coverage_maps[t];
+
+    // Merge this run's edge coverage into the map of this covered target
+    for (u32 i = 0; i < map_size; i++) {
+
+      if (fsrv->trace_bits[i] && !map[i]) {
+
+        map[i] = 1;
+        target_coverage_cnts[t]++;
+
+      }
+
+    }
+
+    // This test case covers target t, so increment its seed count.
+    target_cnts[t]++;
+
+  }
+
+  // When called for a single input (no file name), we skip logging
+  if (fn == NULL) { free(visited); return; }
+
+  // Record the current number of covered edges for each target
+
+  fprintf(aflrun_tcov, "%s\n", fn);
+  for (reach_t t = 0; t < fsrv->num_targets; t++)
+    fprintf(aflrun_tcov, "%u ", visited[t]);
+  fprintf(aflrun_tcov, "\n");
+
+  free(visited);
+
+  fprintf(aflrun_log, "%s\n", fn);
+  for (reach_t t = 0; t < fsrv->num_targets; t++) {
+    fprintf(aflrun_log, "%u", target_coverage_cnts[t]);
+    if (t != fsrv->num_targets - 1)
+      fprintf(aflrun_log, ",");
+  }
+  fprintf(aflrun_log, "\n");
+}
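
Given the fprintf calls above, each seed processed in aflrun mode appends two records: the tcov file gets the seed's path followed by one 0/1 flag per target (whether this seed reached that target), and the log file gets the same path followed by the comma-separated cumulative per-target edge counts after merging this seed. With three targets and made-up values, a pair of entries would look roughly like:

  queue/id:000042,...    (in tcov.txt)
  1 0 1
  queue/id:000042,...    (in log.txt)
  132,0,57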
+
 /* Write results. */
 
 static u32 write_results_to_file(afl_forkserver_t *fsrv, u8 *outfile) {
@@ -743,6 +806,9 @@ u32 execute_testcases(u8 *dir) {
        a dot */
     if (subdirs && S_ISDIR(st.st_mode) && nl[i]->d_name[0] != '.') {
 
+      if (aflrun_d)
+        FATAL("Use -d flag only for queue/ directory");
+
       free(nl[i]);                                           /* not tracked */
       done += execute_testcases(fn2);
       ck_free(fn2);
@@ -758,6 +824,10 @@ u32 execute_testcases(u8 *dir) {
 
     }
 
+    // Only process files beginning with "id:" in aflrun mode
+    if (aflrun_d && strncmp(nl[i]->d_name, "id:", 3))
+      continue;
+
     if (st.st_size > MAX_FILE && !be_quiet && !quiet_mode) {
 
       WARNF("Test case '%s' is too big (%s, limit is %s), partial reading", fn2,
@@ -792,6 +862,9 @@ u32 execute_testcases(u8 *dir) {
       else
         tcnt = write_results_to_file(fsrv, outfile);
 
+      if (aflrun_d)
+        aflrun_analyze_results(fsrv, fn2);
+
     }
 
   }
@@ -849,7 +922,8 @@ static void usage(u8 *argv0) {
       "  -e         - show edge coverage only, ignore hit counts\n"
       "  -r         - show real tuple values instead of AFL filter values\n"
       "  -s         - do not classify the map\n"
-      "  -c         - allow core dumps\n\n"
+      "  -c         - allow core dumps\n"
+      "  -d         - get target coverage, may need AFL_DRIVER_DONT_DEFER=1\n\n"
 
       "This tool displays raw tuple data captured by AFL instrumentation.\n"
       "For additional help, consult %s/README.md.\n\n"
@@ -886,6 +960,30 @@ static void usage(u8 *argv0) {
 
 }
 
+static void load_aflrun_header(
+  u8* file, reach_t* num_targets, reach_t* num_reachables) {
+  FILE* fd = fopen(file, "r");
+  if (fd == NULL)
+    FATAL("Failed to open %s", file);
+  char* line = NULL; size_t len = 0;
+  ssize_t res = getline(&line, &len, fd);
+  if (res == -1 || line == NULL)
+    FATAL("Failed to read %s", file);
+
+  char* endptr;
+  *num_targets = strtoul(line, &endptr, 10);
+  if (*endptr != ',')
+    FATAL("Wrong format for %s", file);
+  *endptr = 0;
+  *num_reachables = strtoul(endptr + 1, &endptr, 10);
+  if (*endptr != 0 && *endptr != '\n')
+    FATAL("Wrong format for %s", file);
+  if (*num_targets == 0 || *num_targets > *num_reachables)
+    FATAL("Wrong number of targets and reachables");
+  ck_free(file);
+  free(line);
+}
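+
+/* Example (illustrative): if the first line of BBreachable.txt is "3,1024",
+   load_aflrun_header() yields num_targets == 3 and num_reachables == 1024;
+   any other layout of that first line is rejected as a format error. */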
+
 /* Main entry point */
 
 int main(int argc, char **argv_orig, char **envp) {
@@ -896,6 +994,7 @@ int main(int argc, char **argv_orig, char **envp) {
   bool mem_limit_given = false, timeout_given = false, unicorn_mode = false,
        use_wine = false;
   char **use_argv;
+  const char* prefix = NULL;
 
   char **argv = argv_cpy_dup(argc, argv_orig);
 
@@ -912,7 +1011,7 @@ int main(int argc, char **argv_orig, char **envp) {
 
   if (getenv("AFL_QUIET") != NULL) { be_quiet = true; }
 
-  while ((opt = getopt(argc, argv, "+i:o:f:m:t:AeqCZOH:QUWbcrsh")) > 0) {
+  while ((opt = getopt(argc, argv, "+i:o:f:m:t:AeqCZOH:QUWbcrshd:p:")) > 0) {
 
     switch (opt) {
 
@@ -1126,6 +1225,42 @@ int main(int argc, char **argv_orig, char **envp) {
         return -1;
         break;
 
+      case 'd':
+        aflrun_d = true;
+        load_aflrun_header(alloc_printf("%s/BBreachable.txt", optarg),
+          &fsrv->num_targets, &fsrv->num_reachables);
+        load_aflrun_header(alloc_printf("%s/Freachable.txt", optarg),
+          &fsrv->num_ftargets, &fsrv->num_freachables);
+        u8* log_file; u8* cnt_file; u8* tcov_file;
+        if (prefix) {
+          log_file = alloc_printf("%s/%s.log.txt", optarg, prefix);
+          cnt_file = alloc_printf("%s/%s.cnt.txt", optarg, prefix);
+          tcov_file = alloc_printf("%s/%s.tcov.txt", optarg, prefix);
+        } else {
+          log_file = alloc_printf("%s/log.txt", optarg);
+          cnt_file = alloc_printf("%s/cnt.txt", optarg);
+          tcov_file = alloc_printf("%s/tcov.txt", optarg);
+        }
+        aflrun_log = fopen(log_file, "w");
+        if (aflrun_log == NULL)
+          FATAL("Open %s failed", log_file);
+        aflrun_cnt = fopen(cnt_file, "w");
+        if (aflrun_cnt == NULL)
+          FATAL("Open %s failed", cnt_file);
+        aflrun_tcov = fopen(tcov_file, "w");
+        if (aflrun_tcov == NULL)
+          FATAL("Open %s failed", tcov_file);
+        ck_free(tcov_file);
+        ck_free(log_file);
+        ck_free(cnt_file);
+        break;
+
+      case 'p':
+        if (aflrun_log)
+          FATAL("Please put -p before -d");
+        prefix = strdup(optarg);
+        break;
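+
+      /* Sketch of a typical invocation (all paths illustrative):
+           afl-showmap -i out/default/queue -o map.txt -p run1 \
+             -d /path/to/aflrun_meta -- ./target @@
+         This reads BBreachable.txt / Freachable.txt from the -d directory and
+         writes run1.log.txt, run1.cnt.txt and run1.tcov.txt into it; note that
+         -p must appear before -d on the command line. */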
+
       default:
         usage(argv[0]);
 
@@ -1173,6 +1308,16 @@ int main(int argc, char **argv_orig, char **envp) {
 
   fsrv->target_path = find_binary(argv[optind]);
   fsrv->trace_bits = afl_shm_init(&shm, map_size, 0);
+  if (aflrun_d) {
+    aflrun_shm_init(&shm_run, fsrv->num_reachables, fsrv->num_freachables, 0);
+    fsrv->trace_reachables = shm_run.map_reachables;
+    fsrv->trace_freachables = shm_run.map_freachables;
+    fsrv->trace_ctx = shm_run.map_ctx;
+    fsrv->trace_virgin = shm_run.map_new_blocks;
+    fsrv->trace_targets = shm_run.map_targets;
+    for (reach_t t = 0; t < fsrv->num_targets; ++t)
+      shm_run.div_switch[t / 8] |= 1 << (t % 8);
+  }
 
   if (!quiet_mode) {
 
@@ -1332,6 +1477,21 @@ int main(int argc, char **argv_orig, char **envp) {
 
   }
 
+  if (aflrun_d) {
+    // Initialize coverage map for each target
+    target_coverage_maps = (u8**)malloc(fsrv->num_targets * sizeof(u8*));
+    if (target_coverage_maps == NULL)
+      FATAL("coult not grab memory");
+    for (reach_t i = 0; i < fsrv->num_targets; ++i) {
+      target_coverage_maps[i] = malloc(map_size + 64);
+      if (target_coverage_maps[i] == NULL)
+        FATAL("coult not grab memory");
+      memset(target_coverage_maps[i], 0, map_size + 64);
+    }
+    target_coverage_cnts = calloc(fsrv->num_targets, sizeof(u32));
+    target_cnts = calloc(fsrv->num_targets, sizeof(u32));
+  }
+
   if (in_dir) {
 
     DIR *dir_in, *dir_out = NULL;
@@ -1411,6 +1571,11 @@ int main(int argc, char **argv_orig, char **envp) {
 
     }
 
+    if (aflrun_d) {
+
+      for (reach_t t = 0; t < fsrv->num_targets; ++t) {
+        fprintf(aflrun_cnt, "%u ", target_cnts[t]);
+      }
+      fprintf(aflrun_cnt, "\n");
+
+    }
+
     if (!quiet_mode) { OKF("Processed %llu input files.", fsrv->total_execs); }
 
     if (dir_out) { closedir(dir_out); }
@@ -1429,6 +1594,15 @@ int main(int argc, char **argv_orig, char **envp) {
 
     showmap_run_target(fsrv, use_argv);
     tcnt = write_results_to_file(fsrv, out_file);
+    if (aflrun_d) {
+
+      aflrun_analyze_results(fsrv, NULL);
+      for (reach_t t = 0; t < fsrv->num_targets; ++t) {
+        fprintf(aflrun_cnt, "%u ", target_cnts[t]);
+      }
+      fprintf(aflrun_cnt, "\n");
+
+    }
     if (!quiet_mode) {
 
       OKF("Hash of coverage map: %llx",
@@ -1485,6 +1659,7 @@ int main(int argc, char **argv_orig, char **envp) {
 
   argv_cpy_free(argv);
   if (fsrv->qemu_mode) { free(use_argv[2]); }
+  if (aflrun_d) { fclose(aflrun_log); fclose(aflrun_cnt); fclose(aflrun_tcov); }
 
   exit(ret);
 
diff --git a/src/aflrun-cc.c b/src/aflrun-cc.c
new file mode 100644
index 00000000..2b12c413
--- /dev/null
+++ b/src/aflrun-cc.c
@@ -0,0 +1,30 @@
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include "debug.h"
+#include "alloc-inl.h"
+
+/* This compiler wrapper is used to handle cases where we cannot designate the
+compiler via $CC and $CXX, but can only replace the build system's compiler
+with the AFL one. For example, when compiling chromium/v8. */
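+
+/* A minimal usage sketch (paths illustrative): export AFL_PATH to the AFL
+install directory and have the build system invoke this wrapper instead of its
+own compiler; a copy or symlink of this binary whose name contains "++" is
+dispatched to afl-clang-lto++, anything else to afl-clang-lto, with the
+original arguments preserved. */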
+
+int main(int argc, char const *argv[])
+{
+	char const** new_argv = (char const**)malloc((argc + 1) * sizeof(char*));
+
+	char* afl_path = getenv("AFL_PATH");
+	if (afl_path == NULL)
+		FATAL("Please specify AFL_PATH");
+
+	new_argv[0] = alloc_printf("%s/%s", afl_path,
+		strstr(argv[0], "++") == NULL ? "afl-clang-lto" : "afl-clang-lto++");
+	for (int i = 1; i < argc; ++i)
+		new_argv[i] = argv[i];
+	new_argv[argc] = NULL;
+
+	execvp(new_argv[0], (char**)new_argv);
+
+	/* execvp() only returns on failure */
+	PFATAL("Failed to execute '%s'", new_argv[0]);
+
+	return 0;
+}
diff --git a/src/aflrun.cpp b/src/aflrun.cpp
new file mode 100644
index 00000000..fb94cb16
--- /dev/null
+++ b/src/aflrun.cpp
@@ -0,0 +1,4039 @@
+#include "aflrun.h"
+
+#include <boost/algorithm/string.hpp>
+#include <boost/dynamic_bitset.hpp>
+#include <boost/functional/hash.hpp>
+#include <boost/make_shared.hpp>
+namespace bo = boost;
+
+#include <robin_hood.h>
+namespace rh = robin_hood;
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <cstring>
+#include <fstream>
+#include <functional>
+#include <iostream>
+#include <memory>
+#include <numeric>
+#include <queue>
+#include <random>
+#include <stack>
+#include <string>
+#include <tuple>
+#include <vector>
+
+namespace { struct Fringe; struct SeedFringes; struct ClusterPair; }
+template<> struct std::hash<Fringe>
+{
+	std::size_t operator()(const Fringe&) const noexcept;
+};
+template<> struct std::hash<SeedFringes>
+{
+	std::size_t operator()(const SeedFringes&) const noexcept;
+};
+template<> struct std::hash<std::pair<reach_t, reach_t>>
+{
+	std::size_t operator()(const std::pair<reach_t, reach_t>&) const noexcept;
+};
+template<> struct std::hash<ClusterPair>
+{
+	std::size_t operator()(const ClusterPair&) const noexcept;
+};
+
+using namespace std;
+
+/* ----- Global data structures for AFLRUN ----- */
+namespace
+{
+
+struct AFLRunUpdateTime
+{
+	/* Record the time of last update of our maintained information */
+	u64 last_reachable, last_fringe,
+		last_pro_fringe, last_target;
+	u64 last_ctx_reachable, last_ctx_fringe,
+		last_ctx_pro_fringe, last_ctx_target;
+
+	AFLRunUpdateTime() :
+		last_reachable(0), last_fringe(0),
+		last_pro_fringe(0), last_target(0),
+		last_ctx_reachable(0), last_ctx_fringe(0),
+		last_ctx_pro_fringe(0), last_ctx_target(0) {}
+};
+
+AFLRunUpdateTime update_time;
+
+struct AFLRunConfig
+{
+	bool slow_ctx_bfs;
+	bool check_at_begin, log_at_begin;
+	u64 log_check_interval;
+	double cycle_energy; int max_cycle_count;
+	bool check_fringe;
+	double supp_cnt_thr; double conf_thr; bool count_seed;
+	double trim_thr; double linear_cycle_energy;
+	double exp_ratio; bool favor_high_cov;
+	bool disable_mode[4]; u8 reset_level; bool reset_target;
+	bool no_diversity; bool uni_whole_cycle; bool show_all_seeds;
+	double init_cov_quant; double col_weight_k;
+	u8 div_level; u32 div_seed_thr; bool trim_col; u8 init_cov_reset;
+	bool seed_based_energy; bool assign_ctx;
+	bool unite_assign; double unite_ratio[4]; bool single_supp_thr;
+	double dist_k; double queue_quant_thr; u32 min_num_exec;
+	bool uniform_targets; bool extra_cov; bool no_critical;
+	/*
+	This callback function takes in information about seeds and fringes,
+	and allocates the given `total_energy` to the `ret` array by adding to it.
+	In other words, the increase in the sum of the `ret` array should equal
+	`total_energy`.
+	*/
+
+	explicit AFLRunConfig() : slow_ctx_bfs(false),
+	check_at_begin(false), log_at_begin(false),
+	log_check_interval(36000),
+	cycle_energy(60 * 10), max_cycle_count(32), check_fringe(false),
+	supp_cnt_thr(100), conf_thr(0.9), count_seed(true), trim_thr(1),
+	linear_cycle_energy(0), exp_ratio(1), favor_high_cov(false),
+	disable_mode{false, false, false, false}, reset_level(1),
+	reset_target(true), no_diversity(false), uni_whole_cycle(false),
+	show_all_seeds(false), init_cov_quant(10 * 60 * 10),
+	col_weight_k(1.0), div_level(1), div_seed_thr(100), trim_col(true),
+	init_cov_reset(0), seed_based_energy(true), assign_ctx(false),
+	unite_assign(true), unite_ratio{1, 1, 1, 3}, single_supp_thr(false),
+	dist_k(1), queue_quant_thr(0), min_num_exec(1), uniform_targets(false),
+	extra_cov(false), no_critical(false) {}
+
+	static const rh::unordered_map<string,
+		function<void(AFLRunConfig*, const string&)>> loaders;
+
+	void load(const string& cmd)
+	{
+		if (cmd.empty())
+			return;
+		size_t idx = cmd.find('=');
+		if (idx == string::npos)
+			throw string("Format of config must be 'key=value'");
+		auto key = cmd.substr(0, idx);
+		auto callback = loaders.find(key);
+		if (callback == loaders.end())
+			throw string("No such option: " + key);
+		callback->second(this, cmd.substr(idx + 1));
+	}
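+	// Example (illustrative): load("cycle_energy=600") sets cycle_energy to
+	// 600.0. The caller is expected to split a colon-separated option string
+	// such as "cycle_energy=600:conf_thr=0.9" (cf. the supp_cnt_thr note
+	// below) and call load() once per "key=value" entry.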
+	void check() const
+	{
+		if (!check_fringe && check_at_begin)
+			throw string("If you want to check at beginning, "
+				"please enable check_fringe.");
+		if (no_critical && !unite_assign)
+			throw string("For no critical block ablation study, "
+				"please enable unite_assign.");
+	}
+private:
+	static void check_digit(const string& val, string name)
+	{
+		if (val.empty())
+			throw string("'"+name+"' must be digit");
+		for (char c : val)
+		{
+			if (!isdigit(c))
+				throw string("'"+name+"' must be digit");
+		}
+	}
+};
+
+const rh::unordered_map<string, function<void(AFLRunConfig*, const string&)>>
+AFLRunConfig::loaders(
+{
+	#define BOOL_AFLRUN_ARG(name) \
+		if (val == "1") \
+			config->name = true; \
+		else if (val == "0") \
+			config->name = false; \
+		else \
+			throw string("Invalid value '"+val+"' for '"#name"'");
+
+	{"slow_ctx_bfs", [](AFLRunConfig* config, const string& val)
+	{
+		BOOL_AFLRUN_ARG(slow_ctx_bfs)
+	}},
+	{"check_at_begin", [](AFLRunConfig* config, const string& val)
+	{
+		BOOL_AFLRUN_ARG(check_at_begin)
+	}},
+	{"log_at_begin", [](AFLRunConfig* config, const string& val)
+	{
+		BOOL_AFLRUN_ARG(log_at_begin)
+	}},
+	{"log_check_interval", [](AFLRunConfig* config, const string& val)
+	{
+		check_digit(val, "log_check_interval");
+		config->log_check_interval = stoull(val);
+	}},
+	{"count_seed", [](AFLRunConfig* config, const string& val)
+	{
+		BOOL_AFLRUN_ARG(count_seed);
+	}},
+	{"cycle_energy", [](AFLRunConfig* config, const string& val)
+	{
+		config->cycle_energy = stod(val);
+		if (isnan(config->cycle_energy) || isinf(config->cycle_energy) ||
+			config->cycle_energy <= 0)
+			throw string("Invalid 'cycle_energy'");
+	}},
+	{"max_cycle_count", [](AFLRunConfig* config, const string& val)
+	{
+		check_digit(val, "max_cycle_count");
+		config->max_cycle_count = stoi(val);
+	}},
+	{"check_fringe", [](AFLRunConfig* config, const string& val)
+	{
+		BOOL_AFLRUN_ARG(check_fringe)
+	}},
+	{"supp_cnt_thr", [](AFLRunConfig* config, const string& val)
+	{ // To disable target diversity, set "supp_cnt_thr=0:conf_thr=0"
+		config->supp_cnt_thr = stod(val);
+		if (isnan(config->supp_cnt_thr) || isinf(config->supp_cnt_thr) ||
+			config->supp_cnt_thr < 0)
+			throw string("Invalid 'supp_cnt_thr'");
+	}},
+	{"conf_thr", [](AFLRunConfig* config, const string& val)
+	{
+		if (val == "inf")
+		{ // For infinite threshold, we don't cluster anything
+			config->conf_thr = numeric_limits<double>::infinity();
+			return;
+		}
+		config->conf_thr = stod(val);
+		if (isnan(config->conf_thr) ||
+			config->conf_thr < 0 || config->conf_thr > 1)
+			throw string("Invalid 'conf_thr'");
+	}},
+	{"dist_k", [](AFLRunConfig* config, const string& val)
+	{
+		if (val == "inf")
+		{ // If `k` is infinity, we distribute weight uniformly
+			config->dist_k = numeric_limits<double>::infinity();
+			return;
+		}
+		config->dist_k = stod(val);
+		if (isnan(config->dist_k) || config->dist_k <= 0)
+			throw string("Invalid 'dist_k'");
+	}},
+	{"trim_thr", [](AFLRunConfig* config, const string& val)
+	{
+		if (val == "inf")
+		{ // For infinite threshold, we don't trim any seed.
+			config->trim_thr = numeric_limits<double>::infinity();
+			return;
+		}
+		config->trim_thr = stod(val);
+		// For 0 threshold, we always trim every seed.
+		if (isnan(config->trim_thr) || config->trim_thr < 0)
+			throw string("Invalid 'trim_thr'");
+	}},
+	{"linear_cycle_energy", [](AFLRunConfig* config, const string& val)
+	{
+		// If this value is non-zero, the per-cycle energy becomes:
+		// max(cycle_energy, linear_cycle_energy * num_active_seeds)
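+		// e.g. (illustrative numbers) with cycle_energy = 600,
+		// linear_cycle_energy = 10 and 100 active seeds, the per-cycle energy
+		// becomes max(600, 10 * 100) = 1000.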
+		config->linear_cycle_energy = stod(val);
+		if (isnan(config->linear_cycle_energy) ||
+			isinf(config->linear_cycle_energy) ||
+			config->linear_cycle_energy < 0)
+			throw string("Invalid 'linear_cycle_energy'");
+	}},
+	{"exp_ratio", [](AFLRunConfig* config, const string& val)
+	{
+		// Ratio of desired exploitation / exploration:
+		// if >1, more energy will be allocated to exploitation;
+		// if <1, more energy will be allocated to exploration;
+		// if =1, exploitation and exploration are equal;
+		// if =inf, it almost only does exploitation;
+		// if =0, it almost only does exploration.
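+		// e.g. exp_ratio=2 keeps exploiting until roughly twice as much energy
+		// has gone to exploitation as to exploration (see `cycle_end`).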
+		config->exp_ratio = stod(val);
+		if (isnan(config->exp_ratio) || config->exp_ratio < 0)
+			throw string("Invalid 'exp_ratio'");
+	}},
+	{"favor_high_cov", [](AFLRunConfig* config, const string& val)
+	{
+		BOOL_AFLRUN_ARG(favor_high_cov)
+	}},
+	{"disable_mode", [](AFLRunConfig* config, const string& val)
+	{ // Same as order in enum Mode:
+		// 0 for cov; 1 for ctx fringe; 2 for fringe; 3 for target
+		unsigned long m = stoul(val);
+		if (m > 3)
+			throw string("Invalid 'disable_mode'");
+		config->disable_mode[m] = true;
+	}},
+	{"reset_level", [](AFLRunConfig* config, const string& val)
+	{
+		unsigned long l = stoul(val);
+		if (l > 1) // TODO: level=2, reset when new ctx fringe is reached
+			throw string("Invalid 'reset_level'");
+		config->reset_level = static_cast<u8>(l);
+	}},
+	{"reset_target", [](AFLRunConfig* config, const string& val)
+	{
+		BOOL_AFLRUN_ARG(reset_target)
+	}},
+	{"no_diversity", [](AFLRunConfig* config, const string& val)
+	{
+		BOOL_AFLRUN_ARG(no_diversity)
+	}},
+	{"uni_whole_cycle", [](AFLRunConfig* config, const string& val)
+	{ // If set, whole_count will not increase, use cautiously because
+		// this will make some AFL stuff based on cycle count not work.
+		BOOL_AFLRUN_ARG(uni_whole_cycle)
+	}},
+	{"show_all_seeds", [](AFLRunConfig* config, const string& val)
+	{
+		BOOL_AFLRUN_ARG(show_all_seeds)
+	}},
+	{"init_cov_quant", [](AFLRunConfig* config, const string& val)
+	{
+		config->init_cov_quant = stod(val);
+		if (isnan(config->init_cov_quant) || config->init_cov_quant < 0)
+			throw string("Invalid 'init_cov_quant'");
+	}},
+	{"col_weight_k", [](AFLRunConfig* config, const string& val)
+	{
+		config->col_weight_k = stod(val);
+		if (isnan(config->col_weight_k) || isinf(config->col_weight_k) ||
+			config->col_weight_k < 0)
+			throw string("Invalid 'col_weight_k'");
+	}},
+	{"div_level", [](AFLRunConfig* config, const string& val)
+	{ // 0: only target diversity; 1: +pro fringe diversity; 2: +fringe diversity
+		config->div_level = stoi(val);
+		if (config->div_level > 1)
+			throw string("Invalid 'div_level'");
+		/* TODO: diversity for context-sensitive fringes.
+		The current implementation is problematic. Instead, we should use a
+		switch bitmap with context for these context-sensitive fringes, which
+		is left as future work.
+		*/
+	}},
+	{"div_seed_thr", [](AFLRunConfig* config, const string& val)
+	{
+		if (val == "inf")
+		{
+			config->div_seed_thr = numeric_limits<u32>::max();
+			return;
+		}
+		config->div_seed_thr = stoi(val);
+		if (config->div_seed_thr < 2)
+			throw string("Invalid 'div_seed_thr'");
+	}},
+	{"trim_col", [](AFLRunConfig* config, const string& val)
+	{
+		BOOL_AFLRUN_ARG(trim_col)
+	}},
+	{"init_cov_reset", [](AFLRunConfig* config, const string& val)
+	{
+		// 0: no reset;
+		// 1: reset at update on reachable;
+		// 2: reset at update on context reachable;
+		// 3: reset at new seed covering fringe.
+		config->init_cov_reset = stoi(val);
+		if (config->init_cov_reset > 2)
+			throw string("Invalid 'init_cov_reset'");
+	}},
+	{"seed_based_energy", [](AFLRunConfig* config, const string& val)
+	{ // Use new energy assignment algorithm!
+		BOOL_AFLRUN_ARG(seed_based_energy)
+	}},
+	{"assign_ctx", [](AFLRunConfig* config, const string& val)
+	{ // If we should assign uniformly among different contexts in new allocation
+		BOOL_AFLRUN_ARG(assign_ctx)
+	}},
+	{"unite_assign", [](AFLRunConfig* config, const string& val)
+	{ // If true, we don't use state machine, instead we do everything together
+		BOOL_AFLRUN_ARG(unite_assign)
+	}},
+	{"unite_ratio", [](AFLRunConfig* config, const string& val)
+	{ // Format: "cov,ctx,pro,tgt"
+		vector<string> ratios;
+		bo::split(ratios, val, [](char c) -> bool { return c == ','; });
+		if (ratios.size() != 4)
+			throw string("Invalid 'unite_ratio'");
+		for (size_t i = 0; i < 4; ++i)
+		{
+			double r = stod(ratios[i]);
+			if (isnan(r) || isinf(r) || r < 0)
+				throw string("Invalid 'unite_ratio'");
+			config->unite_ratio[i] = r;
+		}
+	}},
+	{"single_supp_thr", [](AFLRunConfig* config, const string& val)
+	{ // If true, we only use LHS as support count threshold
+		BOOL_AFLRUN_ARG(single_supp_thr)
+	}},
+	{"queue_quant_thr", [](AFLRunConfig* config, const string& val)
+	{
+		config->queue_quant_thr = stod(val);
+		if (config->queue_quant_thr < 0 || isnan(config->queue_quant_thr) ||
+			isinf(config->queue_quant_thr))
+			throw string("Invalid 'queue_quant_thr'");
+	}},
+	{"min_num_exec", [](AFLRunConfig* config, const string& val)
+	{
+		config->min_num_exec = stoul(val);
+		if (config->min_num_exec < 1)
+			throw string("Invalid 'min_num_exec'");
+	}},
+	{"uniform_targets", [](AFLRunConfig* config, const string& val)
+	{
+		BOOL_AFLRUN_ARG(uniform_targets)
+	}},
+	{"extra_cov", [](AFLRunConfig* config, const string& val)
+	{
+		BOOL_AFLRUN_ARG(extra_cov)
+	}},
+	{"no_critical", [](AFLRunConfig* config, const string& val)
+	{
+		BOOL_AFLRUN_ARG(no_critical)
+	}},
+	#undef BOOL_AFLRUN_ARG
+});
+
+AFLRunConfig config;
+
+struct AFLRunGlobals
+{
+	reach_t num_targets, num_reachables;
+	reach_t num_ftargets, num_freachables;
+	u8* virgin_reachables;
+	u8* virgin_freachables;
+	u8* virgin_ctx;
+	char** reachable_names;
+	reach_t** reachable_to_targets;
+	reach_t* reachable_to_size;
+	reach_t num_reached, num_freached; /* Number of non-virgin */
+	reach_t num_reached_targets, num_freached_targets;
+	string out_dir;
+	const double* target_weights;
+	u32 map_size;
+	void* afl;
+	u64 init_time, cycle_time;
+
+	explicit AFLRunGlobals(reach_t num_targets, reach_t num_reachables,
+		reach_t num_ftargets, reach_t num_freachables,
+		u8* virgin_reachables, u8* virgin_freachables, u8* virgin_ctx,
+		char** reachable_names, reach_t** reachable_to_targets,
+		reach_t* reachable_to_size, const char* out_dir,
+		const double* target_weights, u32 map_size, void* afl,
+		u64 init_time, u64 cycle_time)
+	: num_targets(num_targets), num_reachables(num_reachables),
+	num_ftargets(num_ftargets), num_freachables(num_freachables),
+	virgin_reachables(virgin_reachables), virgin_freachables(virgin_freachables),
+	virgin_ctx(virgin_ctx), reachable_names(reachable_names),
+	reachable_to_targets(reachable_to_targets),
+	reachable_to_size(reachable_to_size),
+	num_reached(0), num_freached(0), num_reached_targets(0),
+	num_freached_targets(0), out_dir(out_dir),
+	target_weights(target_weights), map_size(map_size), afl(afl),
+	init_time(init_time), cycle_time(cycle_time)
+	{
+		if (this->out_dir.back() != '/')
+			this->out_dir.push_back('/');
+	}
+
+	inline double get_tw(reach_t t) const
+	{
+		return config.uniform_targets ? 1 : target_weights[t];
+	}
+};
+
+unique_ptr<AFLRunGlobals> g = nullptr;
+
+struct AFLRunGraph
+{
+	vector<rh::unordered_flat_set<reach_t>> src_to_dst;
+	vector<vector<reach_t>> dst_to_src;
+	rh::unordered_map<pair<reach_t, reach_t>, vector<u32>> call_hashes;
+	explicit AFLRunGraph(reach_t num)
+		: src_to_dst(num), dst_to_src(num) {}
+};
+
+struct BasicBlockGraph : public AFLRunGraph
+{
+	explicit BasicBlockGraph(const char* bb_edges, reach_t num_reachables)
+		: AFLRunGraph(num_reachables)
+	{
+		ifstream in(bb_edges); assert(in.is_open());
+		string line; char* endptr;
+		while (getline(in, line))
+		{
+			const char* l = line.c_str();
+			reach_t src = strtoul(l, &endptr, 10); assert(*endptr == ',');
+			reach_t dst = strtoul(endptr+1, &endptr, 10); assert(*endptr == 0);
+			assert(src < num_reachables && dst < num_reachables);
+			src_to_dst[src].insert(dst);
+			dst_to_src[dst].push_back(src);
+		}
+		in.close();
+	}
+};
+
+struct Fringe
+{
+	reach_t block;
+	u32 context;
+	explicit Fringe(reach_t block, u32 context) :
+		block(block), context(context) {}
+	bool operator==(const Fringe& rhs) const
+	{
+		return this->block == rhs.block && this->context == rhs.context;
+	}
+};
+
+class TargetGrouper;
+
+struct SeedFringes
+{
+	bo::shared_ptr<u8[]> bitmap;
+	size_t bitmap_size;
+	size_t num_ones;
+	explicit SeedFringes(size_t num_fringes)
+		: bitmap_size((num_fringes + 7) / 8), num_ones(0)
+	{
+		bitmap = bo::make_shared<u8[]>(bitmap_size);
+		fill(bitmap.get(), bitmap.get() + bitmap_size, 0);
+	}
+	bool operator==(const SeedFringes& rhs) const
+	{
+		size_t size = this->bitmap_size;
+		const u8* ptr = this->bitmap.get();
+		return size == rhs.bitmap_size &&
+			equal(ptr, ptr + size, rhs.bitmap.get());
+	}
+	inline void set(size_t idx)
+	{
+		if (!get(idx)) ++num_ones;
+		bitmap[idx / 8] |= 1 << (idx % 8);
+	}
+	inline bool get(size_t idx)
+	{
+		return ((bitmap[idx / 8]) & (1 << (idx % 8))) != 0;
+	}
+};
+
+template <typename F, typename D>
+struct FringeBlocks
+{
+	struct Info
+	{
+		rh::unordered_set<u32> seeds; // Set of all seeds that cover the fringe
+		rh::unordered_map<reach_t, rh::unordered_set<D>> decisives;
+		// decisives for each target of this fringe
+		double fuzzed_quant;
+
+		// We can only access these 2 variables when `has_top_rated == true`
+		u64 top_rated_factor;
+		u32 top_rated_seed; bool has_top_rated;
+		Info() : fuzzed_quant(0), has_top_rated(false) {}
+	};
+
+	vector<rh::unordered_set<F>> target_to_fringes;
+	// maps each target to a set of fringe blocks
+	rh::unordered_map<reach_t, rh::unordered_set<F>> block_to_fringes;
+	// maps each block of fringe to fringes with that block
+
+	rh::unordered_map<F, Info> fringes;
+	// Maps each fringe to the set of targets that contain it as a fringe;
+	// this information should be consistent with `target_to_fringes`.
+	// For each target, the set of neighboring virgin blocks is recorded,
+	// which are the blocks that make this fringe block a fringe.
+	// Note that when the set of neighbors becomes empty, we need to delete the
+	// target; similarly, when the set of targets becomes empty, we delete the fringe.
+
+	rh::unordered_map<D, rh::unordered_set<F>> decisive_to_fringes;
+	// Maps decisive blocks to their corresponding fringes;
+	// works for both normal and progressive fringes.
+
+	rh::unordered_map<F, size_t> freq_idx;
+	vector<pair<size_t, u64>> freq; // frequency for each current fringe
+	// first element is index to bitmap and second is frequency
+	rh::unordered_map<u32, rh::unordered_set<F>> seed_fringes;
+	// Maps each seed to all fringes it covers; must be consistent with the above.
+
+	rh::unordered_set<u32> favored_seeds;
+
+	explicit FringeBlocks(reach_t num_targets) : target_to_fringes(num_targets) {}
+
+	void add_fringe(
+		const F& f, reach_t t, rh::unordered_set<D>&& decisives);
+	bool del_fringe(const F& f, const vector<reach_t>& ts);
+	bool del_fringe(const F& f);
+	bool fringe_coverage(const u8* bitmap, u32 seed,
+		const rh::unordered_set<Fringe>* new_criticals = nullptr,
+		const rh::unordered_set<reach_t>* new_bits_targets = nullptr);
+	void inc_freq(const u8* bitmap);
+	void update_fuzzed_quant(u32 seed, double fuzzed_quant);
+	void update_fringe_score(u32 seed);
+	u32 cull_queue(u32* seeds, u32 num);
+	rh::unordered_set<u32> select_favored_seeds() const;
+	void set_favored_seeds(const u32* seeds, u32 num);
+
+	unique_ptr<TargetGrouper> grouper;
+	void group();
+	void assign_energy(u32 num_seeds, const u32* seeds, double* ret) const;
+
+	u8 try_add_fringe(const Fringe& cand);
+	vector<reach_t> try_del_fringe(const Fringe& cand);
+
+	void remove_seed(u32 seed);
+
+	friend void assign_energy_unite(u32 num_seeds, const u32* ss, double* ret);
+
+private:
+	struct FringeInfo
+	{
+		double quant;
+		rh::unordered_set<u32> seeds;
+		size_t idx;
+		FringeInfo() : quant(0), idx(0) {}
+	};
+
+	pair<rh::unordered_map<u32, double>, double> assign_seed_no_ctx(
+		const rh::unordered_map<reach_t, double>& block_weight,
+		const rh::unordered_map<u32, u32>& seed_to_idx) const;
+	pair<rh::unordered_map<u32, double>, double> assign_seed_ctx(
+		const rh::unordered_map<reach_t, double>& block_weight,
+		const rh::unordered_map<u32, u32>& seed_to_idx) const;
+	inline pair<rh::unordered_map<u32, double>, double> assign_seed(
+		const rh::unordered_map<reach_t, double>& block_weight,
+		const rh::unordered_map<u32, u32>& seed_to_idx) const;
+	void assign_seeds_covered(
+		const rh::unordered_set<u32>& seeds, double total_weight,
+		const rh::unordered_map<u32, u32>& seed_to_idx,
+		rh::unordered_map<u32, double>& seed_weight, double& all_sum) const;
+	void record_new_cvx_opt(
+		const vector<pair<reach_t, rh::unordered_set<reach_t>>>& target_fringes,
+		const rh::unordered_map<reach_t, double>& block_weight,
+		const rh::unordered_map<u32, double>& seed_ratio,
+		const vector<pair<u32, double>>& sol) const;
+
+	void remove_freq(const F& f);
+	bool remove_block(const F& f);
+	pair<unique_ptr<double[]>, unique_ptr<double[]>> allocate_ratio(
+		const rh::unordered_map<reach_t, FringeInfo>& fringe_info,
+		const vector<reach_t>& vec_fringes) const;
+};
+
+unique_ptr<FringeBlocks<Fringe, Fringe>> path_fringes = nullptr;
+unique_ptr<FringeBlocks<Fringe, reach_t>> path_pro_fringes = nullptr;
+unique_ptr<FringeBlocks<Fringe, u8/*not used*/>> reached_targets = nullptr;
+
+// Convert block index into distance for each target
+vector<rh::unordered_map<reach_t, double>> bb_to_dists;
+
+// Given a set of blocks, we distribute the target weight `total` to the basic
+// blocks using their distance to target `t`, adding each weight value to `dst`.
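+// e.g. (illustrative) with dist_k = 1 and two blocks at distances 0 and 1 from
+// target `t`, the raw weights are 1/(0+1) = 1 and 1/(1+1) = 0.5, so the blocks
+// receive 2/3 and 1/3 of `total` respectively.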
+void dist_block_ratio(
+	const rh::unordered_set<reach_t>& blocks, reach_t t, double total,
+	rh::unordered_map<reach_t, double>& dst)
+{
+	if (isinf(config.dist_k))
+	{ // If `k` is infinity, we just uniformly distribute.
+		for (reach_t b : blocks)
+		{
+			dst[b] += total / blocks.size();
+		}
+		return;
+	}
+	vector<pair<reach_t, double>> block_ratios; double sum = 0;
+	for (reach_t b : blocks)
+	{
+		double w = 1.0 / (bb_to_dists[b].find(t)->second + config.dist_k);
+		sum += w;
+		block_ratios.emplace_back(b, w);
+	}
+	for (const auto& p : block_ratios)
+	{
+		dst[p.first] += total * p.second / sum;
+	}
+}
+
+class AFLRunState
+{
+public:
+	enum Mode : u8
+	{
+		kCoverage = 0, kFringe, kProFringe, kTarget, kUnite
+	};
+private:
+	Mode mode;
+	bool reset_exploit, init_cov;
+	int cycle_count;
+	u64 whole_count;
+	double cov_quant, exploration_quant, exploitation_quant;
+	void reset(Mode new_mode)
+	{
+		if (mode == kUnite)
+		{ // Unite mode always resets to itself when there is any fringe update
+			cycle_count = -1;
+			assert(cov_quant == 0);
+			return;
+		}
+		// we don't want to reset to a more explorative state;
+		// we also don't want to reset to exploration mode in exploitation mode,
+		// exploitation goes back to exploration only after a certain amount of
+		// energy has been executed in total, i.e. see `cycle_end` when `mode == kTarget`.
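+		// e.g. a reset(kFringe) request made while in kProFringe or kTarget
+		// mode is ignored, since kFringe is the more explorative state.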
+		if (new_mode < mode)
+			return;
+		mode = new_mode;
+		// set to -1 because we don't want to count current cycle
+		cycle_count = -1;
+		cov_quant = 0;
+	}
+	// TODO:
+	// 4. Solve favor high column (i.e. linear to the number of seeds)
+	// 5. Better splice
+	// 6. Better design for fringe? Keep some deleted fringes. (Might take time)
+
+public:
+	AFLRunState() : mode(kCoverage), reset_exploit(false), init_cov(true),
+		cycle_count(-1), whole_count(0), cov_quant(0),
+		exploration_quant(0), exploitation_quant(0) {}
+	// Initialize cycle_count to -1 since cycle_end is called at start of cycle
+
+	inline u64 get_whole_count() const
+	{
+		return whole_count;
+	}
+
+	bool cycle_end() // Return true if whole_count has increased
+	{
+		// For coverage mode, we look at the amount of quantum executed
+		// instead of the number of cycles
+		if (init_cov)
+		{
+			assert(mode == kCoverage);
+			++cycle_count;
+			if (cov_quant >= config.init_cov_quant)
+			{
+				// After initial coverage fuzzing,
+				// we switch to either state machine or unite assignment.
+				if (config.unite_assign)
+					mode = kUnite; // Start unite energy assignment mode
+				else
+					mode = kProFringe; // Start directed fuzzing
+				cov_quant = 0; cycle_count = 0;
+				init_cov = false;
+			}
+			return false;
+		}
+		if (mode == kUnite)
+		{
+			++cycle_count; // We never switch as long as we enter unite state
+			return false; // TODO: whole_count for unite mode?
+		}
+		if (mode == kCoverage)
+		{
+			// we still need to count cycles to prevent cycle_count from always being -1
+			++cycle_count;
+			if (cov_quant >= config.max_cycle_count * config.cycle_energy ||
+				config.disable_mode[kCoverage])
+			{ // When we cannot find anything new, start exploitation
+				mode = kTarget;
+				cov_quant = 0;
+				cycle_count = 0;
+			}
+			return false;
+		}
+		assert(cov_quant == 0); // We should not have cov_quant in non-cov mode
+		if (mode == kTarget)
+		{
+			bool ret = false;
+			++cycle_count;
+			// If we have already done more exploitation than exploration,
+			// switch back to exploration again.
+			if (exploitation_quant >= exploration_quant * config.exp_ratio ||
+				reached_targets->fringes.empty() || // If no reached target, skip
+				config.disable_mode[kTarget])
+			{
+				mode = kProFringe;
+				cycle_count = 0;
+				if (reset_exploit || config.uni_whole_cycle)
+				{
+					reset_exploit = false;
+				}
+				else
+				{
+					++whole_count; // Only increment when exploitation was not reset
+					ret = true;
+				}
+			}
+			return ret;
+		}
+		assert(cycle_count < config.max_cycle_count);
+		if (mode == kProFringe)
+		{
+			if (++cycle_count == config.max_cycle_count ||
+				path_pro_fringes->fringes.empty() || // If no pro fringe, skip
+				config.disable_mode[kProFringe])
+			{
+				mode = kFringe;
+				cycle_count = 0;
+			}
+		}
+		else
+		{
+			assert(mode == kFringe);
+			if (++cycle_count == config.max_cycle_count ||
+				path_fringes->fringes.empty() || // If no fringe, skip
+				config.disable_mode[kFringe])
+			{
+				mode = kCoverage;
+				cycle_count = 0;
+			}
+		}
+		return false;
+	}
+
+	void reset(u8 r)
+	{
+		if (init_cov)
+			return; // Don't reset at the initial coverage-based stage
+		switch (r)
+		{
+			case 2:
+				return reset(kProFringe);
+			case 1:
+				return reset(kFringe);
+			case 0:
+				return reset(kCoverage);
+			default: abort();
+		}
+	}
+
+	// Reset to exploitation state directly
+	void exploit()
+	{
+		if (mode == kUnite)
+		{ // Unite mode always resets to itself when there is any target update
+			cycle_count = -1;
+			assert(cov_quant == 0);
+			return;
+		}
+		// If already in exploitation, we don't reset it;
+		// this is different from the situation in explorative modes.
+		if (init_cov || mode == kTarget)
+			return;
+		reset_exploit = true;
+		mode = kTarget;
+		cycle_count = -1;
+		cov_quant = 0;
+	}
+
+	void add_quant(double quant)
+	{
+		Mode m = get_mode();
+		// We don't need to use quant in unite mode
+		if (m == kUnite)
+			return;
+
+		if (m == kTarget)
+		{
+			exploitation_quant += quant;
+		}
+		else
+		{
+			exploration_quant += quant;
+			if (m == kCoverage)
+				cov_quant += quant;
+		}
+	}
+
+	inline Mode get_mode() const { return mode; }
+	inline bool is_reset() const { return cycle_count == -1; }
+	inline bool is_init_cov() const { return init_cov; }
+	inline void reset_cov_quant() { cov_quant = 0; }
+	inline bool is_end_cov() const
+	{
+		if (init_cov)
+			return cov_quant >= config.init_cov_quant;
+		if (mode == kCoverage)
+			return config.disable_mode[kCoverage] ||
+				cov_quant >= config.max_cycle_count * config.cycle_energy;
+		return false;
+	}
+	inline void get_counts(int& cycle, u32& cov) const
+	{
+		cycle = cycle_count;
+		cov = cov_quant;
+	}
+};
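+
+/* Rough sketch of the state transitions implemented above (derived from
+   `cycle_end`, `reset` and `exploit`; "empty" means the corresponding fringe
+   or target set is empty, and disable_mode can force a transition as well):
+
+     initial kCoverage --(init_cov_quant used up)--> kProFringe,
+                                                     or kUnite if unite_assign
+     kProFringe --(max_cycle_count cycles or empty)--> kFringe
+     kFringe    --(max_cycle_count cycles or empty)--> kCoverage
+     kCoverage  --(max_cycle_count * cycle_energy used)--> kTarget
+     kTarget    --(exploitation >= exploration * exp_ratio)--> kProFringe
+
+   reset()/exploit() only ever move toward equally or more exploitative states,
+   and kUnite, once entered, never leaves. */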
+
+AFLRunState state;
+
+template <typename F>
+inline size_t to_bitmap_idx(const F& f);
+
+template <>
+inline size_t to_bitmap_idx<Fringe>(const Fringe& f)
+{
+	return CTX_IDX(f.block, f.context);
+}
+
+template <typename F>
+inline F from_bitmap_idx(size_t idx);
+
+template <>
+inline Fringe from_bitmap_idx<Fringe>(size_t idx)
+{
+	return Fringe(idx / CTX_SIZE, idx % CTX_SIZE);
+}
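+// The two conversions above assume CTX_IDX(block, ctx) == block * CTX_SIZE + ctx
+// (defined elsewhere), so that division and modulo recover the original Fringe.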
+
+template <typename F>
+inline reach_t to_fringe_block(const F& f);
+
+template <>
+inline reach_t to_fringe_block<Fringe>(const Fringe& f)
+{
+	return f.block;
+}
+
+template <>
+inline reach_t to_fringe_block<reach_t>(const reach_t& f)
+{
+	return f;
+}
+
+// Add new fringe to the given target
+template <typename F, typename D>
+void FringeBlocks<F, D>::add_fringe(
+	const F& f, reach_t t, rh::unordered_set<D>&& decisives)
+{
+	target_to_fringes[t].insert(f);
+	block_to_fringes[to_fringe_block<F>(f)].insert(f);
+	for (const D& dec : decisives)
+		decisive_to_fringes[dec].insert(f);
+	auto p = fringes.emplace(f, Info());
+	p.first->second.decisives.emplace(t, std::move(decisives));
+	if (p.second)
+	{
+		freq_idx.emplace(f, freq.size());
+		freq.emplace_back(to_bitmap_idx<F>(f), 0);
+	}
+}
+
+// Return true if the block is removed
+template <typename F, typename D>
+bool FringeBlocks<F, D>::remove_block(const F& f)
+{
+	// Remove fringe from `block_to_fringes`
+	auto it2 = block_to_fringes.find(to_fringe_block<F>(f));
+	it2->second.erase(f);
+	if (it2->second.empty())
+	{
+		block_to_fringes.erase(it2);
+		return true;
+	}
+	return false;
+}
+
+// Remove the element in frequency array
+template <typename F, typename D>
+void FringeBlocks<F, D>::remove_freq(const F& f)
+{
+	auto i = freq_idx.find(f);
+	if (i != freq_idx.end())
+	{
+		assert(freq[i->second].first == to_bitmap_idx<F>(f));
+		assert(i->second < freq.size());
+		if (i->second + 1 != freq.size())
+		{
+			// Remove the fringe from `freq` array
+			freq[i->second] = freq.back();
+			freq.pop_back();
+
+			// Update index value in `freq_idx` map
+			size_t idx = freq[i->second].first;
+			freq_idx.find(from_bitmap_idx<F>(idx))->second = i->second;
+			freq_idx.erase(i);
+		}
+		else
+		{ // Special case: remove last element in `freq` array
+			freq.pop_back();
+			freq_idx.erase(i);
+		}
+	}
+}
+
+void try_disable_seed(u32 s)
+{
+	if (reached_targets->seed_fringes.count(s) == 0 &&
+		path_pro_fringes->seed_fringes.count(s) == 0 &&
+		path_fringes->seed_fringes.count(s) == 0)
+	{ // If the seed is not used by aflrun now, try to disable it
+		disable_aflrun_extra(g->afl, s);
+	}
+}
+
+// Remove the fringe in given set of targets, return true if `f.block` is removed
+template <typename F, typename D>
+bool FringeBlocks<F, D>::del_fringe(const F& f, const vector<reach_t>& ts)
+{
+	auto it = fringes.find(f);
+	assert(it != fringes.end());
+
+	// Remove the fringe in given set of targets
+	for (reach_t t : ts)
+	{
+		it->second.decisives.erase(t);
+		target_to_fringes[t].erase(f);
+	}
+
+	// If given fringe in all targets is removed, remove the fringe itself
+	if (it->second.decisives.empty())
+	{
+		auto seeds = std::move(it->second.seeds);
+		fringes.erase(it);
+		// Remove all seeds reaching the deleted fringe
+		for (u32 seed : seeds)
+		{
+			auto it3 = seed_fringes.find(seed);
+			it3->second.erase(f);
+			if (it3->second.empty())
+			{
+				seed_fringes.erase(it3);
+				try_disable_seed(seed);
+			}
+		}
+		remove_freq(f);
+		return remove_block(f);
+	}
+	return false;
+}
+
+// Remove the fringe in all targets, return true if `f.block` is removed
+template <typename F, typename D>
+bool FringeBlocks<F, D>::del_fringe(const F& f)
+{
+	auto it = fringes.find(f);
+
+	for (const auto& td : it->second.decisives)
+	{
+		target_to_fringes[td.first].erase(f);
+	}
+	it->second.decisives.clear();
+
+	auto seeds = std::move(it->second.seeds);
+	fringes.erase(it);
+
+	for (u32 seed : seeds)
+	{
+		auto it3 = seed_fringes.find(seed);
+		it3->second.erase(f);
+		if (it3->second.empty())
+		{
+			seed_fringes.erase(it3);
+			try_disable_seed(seed);
+		}
+	}
+	remove_freq(f);
+	return remove_block(f);
+}
+
+template <typename F>
+inline void log_fringe(ostream& out, const F& f);
+
+// Given `trace_ctx` of a `seed`, check its coverage of fringe and add if necessary
+template <typename F, typename D>
+bool FringeBlocks<F, D>::fringe_coverage(const u8* bitmap, u32 seed,
+	const rh::unordered_set<Fringe>* new_criticals,
+	const rh::unordered_set<reach_t>* new_bits_targets)
+{
+	// fringe_coverage for each seed should only be called once
+	assert(seed_fringes.find(seed) == seed_fringes.end());
+	rh::unordered_set<F> sf;
+	for (auto& p : fringes)
+	{
+		const F& f = p.first;
+
+		// If new_criticals is NULL, we think no new critical is found;
+		// otherwise, we consider coverage only if `f` is new critical.
+		bool is_new_critical = new_criticals ?
+			(new_criticals->count(f) != 0) : false;
+		// If new_bits_targets is NULL, we consider coverage of every critical,
+		// i.e. there is no seed isolation (used for non-extra seeds);
+		// otherwise, we consider coverage only if `f` is target with new bits.
+		bool is_new_bits_targets = new_bits_targets ?
+			(new_bits_targets->count(f.block) != 0) : true;
+
+		// We try coverage if at least one of them is true.
+		if ((is_new_critical || is_new_bits_targets) &&
+			IS_SET(bitmap, to_bitmap_idx<F>(f)))
+		{ // If covered, add the seed
+			p.second.seeds.insert(seed);
+			sf.insert(f);
+		}
+	}
+	if (!sf.empty())
+	{
+		ofstream out(g->out_dir + "seeds.txt", ios::app);
+		out << seed << " | ";
+		for (const auto& f : sf)
+		{
+			log_fringe<F>(out, f); out << ' ';
+		}
+		out << endl;
+		seed_fringes.emplace(seed, std::move(sf));
+		return true;
+	}
+	else
+	{
+		return false;
+	}
+}
+
+// Increase the frequency of each fringe covered by the trace of a mutated input
+template <typename F, typename D>
+void FringeBlocks<F, D>::inc_freq(const u8* bitmap)
+{
+	for (auto& p : freq)
+	{
+		if (IS_SET(bitmap, p.first))
+		{
+			p.second++;
+		}
+	}
+}
+
+template <typename F, typename D>
+void FringeBlocks<F, D>::update_fuzzed_quant(u32 seed, double fuzzed_quant)
+{
+	// When fuzzing a normal fringe, the fuzzed seed may have no fringe among the pro fringes.
+	auto it = seed_fringes.find(seed);
+	if (it == seed_fringes.end())
+		return;
+	const auto& fs = it->second;
+	for (const F& f : fs)
+	{ // For each of its fringe, add `fuzzed_quant`
+		fringes.find(f)->second.fuzzed_quant += fuzzed_quant;
+	}
+}
+
+template <typename F, typename D>
+void FringeBlocks<F, D>::update_fringe_score(u32 seed)
+{
+	// If seed does not touch any fringe, skip
+	auto it = seed_fringes.find(seed);
+	if (it == seed_fringes.end())
+		return;
+	u64 fav_factor = get_seed_fav_factor(g->afl, seed);
+	for (const F& f : it->second)
+	{
+		Info& info = fringes.find(f)->second;
+		if (info.has_top_rated && fav_factor > info.top_rated_factor)
+			continue;
+
+		// Update top-rated seed and factor when possible
+		assert(info.seeds.find(seed) != info.seeds.end());
+		info.top_rated_seed = seed;
+		info.top_rated_factor = fav_factor;
+		info.has_top_rated = true;
+	}
+}
+
+vector<double> seed_quant;
+
+random_device rd;
+mt19937 gen(rd());
+uniform_int_distribution<> distrib(0, 99);
+
+template <typename F, typename D>
+rh::unordered_set<u32> FringeBlocks<F, D>::select_favored_seeds() const
+{
+	// Seeds that are considered favored
+	rh::unordered_set<u32> favored;
+
+	// Record all visited fringes
+	rh::unordered_set<F> temp_v;
+
+	for (const auto& p : fringes)
+	{ // For each unvisited fringe, we get top rated seed, if any
+		if (p.second.has_top_rated && temp_v.find(p.first) == temp_v.end())
+		{
+			// The seed must be contained in initial seed set,
+			// because disabled seeds cannot be considered top-rated.
+			u32 seed = p.second.top_rated_seed;
+
+			// We insert all fringes the seed cover into visited set
+			const auto& fs = seed_fringes.find(seed)->second;
+			temp_v.insert(fs.begin(), fs.end());
+			assert(temp_v.find(p.first) != temp_v.end());
+
+			// Add seed to favored set
+			favored.insert(seed);
+		}
+	}
+
+	return favored;
+}
+
+template <typename F, typename D>
+void FringeBlocks<F, D>::set_favored_seeds(const u32* seeds, u32 num)
+{
+	auto favored = select_favored_seeds();
+	favored_seeds.clear();
+	for (u32 i = 0; i < num; ++i)
+	{
+		if (favored.count(seeds[i]) > 0 || get_seed_div_favored(g->afl, seeds[i]))
+			favored_seeds.insert(seeds[i]);
+	}
+}
+
+template <typename F, typename D>
+u32 FringeBlocks<F, D>::cull_queue(u32* seeds, u32 num)
+{
+	// Set containing original seeds
+	const rh::unordered_set<u32> seed_set(seeds, seeds + num);
+
+	auto favored = select_favored_seeds();
+	for (u32 seed : favored)
+		assert(seed_set.find(seed) != seed_set.end());
+
+	// Select seeds to fuzz in this cycle
+	u32 idx = 0;
+	favored_seeds.clear();
+	for (u32 seed : seed_set)
+	{
+		if (favored.find(seed) != favored.end() ||
+			get_seed_div_favored(g->afl, seed))
+			// `cull_queue_div` should be called first
+		{
+			seeds[idx++] = seed;
+			favored_seeds.insert(seed);
+		}
+		else if (aflrun_get_seed_quant(seed) > 0)
+		{ // If the unfavored seed has been fuzzed before
+			if (distrib(gen) >= SKIP_NFAV_OLD_PROB)
+				seeds[idx++] = seed;
+		}
+		else
+		{
+			if (distrib(gen) >= SKIP_NFAV_NEW_PROB)
+				seeds[idx++] = seed;
+		}
+	}
+	return idx;
+}
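+// Note: distrib(gen) is uniform over [0, 99], so with AFL++'s usual
+// SKIP_NFAV_OLD_PROB == 95 and SKIP_NFAV_NEW_PROB == 75 (see config.h), an
+// unfavored seed that was fuzzed before is kept with ~5% probability and an
+// unfavored new seed with ~25% probability.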
+
+u32 cull_queue_unite(u32* seeds, u32 num)
+{
+	// Set containing original seeds
+	const rh::unordered_set<u32> seed_set(seeds, seeds + num);
+
+	u32 idx = 0;
+	for (u32 seed : seed_set)
+	{ // Similar to `cull_queue` above
+		if (path_fringes->favored_seeds.count(seed) > 0 ||
+			path_pro_fringes->favored_seeds.count(seed) > 0 ||
+			reached_targets->favored_seeds.count(seed) > 0 ||
+			get_seed_cov_favored(g->afl, seed) == 2)
+		{
+			seeds[idx++] = seed;
+		}
+		else if (aflrun_get_seed_quant(seed) > 0)
+		{
+			if (distrib(gen) >= SKIP_NFAV_OLD_PROB)
+				seeds[idx++] = seed;
+		}
+		else
+		{
+			if (distrib(gen) >= SKIP_NFAV_NEW_PROB)
+				seeds[idx++] = seed;
+		}
+	}
+
+	return idx;
+}
+
+template <typename T>
+void write_vec(ostream& o, const vector<T>& v)
+{
+	o << '[';
+	for (T e : v)
+	{
+		o << e << ", ";
+	}
+	o << "]";
+}
+
+template <typename T>
+void write_arr(ostream& o, const T* arr, size_t size)
+{
+	o << '[';
+	for (size_t i = 0; i < size; ++i)
+	{
+		o << arr[i] << ", ";
+	}
+	o << "]";
+}
+
+template <typename F, typename D>
+pair<unique_ptr<double[]>, unique_ptr<double[]>>
+	FringeBlocks<F, D>::allocate_ratio(
+	const rh::unordered_map<reach_t, FringeInfo>& fringe_info,
+	const vector<reach_t>& vec_fringes) const
+{
+	assert(fringe_info.size() == vec_fringes.size());
+	size_t num_fringes = vec_fringes.size();
+	struct Elem
+	{
+		reach_t target;
+		rh::unordered_set<reach_t> fringes;
+		Elem(reach_t target, rh::unordered_set<reach_t>&& fringes) :
+			target(target), fringes(std::move(fringes)) {}
+	};
+
+	// We firstly get fringes of each active target
+	vector<Elem> targets_info;
+	for (reach_t t = 0; t < target_to_fringes.size(); ++t)
+	{
+		const auto& tf = target_to_fringes[t];
+
+		// Skip targets without fringe
+		if (tf.empty())
+			continue;
+
+		rh::unordered_set<reach_t> fringes;
+		for (const F& f : tf)
+		{
+			fringes.insert(f.block);
+		}
+		targets_info.emplace_back(t, std::move(fringes));
+	}
+
+	// Allocate weight of each target to its fringes
+	auto static_weights = make_unique<double[]>(num_fringes);
+	auto ret = make_unique<double[]>(num_fringes);
+	for (const Elem& e : targets_info)
+	{
+		rh::unordered_map<reach_t, double> res;
+		dist_block_ratio(e.fringes, e.target, g->get_tw(e.target), res);
+		for (const auto& fw : res)
+		{
+			static_weights[fringe_info.find(fw.first)->second.idx] += fw.second;
+		}
+	}
+
+	double sum = 0;
+	for (size_t i = 0; i < num_fringes; ++i)
+	{
+		double w = static_weights[i];
+		ret[i] = w;
+		sum += w;
+	}
+	for (size_t i = 0; i < num_fringes; ++i)
+	{
+		ret[i] /= sum;
+	}
+	return make_pair<>(std::move(ret), std::move(static_weights));
+}
+
+u32 num_active_seeds = 0;
+void trim_new_cvx(
+	rh::unordered_map<u32, double>& seed_weight, double& all_sum, double total)
+{
+	bool trimed;
+	do
+	{
+		double total_after = total;
+		vector<tuple<u32, double, double>> seed_weight_prev;
+		seed_weight_prev.reserve(seed_weight.size());
+
+		// Flatten the unordered map, also add `prev`,
+		// and calculate total energy after the allocation.
+		for (const auto& sw : seed_weight)
+		{
+			double prev = aflrun_get_seed_quant(sw.first);
+			total_after += prev;
+			seed_weight_prev.emplace_back(sw.first, sw.second, prev);
+		}
+
+		double sum = all_sum;
+		trimed = false;
+		for (const auto& swp : seed_weight_prev)
+		{
+			// If the previous energy is already >= the desired energy calculated
+			// from the desired ratio, we will not allocate energy to the seed,
+			// so it can be removed as an optimization.
+			if (get<2>(swp) >= total_after * get<1>(swp) / sum)
+			{
+				seed_weight.erase(get<0>(swp));
+				all_sum -= get<1>(swp);
+				trimed = true;
+			}
+		}
+
+	// We trim repeatedly until no more trimming happens
+	} while (trimed);
+}
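+// Worked example (illustrative): total = 100, weights {s1: 0.5, s2: 0.5},
+// previous quants {s1: 200, s2: 0}. Then total_after = 300 and s1's desired
+// share is 150, which its previous 200 already exceeds, so s1 is trimmed; in
+// the next round only s2 remains and solve_new_cvx() below assigns all 100
+// units to s2 (a water-filling style allocation).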
+
+vector<pair<u32, double>> solve_new_cvx(
+	const rh::unordered_map<u32, double>& seed_weight, double sum, double total)
+{
+	// Same as above
+	double total_after = total;
+	vector<tuple<u32, double, double>> seed_weight_prev;
+	seed_weight_prev.reserve(seed_weight.size());
+	for (const auto& sw : seed_weight)
+	{
+		double prev = aflrun_get_seed_quant(sw.first);
+		total_after += prev;
+		seed_weight_prev.emplace_back(sw.first, sw.second, prev);
+	}
+
+	vector<pair<u32, double>> ret;
+	for (const auto& swp : seed_weight_prev)
+	{ // After trimming, desired energy must be larger than previous energy
+		double seed_energy = total_after * get<1>(swp) / sum - get<2>(swp);
+		assert(seed_energy > 0);
+
+		// TODO: potential precision problem?
+		ret.emplace_back(get<0>(swp), seed_energy);
+	}
+
+	return ret;
+}
+
+template <typename F, typename D>
+void FringeBlocks<F, D>::record_new_cvx_opt(
+	const vector<pair<reach_t, rh::unordered_set<reach_t>>>& target_fringes,
+	const rh::unordered_map<reach_t, double>& block_weight,
+	const rh::unordered_map<u32, double>& seed_ratio,
+	const vector<pair<u32, double>>& sol) const
+{
+	ofstream out(g->out_dir + "cvx/opt.py");
+	if (!out.is_open())
+		return;
+
+	out << "import numpy as np" << endl;
+
+	// Output normalized weights of targets
+	double sum = 0;
+	for (const auto& t : target_fringes)
+		sum += g->get_tw(t.first);
+	out << "target_weight = np.array([" << endl;
+	out << "# target, ratio" << endl;
+	for (const auto& t : target_fringes)
+		out << "[\"" << g->reachable_names[t.first] <<
+			"\", " << g->get_tw(t.first) / sum << "]," << endl;
+	out << "])" << endl;
+
+	// Output normalized weights of blocks
+	sum = 0;
+	for (const auto& bw : block_weight)
+		sum += bw.second;
+	out << "block_weight = np.array([" << endl;
+	out << "# block, ctx_count, ratio" << endl;
+	for (const auto& bw : block_weight)
+		out << "[\"" << g->reachable_names[bw.first] <<
+			"\", " << block_to_fringes.find(bw.first)->second.size() <<
+			", " << bw.second / sum << "]," << endl;
+	out << "])" << endl;
+
+	out << "opt = np.array([" << endl;
+	out << "# seed, prev, ratio, solution" << endl;
+
+	rh::unordered_set<u32> non_zero_seeds;
+	for (const auto& se : sol)
+	{
+		out << '[' << se.first << ", " << aflrun_get_seed_quant(se.first) <<
+			", " << seed_ratio.find(se.first)->second << ", " <<
+			se.second << "]," << endl;
+		non_zero_seeds.insert(se.first);
+	}
+	for (const auto& sr : seed_ratio)
+	{
+		if (non_zero_seeds.count(sr.first) > 0)
+			continue;
+		out << '[' << sr.first << ", " << aflrun_get_seed_quant(sr.first) <<
+			", " << sr.second << ", " << 0.0 << "]," << endl;
+	}
+	out << "])" << endl;
+}
+
+void record_new_cvx_opt_uni(
+	const vector<pair<reach_t, array<double, 3>>>& target_type_weights,
+	const array<rh::unordered_map<reach_t, double>, 3>& block_weights,
+	const array<rh::unordered_map<u32, double>, 4>& seed_weights,
+	const double* seed_sums, const rh::unordered_map<u32, double>& seed_ratio,
+	const vector<pair<u32, double>>& sol)
+{
+	ofstream out(g->out_dir + "cvx/opt.py");
+	if (!out.is_open())
+		return;
+	out << "import numpy as np" << endl;
+
+	// Output normalized weights of targets for each type
+	double sum = 0;
+	for (const auto& ttw : target_type_weights)
+		for (size_t i = 0; i < 3; ++i)
+			sum += ttw.second[i];
+	out << "target_weights = np.array([" << endl;
+	out << "# target, N ratio, P ratio, T ratio" << endl;
+	for (const auto& ttw : target_type_weights)
+	{
+		out << "[\"" << g->reachable_names[ttw.first] << "\"";
+		for (size_t i = 0; i < 3; ++i)
+			out << ", " << ttw.second[i] / sum;
+		out << "]," << endl;
+	}
+	out << "])" << endl;
+
+	// Output normalized weights of blocks for each mode
+	function<size_t(u32)> ctx_count[3] = {
+		[](u32 s) -> size_t
+		{
+			return path_fringes->block_to_fringes.find(s)->second.size();
+		},
+		[](u32 s) -> size_t
+		{
+			return path_pro_fringes->block_to_fringes.find(s)->second.size();
+		},
+		[](u32 s) -> size_t
+		{
+			return reached_targets->block_to_fringes.find(s)->second.size();
+		},
+	};
+	const char* names = "NPT";
+	for (size_t i = 0; i < 3; ++i)
+	{
+		sum = 0;
+		for (const auto& btw : block_weights[i])
+			sum += btw.second;
+		out << "block_weight_" << names[i] << " = np.array([" << endl;
+		out << "# block, ctx_count, ratio" << endl;
+		for (const auto& btw : block_weights[i])
+			out << "[\"" << g->reachable_names[btw.first] <<
+				"\", " << ctx_count[i](btw.first) <<
+				", " << btw.second / sum << "]," << endl;
+		out << "])" << endl;
+	}
+
+	out << "opt = np.array([" << endl;
+	out << "# seed, prev, ratio, solution, N, P, T, C" << endl;
+	rh::unordered_set<u32> non_zero_seeds;
+	double weight_sums[4] = {0,0,0,0}; double ratio_sum = 0;
+	auto log_seed_weights =
+	[&seed_weights, &out, &weight_sums](u32 seed)
+	{
+		for (size_t i = 0; i < 4; ++i)
+		{
+			auto it = seed_weights[i].find(seed);
+			double val = it == seed_weights[i].end() ? 0.0 : it->second;
+			out << ", " << val;
+			weight_sums[i] += val;
+		}
+	};
+	for (const auto& se : sol)
+	{
+		double ratio = seed_ratio.find(se.first)->second;
+		ratio_sum += ratio;
+		out << '[' << se.first << ", " << aflrun_get_seed_quant(se.first) <<
+			", " << ratio << ", " << se.second;
+		log_seed_weights(se.first);
+		out << "]," << endl;
+		non_zero_seeds.insert(se.first);
+	}
+	for (const auto& sr : seed_ratio)
+	{
+		if (non_zero_seeds.count(sr.first) > 0)
+			continue;
+		ratio_sum += sr.second;
+		out << '[' << sr.first << ", " << aflrun_get_seed_quant(sr.first) <<
+			", " << sr.second << ", " << 0.0;
+		log_seed_weights(sr.first);
+		out << "]," << endl;
+	}
+	out << "])" << endl;
+	out << "# " << ratio_sum;
+	for (size_t i = 0; i < 4; ++i)
+	{
+		out << ' ' << seed_sums[i] << "==" << weight_sums[i];
+	}
+	out << endl;
+}
+
+template <typename F, typename D>
+void FringeBlocks<F, D>::assign_seeds_covered(
+	const rh::unordered_set<u32>& seeds, double total_weight,
+	const rh::unordered_map<u32, u32>& seed_to_idx,
+	rh::unordered_map<u32, double>& seed_weight, double& all_sum) const
+{
+	// For all seeds that cover this context block,
+	// we get their expected performance scores, and calculate their sum.
+	vector<pair<u32, double>> seed_perf_score;
+	double sum = 0;
+	for (u32 s : seeds)
+	{
+		// Skip seeds not selected for fuzzing
+		if (seed_to_idx.count(s) == 0)
+			continue;
+		double e_perf_score = get_seed_perf_score(g->afl, s) *
+			(favored_seeds.find(s) == favored_seeds.end() ?
+				(100 - SKIP_NFAV_OLD_PROB) / 100.0 : 1.0);
+		// Skip seeds with a non-positive score,
+		// which should not normally happen, but we handle it anyway
+		if (e_perf_score <= 0)
+			continue;
+		seed_perf_score.emplace_back(s, e_perf_score);
+		sum += e_perf_score;
+	}
+
+	for (const auto& sps : seed_perf_score)
+	{ // Allocate weight of seeds according to ratio of performance scores
+		double w = total_weight * sps.second / sum;
+		seed_weight[sps.first] += w;
+		all_sum += w;
+	}
+}
+
+rh::unordered_map<u32, double> assign_seeds_coverage(
+	const u32* seeds, u32 num, double cov_sum)
+{
+	vector<pair<u32, double>> seed_perf_score;
+	double sum = 0;
+	for (u32 i = 0; i < num; ++i)
+	{
+		u8 level = get_seed_cov_favored(g->afl, seeds[i]);
+		if (!config.extra_cov && level == 0) // Skip aflrun extra seeds
+			continue;
+		double e_perf_score = get_seed_perf_score(g->afl, seeds[i]) *
+			(level == 2 ? 1.0 : (100 - SKIP_NFAV_OLD_PROB) / 100.0);
+		if (e_perf_score <= 0)
+			continue;
+		seed_perf_score.emplace_back(seeds[i], e_perf_score);
+		sum += e_perf_score;
+	}
+
+	rh::unordered_map<u32, double> ret;
+	for (const auto& sps : seed_perf_score)
+		ret.emplace(sps.first, cov_sum * sps.second / sum);
+	return ret;
+}
+
+template <typename F, typename D>
+pair<rh::unordered_map<u32, double>, double>
+	FringeBlocks<F, D>::assign_seed_no_ctx(
+	const rh::unordered_map<reach_t, double>& block_weight,
+	const rh::unordered_map<u32, u32>& seed_to_idx) const
+{
+	// Here we assign energy from fringe to seed directly, without context pass.
+	rh::unordered_map<u32, double> seed_weight;
+	double all_sum = 0;
+	for (const auto& bw : block_weight)
+	{
+		const auto& ctx_blocks = block_to_fringes.find(bw.first)->second;
+		assert(!ctx_blocks.empty());
+		rh::unordered_set<u32> seeds;
+		for (const F& cb : ctx_blocks)
+		{ // Collect all seeds that cover this fringe
+			assert(cb.block == bw.first);
+			const Info& info = fringes.find(cb)->second;
+			seeds.insert(info.seeds.begin(), info.seeds.end());
+		}
+
+		assign_seeds_covered(
+			seeds, bw.second, seed_to_idx, seed_weight, all_sum);
+	}
+
+	return make_pair(seed_weight, all_sum);
+}
+
+template <typename F, typename D>
+pair<rh::unordered_map<u32, double>, double> FringeBlocks<F, D>::assign_seed_ctx(
+	const rh::unordered_map<reach_t, double>& block_weight,
+	const rh::unordered_map<u32, u32>& seed_to_idx) const
+{
+	// Map context block to weight being allocated from parent block
+	rh::unordered_map<Fringe, double> ctx_block_weight;
+	for (const auto& bw : block_weight)
+	{
+		const auto& ctx_blocks = block_to_fringes.find(bw.first)->second;
+		assert(!ctx_blocks.empty());
+		for (const F& cb : ctx_blocks)
+		{ // Allocate weight of each block uniformly to its context blocks
+			assert(cb.block == bw.first && ctx_block_weight.count(cb) == 0);
+			ctx_block_weight.emplace(cb, bw.second / ctx_blocks.size());
+		}
+	}
+
+	// Map seed to weight being allocated from context blocks it covers
+	rh::unordered_map<u32, double> seed_weight;
+	double all_sum = 0;
+	for (const auto& cbw : ctx_block_weight)
+	{
+		const Info& info = fringes.find(cbw.first)->second;
+		assign_seeds_covered(
+			info.seeds, cbw.second, seed_to_idx, seed_weight, all_sum);
+	}
+
+	return make_pair(seed_weight, all_sum);
+}
+
+template <typename F, typename D>
+pair<rh::unordered_map<u32, double>, double> FringeBlocks<F, D>::assign_seed(
+	const rh::unordered_map<reach_t, double>& block_weight,
+	const rh::unordered_map<u32, u32>& seed_to_idx) const
+{
+	if (config.assign_ctx)
+		return assign_seed_ctx(block_weight, seed_to_idx);
+	else
+		return assign_seed_no_ctx(block_weight, seed_to_idx);
+}
+
+template <typename F, typename D>
+void FringeBlocks<F, D>::assign_energy(
+	u32 num_seeds, const u32* ss, double* ret) const
+{
+	// Map seed to index to the return array
+	rh::unordered_map<u32, u32> seed_to_idx;
+	for (u32 i = 0; i < num_seeds; ++i)
+		seed_to_idx.emplace(ss[i], i);
+	assert(seed_to_idx.size() == num_seeds);
+
+	vector<pair<reach_t, rh::unordered_set<reach_t>>> target_fringes;
+
+	for (reach_t t = 0; t < target_to_fringes.size(); ++t)
+	{ // Iterate all targets with any fringes
+		const auto& tf = target_to_fringes[t];
+		if (tf.empty())
+			continue;
+
+		// Record all fringes a target has
+		rh::unordered_set<reach_t> fringes;
+		for (const F& f : tf)
+			fringes.insert(f.block);
+		target_fringes.emplace_back(t, std::move(fringes));
+	}
+
+	// Map fringe block to weight being allocated from targets
+	rh::unordered_map<reach_t, double> block_weight;
+	for (const auto& e : target_fringes)
+	{
+		dist_block_ratio(
+			e.second, e.first, g->get_tw(e.first), block_weight);
+	}
+
+	rh::unordered_map<u32, double> seed_weight; double all_sum;
+	tie(seed_weight, all_sum) = assign_seed(block_weight, seed_to_idx);
+
+	// Original seed ratio, used for output only
+	rh::unordered_map<u32, double> seed_ratio;
+	for (const auto& sw : seed_weight)
+		seed_ratio.emplace(sw.first, sw.second / all_sum);
+
+	const double total = max<double>(
+		num_active_seeds * config.linear_cycle_energy, config.cycle_energy);
+	trim_new_cvx(seed_weight, all_sum, total);
+	auto sol = solve_new_cvx(seed_weight, all_sum, total);
+
+	fill(ret, ret + num_seeds, 0.0);
+	for (const auto& se : sol)
+		ret[seed_to_idx.find(se.first)->second] = se.second;
+
+	record_new_cvx_opt(target_fringes, block_weight, seed_ratio, sol);
+}
+
+rh::unordered_set<reach_t> strip_ctx(const rh::unordered_set<Fringe>& from)
+{
+	// Record all blocks a target has
+	rh::unordered_set<reach_t> blocks;
+	for (const Fringe& f : from)
+		blocks.insert(f.block);
+	return blocks;
+}
+
+void sum_seed_weight(
+	rh::unordered_map<u32, double>& seed_weight, double& all_sum,
+	const rh::unordered_map<u32, double>& tmp_weight, double tmp_sum)
+{
+	all_sum += tmp_sum;
+	for (const auto& sw : tmp_weight)
+		seed_weight[sw.first] += sw.second;
+}
+
+void assign_energy_unite(u32 num_seeds, const u32* ss, double* ret)
+{
+	// Map seed to index to the return array
+	rh::unordered_map<u32, u32> seed_to_idx;
+	for (u32 i = 0; i < num_seeds; ++i)
+		seed_to_idx.emplace(ss[i], i);
+	assert(seed_to_idx.size() == num_seeds);
+
+	constexpr size_t kNumTypes = 3;
+	// [0]: ctx_fringes; [1]: pro_fringes; [2]: targets
+	using FringeEach = array<rh::unordered_set<reach_t>, kNumTypes>;
+	vector<pair<reach_t, FringeEach>> target_fringes;
+	for (reach_t t = 0; t < g->num_targets; ++t)
+	{ // For each target, we get its fringes from all 3 types, if any
+		FringeEach tf;
+		if (config.unite_ratio[1] > 0)
+			tf[0] = strip_ctx(path_fringes->target_to_fringes[t]);
+		if (config.unite_ratio[2] > 0)
+			tf[1] = strip_ctx(path_pro_fringes->target_to_fringes[t]);
+		if (config.unite_ratio[3] > 0)
+			tf[2] = strip_ctx(reached_targets->target_to_fringes[t]);
+
+		// If the target has no block in any of these, skip it
+		if (tf[0].empty() && tf[1].empty() && tf[2].empty())
+			continue;
+
+		target_fringes.emplace_back(t, std::move(tf));
+	}
+
+	// Map target to weights of 3 types, whose sum should be target weight
+	vector<pair<reach_t, array<double, kNumTypes>>> target_type_weights;
+	for (const auto& e : target_fringes)
+	{
+		array<double, kNumTypes> type_weights; double sum = 0;
+		for (size_t i = 0; i < kNumTypes; ++i)
+		{
+			double ratio = config.unite_ratio[i + 1];
+			if (e.second[i].empty() || ratio == 0)
+			{ // For each non-active type, we skip it by setting weight to zero.
+				type_weights[i] = 0;
+			}
+			else
+			{ // For each active type, we sum and record the ratio.
+				sum += ratio;
+				type_weights[i] = ratio;
+			}
+		}
+		assert(sum > 0);
+
+		// Assign `type_weights` from `tw` according to the ratio
+		double tw = g->get_tw(e.first);
+		for (size_t i = 0; i < kNumTypes; ++i)
+		{
+			type_weights[i] = tw * type_weights[i] / sum;
+		}
+
+		target_type_weights.emplace_back(e.first, std::move(type_weights));
+	}
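+	// Worked example (hypothetical ratios): with config.unite_ratio set to
+	// {1, 2, 4, 8} for {coverage, ctx-fringe, pro-fringe, target} and a target
+	// of weight tw = 1.0 whose pro-fringe set is empty but whose ctx-fringe and
+	// target sets are not, the active ratios are 2 (ctx-fringe) and 8 (target),
+	// so its type_weights become {1.0 * 2 / 10, 0, 1.0 * 8 / 10} = {0.2, 0, 0.8}.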
+	assert(target_fringes.size() == target_type_weights.size());
+
+	// Now we can allocate weight for each block
+	array<rh::unordered_map<reach_t, double>, kNumTypes> block_weights;
+	for (size_t i = 0; i < kNumTypes; ++i)
+	{
+		// For each type, we iterate its active targets,
+		// each of which has a weight and a set of blocks;
+		// we can imagine this to be allocation of
+		// `target_weights` -> `block_weight` in non-unite modes.
+		auto tf_it = target_fringes.begin();
+		auto ttw_it = target_type_weights.begin();
+		for (; tf_it != target_fringes.end(); ++tf_it, ++ttw_it)
+		{
+			assert(tf_it->first == ttw_it->first);
+			double ttw = ttw_it->second[i];
+			if (ttw == 0) // Skip non-active targets
+				continue;
+			dist_block_ratio(
+				tf_it->second[i], tf_it->first, ttw, block_weights[i]);
+		}
+	}
+
+	// Assign seed for each block_weights[i], and sum them together
+	rh::unordered_map<u32, double> seed_weight; double all_sum = 0;
+	array<rh::unordered_map<u32, double>, kNumTypes + 1> type_seed_weight;
+	double type_sum[kNumTypes + 1];
+	tie(type_seed_weight[0], type_sum[0]) =
+		path_fringes->assign_seed(block_weights[0], seed_to_idx);
+	sum_seed_weight(seed_weight, all_sum, type_seed_weight[0], type_sum[0]);
+	tie(type_seed_weight[1], type_sum[1]) =
+		path_pro_fringes->assign_seed(block_weights[1], seed_to_idx);
+	sum_seed_weight(seed_weight, all_sum, type_seed_weight[1], type_sum[1]);
+	tie(type_seed_weight[2], type_sum[2]) =
+		reached_targets->assign_seed(block_weights[2], seed_to_idx);
+	sum_seed_weight(seed_weight, all_sum, type_seed_weight[2], type_sum[2]);
+
+	// Calculate total weight for coverage background according to ratios
+	type_sum[3] = 0; size_t count = 0;
+	for (size_t i = 0; i < kNumTypes; ++i)
+	{
+		if (type_sum[i] > 0)
+		{
+			double ratio = config.unite_ratio[i + 1]; assert(ratio > 0);
+			type_sum[3] += type_sum[i] * config.unite_ratio[0] / ratio;
+			++count;
+		}
+	}
+
+	if (count == 0)
+	{ // If no reachable block is covered, do coverage mode
+		assert(all_sum == 0 && seed_weight.empty());
+		all_sum = 1;
+		seed_weight = assign_seeds_coverage(ss, num_seeds, all_sum);
+	}
+	else
+	{
+		type_sum[3] /= count; // Take the average
+
+		if (type_sum[3] > 0)
+		{
+			type_seed_weight[3] =
+				assign_seeds_coverage(ss, num_seeds, type_sum[3]);
+			sum_seed_weight(
+				seed_weight, all_sum, type_seed_weight[3], type_sum[3]);
+		}
+	}
+
+	// Original seed ratio, used for output only
+	rh::unordered_map<u32, double> seed_ratio;
+	for (const auto& sw : seed_weight)
+		seed_ratio.emplace(sw.first, sw.second / all_sum);
+
+	// Finally, with the combined `seed_weight` computed just like before,
+	// we solve it to obtain the final energy assignment.
+	const double total = max<double>(
+		num_active_seeds * config.linear_cycle_energy, config.cycle_energy);
+	trim_new_cvx(seed_weight, all_sum, total);
+	auto sol = solve_new_cvx(seed_weight, all_sum, total);
+
+	fill(ret, ret + num_seeds, 0.0);
+	for (const auto& se : sol)
+		ret[seed_to_idx.find(se.first)->second] = se.second;
+
+	record_new_cvx_opt_uni(target_type_weights, block_weights,
+		type_seed_weight, type_sum, seed_ratio, sol);
+}
+
+unique_ptr<AFLRunGraph> graph = nullptr;
+
+rh::unordered_map<reach_t, double> bb_to_avg_dists;
+
+rh::unordered_map<string, reach_t> name_to_id, fname_to_id;
+vector<string> id_to_fname;
+
+// Array of traces for each target
+// e.g. (*all_exec_paths[id])[target].data()
+vector<unique_ptr<reach_t[]>> all_exec_paths;
+
+template <>
+inline void log_fringe<Fringe>(ostream& out, const Fringe& f)
+{
+	if (f.block == g->num_reachables)
+	{ // For printing cluster only
+		assert(f.context == 0);
+		out << "primary";
+	}
+	else
+	{
+		char hex_buf[4];
+		snprintf(hex_buf, sizeof(hex_buf), "%.2X", f.context);
+		out << g->reachable_names[f.block] << ',' << hex_buf;
+	}
+}
+
+template <>
+inline void log_fringe<reach_t>(ostream& out, const reach_t& f)
+{
+	out << g->reachable_names[f];
+}
+
+// template<typename F>
+// F target_trace_to_fringe(const T* targets, size_t idx);
+
+// template<>
+// Fringe target_trace_to_fringe<Fringe, ctx_t>(
+// 	const ctx_t* targets, size_t idx)
+// {
+// 	// Pseudo fringe representing primary map
+// 	if (idx == 0)
+// 		return Fringe(g->num_reachables, 0);
+// 	else
+// 	{
+// 		const ctx_t* t = targets + (idx - 1);
+// 		return Fringe(t->block, t->call_ctx);
+// 	}
+// }
+
+// template<>
+// reach_t target_trace_to_fringe<reach_t, reach_t>(
+// 	const reach_t* targets, size_t idx)
+// {
+// 	return idx == 0 ? g->num_reachables : targets[idx - 1];
+// }
+
+struct ClusterPair
+{
+private:
+	size_t fst; size_t snd;
+public:
+	inline size_t get_fst() const noexcept
+	{
+		return fst;
+	}
+	inline size_t get_snd() const noexcept
+	{
+		return snd;
+	}
+	bool operator==(const ClusterPair& rhs) const
+	{
+		return this->fst == rhs.fst && this->snd == rhs.snd;
+	}
+	explicit ClusterPair(size_t c1, size_t c2)
+	{ // Make the pair order insensitive such that `fst <= snd` always holds
+		assert(c1 != c2);
+		if (c1 < c2)
+		{
+			fst = c1;
+			snd = c2;
+		}
+		else
+		{
+			fst = c2;
+			snd = c1;
+		}
+	}
+};
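+
+// Usage sketch for ClusterPair: because the constructor sorts its arguments,
+// ClusterPair(3, 1) and ClusterPair(1, 3) compare equal (and hash identically,
+// see hash<ClusterPair> below), e.g.:
+//   ClusterPair a(3, 1), b(1, 3);
+//   assert(a == b && a.get_fst() == 1 && a.get_snd() == 3);
+// so the keys of `pair_supp_cnt` are order-insensitive.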
+
+template<typename F>
+class Clusters
+{
+private:
+	rh::unordered_map<F, size_t> target_to_idx;
+	vector<rh::unordered_set<F>> clusters; // Reverse of `target_to_idx`
+	vector<unique_ptr<u8[]>> cluster_maps;
+	vector<unique_ptr<void*[]>> cluster_tops;
+
+	// Each pair in the vector stores up to 64 `and` bit sequences: the vector
+	// holds one `u64` per virgin map (including the primary map), and each bit
+	// position across those values forms one sequence; the first `u64` is the
+	// `or` of all values in the `vector`, so all-zero sequences can be skipped.
+	vector<pair<u64, unique_ptr<vector<u64>>>> and_bit_seqs;
+
+	// Support count for single target or a pair of targets
+	rh::unordered_map<ClusterPair, double> pair_supp_cnt;
+	vector<double> supp_cnt;
+
+	bool cluster_valid(size_t cluster) const
+	{
+		return cluster == 0 || cluster_maps[cluster] != nullptr;
+	}
+
+	// Merge cluster `src` to cluster `dst`;
+	// after this function, `src` cluster is invalid.
+	void merge_cluster(size_t src, size_t dst)
+	{
+		// `src` cannot be primary cluster, and cannot be invalid cluster
+		assert(src != dst && cluster_maps[src] != nullptr && cluster_valid(dst));
+		rh::unordered_set<F> src_cluster(std::move(clusters[src]));
+		for (F t : src_cluster)
+			target_to_idx.find(t)->second = dst;
+		clusters[dst].insert(src_cluster.begin(), src_cluster.end());
+		cluster_maps[src] = nullptr; cluster_tops[src] = nullptr;
+		// We don't clean support counts with `src` here,
+		// because they cannot be used again.
+	}
+
+	inline static bool supp_cnt_enough(double lhs, double both)
+	{
+		return config.single_supp_thr ?
+			lhs >= config.supp_cnt_thr : both >= config.supp_cnt_thr;
+	}
+
+	// Try to merge clusters, if any; return true iff merge happens
+	bool try_merge()
+	{
+		// Store each LHS->RHS to be merged and corresponding confidence value
+		rh::unordered_map<size_t, pair<size_t, double>> to_merge;
+
+		for (const auto& p : pair_supp_cnt)
+		{ // Iterate each pair support count
+			size_t fst = p.first.get_fst();
+			size_t snd = p.first.get_snd();
+
+			// Check if snd->fst reaches the merge threshold
+			double snd_supp_cnt = supp_cnt[snd];
+			if (supp_cnt_enough(snd_supp_cnt, p.second))
+			{
+				double conf = p.second / snd_supp_cnt;
+				if (conf >= config.conf_thr)
+				{ // If snd->fst reaches the merge threshold, merge snd into fst
+					auto p2 = make_pair(fst, conf);
+					auto p3 = to_merge.emplace(snd, p2);
+					// If existing element has less confidence, replace
+					if (!p3.second && p3.first->second.second < conf)
+						p3.first->second = p2;
+				}
+			}
+
+			// Note that we should not merge primary map to anything
+			if (fst == 0) continue;
+
+			// Check fst->snd, same as above
+			double fst_supp_cnt = supp_cnt[fst];
+			if (supp_cnt_enough(fst_supp_cnt, p.second))
+			{
+				double conf = p.second / fst_supp_cnt;
+				if (conf >= config.conf_thr)
+				{
+					auto p2 = make_pair(snd, conf);
+					auto p3 = to_merge.emplace(fst, p2);
+					if (!p3.second && p3.first->second.second < conf)
+						p3.first->second = p2;
+				}
+			}
+		}
+
+		if (to_merge.empty()) return false;
+
+		// TODO: merging may be optimized using Kosaraju's algorithm.
+		for (const auto& p : to_merge)
+		{
+			size_t src = p.first;
+			size_t dst = p.second.first;
+			// We are going to merge src to dst, but dst can already be invalid,
+			// so we need to walk to find a valid cluster to merge
+			while (!cluster_valid(dst))
+			{ // Walk through the graph until `dst` is valid
+				dst = to_merge.find(dst)->second.first;
+			}
+			// If they finally become same cluster, we don't merge
+			if (src != dst)
+				merge_cluster(src, dst);
+		}
+
+		clean_supp_cnts();
+
+		return true;
+	}
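+
+	// Worked example for try_merge (hypothetical counts, with single_supp_thr
+	// set, supp_cnt_thr = 10 and conf_thr = 0.9): if supp_cnt[snd] = 20 and the
+	// pair count is 19, then snd qualifies (20 >= 10) and conf = 19 / 20 = 0.95
+	// >= 0.9, so cluster `snd` is scheduled to merge into `fst`; with a pair
+	// count of 17, the confidence 0.85 falls below the threshold and no merge
+	// is recorded.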
+
+public:
+
+	// Index 0 represents the primary map, which is not stored here.
+	Clusters() : clusters(1), cluster_maps(1), cluster_tops(1), supp_cnt(1) {}
+
+	void clean_supp_cnts()
+	{
+		vector<ClusterPair> to_remove;
+		for (const auto& p : pair_supp_cnt)
+		{
+			if (!cluster_valid(p.first.get_fst()) ||
+				!cluster_valid(p.first.get_snd()))
+				to_remove.push_back(p.first);
+		}
+		for (const auto& p : to_remove)
+		{
+			pair_supp_cnt.erase(p);
+		}
+	}
+
+	// Given a context-sensitive target, return corresponding cluster index;
+	// this may also create a new cluster, for example, when target is new.
+	size_t get_cluster(F target)
+	{
+		/*
+		Currently `get_cluster` allocates a new cluster for each distinct target,
+		but this is going to change once the clustering algorithm is implemented.
+		*/
+		size_t num_clusters = cluster_maps.size();
+		auto res = target_to_idx.emplace(target, num_clusters);
+		if (res.second)
+		{
+			auto v = make_unique<u8[]>(g->map_size);
+			fill(v.get(), v.get() + g->map_size, 255);
+			cluster_maps.push_back(std::move(v));
+			cluster_tops.push_back(make_unique<void*[]>(g->map_size));
+			clusters.emplace_back(initializer_list<F>{target});
+			supp_cnt.push_back(0);
+			return num_clusters;
+		}
+		else
+		{
+			return res.first->second;
+		}
+	}
+	// Return the virgin map of the given cluster; the cluster id must be less than the number of clusters
+	u8* get_virgin_map(size_t cluster) const
+	{
+		return cluster_maps[cluster].get();
+	}
+	void** get_top_rated(size_t cluster) const
+	{
+		return cluster_tops[cluster].get();
+	}
+	const rh::unordered_set<F>& get_targets(size_t cluster) const
+	{
+		return clusters[cluster];
+	}
+	size_t size(void) const
+	{
+		return clusters.size();
+	}
+	size_t get_all_tops(void*** ret_tops, u8 mode) const
+	{
+		// Instead of getting all top_rated maps,
+		// we only get the ones corresponding to fringe blocks of the current state.
+		const rh::unordered_map<reach_t, rh::unordered_set<Fringe>>* blocks;
+		switch (mode)
+		{
+		case AFLRunState::kFringe:
+			blocks = &path_fringes->block_to_fringes;
+			break;
+		case AFLRunState::kProFringe:
+			blocks = &path_pro_fringes->block_to_fringes;
+			break;
+		case AFLRunState::kTarget:
+			blocks = &reached_targets->block_to_fringes;
+			break;
+		default:
+			abort();
+		}
+		size_t idx = 0;
+		bo::dynamic_bitset<> visited_clusters(size());
+		for (const auto& b : *blocks)
+		{
+			auto it = target_to_idx.find(b.first);
+			if (it == target_to_idx.end() || visited_clusters[it->second] ||
+				cluster_tops[it->second] == nullptr)
+				continue;
+
+			visited_clusters[it->second] = true;
+			ret_tops[idx++] = cluster_tops[it->second].get();
+		}
+		return idx;
+	}
+	void add_bit_seq(u64 or_all, unique_ptr<vector<u64>>&& seq)
+	{
+		and_bit_seqs.emplace_back(or_all, std::move(seq));
+	}
+	void commit_bit_seqs(const size_t* clusters, size_t num)
+	{
+		if (and_bit_seqs.empty()) return;
+
+		// Sequences of cluster indices at which the bit arrays have ones
+		vector<vector<size_t>> sequences;
+
+		for (const auto& seq : and_bit_seqs)
+		{
+			u64 or_all = seq.first;
+			assert(seq.second->size() == num);
+			for (size_t i = 0; or_all != 0; ++i, or_all >>= 1)
+			{ // Iterate each bit of `or_all`, and process these `1` bits
+				if ((or_all & 1) == 0)
+					continue;
+
+				vector<size_t> sequence;
+				size_t j = 0; auto it = seq.second->begin();
+				for (; it != seq.second->end(); ++it, ++j)
+				{ // Iterate bit sequence `i`
+					if (((*it) & (1uLL << i)) != 0uLL)
+					{
+						sequence.push_back(clusters[j]);
+					}
+				}
+				assert(!sequence.empty()); // Sequence must have at least one `1`
+				sequences.push_back(std::move(sequence));
+			}
+		}
+		and_bit_seqs.clear();
+		// If counting by seed, each sequence only contributes a fractional count.
+		double w_each = config.count_seed ? 1.0 / sequences.size() : 1.0;
+
+		bool if_try = false;
+		for (const auto& seq : sequences)
+		{
+			for (auto i = seq.begin(); i != seq.end(); ++i)
+			{ // For each cluster, increment support count
+				double cnt_after = (supp_cnt[*i] += w_each);
+				if_try = if_try || cnt_after >= config.supp_cnt_thr;
+				for (auto j = i + 1; j != seq.end(); ++j)
+				{ // For each cluster pair, increment pair support count
+					pair_supp_cnt[ClusterPair(*i, *j)] += w_each;
+				}
+			}
+		}
+
+		if (if_try)
+		{ // Only try to merge if there is any support count >= threshold
+			while (try_merge()) {}
+		}
+
+	}
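+	// Decoding sketch for commit_bit_seqs (hypothetical values): with num = 3
+	// and clusters = {0, 4, 7}, a stored sequence {0b01, 0b11, 0b01} with
+	// or_all = 0b11 encodes two bit columns: column 0 is set in all three
+	// values and yields the sequence {0, 4, 7}, while column 1 is set only in
+	// the middle value and yields {4}; each decoded sequence then increments
+	// the single and pairwise support counts by `w_each`.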
+	// Move `b` from its cluster to the primary cluster;
+	// if the original cluster becomes empty, remove that cluster.
+	void invalidate_div_block(F b)
+	{
+		// Find corresponding cluster
+		auto it = target_to_idx.find(b);
+		auto& cluster = clusters[it->second];
+		assert(cluster.find(b) != cluster.end());
+
+		// Move `b` to primary map
+		cluster.erase(b);
+		clusters.front().insert(b);
+
+		if (cluster.empty())
+		{ // If `b` was the last block in the cluster, we remove the cluster
+			cluster_maps[it->second] = nullptr;
+			cluster_tops[it->second] = nullptr;
+		}
+		it->second = 0;
+	}
+	void remove_div_block(F b)
+	{ // To remove a div block, we need to delete the corresponding
+		// `target_to_idx` entry; if the cluster becomes empty, we delete it too.
+
+		// Find corresponding cluster
+		auto it = target_to_idx.find(b);
+		auto& cluster = clusters[it->second];
+		assert(cluster.find(b) != cluster.end());
+
+		cluster.erase(b);
+		if (cluster.empty())
+		{
+			cluster_maps[it->second] = nullptr;
+			cluster_tops[it->second] = nullptr;
+		}
+		target_to_idx.erase(it);
+	}
+	void print(ostream& out) const
+	{
+		out << "Clusters" << endl;
+		size_t c = 0; auto it = clusters.begin();
+		for (; it != clusters.end(); ++it, ++c)
+		{
+			const auto& cluster = *it;
+			if (cluster.empty())
+				continue;
+			out << c << " | ";
+			for (const F& t : cluster)
+			{
+				log_fringe<F>(out, t); out << ' ';
+			}
+			out << endl;
+		}
+		out << "Confidence Values" << endl;
+
+		vector<ClusterPair> to_erase;
+		for (const auto& p : pair_supp_cnt)
+		{
+			size_t fst = p.first.get_fst();
+			size_t snd = p.first.get_snd();
+			assert(cluster_valid(fst) && cluster_valid(snd));
+			double fst_cnt = supp_cnt[fst];
+			double snd_cnt = supp_cnt[snd];
+
+			out << fst << "->" << snd << " | " << p.second << " / " <<
+				fst_cnt << " = " << p.second / fst_cnt << endl;
+			out << snd << "->" << fst << " | " << p.second << " / " <<
+				snd_cnt << " = " << p.second / snd_cnt << endl;
+		}
+	}
+};
+
+#ifdef AFLRUN_CTX_DIV
+Clusters<Fringe> clusters;
+inline Fringe to_cluster_target(const ctx_t* t)
+{
+	return Fringe(t->block, t->call_ctx);
+}
+inline Fringe to_cluster_target(const Fringe& t)
+{
+	return t;
+}
+#else
+Clusters<reach_t> clusters;
+inline reach_t to_cluster_target(const ctx_t* t)
+{
+	return t->block;
+}
+inline reach_t to_cluster_target(const Fringe& t)
+{
+	return t.block;
+}
+#endif
+
+// TODO: context sensitive diversity blocks
+template <typename F>
+struct DiversityBlocks
+{
+	// Map seed to currently active diversity blocks
+	rh::unordered_map<u32, rh::unordered_set<F>> seed_blocks;
+
+	// Map diversity block to seeds that cover it
+	rh::unordered_map<F, rh::unordered_set<u32>> block_seeds;
+
+	// Both unordered maps above must not contain any empty value
+
+	u8* div_switch; // Shared Memory
+
+	// `num_invalid`: number of diversity blocks whose seed count has reached
+	// the threshold, targets not included;
+	// `num_fringes`: number of fringe diversity blocks currently tracked,
+	// including invalid ones, excluding targets.
+	size_t num_invalid, num_fringes;
+
+	explicit DiversityBlocks(u8* div_switch)
+		: div_switch(div_switch), num_invalid(0), num_fringes(0) {}
+
+	// For each new seed, update its coverage of active diversity blocks
+	void div_coverage(const u8* bitmap, u32 seed,
+		const rh::unordered_set<reach_t>* new_criticals = nullptr,
+		const rh::unordered_set<reach_t>* new_bits_targets = nullptr);
+
+	// TODO: add and delete diversity blocks when new fringe is added or deleted
+	void switch_on(F f);
+	void switch_off(F f);
+	void remove_seed(u32 seed);
+
+	void print(ostream& out) const;
+};
+
+template <>
+void DiversityBlocks<reach_t>::div_coverage(const u8 *bitmap, u32 seed,
+	const rh::unordered_set<reach_t>* new_criticals,
+	const rh::unordered_set<reach_t>* new_bits_targets)
+{ // Similar to `fringe_coverage`
+	if (config.no_diversity)
+		return;
+	assert(seed_blocks.find(seed) == seed_blocks.end());
+	rh::unordered_set<reach_t> blocks;
+	vector<reach_t> to_invalidate;
+	for (auto& b : block_seeds)
+	{
+		bool is_new_critical = new_criticals ?
+			(new_criticals->count(b.first) != 0) : false;
+		bool is_new_bits_targets = new_bits_targets ?
+			(new_bits_targets->count(b.first) != 0) : true;
+
+		assert(IS_SET(div_switch, b.first));
+		if ((is_new_critical || is_new_bits_targets) && IS_SET(bitmap, b.first))
+		{
+			b.second.insert(seed);
+			// We use `==` rather than `>=` so that already-invalid blocks are not invalidated again
+			if (b.second.size() == config.div_seed_thr &&
+				b.first >= g->num_targets)
+			{ // If the number of seeds reaches the threshold for a fringe, invalidate it
+				to_invalidate.push_back(b.first);
+				++num_invalid;
+			}
+			blocks.insert(b.first);
+		}
+	}
+	if (!blocks.empty())
+		seed_blocks.emplace(seed, std::move(blocks));
+
+	// For invalid diversity blocks, we only merge them into the primary cluster,
+	// so that they will not contribute any new extra seeds.
+	// We don't turn them off because we don't want to lose all previous seeds.
+	if (!to_invalidate.empty())
+	{
+		for (reach_t b : to_invalidate)
+			clusters.invalidate_div_block(b);
+		clusters.clean_supp_cnts();
+	}
+}
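+
+// Example of the invalidation rule above: with config.div_seed_thr = 100
+// (hypothetical value), the 100th seed covering a non-target diversity block
+// raises its seed count to the threshold exactly once, so the block is merged
+// into the primary cluster; later seeds still record coverage but never
+// re-trigger the invalidation, because the comparison uses `==` rather than `>=`.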
+
+template <>
+void DiversityBlocks<reach_t>::switch_on(reach_t f)
+{
+	// If already switched on, this function does nothing
+	if (!block_seeds.emplace(f, rh::unordered_set<u32>()).second)
+		return;
+	div_switch[f / 8] |= 1 << (f % 8);
+
+	// A covering seed will be added very soon, so the empty value does not matter
+	if (f >= g->num_targets)
+		++num_fringes;
+}
+
+template <>
+void DiversityBlocks<reach_t>::switch_off(reach_t f)
+{
+	auto it = block_seeds.find(f);
+	assert(f >= g->num_targets && IS_SET(div_switch, f));
+	div_switch[f / 8] &= ~(1 << (f % 8));
+
+	for (u32 s : it->second)
+	{ // Delete the block in all seeds
+		auto it2 = seed_blocks.find(s);
+		it2->second.erase(f);
+		if (it2->second.empty())
+			seed_blocks.erase(it2);
+	}
+	if (it->second.size() >= config.div_seed_thr)
+		--num_invalid;
+	--num_fringes;
+	block_seeds.erase(it);
+	clusters.remove_div_block(f);
+}
+
+template <>
+void DiversityBlocks<reach_t>::remove_seed(u32 seed)
+{
+	auto it = seed_blocks.find(seed);
+	if (it == seed_blocks.end())
+		return;
+	assert(!it->second.empty());
+	for (reach_t b : it->second)
+	{
+		auto& seeds = block_seeds.find(b)->second;
+		seeds.erase(seed); assert(!seeds.empty());
+	}
+	seed_blocks.erase(it);
+}
+
+template<>
+void DiversityBlocks<reach_t>::print(ostream& out) const
+{
+	out << "Diversity" << endl;
+	size_t num_reached = 0, num_non_targets = 0;
+	for (const auto& b : block_seeds)
+	{
+		bool target = b.first < g->num_targets;
+		size_t s = b.second.size();
+		bool reached = s >= config.div_seed_thr;
+		if (!target)
+		{
+			++num_non_targets;
+			if (reached) ++num_reached;
+		}
+		out << g->reachable_names[b.first] << " | " << s <<
+			(target ? " T" : (reached ? " R" : "")) << endl;
+	}
+	assert(num_reached == num_invalid && num_fringes == num_non_targets);
+}
+
+unique_ptr<DiversityBlocks<reach_t>> div_blocks = nullptr;
+
+using group_t = reach_t;
+class TargetGrouper
+{
+public:
+	friend void ::aflrun_init_groups(reach_t num_targets);
+private:
+	static rh::unordered_set<reach_t> all_targets;
+
+	unique_ptr<group_t[]> target_to_group;
+	vector<rh::unordered_set<reach_t>> groups;
+
+	// some memory pool to save allocation time
+	vector<reach_t> subgroups_;
+	vector<reach_t> sizes_;
+	vector<u8> covered_groups_;
+	vector<group_t> covered_groups_arr_;
+public:
+	explicit TargetGrouper()
+	{
+		target_to_group = make_unique<group_t[]>(g->num_targets);
+		fill(target_to_group.get(), target_to_group.get() + g->num_targets, 0);
+		groups.emplace_back(all_targets);
+		subgroups_.resize(g->num_targets);
+		sizes_.resize(1);
+		covered_groups_.resize(1);
+		covered_groups_arr_.resize(1);
+	}
+
+	// Pre: targets must be unique
+	template<typename D>
+	void add_reachable(
+		const rh::unordered_map<reach_t, rh::unordered_set<D>>& decs)
+	{
+		reach_t* subgroups = subgroups_.data();
+		reach_t* sizes = sizes_.data();
+		u8* covered_groups = covered_groups_.data();
+		group_t* covered_groups_arr = covered_groups_arr_.data();
+		const reach_t num_targets = g->num_targets;
+
+		size_t group_size = groups.size();
+		// Map each group index into all elements in `targets` belonging to it
+		fill(sizes, sizes + group_size, 0);
+		fill(covered_groups, covered_groups + group_size, 0);
+		size_t num_covered_groups = 0;
+		for (const auto& t : decs)
+		{
+			// Get group idx that the target belongs to
+			group_t g = target_to_group[t.first];
+			// Append the target to the corresponding subgroup
+			subgroups[g * num_targets + sizes[g]++] = t.first;
+			if (!covered_groups[g])
+			{
+				covered_groups[g] = 1;
+				covered_groups_arr[num_covered_groups++] = g;
+			}
+		}
+
+		// For each subgroup that splits an existing group, we perform the cut
+		for (size_t i = 0; i < num_covered_groups; ++i)
+		{
+			group_t g = covered_groups_arr[i];
+			size_t size = sizes[g];
+			assert(0 < size);
+			if (size < groups[g].size())
+			{
+				const reach_t* subgroup = subgroups + g * num_targets;
+				group_t new_idx = groups.size();
+				groups.emplace_back(subgroup, subgroup + size);
+				for (size_t j = 0; j < size; ++j)
+				{
+					groups[g].erase(subgroup[j]);
+					target_to_group[subgroup[j]] = new_idx;
+				}
+			}
+		}
+
+		group_size = groups.size();
+		subgroups_.resize(group_size * num_targets);
+		sizes_.resize(group_size);
+		covered_groups_.resize(group_size);
+		covered_groups_arr_.resize(group_size);
+	}
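+
+	// Worked refinement example (hypothetical targets): starting from the
+	// single group {0, 1, 2, 3}, a call to add_reachable whose decisives are
+	// keyed by targets {1, 3} splits it into {0, 2} (keeping the old index)
+	// and {1, 3} (a new index), because the covered subgroup is a proper
+	// subset of the existing group; a later call keyed by {1, 3} again leaves
+	// the partition unchanged.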
+
+	inline group_t to_group(reach_t target) const
+	{
+		return target_to_group[target];
+	}
+
+	inline const rh::unordered_set<reach_t>& to_targets(group_t group) const
+	{
+		return groups[group];
+	}
+
+	inline size_t size() const
+	{
+		return groups.size();
+	}
+
+	// Separate given set of targets according to current group
+	template<typename D>
+	vector<rh::unordered_set<reach_t>> separate(
+		const rh::unordered_map<reach_t, rh::unordered_set<D>>& decs) const
+	{
+		rh::unordered_set<reach_t> targets;
+		for (const auto& td : decs)
+		{
+			targets.insert(td.first);
+		}
+		vector<rh::unordered_set<reach_t>> ret;
+		while (!targets.empty())
+		{
+			reach_t t = *targets.begin();
+			rh::unordered_set<reach_t> group = to_targets(to_group(t));
+#ifndef NDEBUG
+			size_t prev_size = targets.size();
+#endif
+			for (reach_t e : group)
+				targets.erase(e);
+			// Check that all group elements removed are indeed in `targets`
+			assert(targets.size() + group.size() == prev_size);
+
+			ret.push_back(std::move(group));
+		}
+		return ret;
+	}
+
+	void slow_check() const
+	{
+		for (reach_t t = 0; t < g->num_targets; ++t)
+		{
+			group_t g = target_to_group[t];
+			assert(groups.at(g).find(t) != groups.at(g).end());
+			for (size_t i = 0; i < groups.size(); ++i)
+			{
+				if (i == g)
+					continue;
+				assert(groups[i].find(t) == groups[i].end());
+			}
+		}
+	}
+};
+
+template <typename F, typename D>
+void FringeBlocks<F, D>::group()
+{
+	grouper = make_unique<TargetGrouper>();
+	// For each fringe, we add its associated targets to the grouper
+	for (const auto& f : fringes)
+	{
+		grouper->add_reachable<D>(f.second.decisives);
+	}
+}
+
+rh::unordered_set<reach_t> TargetGrouper::all_targets;
+
+} // namespace
+
+size_t hash<Fringe>::operator()(const Fringe& p) const noexcept
+{
+	size_t seed = 0;
+	bo::hash_combine(seed, p.block);
+	bo::hash_combine(seed, p.context);
+	return seed;
+}
+
+size_t hash<SeedFringes>::operator()(const SeedFringes& p) const noexcept
+{
+	const u8* ptr = p.bitmap.get();
+	return bo::hash_range(ptr, ptr + p.bitmap_size);
+}
+
+size_t hash<pair<reach_t, reach_t>>::operator()(
+	const pair<reach_t, reach_t>& p) const noexcept
+{
+	size_t seed = 0;
+	bo::hash_combine(seed, p.first);
+	bo::hash_combine(seed, p.second);
+	return seed;
+}
+
+size_t hash<ClusterPair>::operator()(
+	const ClusterPair& p) const noexcept
+{
+	size_t seed = 0;
+	bo::hash_combine(seed, p.get_fst());
+	bo::hash_combine(seed, p.get_snd());
+	return seed;
+}
+
+/* ----- Functions called at initialization ----- */
+
+void aflrun_init_groups(reach_t num_targets)
+{
+	for (reach_t t = 0; t < num_targets; ++t)
+	{
+		TargetGrouper::all_targets.insert(t);
+	}
+}
+
+void aflrun_init_fringes(reach_t num_reachables, reach_t num_targets)
+{
+	path_fringes = make_unique<FringeBlocks<Fringe, Fringe>>(num_targets);
+	path_pro_fringes = make_unique<FringeBlocks<Fringe, reach_t>>(num_targets);
+	reached_targets = make_unique<FringeBlocks<Fringe, u8>>(num_targets);
+}
+
+void aflrun_init_globals(void* afl, reach_t num_targets, reach_t num_reachables,
+		reach_t num_ftargets, reach_t num_freachables,
+		u8* virgin_reachables, u8* virgin_freachables, u8* virgin_ctx,
+		char** reachable_names, reach_t** reachable_to_targets,
+		reach_t* reachable_to_size, const char* out_dir,
+		const double* target_weights, u32 map_size, u8* div_switch,
+		const char* cycle_time)
+{
+	assert(g == nullptr);
+	g = make_unique<AFLRunGlobals>(num_targets, num_reachables,
+		num_ftargets, num_freachables, virgin_reachables,
+		virgin_freachables, virgin_ctx, reachable_names,
+		reachable_to_targets, reachable_to_size, out_dir,
+		target_weights, map_size, afl, get_cur_time(),
+		cycle_time == NULL ? 0 : strtoull(cycle_time, NULL, 10));
+	div_blocks = make_unique<DiversityBlocks<reach_t>>(div_switch);
+}
+
+void aflrun_load_freachables(const char* temp_path,
+	reach_t* num_ftargets, reach_t* num_freachables)
+{
+	string temp(temp_path);
+	if (temp.back() != '/')
+		temp.push_back('/');
+	ifstream fd(temp + "Freachable.txt"); assert(fd.is_open());
+	string line;
+	getline(fd, line);
+	size_t idx = line.find(','); assert(idx != string::npos);
+	*num_ftargets = strtoul(line.c_str(), NULL, 10);
+	*num_freachables = strtoul(line.c_str() + idx + 1, NULL, 10);
+
+	reach_t i = 0;
+	while (getline(fd, line))
+	{
+		fname_to_id.emplace(line, i++);
+		id_to_fname.push_back(std::move(line));
+	}
+
+	assert(i == *num_freachables && i == fname_to_id.size());
+}
+
+void aflrun_load_edges(const char* temp_path, reach_t num_reachables)
+{
+	string temp(temp_path);
+	if (temp.back() != '/')
+		temp.push_back('/');
+	graph = make_unique<BasicBlockGraph>(
+		(temp + "BBedges.txt").c_str(), num_reachables);
+
+	ifstream fd(temp + "Chash.txt"); assert(fd.is_open());
+	string line;
+	while (getline(fd, line))
+	{
+		size_t idx1 = line.find(','); assert(idx1 != string::npos);
+		size_t idx2 = line.find('|'); assert(idx2 != string::npos);
+		auto call_edge = make_pair<reach_t, reach_t>(
+			strtoul(line.c_str(), NULL, 10),
+			strtoul(line.c_str() + idx1 + 1, NULL, 10));
+		graph->call_hashes[call_edge].push_back(
+			strtoul(line.c_str() + idx2 + 1, NULL, 10));
+	}
+}
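+
+// As parsed above, each line of Chash.txt has the form "src,dst|hash"; a
+// (hypothetical) line "12,34|98765" appends hash 98765 to the call edge from
+// reachable block 12 to block 34, and one edge may appear on several lines
+// when a block calls the same function more than once.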
+
+void aflrun_load_dists(const char* dir, reach_t num_targets,
+	reach_t num_reachables, char** reachable_names)
+{
+	bb_to_dists.resize(num_reachables);
+
+	// Convert reachable name to id in O(1)
+	for (reach_t i = 0; i < num_reachables; i++)
+	{
+		name_to_id.emplace(reachable_names[i], i);
+	}
+
+	string path(dir);
+	if (path.back() != '/')
+		path.push_back('/');
+	path += "distance.cfg/";
+	for (reach_t t = 0; t < num_targets; ++t)
+	{
+		ifstream cf(path + to_string(t) + ".txt"); assert(cf.is_open());
+		string line;
+		while (getline(cf, line))
+		{
+			// get name and dist
+			size_t pos = line.find(","); assert(pos != string::npos);
+			string bb_name = line.substr(0, pos);
+			double bb_dis = atof(line.substr(pos + 1, line.length()).c_str());
+
+			// update name and dist into global data structure
+			assert(name_to_id.find(bb_name) != name_to_id.end());
+			reach_t block = name_to_id.find(bb_name)->second;
+			auto tmp = bb_to_dists[block].find(t);
+			if (tmp == bb_to_dists[block].end())
+			{
+				bb_to_dists[block].emplace(t, bb_dis);
+			}
+			else if (tmp->second > bb_dis)
+			{
+				tmp->second = bb_dis; // we get minimum of all distances
+			}
+		}
+		cf.close();
+	}
+
+	// calculate the average distance among all targets
+	// TODO: calculate the average lazily
+	rh::unordered_map<reach_t, double> dists;
+	for (reach_t bb = 0; bb < num_reachables; ++bb)
+	{
+		double sum = 0.0; size_t count = 0;
+		for (reach_t t = 0; t < num_targets; ++t)
+		{
+			auto d = bb_to_dists[bb].find(t);
+			if (d != bb_to_dists[bb].end())
+			{
+				sum += d->second; ++count;
+			}
+		}
+		assert(count > 0);
+		bb_to_avg_dists.emplace(bb, sum / count);
+	}
+}
+
+// The config is in form "xxx=aaa:yyy=bbb"
+void aflrun_load_config(const char* config_str,
+	u8* check_at_begin, u8* log_at_begin, u64* log_check_interval,
+	double* trim_thr, double* queue_quant_thr, u32* min_num_exec)
+{
+	string s(config_str);
+	try
+	{
+		while (true)
+		{
+			size_t idx = s.find(':');
+			if (idx == string::npos)
+			{
+				config.load(s);
+				break;
+			}
+			config.load(s.substr(0, idx));
+			s = s.substr(idx + 1);
+		}
+		config.check();
+	}
+	catch (const string& e)
+	{
+		cerr << e << endl;
+		abort();
+	}
+	*check_at_begin = config.check_at_begin;
+	*log_at_begin = config.log_at_begin;
+	*log_check_interval = config.log_check_interval;
+	*trim_thr = config.trim_thr;
+	*queue_quant_thr = config.queue_quant_thr;
+	*min_num_exec = config.min_num_exec;
+}
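+
+// Usage sketch for aflrun_load_config: a string such as
+// "conf_thr=0.9:supp_cnt_thr=100" (hypothetical key names; the accepted keys
+// are defined by config.load, which is not shown here) is split on ':' and
+// each "key=value" piece is handed to config.load in turn, after which
+// config.check() validates the combination.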
+
+void aflrun_remove_seed(u32 seed)
+{
+	path_pro_fringes->remove_seed(seed);
+	path_fringes->remove_seed(seed);
+	reached_targets->remove_seed(seed);
+	div_blocks->remove_seed(seed);
+}
+
+/* ----- Functions called for some time interval to log and check ----- */
+
+#ifdef NDEBUG
+void aflrun_check_state(void) {}
+#else
+namespace
+{
+template <typename F, typename D>
+void check_state(const FringeBlocks<F, D>& fringes)
+{
+	for (reach_t t = 0; t < fringes.target_to_fringes.size(); ++t)
+	{
+		for (const F& f : fringes.target_to_fringes[t])
+		{
+			auto it = fringes.fringes.find(f);
+			assert(it != fringes.fringes.end());
+			auto it2 = it->second.decisives.find(t);
+			assert(it2 != it->second.decisives.end());
+			assert(!it->second.seeds.empty());
+			for (u32 seed : it->second.seeds)
+			{
+				const auto& seed_fringes = fringes.seed_fringes.find(seed)->second;
+				assert(seed_fringes.find(f) != seed_fringes.end());
+			}
+		}
+	}
+	for (const auto& fi : fringes.fringes)
+	{
+		assert(!fi.second.decisives.empty());
+		for (const auto& td : fi.second.decisives)
+		{
+			const auto& fs = fringes.target_to_fringes.at(td.first);
+			assert(fs.find(fi.first) != fs.end());
+		}
+	}
+}
+}
+void aflrun_check_state(void)
+{
+	if (!config.check_fringe)
+		return;
+	check_state(*path_fringes);
+	check_state(*path_pro_fringes);
+	check_state(*reached_targets);
+	for (reach_t t = 0; t < path_pro_fringes->target_to_fringes.size(); ++t)
+	{
+		for (const auto& f : path_pro_fringes->target_to_fringes[t])
+		{
+			assert(path_fringes->target_to_fringes[t].find(f) !=
+				path_fringes->target_to_fringes[t].end());
+		}
+	}
+}
+#endif
+
+namespace
+{
+string targets_info;
+
+template <typename F, typename D>
+void log_fringes(ofstream& out, const FringeBlocks<F, D>& fringes)
+{
+	out << "fringe | target group | decisives | seeds | freq" << endl;
+	for (const auto& f : fringes.fringes)
+	{
+		auto res = fringes.grouper->separate(f.second.decisives);
+		for (const rh::unordered_set<reach_t>& group : res)
+		{
+			log_fringe<F>(out, f.first);
+			out << " |";
+			rh::unordered_set<D> decisives;
+			for (reach_t t : group)
+			{
+				out << ' ' << g->reachable_names[t];
+				const auto& tmp = f.second.decisives.find(t)->second;
+				decisives.insert(tmp.begin(), tmp.end());
+			}
+			out << " | ";
+			for (const D& d : decisives)
+			{
+				log_fringe<D>(out, d); out << ' ';
+			}
+			out << '|';
+			if (config.show_all_seeds)
+			{
+				for (u32 s : f.second.seeds)
+				{
+					out << ' ' << s;
+				}
+			}
+			else if (f.second.has_top_rated)
+			{
+				out << ' ' << f.second.top_rated_seed;
+			}
+			out << " | " << f.second.fuzzed_quant << endl;
+		}
+	}
+}
+}
+
+void aflrun_log_fringes(const char* path, u8 which)
+{
+	ofstream out(path);
+	// When critical blocks are disabled, we don't need the log.
+	if (!out.is_open() || config.no_critical)
+		return;
+
+	path_fringes->group();
+	path_pro_fringes->group();
+	reached_targets->group();
+
+	switch (which)
+	{
+	case 2: // print all paths towards all targets
+		out << "context | target | seeds" << endl;
+		for (const auto& f : reached_targets->fringes)
+		{
+			assert(f.first.block < g->num_targets);
+			out << f.first.context << " | " <<
+				g->reachable_names[f.first.block] << " |";
+			if (config.show_all_seeds)
+			{
+				for (u32 s : f.second.seeds)
+				{
+					out << ' ' << s;
+				}
+			}
+			else if (f.second.has_top_rated)
+			{
+				out << ' ' << f.second.top_rated_seed;
+			}
+			out << endl;
+		}
+		clusters.print(out);
+		div_blocks->print(out);
+		break;
+	case 1:
+		log_fringes(out, *path_pro_fringes);
+		break;
+	case 0:
+		log_fringes(out, *path_fringes);
+		break;
+	default:
+		abort();
+	}
+	if (which == 2)
+		out << targets_info;
+	out.close();
+}
+
+u64 aflrun_queue_cycle(void)
+{
+	if (g->cycle_time)
+		return (get_cur_time() - g->init_time) / 1000 / g->cycle_time;
+	else
+		return state.get_whole_count();
+}
+
+void aflrun_get_state(int* cycle_count, u32* cov_quant,
+	size_t* div_num_invalid, size_t* div_num_fringes)
+{
+	state.get_counts(*cycle_count, *cov_quant);
+	*div_num_invalid = div_blocks->num_invalid;
+	*div_num_fringes = div_blocks->num_fringes;
+}
+
+u8 aflrun_get_mode(void)
+{
+	return state.get_mode();
+}
+
+bool aflrun_is_uni(void)
+{
+	return state.get_mode() == AFLRunState::kUnite;
+}
+
+double aflrun_get_seed_quant(u32 seed)
+{
+	return seed < seed_quant.size() ? seed_quant[seed] : 0;
+}
+
+void aflrun_get_reached(reach_t* num_reached, reach_t* num_freached,
+	reach_t* num_reached_targets, reach_t* num_freached_targets)
+{
+	*num_reached = g->num_reached;
+	*num_freached = g->num_freached;
+	*num_reached_targets = g->num_reached_targets;
+	*num_freached_targets = g->num_freached_targets;
+}
+
+void aflrun_get_time(u64* last_reachable, u64* last_fringe,
+	u64* last_pro_fringe, u64* last_target, u64* last_ctx_reachable,
+	u64* last_ctx_fringe, u64* last_ctx_pro_fringe, u64* last_ctx_target)
+{
+	*last_reachable = update_time.last_reachable;
+	*last_fringe = update_time.last_fringe;
+	*last_pro_fringe = update_time.last_pro_fringe;
+	*last_target = update_time.last_target;
+	*last_ctx_reachable = update_time.last_ctx_reachable;
+	*last_ctx_fringe = update_time.last_ctx_fringe;
+	*last_ctx_pro_fringe = update_time.last_ctx_pro_fringe;
+	*last_ctx_target = update_time.last_ctx_target;
+}
+
+/* ----- Functions called at begining of each cycle ----- */
+
+namespace
+{
+void assign_energy_seed(u32 num_seeds, const u32* seeds, double* ret)
+{
+	switch (state.get_mode())
+	{
+	case AFLRunState::kFringe:
+	{
+		path_fringes->assign_energy(num_seeds, seeds, ret);
+		return;
+	}
+	case AFLRunState::kProFringe:
+	{
+		path_pro_fringes->assign_energy(num_seeds, seeds, ret);
+		return;
+	}
+	case AFLRunState::kTarget:
+	{
+		reached_targets->assign_energy(num_seeds, seeds, ret);
+		return;
+	}
+	case AFLRunState::kUnite:
+	{
+		assign_energy_unite(num_seeds, seeds, ret);
+		return;
+	}
+	default:
+		abort();
+	}
+}
+}
+
+void aflrun_assign_energy(u32 num_seeds, const u32* seeds, double* ret)
+{
+	if (!config.seed_based_energy)
+	{
+		cerr << "Old energy assignment is no longer supported" << endl;
+		abort();
+	}
+	assign_energy_seed(num_seeds, seeds, ret);
+}
+
+// Call this function at the end of every cycle,
+// including the beginning of the first cycle or when the state is reset
+// (a pseudo cycle end where `cycle_count` increments from -1 to 0).
+// The function returns the new mode.
+u8 aflrun_cycle_end(u8* whole_end)
+{
+	*whole_end = state.cycle_end();
+	return state.get_mode();
+}
+
+/* ----- Functions called when new reachable block becomes non-virgin ----- */
+
+namespace
+{
+
+// Perform a virgin BFS from the given block;
+// return a map from each reached target to a set of blocks forming a path to it.
+
+template <typename D>
+inline rh::unordered_set<D> trace_decisives(
+	const rh::unordered_map<D, D>& parent, const D& start, const D& v)
+{
+	rh::unordered_set<D> decisives;
+
+	// Get all blocks forming the path towards the target
+	D cur = v;
+	do
+	{
+		decisives.insert(cur);
+		cur = parent.find(cur)->second;
+	} while (!(cur == start));
+
+	return decisives;
+}
+
+template <typename D>
+rh::unordered_map<reach_t, rh::unordered_set<D>> get_target_paths(D);
+
+template <>
+rh::unordered_map<reach_t, rh::unordered_set<reach_t>>
+	get_target_paths<reach_t>(reach_t block)
+{
+	// https://en.wikipedia.org/wiki/Breadth-first_search#Pseudocode
+	rh::unordered_map<reach_t, rh::unordered_set<reach_t>> ret;
+	queue<reach_t> q; rh::unordered_map<reach_t, reach_t> parent;
+	for (reach_t dst : graph->src_to_dst[block])
+	{
+		if (IS_SET(g->virgin_reachables, dst))
+		{ // add all outgoing virgin vertexes to queue as initialization
+			parent.emplace(dst, block);
+			q.push(dst);
+		}
+	}
+	while (!q.empty())
+	{
+		reach_t v = q.front(); q.pop();
+
+		if (v < g->num_targets)
+		{
+			ret.emplace(v, trace_decisives<reach_t>(parent, block, v));
+		}
+
+		for (reach_t w : graph->src_to_dst[v])
+		{
+			if (!IS_SET(g->virgin_reachables, w))
+				continue;
+			if (parent.find(w) == parent.end())
+			{
+				parent.emplace(w, v);
+				q.push(w);
+			}
+		}
+	}
+	return ret;
+}
+
+vector<u32> get_next_hashes(const Fringe& src, reach_t dst, bool& is_call)
+{
+	u32 ctx = src.context;
+	reach_t block = src.block;
+	auto p = make_pair<reach_t, reach_t>(std::move(block), std::move(dst));
+	auto it = graph->call_hashes.find(p);
+
+	vector<u32> next_hashes;
+	// If it is a call edge, calculate the new ctx for each recorded hash;
+	// a block calling the same function multiple times has multiple hashes.
+	if (it != graph->call_hashes.end())
+	{
+		for (u32 h : it->second)
+		{
+			next_hashes.push_back(ctx ^ h);
+		}
+		is_call = true;
+	}
+	else
+	{
+		next_hashes.push_back(ctx);
+		is_call = false;
+	}
+
+	return next_hashes;
+}
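+
+// Context-update sketch for get_next_hashes: for a call edge recorded in
+// `call_hashes`, each stored hash is XOR-ed into the current context, so
+// (hypothetical values) ctx = 0x12 with hashes {0x34, 0x56} yields the next
+// contexts {0x26, 0x44}; for a non-call edge the context is passed through
+// unchanged and `is_call` is set to false.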
+
+// This is a helper function used to optimize `get_target_paths<Fringe>`,
+// because going through all possible states with contexts is too expensive.
+bool all_targets_visited(reach_t block,
+	const rh::unordered_map<reach_t, rh::unordered_set<Fringe>>& cur)
+{
+	size_t num_ts = g->reachable_to_size[block];
+
+	// If the number of reachable targets is larger than the number of visited
+	// targets, then there must be some target reachable by `block` that is not
+	// visited yet; this is just a fast path for slight optimization.
+	if (num_ts > cur.size())
+		return false;
+
+	const reach_t* beg = g->reachable_to_targets[block];
+	const reach_t* end = beg + num_ts;
+
+	for (const reach_t* t = beg; t < end; ++t)
+	{ // If there is a target reachable by `block` not yet visited by `cur`,
+		// we should return false.
+		if (cur.find(*t) == cur.end())
+			return false;
+	}
+
+	// If all targets reachable by `block` already have a path,
+	// we can skip this block by not adding it to the search queue.
+	return true;
+}
+
+// Basically the same as above, except that when doing BFS,
+// we consider the context and regard the same `dst` node with different
+// contexts as different next possible states.
+rh::unordered_map<reach_t, rh::unordered_set<Fringe>>
+	get_target_paths_slow(const Fringe& block_ctx)
+{
+	rh::unordered_map<reach_t, rh::unordered_set<Fringe>> ret;
+	queue<Fringe> q; rh::unordered_map<Fringe, Fringe> parent;
+	reach_t block = block_ctx.block;
+	bool dummy;
+
+	// For the given source state (i.e. block and context),
+	// we iterate all possible next states and add them to the queue.
+	for (reach_t dst : graph->src_to_dst[block])
+	{
+		auto next_hashes = get_next_hashes(block_ctx, dst, dummy);
+		for (u32 next_hash : next_hashes)
+		{
+			if (IS_SET(g->virgin_ctx, CTX_IDX(dst, next_hash)))
+			{
+				Fringe next(dst, next_hash);
+				parent.emplace(next, block_ctx);
+				q.push(next);
+			}
+		}
+	}
+
+	while (!q.empty())
+	{
+		Fringe v = q.front(); q.pop();
+
+		// If we reached the target via BFS for the first time,
+		// we trace and record paths to it, similar to above
+		if (v.block < g->num_targets && ret.find(v.block) == ret.end())
+		{
+			ret.emplace(v.block, trace_decisives<Fringe>(parent, block_ctx, v));
+		}
+
+		// All possible next states are virgin (block, ctx) pairs
+		for (reach_t w : graph->src_to_dst[v.block])
+		{
+			if (all_targets_visited(w, ret))
+				continue;
+			auto next_hashes = get_next_hashes(v, w, dummy);
+			for (u32 next_hash : next_hashes)
+			{
+				if (!IS_SET(g->virgin_ctx, CTX_IDX(w, next_hash)))
+					continue;
+				Fringe next(w, next_hash);
+				if (parent.find(next) == parent.end())
+				{
+					parent.emplace(next, v);
+					q.push(next);
+				}
+			}
+		}
+	}
+
+	return ret;
+}
+
+// The problem with a thorough BFS is that it can be too slow for big binaries,
+// so we have a fast version that only does BFS inside the current function and
+// into the entry block of each function it calls.
+// The core idea: as soon as the search reaches an entry block whose next
+// context is virgin, it adds every target that block can reach into
+// `decisives`, using the partial trace that reaches the entry block. Since the
+// entry block's context is virgin, the stack trace so far plus this call site
+// cannot have been visited, so any target reachable from it can potentially be
+// reached via "stack trace so far + this call site + further call sites",
+// which likewise has not been visited before. It is therefore sound to assume
+// that a context-sensitive path to these targets exists.
+// However, this fast hack has 2 problems:
+// 1. it cannot handle hash collisions; 2. it is potentially problematic for
+// recursion.
+rh::unordered_map<reach_t, rh::unordered_set<Fringe>>
+	get_target_paths_fast(const Fringe& block_ctx)
+{
+	rh::unordered_map<reach_t, rh::unordered_set<Fringe>> ret;
+	queue<pair<Fringe, bool>> q; rh::unordered_map<Fringe, Fringe> parent;
+	reach_t block = block_ctx.block;
+
+	// Similar to the slow one,
+	// except we also record whether `Fringe` is a call in the queue.
+
+	for (reach_t dst : graph->src_to_dst[block])
+	{
+		bool is_call;
+		auto next_hashes = get_next_hashes(block_ctx, dst, is_call);
+		for (u32 next_hash : next_hashes)
+		{
+			if (IS_SET(g->virgin_ctx, CTX_IDX(dst, next_hash)))
+			{
+				Fringe next(dst, next_hash);
+				parent.emplace(next, block_ctx);
+				q.push(make_pair(std::move(next), std::move(is_call)));
+			}
+		}
+	}
+
+	while (!q.empty())
+	{
+		auto tmp = q.front(); q.pop();
+		const Fringe& v = tmp.first;
+
+		// We still need to check potential targets in the function
+		if (!tmp.second &&
+			v.block < g->num_targets && ret.find(v.block) == ret.end())
+		{
+			ret.emplace(v.block, trace_decisives<Fringe>(parent, block_ctx, v));
+		}
+
+		// If the current virgin `Fringe` was reached via a call edge,
+		// we take the trace to it and assign it to each target it can reach;
+		// we also don't continue visiting its child blocks.
+		if (tmp.second)
+		{
+			auto decisives = trace_decisives(parent, block_ctx, v);
+
+			const reach_t* beg = g->reachable_to_targets[v.block];
+			const reach_t* end = beg + g->reachable_to_size[v.block];
+			for (const reach_t* t = beg + 1; t < end; ++t)
+			{
+				// If key `*t` already exists, `emplace` does nothing.
+				ret.emplace(*t, decisives);
+			}
+			ret.emplace(*beg, std::move(decisives));
+		}
+		else
+		{
+			for (reach_t w : graph->src_to_dst[v.block])
+			{
+				bool is_call;
+				auto next_hashes = get_next_hashes(v, w, is_call);
+				for (u32 next_hash : next_hashes)
+				{
+					if (!IS_SET(g->virgin_ctx, CTX_IDX(w, next_hash)))
+						continue;
+					Fringe next(w, next_hash);
+					if (parent.find(next) == parent.end())
+					{
+						parent.emplace(next, v);
+						q.push(make_pair(std::move(next), std::move(is_call)));
+					}
+				}
+			}
+		}
+	}
+
+	return ret;
+}
+
+template <>
+rh::unordered_map<reach_t, rh::unordered_set<Fringe>>
+	get_target_paths<Fringe>(Fringe block_ctx)
+{
+	if (config.slow_ctx_bfs)
+		return get_target_paths_slow(block_ctx);
+	else
+		return get_target_paths_fast(block_ctx);
+}
+
+/* ----- Functions called for each test case mutated and executed ----- */
+
+template <typename D>
+inline D to_decisive(const Fringe& f);
+template <>
+inline reach_t to_decisive<reach_t>(const Fringe& f)
+{
+	return f.block;
+}
+template <>
+inline Fringe to_decisive<Fringe>(const Fringe& f)
+{
+	return f;
+}
+
+template <typename F, typename D>
+u8 FringeBlocks<F, D>::try_add_fringe(const Fringe& cand)
+{
+	auto target_decisives = get_target_paths<D>(to_decisive<D>(cand));
+	if (target_decisives.empty())
+		return 0;
+	for (auto& td : target_decisives)
+	{
+		this->add_fringe(cand, td.first, std::move(td.second));
+	}
+	return 1;
+}
+
+u8 try_add_fringe(const ctx_t& cand)
+{
+	reach_t block = cand.block;
+	Fringe f_cand(block, cand.call_ctx);
+
+	/* For the ablation study that removes the critical blocks,
+	`path_pro_fringes` and `path_fringes` are both empty,
+	and we put all covered blocks into reached_targets.
+	Hope this hack does not cause any other problem. :) */
+	if (config.no_critical)
+	{
+		const reach_t* beg = g->reachable_to_targets[block];
+		const reach_t* end = beg + g->reachable_to_size[block];
+		for (const reach_t* i = beg; i < end; ++i)
+		{
+			reached_targets->add_fringe(f_cand, *i, rh::unordered_set<u8>());
+		}
+		return 0;
+	}
+
+	u8 r2 = path_pro_fringes->try_add_fringe(f_cand);
+
+#ifdef AFLRUN_CTX
+	// We add criticals into `path_fringes` only when context is enabled.
+	u8 r1 = path_fringes->try_add_fringe(f_cand);
+	assert(!r2 || r1); // r2 -> r1
+#endif
+
+	// If the candidate is a fringe reaching a target and not yet added, we add it
+	if (block < g->num_targets)
+	{
+		reached_targets->add_fringe(f_cand, block, rh::unordered_set<u8>());
+	}
+
+#ifdef AFLRUN_CTX
+	return r2 + r1;
+#else
+	// When context is not enabled, we return 2 when new critical is added.
+	return r2 * 2;
+#endif
+}
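+
+// Return-value sketch for the non-template try_add_fringe above (aside from
+// the no_critical ablation, which always returns 0): with AFLRUN_CTX the
+// result is r1 + r2, i.e. 0 for no new critical block, 1 for a new
+// context-sensitive fringe only, 2 when the block also enters the pro fringe;
+// without AFLRUN_CTX only 0 or 2 are returned. Callers below add 1 to this
+// value, giving the 1..3 `cf`/`f` levels described in aflrun_has_new_path.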
+
+// Return set of all blocks it removed
+template <typename F, typename D>
+vector<reach_t> FringeBlocks<F, D>::try_del_fringe(const Fringe& cand)
+{
+	vector<reach_t> ret;
+	auto it = this->decisive_to_fringes.find(to_decisive<D>(cand));
+	if (it == this->decisive_to_fringes.end())
+		return ret;
+	rh::unordered_set<Fringe> fringes_decided(std::move(it->second));
+	this->decisive_to_fringes.erase(it);
+
+	for (const Fringe& f : fringes_decided)
+	{
+		auto it2 = this->fringes.find(f);
+		if (it2 == this->fringes.end())
+			continue;
+
+		// Re-evaluate the fringe to see if it can still reach any target
+		auto target_decisives = get_target_paths<D>(to_decisive<D>(f));
+		if (target_decisives.empty())
+		{ // If not, delete the fringe
+			if (this->del_fringe(f))
+				ret.push_back(f.block);
+		}
+		else
+		{ // Otherwise, update the fringe with:
+			// 1. new targets (a subset of the original targets) and 2. new decisives
+			for (const auto& td : it2->second.decisives)
+			{
+				// If an old target is not covered by new set of targets
+				if (target_decisives.find(td.first) == target_decisives.end())
+				{
+					this->target_to_fringes[td.first].erase(f);
+				}
+			}
+			for (const auto& td : target_decisives)
+			{
+				for (const D& d : td.second)
+				{
+					this->decisive_to_fringes[d].insert(f);
+				}
+			}
+			it2->second.decisives = std::move(target_decisives);
+		}
+	}
+	return ret;
+}
+
+template <typename F, typename D>
+void FringeBlocks<F, D>::remove_seed(u32 seed)
+{
+	auto it = seed_fringes.find(seed);
+	// Skip if the seed does not exist
+	if (it == seed_fringes.end())
+		return;
+	assert(!it->second.empty());
+	for (const auto& f : it->second)
+	{ // For all its fringes, we also need to update their info about seeds
+		auto& info = fringes.find(f)->second;
+
+		// Because we only remove duplicate seeds,
+		// there must be another seed covering the fringe
+		info.seeds.erase(seed); assert(!info.seeds.empty());
+
+		if (info.has_top_rated && info.top_rated_seed == seed)
+		{ // If top_rated_seed has been removed, we need to update it
+			u32 best_seed = 0xdeadbeefu;
+			u64 best_fav_factor = numeric_limits<u64>::max();
+			for (u32 seed : info.seeds)
+			{
+				u64 fav_factor = get_seed_fav_factor(g->afl, seed);
+				if (fav_factor <= best_fav_factor)
+				{
+					best_seed = seed;
+					best_fav_factor = fav_factor;
+				}
+			}
+			info.top_rated_seed = best_seed;
+			info.top_rated_factor = best_fav_factor;
+		}
+	}
+	seed_fringes.erase(it);
+}
+
+}
+
+u8 aflrun_has_new_path(const u8* freached, const u8* reached, const u8* path,
+	const ctx_t* new_paths, size_t len, u8 inc, u32 seed,
+	const u8* new_bits, const size_t* cur_clusters, size_t num_clusters)
+{
+	u8 ret = 0;
+	unique_ptr<rh::unordered_set<Fringe>> new_criticals;
+	unique_ptr<rh::unordered_set<reach_t>> new_critical_blocks;
+	if (len != 0)
+	{
+		// If there are `new_paths`, we update virgin bits.
+		// Note that if there are new virgin bits, there must be `new_paths`,
+		// so any newly reached virgin bits will not be missed.
+
+		// Update virgin bits for reachable functions
+		for (reach_t i = 0; i < g->num_freachables; ++i)
+		{
+			if (IS_SET(g->virgin_freachables, i) && IS_SET(freached, i))
+			{
+				g->virgin_freachables[i / 8] &= 0xffu ^ (1u << (i % 8));
+				g->num_freached++;
+				if (i < g->num_ftargets)
+					g->num_freached_targets++;
+			}
+		}
+
+		rh::unordered_set<reach_t> new_blocks;
+		for (reach_t i = 0; i < g->num_reachables; ++i)
+		{
+			// If the bit is virgin (i.e. not reached before),
+			// and this execution reaches that virgin bit,
+			if (IS_SET(g->virgin_reachables, i) && IS_SET(reached, i))
+			{
+				// we clear the virgin bit
+				g->virgin_reachables[i / 8] &= 0xffu ^ (1u << (i % 8));
+				g->num_reached++;
+				new_blocks.insert(i);
+				if (i < g->num_targets)
+					g->num_reached_targets++;
+			}
+		}
+
+		for (size_t i = 0; i < len; ++i)
+		{
+			const ctx_t& cand = new_paths[i];
+			Fringe f_cand(cand.block, cand.call_ctx);
+			auto del_norm = path_fringes->try_del_fringe(f_cand);
+			auto del_pro = path_pro_fringes->try_del_fringe(f_cand);
+			if (config.no_diversity)
+				continue;
+			if (config.div_level == 1) // Only pro-fringe
+			{
+				for (reach_t b : del_pro)
+				{ // For all blocks removed from pro fringe
+					assert(path_pro_fringes->block_to_fringes.count(b) == 0);
+					if (b >= g->num_targets)
+					{ // If it is not a target, we switch it off
+						div_blocks->switch_off(b);
+					}
+				}
+				clusters.clean_supp_cnts();
+			}
+			else if (config.div_level == 2) // pro-fringe + norm-fringe
+			{
+				rh::unordered_set<reach_t> switched_off;
+				for (reach_t b : del_pro)
+				{
+					assert(path_pro_fringes->block_to_fringes.count(b) == 0);
+					if (b >= g->num_targets &&
+						path_fringes->block_to_fringes.count(b) == 0)
+					{ // The block is no longer a pro fringe and not in the norm fringe either, so switch it off; blocks still in norm are kept.
+						div_blocks->switch_off(b);
+						switched_off.insert(b);
+					}
+				}
+				for (reach_t b : del_norm)
+				{
+					// If a block is deleted from norm fringe,
+					// it cannot appear in pro fringe either.
+					assert(path_pro_fringes->block_to_fringes.count(b) == 0);
+					assert(path_fringes->block_to_fringes.count(b) == 0);
+					if (b >= g->num_targets && switched_off.count(b) == 0)
+					{
+						div_blocks->switch_off(b);
+					}
+				}
+				clusters.clean_supp_cnts();
+			}
+			// At this point, all fringes removed from the fringe sets above have been handled.
+		}
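+		// Summary of the diversity handling above (inferred from the code):
+		// with div_level == 1, a non-target block keeps its diversity switch
+		// only while it stays a pro fringe; with div_level == 2, it also keeps
+		// the switch while it remains a normal fringe.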
+
+		u8 cf = 0, ct = 0, f = 0, t = 0;
+		new_criticals = make_unique<rh::unordered_set<Fringe>>();
+		new_critical_blocks = make_unique<rh::unordered_set<reach_t>>();
+		for (size_t i = 0; i < len; ++i)
+		{
+			reach_t block = new_paths[i].block;
+			u8 r = try_add_fringe(new_paths[i]) + 1;
+
+			// Update context-sensitive fringe and target
+			cf = max(r, cf);
+			if (block < g->num_targets)
+				ct = 1;
+
+			// If it is the first time the block is reached,
+			// we update the context-insensitive fringe and target flags.
+			if (new_blocks.find(block) != new_blocks.end())
+			{
+				f = max(r, f);
+				if (block < g->num_targets)
+					t = 1;
+			}
+
+			if (r >= 2 || block < g->num_targets)
+			{
+				new_criticals->emplace(
+					new_paths[i].block, new_paths[i].call_ctx);
+				new_critical_blocks->insert(new_paths[i].block);
+			}
+
+			// When there is a new fringe or target, we activate its switch.
+			if (block < g->num_targets || r > 3 - config.div_level)
+			{ // Note this can happen multiple times for a block; 3 cases:
+				// 1. If this is the first time it is activated, the switch is turned on.
+				// 2. If the switch is already on, nothing is done.
+				// 3. If the switch was turned off before, it is turned on again.
+				//    Such a case only occurs for r == 2 (i.e. a context fringe).
+				div_blocks->switch_on(block);
+			}
+		}
+		if (config.reset_level == 1)
+		{
+			if (f > 0)
+				state.reset(f - 1); // state.reset(cf - 1); TODO: config
+			if (config.reset_target && t)
+				state.exploit();
+		} // TODO: reset_level == 2
+		if (state.is_init_cov())
+		{
+			if (config.init_cov_reset == 1)
+			{
+				if (f > 0 || t)
+					state.reset_cov_quant();
+			}
+			else if (config.init_cov_reset == 2)
+			{
+				if (cf > 0 || ct)
+					state.reset_cov_quant();
+			}
+		}
+
+		/*
+		Given the execution trace produced by the program, we check whether it
+		exercises anything new; the flags below record what was found:
+		cf:
+			1 if a new context-sensitive block is covered,
+			2 if a new context-sensitive fringe is added,
+			3 if a new context-sensitive pro fringe is added;
+		ct:
+			1 if a new context-sensitive target is reached;
+		f:
+			1 if a new reachable block is covered,
+			2 if a new fringe is added for the first time,
+			3 if a new pro fringe is added for the first time;
+		t:
+			1 if a new context-insensitive target is reached.
+		*/
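+		// Illustrative reading of the flags: an execution covering a brand-new
+		// block that also becomes a pro fringe yields f == 3 and cf == 3; one
+		// that only revisits a known block under a new calling context, turning
+		// it into a context-sensitive fringe, raises cf to 2 but does not raise
+		// f, since the block is not in `new_blocks`.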
+
+		if (f >= 1) update_time.last_reachable = get_cur_time();
+		if (f >= 2) update_time.last_fringe = get_cur_time();
+		if (f >= 3) update_time.last_pro_fringe = get_cur_time();
+
+		if (t) update_time.last_target = get_cur_time();
+
+		if (cf >= 1) update_time.last_ctx_reachable = get_cur_time();
+		if (cf >= 2) update_time.last_ctx_fringe = get_cur_time();
+		if (cf >= 3) update_time.last_ctx_pro_fringe = get_cur_time();
+
+		if (ct) update_time.last_ctx_target = get_cur_time();
+
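+		// The trace is reported as interesting (return value 1) iff it adds a
+		// new context-sensitive fringe or pro fringe (cf >= 2) or reaches a
+		// context-sensitive target (ct).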
+		ret = cf >= 2 || ct;
+	}
+
+	// TODO: Coverage for Seed Isolation
+
+	bool has_cov = false;
+	if (num_clusters == 0 || (new_bits && new_bits[0]))
+	{ // If `num_clusters` is zero, or the primary map has new bits,
+		// then the seed is not an extra seed,
+		// so we skip seed isolation and consider all coverage.
+		has_cov |= path_fringes->fringe_coverage(path, seed);
+		has_cov |= path_pro_fringes->fringe_coverage(path, seed);
+		has_cov |= reached_targets->fringe_coverage(path, seed);
+		div_blocks->div_coverage(reached, seed);
+	}
+	else
+	{
+		rh::unordered_set<reach_t> new_bits_targets;
+		if (new_bits)
+		{ // If new_bits is not NULL, some virgin map may have been updated.
+			for (size_t i = 1; i < num_clusters; ++i)
+			{
+				if (new_bits[i])
+				{ // If there are any new bits, insert all targets of the cluster.
+					const auto& ts = clusters.get_targets(cur_clusters[i]);
+					new_bits_targets.insert(ts.begin(), ts.end());
+				}
+			}
+		}
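+		// For such an extra seed, the calls below restrict coverage accounting
+		// to the newly found critical fringes/blocks and to fringes whose
+		// targets lie in a cluster whose virgin map got new bits (as the extra
+		// parameters suggest; the exact filtering lives in fringe_coverage and
+		// div_coverage).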
+		has_cov |= path_fringes->fringe_coverage(
+			path, seed, new_criticals.get(), &new_bits_targets);
+		has_cov |= path_pro_fringes->fringe_coverage(
+			path, seed, new_criticals.get(), &new_bits_targets);
+		has_cov |= reached_targets->fringe_coverage(
+			path, seed, new_criticals.get(), &new_bits_targets);
+		div_blocks->div_coverage(
+			reached, seed, new_critical_blocks.get(), &new_bits_targets);
+	}
+
+	// Reset `cov_quant` to 0 in initial coverage if any new fringe coverage
+	if (config.init_cov_reset == 3 && state.is_init_cov() && has_cov)
+		state.reset_cov_quant();
+
+	/*if (inc)
+	{
+		path_fringes->inc_freq(path);
+		reached_targets->inc_freq(path);
+	}*/
+
+	return ret;
+}
+
+u8 aflrun_end_cycle()
+{
+	return state.is_reset() || state.is_end_cov();
+}
+
+void aflrun_update_fuzzed_quant(u32 id, double fuzzed_quant)
+{
+	path_fringes->update_fuzzed_quant(id, fuzzed_quant);
+	path_pro_fringes->update_fuzzed_quant(id, fuzzed_quant);
+	reached_targets->update_fuzzed_quant(id, fuzzed_quant);
+	state.add_quant(fuzzed_quant);
+	if (id >= seed_quant.size())
+		seed_quant.resize(id + 1);
+	seed_quant[id] += fuzzed_quant;
+}
+
+void aflrun_update_fringe_score(u32 seed)
+{
+	path_fringes->update_fringe_score(seed);
+	path_pro_fringes->update_fringe_score(seed);
+	reached_targets->update_fringe_score(seed);
+}
+
+void aflrun_set_favored_seeds(const u32* seeds, u32 num, u8 mode)
+{
+	switch (mode)
+	{
+	case AFLRunState::kFringe:
+		return path_fringes->set_favored_seeds(seeds, num);
+	case AFLRunState::kProFringe:
+		return path_pro_fringes->set_favored_seeds(seeds, num);
+	case AFLRunState::kTarget:
+		return reached_targets->set_favored_seeds(seeds, num);
+	default:
+		abort();
+	}
+}
+
+u32 aflrun_cull_queue(u32* seeds, u32 num)
+{
+	switch (state.get_mode())
+	{
+	case AFLRunState::kFringe:
+		return path_fringes->cull_queue(seeds, num);
+	case AFLRunState::kProFringe:
+		return path_pro_fringes->cull_queue(seeds, num);
+	case AFLRunState::kTarget:
+		return reached_targets->cull_queue(seeds, num);
+	case AFLRunState::kUnite:
+		return cull_queue_unite(seeds, num);
+	default:
+		abort();
+	}
+}
+
+// Note that the virgin maps returned here can be inaccurate,
+// so they should not be fed into `has_new_bits_mul`;
+// use the ones returned by `aflrun_get_seed_virgins` instead.
+size_t aflrun_get_virgins(
+	const ctx_t* targets, size_t num, u8** ret_maps, size_t* ret_clusters)
+	// `ret_maps` and `ret_clusters` must have size at least `num`
+{
+	if (config.no_diversity)
+		return 0;
+	const ctx_t* t_end = targets + num;
+	// The maximum potential number of clusters is the current number of
+	// clusters plus the number of new context-sensitive targets, because each
+	// target can increase the number of clusters by at most one.
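+	// For example (illustrative numbers): with clusters.size() == 4 and
+	// num == 2 new targets, at most 4 + 2 = 6 distinct clusters can show up
+	// here, so the bitset is sized accordingly.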
+	bo::dynamic_bitset<> visited_clusters(clusters.size() + num);
+	visited_clusters[0] = true; // always skip primary cluster
+
+	size_t idx = 0;
+	for (const ctx_t* t = targets; t < t_end; ++t)
+	{
+		// Note that even if the binary is compiled with AFLRUN_CTX_DIV
+		// but the fuzzer is not, this still works correctly
+		size_t cluster = clusters.get_cluster(to_cluster_target(t));
+		if (visited_clusters[cluster])
+			continue;
+		visited_clusters[cluster] = true;
+
+		ret_clusters[idx] = cluster;
+		ret_maps[idx++] = clusters.get_virgin_map(cluster);
+	}
+	return idx;
+}
+
+// The maximum number of clusters of a seed is the number of active diversity
+// blocks it covers plus the primary cluster, assuming each diversity block
+// can create at most one cluster (see the caller sketch below).
+size_t aflrun_max_clusters(u32 seed)
+{
+	auto it = div_blocks->seed_blocks.find(seed);
+	return 1 +
+		(it == div_blocks->seed_blocks.end() ? 0 : it->second.size());
+}
+
+// Basically the same as above, except the div blocks are fetched from `div_blocks`
+size_t aflrun_get_seed_virgins(u32 seed, u8** ret_maps, size_t* ret_clusters)
+{
+	if (config.no_diversity)
+		return 0;
+	auto it = div_blocks->seed_blocks.find(seed);
+	if (it == div_blocks->seed_blocks.end())
+		return 0;
+	bo::dynamic_bitset<> visited_clusters(clusters.size() + it->second.size());
+	visited_clusters[0] = true; // always skip primary cluster
+
+	size_t idx = 0;
+	for (auto t : it->second)
+	{
+		size_t cluster = clusters.get_cluster(t);
+		if (visited_clusters[cluster])
+			continue;
+		visited_clusters[cluster] = true;
+
+		ret_clusters[idx] = cluster;
+		ret_maps[idx++] = clusters.get_virgin_map(cluster);
+	}
+	return idx;
+}
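+
+// A minimal caller sketch (an assumption about the intended usage, not code
+// taken verbatim from the fuzzer): allocate one slot per potential cluster,
+// then let the function report how many distinct clusters were filled in:
+//
+//   size_t max = aflrun_max_clusters(seed);
+//   u8** maps = (u8**)ck_alloc(max * sizeof(u8*));
+//   size_t* cs = (size_t*)ck_alloc(max * sizeof(size_t));
+//   size_t n = aflrun_get_seed_virgins(seed, maps, cs);
+//   /* use maps[0..n) and cs[0..n), then ck_free(maps); ck_free(cs); */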
+
+size_t aflrun_get_seed_tops(u32 seed, void*** ret_tops)
+{
+	if (config.no_diversity)
+		return 0;
+	auto it = div_blocks->seed_blocks.find(seed);
+	if (it == div_blocks->seed_blocks.end())
+		return 0;
+	bo::dynamic_bitset<> visited_clusters(clusters.size() + it->second.size());
+	visited_clusters[0] = true; // always skip primary cluster
+
+	size_t idx = 0;
+	for (auto t : it->second)
+	{
+		size_t cluster = clusters.get_cluster(t);
+		if (visited_clusters[cluster])
+			continue;
+		visited_clusters[cluster] = true;
+
+		ret_tops[idx++] = clusters.get_top_rated(cluster);
+	}
+	return idx;
+}
+
+size_t aflrun_get_num_clusters(void)
+{
+	size_t size = clusters.size();
+	size_t ret = 0;
+	for (size_t i = 0; i < size; ++i)
+	{
+		if (clusters.get_top_rated(i)) ++ret;
+	}
+	return ret;
+}
+
+size_t aflrun_get_all_tops(void*** ret_tops, u8 mode)
+{
+	if (config.no_diversity)
+		return 0;
+	return clusters.get_all_tops(ret_tops, mode);
+}
+
+void aflrun_set_num_active_seeds(u32 n)
+{
+	num_active_seeds = n;
+}
+
+void discover_word_mul(u8 *new_bits,
+	u64 *current, u64* const *virgins, size_t num, size_t idx, u8 modify)
+{
+	u64 or_all = 0;
+	unique_ptr<vector<u64>> and_bit_seq(nullptr);
+
+	for (size_t i = 0; i < num; ++i)
+	{
+		u64* virgin = virgins[i] + idx;
+
+		u64 tmp = *current & *virgin;
+
+		if (and_bit_seq != nullptr)
+			and_bit_seq->push_back(tmp);
+
+		if (tmp)
+		{
+			or_all |= tmp;
+
+			// The first time we touch a virgin map, we create the sequence
+			// that stores all `*current & *virgin` values. This lazy approach
+			// avoids allocating the sequence in the common case where every
+			// value is zero.
+			if (and_bit_seq == nullptr)
+			{
+				and_bit_seq = make_unique<vector<u64>>();
+				and_bit_seq->reserve(num);
+				// Since this is the first time we touch virgin bits,
+				// all previous `*current & *virgin` values are zeros.
+				for (size_t j = 0; j < i; ++j)
+					and_bit_seq->push_back(0);
+				and_bit_seq->push_back(tmp);
+			}
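+			// Illustrative trace: with num == 3 and the first non-zero AND
+			// appearing at i == 2, the sequence becomes {0, 0, tmp}; had i == 0
+			// already produced bits, the sequence would have been created there
+			// and extended by the push_back near the top of the loop.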
+
+			u8* ret = new_bits + i;
+			if (likely(*ret < 2))
+			{
+				u8 *cur = (u8 *)current;
+				u8 *vir = (u8 *)virgin;
+
+				if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
+					(cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) ||
+					(cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) ||
+					(cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff))
+					*ret = 2;
+				else
+					*ret = 1;
+			}
+			if (modify)
+				*virgin &= ~*current;
+		}
+	}
+
+	if (modify && or_all != 0)
+	{
+		clusters.add_bit_seq(or_all, std::move(and_bit_seq));
+	}
+}
+
+void aflrun_commit_bit_seqs(const size_t* cs, size_t num)
+{
+	clusters.commit_bit_seqs(cs, num);
+}