about summary refs log tree commit diff
path: root/include
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--include/afl-fuzz.h82
-rw-r--r--include/aflrun.h111
-rw-r--r--include/config.h39
-rw-r--r--include/coverage-32.h1
-rw-r--r--include/coverage-64.h91
-rw-r--r--include/forkserver.h16
-rw-r--r--include/sharedmem.h25
-rw-r--r--include/trace.h19
-rw-r--r--include/types.h15
9 files changed, 361 insertions, 38 deletions
diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h
index edef9207..00a9d701 100644
--- a/include/afl-fuzz.h
+++ b/include/afl-fuzz.h
@@ -48,6 +48,7 @@
 
 #include <stdio.h>
 #include <unistd.h>
+#include <getopt.h>
 #include <stdlib.h>
 #include <string.h>
 #include <time.h>
@@ -163,6 +164,7 @@ struct queue_entry {
       has_new_cov,                      /* Triggers new coverage?           */
       var_behavior,                     /* Variable behavior?               */
       favored,                          /* Currently favored?               */
+      div_favored,                      /* Currently favored for diversity? */
       fs_redundant,                     /* Marked as redundant in the fs?   */
       is_ascii,                         /* Is the input just ascii text?    */
       disabled;                         /* Is disabled from fuzz selection  */
@@ -184,6 +186,7 @@ struct queue_entry {
       handicap,                         /* Number of queue cycles behind    */
       depth,                            /* Path depth                       */
       exec_cksum,                       /* Checksum of the execution trace  */
+      fuzzed_times,                     /* Number of times being tested     */
       stats_mutated;                    /* stats: # of mutations performed  */
 
   u8 *trace_mini;                       /* Trace bytes, if kept             */
@@ -193,7 +196,7 @@ struct queue_entry {
   u32 bitsmap_size;
 #endif
 
-  double perf_score,                    /* performance score                */
+  double perf_score, quant_score,       /* performance score for afl(run)    */
       weight;
 
   u8 *testcase_buf;                     /* The testcase buffer, if loaded.  */
@@ -203,6 +206,16 @@ struct queue_entry {
 
   struct queue_entry *mother;           /* queue entry this based on        */
 
+  /* 0: not tested (e.g. first calibration for imported seed);
+    1: partially tested (e.g. first calibration for mutated seed);
+    2: fully tested (e.g. non-first time calibration) */
+  u8 tested;
+
+  /* --- Used only in multiple-path mode --- */
+  u64 path_cksum;
+
+  u8 aflrun_extra;
+
 };
 
 struct extra_data {
@@ -430,6 +443,7 @@ typedef struct afl_state {
 
   afl_forkserver_t fsrv;
   sharedmem_t      shm;
+  aflrun_shm_t shm_run;
   sharedmem_t     *shm_fuzz;
   afl_env_vars_t   afl_env;
 
@@ -538,7 +552,11 @@ typedef struct afl_state {
 
   u8 *virgin_bits,                      /* Regions yet untouched by fuzzing */
       *virgin_tmout,                    /* Bits we haven't seen in tmouts   */
-      *virgin_crash;                    /* Bits we haven't seen in crashes  */
+      *virgin_crash,                    /* Bits we haven't seen in crashes  */
+      *virgin_reachables,               /* Similar to virgin_bits, but for
+                                                  reachable basic blocks */
+      *virgin_freachables,              /* Same as above but for functions  */
+      *virgin_ctx;                      /* Virgin bits for context-sensitive */
 
   double *alias_probability;            /* alias weighted probabilities     */
   u32    *alias_table;                /* alias weighted random lookup table */
@@ -559,6 +577,9 @@ typedef struct afl_state {
       queued_imported,                  /* Items imported via -S            */
       queued_favored,                   /* Paths deemed favorable           */
       queued_with_cov,                  /* Paths with new coverage bytes    */
+      queued_extra,                     /* Number of extra seeds of aflrun  */
+      queued_extra_disabled,            /* Number of extra seeds disabled   */
+      queued_aflrun,                    /* Number of seeds in aflrun queue  */
       pending_not_fuzzed,               /* Queued but not done yet          */
       pending_favored,                  /* Pending favored paths            */
       cur_skipped_items,                /* Abandoned inputs in cur cycle    */
@@ -568,7 +589,8 @@ typedef struct afl_state {
       var_byte_count,                   /* Bitmap bytes with var behavior   */
       current_entry,                    /* Current queue entry ID           */
       havoc_div,                        /* Cycle count divisor for havoc    */
-      max_det_extras;                   /* deterministic extra count (dicts)*/
+      max_det_extras,                   /* deterministic extra count (dicts)*/
+      aflrun_favored;                   /* Num of seeds selected by aflrun  */
 
   u64 total_crashes,                    /* Total number of crashes          */
       saved_crashes,                    /* Crashes with unique signatures   */
@@ -778,6 +800,45 @@ typedef struct afl_state {
   u32   bitsmap_size;
 #endif
 
+  u64 exec_time, fuzz_time, exec_time_short, fuzz_time_short, last_exec_time;
+
+  u32 fuzzed_times;          /* Number of times common_fuzz_stuff is called */
+
+  reach_t** reachable_to_targets;/* Map from reachable index to array
+                                          of targets to which it can reach*/
+  reach_t* reachable_to_size;    /* Map from index to array size */
+  char** reachable_names;
+  double* target_weights;
+
+  u64 total_perf_score;
+
+  struct queue_entry** aflrun_queue; /* Queue for fuzzing in order */
+  u32 aflrun_idx; /* `current_entry` for aflrun queue */
+  u32* aflrun_seeds; /* Map seed to its `fuzz_level` value */
+  double* perf_scores;
+
+  u8 force_cycle_end, is_aflrun;
+  double quantum_ratio;   /* actual quantum / planned quantum */
+
+  u8** virgins; size_t* clusters;
+  struct queue_entry*** tops;
+  u8* new_bits;
+  size_t num_maps;
+  u8 div_score_changed;
+
+#ifdef USE_PYTHON
+  PyObject* aflrun_assign_energy;
+  PyObject* aflrun_assign_seed;
+#endif
+
+  u32 runs_in_current_cycle;
+  u32 min_num_exec;
+  u8 check_at_begin, log_at_begin;
+  u64 log_check_interval;
+  double trim_thr, queue_quant_thr;
+
+  char* temp_dir;
+
 } afl_state_t;
 
 struct custom_mutator {
@@ -826,7 +887,8 @@ struct custom_mutator {
    * @param buf_size Size of the test case
    * @return The amount of fuzzes to perform on this queue entry, 0 = skip
    */
-  u32 (*afl_custom_fuzz_count)(void *data, const u8 *buf, size_t buf_size);
+  u32 (*afl_custom_fuzz_count)(
+    void *data, const u8 *buf, size_t buf_size, u32 saved_max);
 
   /**
    * Perform custom mutations on a given input
@@ -1088,11 +1150,11 @@ void discover_word(u8 *ret, u32 *current, u32 *virgin);
 void init_count_class16(void);
 void minimize_bits(afl_state_t *, u8 *, u8 *);
 #ifndef SIMPLE_FILES
-u8 *describe_op(afl_state_t *, u8, size_t);
+u8 *describe_op(afl_state_t *, u8, u8, size_t);
 #endif
-u8 save_if_interesting(afl_state_t *, void *, u32, u8);
+u8 save_if_interesting(afl_state_t *, void *, u32, u8, u8);
 u8 has_new_bits(afl_state_t *, u8 *);
-u8 has_new_bits_unclassified(afl_state_t *, u8 *);
+u8 has_new_bits_mul(afl_state_t *, u8* const *, u8**, size_t, u8);
 
 /* Extras */
 
@@ -1117,6 +1179,7 @@ void show_stats(afl_state_t *);
 void show_stats_normal(afl_state_t *);
 void show_stats_pizza(afl_state_t *);
 void show_init_stats(afl_state_t *);
+void aflrun_write_to_log(afl_state_t *);
 
 /* StatsD */
 
@@ -1133,6 +1196,7 @@ u8   calibrate_case(afl_state_t *, struct queue_entry *, u8 *, u32, u8);
 u8   trim_case(afl_state_t *, struct queue_entry *, u8 *);
 u8   common_fuzz_stuff(afl_state_t *, u8 *, u32);
 fsrv_run_result_t fuzz_run_target(afl_state_t *, afl_forkserver_t *fsrv, u32);
+void aflrun_recover_virgin(afl_state_t* afl);
 
 /* Fuzz one */
 
@@ -1173,6 +1237,8 @@ void   save_cmdline(afl_state_t *, u32, char **);
 void   read_foreign_testcases(afl_state_t *, int);
 void   write_crash_readme(afl_state_t *afl);
 u8     check_if_text_buf(u8 *buf, u32 len);
+char* aflrun_find_temp(char* temp_dir);
+void aflrun_temp_dir_init(afl_state_t* afl, const char* temp_dir);
 
 /* CmpLog */
 
@@ -1289,6 +1355,8 @@ void queue_testcase_retake_mem(afl_state_t *afl, struct queue_entry *q, u8 *in,
 /* Add a new queue entry directly to the cache */
 
 void queue_testcase_store_mem(afl_state_t *afl, struct queue_entry *q, u8 *mem);
+u32 select_aflrun_seeds(afl_state_t *afl);
+int cmp_quant_score(const void* a, const void* b);
 
 #if TESTCASE_CACHE == 1
   #error define of TESTCASE_CACHE must be zero or larger than 1
diff --git a/include/aflrun.h b/include/aflrun.h
new file mode 100644
index 00000000..89921e35
--- /dev/null
+++ b/include/aflrun.h
@@ -0,0 +1,111 @@
+#ifndef _HAVE_AFL_RUN_H
+#define _HAVE_AFL_RUN_H
+
+#include "types.h"
+#include "config.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+	/* functions called at initialization */
+
+	void aflrun_load_config(const char* config_str,
+		u8* check_at_begin, u8* log_at_begin, u64* log_check_interval,
+		double* trim_thr, double* queue_quant_thr, u32* min_num_exec);
+
+	void aflrun_load_freachables(const char* temp_path,
+		reach_t* num_ftargets, reach_t* num_freachables);
+	void aflrun_load_edges(const char* bb_edges, reach_t num_reachables);
+	void aflrun_load_dists(const char* dir, reach_t num_targets,
+		reach_t num_reachables, char** reachable_names);
+
+	void aflrun_init_fringes(
+		reach_t num_reachables, reach_t num_targets);
+	void aflrun_init_groups(reach_t num_targets);
+
+	void aflrun_init_globals(void* afl,
+		reach_t num_targets, reach_t num_reachables,
+		reach_t num_ftargets, reach_t num_freachables,
+		u8* virgin_reachables, u8* virgin_freachables, u8* virgin_ctx,
+		char** reachable_names, reach_t** reachable_to_targets,
+		reach_t* reachable_to_size, const char* out_dir,
+		const double* target_weights, u32 map_size, u8* div_switch,
+		const char* cycle_time);
+
+	void aflrun_remove_seed(u32 seed);
+
+	/* functions used to update fringe */
+
+	// path-sensitive fringe, called for mutated input and new imported seed
+	// One thing to note is that we don't consider seed that varies in coverage
+	// among different runs, in which case we only use the coverage of the first
+	// run (e.i. `common_fuzz_stuff` for mutated input and sync input or
+	// first calibration for imported seed)
+	u8 aflrun_has_new_path(const u8* freached, const u8* reached, const u8* path,
+		const ctx_t* virgin_trace, size_t len, u8 inc, u32 seed,
+		const u8* new_bits, const size_t* clusters, size_t num_clusters);
+	u8 aflrun_end_cycle();
+	void aflrun_update_fuzzed_quant(u32 id, double fuzzed_quant);
+
+	/* functions for debugging and inspecting */
+
+	void aflrun_check_state(void);
+	void aflrun_log_fringes(const char* path, u8 prog);
+	void aflrun_get_state(int* cycle_count, u32* cov_quant,
+		size_t* div_num_invalid, size_t* div_num_fringes);
+	u64 aflrun_queue_cycle(void);
+	u8 aflrun_get_mode(void);
+	bool aflrun_is_uni(void);
+	void aflrun_get_reached(reach_t* num_reached, reach_t* num_freached,
+		reach_t* num_reached_targets, reach_t* num_freached_targets);
+	double aflrun_get_seed_quant(u32 seed);
+	void aflrun_get_time(u64* last_reachable, u64* last_fringe,
+		u64* last_pro_fringe, u64* last_target, u64* last_ctx_reachable,
+		u64* last_ctx_fringe, u64* last_ctx_pro_fringe, u64* last_ctx_target);
+
+	/* functions called at beginning of each cycle to assign energy */
+
+	// calculate energy for each seed
+	void aflrun_assign_energy(u32 num_seeds, const u32* seeds, double* ret);
+	void aflrun_set_num_active_seeds(u32 n);
+	u8 aflrun_cycle_end(u8*);
+
+	// update score and queue culling
+	void aflrun_update_fringe_score(u32 seed);
+	u32 aflrun_cull_queue(u32* seeds, u32 num);
+	void aflrun_set_favored_seeds(const u32* seeds, u32 num, u8 mode);
+
+	/* Functions for the second diversity idea */
+
+	// Get virgin maps associated with given targets, result goes into `ret_maps`
+	size_t aflrun_get_virgins(
+		const ctx_t* targets, size_t num, u8** ret_maps, size_t* ret_clusters);
+	size_t aflrun_max_clusters(u32 seed);
+	size_t aflrun_get_seed_virgins(u32 seed, u8** ret_maps, size_t* ret_clusters);
+	size_t aflrun_get_seed_tops(u32 seed, void*** ret_tops);
+	size_t aflrun_get_num_clusters(void);
+	size_t aflrun_get_all_tops(void*** ret_tops, u8 mode);
+
+	// For target clustering
+#ifdef WORD_SIZE_64
+	void discover_word_mul(u8 *new_bits,
+		u64 *current, u64* const *virgins, size_t num, size_t idx, u8 modify);
+#else
+	#error "Please use 64-bit to compile AFLRun"
+#endif
+	void aflrun_commit_bit_seqs(const size_t* clusters, size_t num);
+
+	// AFL interfaces
+	u64 get_seed_fav_factor(void* afl_void, u32 seed);
+	double get_seed_perf_score(void* afl_void, u32 seed);
+	bool get_seed_div_favored(void* afl_void, u32 seed);
+	u8 get_seed_cov_favored(void* afl_void, u32 seed);
+	void disable_aflrun_extra(void* afl_void, u32 seed);
+	u64 get_cur_time(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_HAVE_AFL_RUN_H */
\ No newline at end of file
diff --git a/include/config.h b/include/config.h
index 67b9f932..f359e67d 100644
--- a/include/config.h
+++ b/include/config.h
@@ -508,5 +508,44 @@
 
 #define AFL_TXT_STRING_MAX_MUTATIONS 6
 
+/* Shared memory used to record if each basic block and function
+   reachable to any target has been executed during one execution */
+#define SHM_RBB_ENV_VAR    "__AFLRUN_RBB_SHM_ID"
+#define SHM_RF_ENV_VAR     "__AFLRUN_RF_SHM_ID"
+
+#define SHM_TR_ENV_VAR     "__AFLRUN_TR_SHM_ID"
+#define SHM_VIR_ENV_VAR    "__AFLRUN_VIR_SHM_ID"
+#define SHM_VTR_ENV_VAR    "__AFLRUN_VTR_SHM_ID"
+
+#define SHM_TT_ENV_VAR     "__AFLRUN_TT_SHM_ID"
+#define SHM_DIV_ENV_VAR    "__AFLRUN_DIV_SHM_ID"
+
+#define CTX_SIZE_POW2       8
+#define CTX_SIZE            (1 << CTX_SIZE_POW2)
+#define CTX_NUM_BYTES       (CTX_SIZE / 8)
+#define CTX_IDX(block, ctx) ((block) * CTX_SIZE + (ctx))
+
+#define MAP_RF_SIZE(nr)     (((nr) + 7) / 8)
+#define MAP_RBB_SIZE(nr)    MAP_RF_SIZE((nr) + 1)
+// we need one more bit for telling if counting frequency
+#define MAP_TR_SIZE(nr)     ((nr) * CTX_NUM_BYTES)
+#ifndef AFLRUN_CTX
+#define MAP_VTR_SIZE(nr)    ((nr) * sizeof(ctx_t) + sizeof(trace_t))
+#else
+#define MAP_VTR_SIZE(nr)    (MAP_TR_SIZE(nr) * 8 * sizeof(ctx_t) + sizeof(trace_t))
+#endif // AFLRUN_CTX
+
+#define AFLRUN_SPLICE_TIMES(e, sc) \
+   (((e) * SPLICE_HAVOC) / (HAVOC_CYCLES + SPLICE_HAVOC * (sc)))
+#define AFLRUN_HAVOC_TIMES(e, sc) \
+   ((e) - AFLRUN_SPLICE_TIMES(e, (sc)) * (sc))
+#define AFLRUN_CUSTOM_TIMES(e, n) \
+   (((e) * HAVOC_CYCLES) / \
+   (HAVOC_CYCLES + SPLICE_HAVOC * SPLICE_CYCLES + HAVOC_CYCLES * (n)))
+
+#define QUANTUM_TIME 100000
+
+#define AFLRUN_TEMP_SIG "##SIG_AFLRUN_TEMP_DIR##="
+
 #endif                                                  /* ! _HAVE_CONFIG_H */
 
diff --git a/include/coverage-32.h b/include/coverage-32.h
index 89c08cdf..cca8e743 100644
--- a/include/coverage-32.h
+++ b/include/coverage-32.h
@@ -1,5 +1,6 @@
 #include "config.h"
 #include "types.h"
+#error "AFLRun does not support 32 bit fuzzing, please use 64 version"
 
 u32 skim(const u32 *virgin, const u32 *current, const u32 *current_end);
 u32 classify_word(u32 word);
diff --git a/include/coverage-64.h b/include/coverage-64.h
index aab79d79..a094531b 100644
--- a/include/coverage-64.h
+++ b/include/coverage-64.h
@@ -5,7 +5,8 @@
   #include <immintrin.h>
 #endif
 
-u32 skim(const u64 *virgin, const u64 *current, const u64 *current_end);
+u32 skim(const u64* const* virgins, size_t num,
+  const u64 *current, const u64 *current_end);
 u64 classify_word(u64 word);
 
 inline u64 classify_word(u64 word) {
@@ -74,6 +75,9 @@ inline void classify_counts(afl_forkserver_t *fsrv) {
 
 /* Updates the virgin bits, then reflects whether a new count or a new tuple is
  * seen in ret. */
+
+/* Updates the virgin bits, then reflects whether a new count or a new tuple is
+ * seen in ret. */
 inline void discover_word(u8 *ret, u64 *current, u64 *virgin) {
 
   /* Optimize for (*current & *virgin) == 0 - i.e., no bits in current bitmap
@@ -108,9 +112,11 @@ inline void discover_word(u8 *ret, u64 *current, u64 *virgin) {
 
 #if defined(__AVX512F__) && defined(__AVX512DQ__)
   #define PACK_SIZE 64
-inline u32 skim(const u64 *virgin, const u64 *current, const u64 *current_end) {
+inline u32 skim(const u64* const* virgins, size_t num,
+  const u64 *current, const u64 *current_end) {
 
-  for (; current != current_end; virgin += 8, current += 8) {
+  size_t idx = 0;
+  for (; current < current_end; idx += 8, current += 8) {
 
     __m512i  value = *(__m512i *)current;
     __mmask8 mask = _mm512_testn_epi64_mask(value, value);
@@ -118,18 +124,24 @@ inline u32 skim(const u64 *virgin, const u64 *current, const u64 *current_end) {
     /* All bytes are zero. */
     if (likely(mask == 0xff)) continue;
 
-      /* Look for nonzero bytes and check for new bits. */
   #define UNROLL(x)                                                            \
-    if (unlikely(!(mask & (1 << x)) && classify_word(current[x]) & virgin[x])) \
-    return 1
-    UNROLL(0);
-    UNROLL(1);
-    UNROLL(2);
-    UNROLL(3);
-    UNROLL(4);
-    UNROLL(5);
-    UNROLL(6);
-    UNROLL(7);
+    if (unlikely(!(mask & (1 << x)))) { \
+      u64 classified = classify_word(current[x]); \
+      for (size_t i = 0; i < num; ++i) { \
+        if (classified & virgins[i][idx + x]) \
+          return 1; \
+      } \
+    }
+
+    UNROLL(0)
+    UNROLL(1)
+    UNROLL(2)
+    UNROLL(3)
+    UNROLL(4)
+    UNROLL(5)
+    UNROLL(6)
+    UNROLL(7)
+
   #undef UNROLL
 
   }
@@ -142,11 +154,13 @@ inline u32 skim(const u64 *virgin, const u64 *current, const u64 *current_end) {
 
 #if !defined(PACK_SIZE) && defined(__AVX2__)
   #define PACK_SIZE 32
-inline u32 skim(const u64 *virgin, const u64 *current, const u64 *current_end) {
+inline u32 skim(const u64* const* virgins, size_t num,
+  const u64 *current, const u64 *current_end) {
 
   __m256i zeroes = _mm256_setzero_si256();
 
-  for (; current < current_end; virgin += 4, current += 4) {
+  size_t idx = 0;
+  for (; current < current_end; idx += 4, current += 4) {
 
     __m256i value = *(__m256i *)current;
     __m256i cmp = _mm256_cmpeq_epi64(value, zeroes);
@@ -156,14 +170,18 @@ inline u32 skim(const u64 *virgin, const u64 *current, const u64 *current_end) {
     if (likely(mask == (u32)-1)) continue;
 
     /* Look for nonzero bytes and check for new bits. */
-    if (unlikely(!(mask & 0xff) && classify_word(current[0]) & virgin[0]))
-      return 1;
-    if (unlikely(!(mask & 0xff00) && classify_word(current[1]) & virgin[1]))
-      return 1;
-    if (unlikely(!(mask & 0xff0000) && classify_word(current[2]) & virgin[2]))
-      return 1;
-    if (unlikely(!(mask & 0xff000000) && classify_word(current[3]) & virgin[3]))
-      return 1;
+    #define UNROLL(j) \
+    if (unlikely(!(mask & (0xff << (j * 8))))) { \
+      u64 classified = classify_word(current[j]); \
+      for (size_t i = 0; i < num; ++i) \
+        if (classified & virgins[i][idx + j]) \
+          return 1; \
+    }
+    UNROLL(0)
+    UNROLL(1)
+    UNROLL(2)
+    UNROLL(3)
+    #undef UNROLL
 
   }
 
@@ -175,15 +193,26 @@ inline u32 skim(const u64 *virgin, const u64 *current, const u64 *current_end) {
 
 #if !defined(PACK_SIZE)
   #define PACK_SIZE 32
-inline u32 skim(const u64 *virgin, const u64 *current, const u64 *current_end) {
-
-  for (; current < current_end; virgin += 4, current += 4) {
+inline u32 skim(const u64* const* virgins, size_t num,
+  const u64 *current, const u64 *current_end) {
+
+  size_t idx = 0;
+  for (; current < current_end; idx += 4, current += 4) {
+
+  #define UNROLL(j) \
+    if (unlikely(current[j])) { \
+      u64 classified = classify_word(current[j]); \
+      for (size_t i = 0; i < num; ++i) \
+        if (classified & virgins[i][idx + j]) \
+          return 1; \
+    }
 
-    if (unlikely(current[0] && classify_word(current[0]) & virgin[0])) return 1;
-    if (unlikely(current[1] && classify_word(current[1]) & virgin[1])) return 1;
-    if (unlikely(current[2] && classify_word(current[2]) & virgin[2])) return 1;
-    if (unlikely(current[3] && classify_word(current[3]) & virgin[3])) return 1;
+    UNROLL(0)
+    UNROLL(1)
+    UNROLL(2)
+    UNROLL(3)
 
+  #undef UNROLL
   }
 
   return 0;
diff --git a/include/forkserver.h b/include/forkserver.h
index 35bc1771..dbf5fc6e 100644
--- a/include/forkserver.h
+++ b/include/forkserver.h
@@ -32,6 +32,7 @@
 #include <stdbool.h>
 
 #include "types.h"
+#include "trace.h"
 
 #ifdef __linux__
 /**
@@ -82,6 +83,12 @@ typedef struct afl_forkserver {
   /* a program that includes afl-forkserver needs to define these */
 
   u8 *trace_bits;                       /* SHM with instrumentation bitmap  */
+  u8* trace_reachables;                 /* SHM to trace reachable BBs       */
+  u8* trace_freachables;                /* SHM to trace reachable Functions */
+  u8* trace_ctx;                    /* SHM to trace reachables with context */
+  trace_t* trace_virgin;               /* For each newly reached virgin block,
+ we record call context and path context, this is useful for fringe testing */
+  trace_t* trace_targets;              /* Reached targets in each run       */
 
   s32 fsrv_pid,                         /* PID of the fork server           */
       child_pid,                        /* PID of the fuzzed program        */
@@ -180,6 +187,15 @@ typedef struct afl_forkserver {
   char                 *nyx_aux_string;
 #endif
 
+  reach_t num_targets,           /* Number of target basic blocks */
+           num_reachables;            /* Number of basic blocks reachable
+                                          to any target*/
+  reach_t num_ftargets,          /* Number of target functions */
+           num_freachables;           /* Number of functions reachable
+                                          to any function target */
+
+  u8 testing;
+
 } afl_forkserver_t;
 
 typedef enum fsrv_run_result {
diff --git a/include/sharedmem.h b/include/sharedmem.h
index d32bd845..540f243b 100644
--- a/include/sharedmem.h
+++ b/include/sharedmem.h
@@ -29,6 +29,7 @@
 #define __AFL_SHAREDMEM_H
 
 #include "types.h"
+#include "trace.h"
 
 typedef struct sharedmem {
 
@@ -56,8 +57,32 @@ typedef struct sharedmem {
 
 } sharedmem_t;
 
+#ifdef USEMMAP
+#error "AFLRun Does not support USEMMAP currently"
+#endif
+
+typedef struct aflrun_shm {
+
+  /* aflrun id */
+  s32 shm_rbb_id, shm_rf_id, shm_tr_id,
+    shm_vir_id, shm_vtr_id, shm_tt_id, shm_div_id;
+
+  u8 *map_reachables;          /* SHM to trace reachable BBs */
+  u8 *map_freachables;         /* SHM to trace reachable Functions */
+  u8 *map_ctx;                 /* SHM to trace reachables with context */
+  trace_t *map_new_blocks;         /* For each newly reached virgin block,
+  we record call context and path context, this is useful for fringe testing */
+  u8 *map_virgin_ctx;                /* Virgin bits for context-sensitive */
+  trace_t *map_targets;        /* For each reached targets, we record relative
+  information, this is useful for target diversity */
+  u8 *div_switch; /* A switch to tell program if we should record diversity */
+
+} aflrun_shm_t;
+
 u8  *afl_shm_init(sharedmem_t *, size_t, unsigned char non_instrumented_mode);
 void afl_shm_deinit(sharedmem_t *);
+void aflrun_shm_init(aflrun_shm_t*, reach_t, reach_t, unsigned char);
+void aflrun_shm_deinit(aflrun_shm_t*);
 
 #endif
 
diff --git a/include/trace.h b/include/trace.h
new file mode 100644
index 00000000..f7253ac7
--- /dev/null
+++ b/include/trace.h
@@ -0,0 +1,19 @@
+#ifndef _HAVE_TRACE_H
+#define _HAVE_TRACE_H
+
+#ifdef __cplusplus
+#include <atomic>
+using namespace std;
+#else
+#include <stdatomic.h>
+#endif
+
+typedef struct _trace_t {
+#ifdef AFLRUN_OVERHEAD
+  atomic_ullong overhead;
+#endif // AFLRUN_OVERHEAD
+  atomic_size_t num;
+  ctx_t trace[];
+} trace_t;
+
+#endif
diff --git a/include/types.h b/include/types.h
index d6476d82..595d689b 100644
--- a/include/types.h
+++ b/include/types.h
@@ -23,8 +23,14 @@
 #ifndef _HAVE_TYPES_H
 #define _HAVE_TYPES_H
 
+#ifdef __cplusplus
+#include <cstdint>
+#include <cstdlib>
+#else
 #include <stdint.h>
 #include <stdlib.h>
+#endif
+
 #include "config.h"
 
 typedef uint8_t  u8;
@@ -192,5 +198,14 @@ typedef int128_t s128;
   #endif
 #endif
 
+typedef u32 reach_t;
+
+typedef struct {
+  reach_t block;
+  u32 call_ctx;
+} ctx_t;
+
+#define IS_SET(arr, i) (((arr)[(i) / 8] & (1 << ((i) % 8))) != 0)
+
 #endif                                                   /* ! _HAVE_TYPES_H */