author     Huanyao Rong <r3tr0spect2019@qq.com>        2024-06-21 21:03:37 -0700
committer  Nguyễn Gia Phong <cnx@loang.net>            2024-12-03 11:17:44 +0900
commit     d04bdf2e841fa6706c16bbba1cb6f6c73d647767 (patch)
tree       23393adc733e6e9e61d7eb226c90258870bb0624 /custom_mutators
parent     cee3c86d7d5f0a05ad6cbb1434dc13162a16e336 (diff)
download   afl++-d04bdf2e841fa6706c16bbba1cb6f6c73d647767.tar.gz
Implement AFLRun
References: https://github.com/Mem2019/AFLRun/commit/f5bb87f78ef1
References: https://github.com/Mem2019/AFLRun/commit/3af5f11b5644
Diffstat (limited to 'custom_mutators')
-rw-r--r--   custom_mutators/afl/.gitignore        |    1
-rw-r--r--   custom_mutators/afl/Makefile          |   28
-rw-r--r--   custom_mutators/afl/afl-mutator.so.c  | 1317
-rw-r--r--   custom_mutators/afl/alloc-inl.h       |  570
-rw-r--r--   custom_mutators/afl/config.h          |  361
-rw-r--r--   custom_mutators/afl/debug.h           |  251
-rw-r--r--   custom_mutators/afl/havoc.c           |   89
-rw-r--r--   custom_mutators/afl/types.h           |   86
8 files changed, 2703 insertions, 0 deletions
diff --git a/custom_mutators/afl/.gitignore b/custom_mutators/afl/.gitignore
new file mode 100644
index 00000000..c5ffedf0
--- /dev/null
+++ b/custom_mutators/afl/.gitignore
@@ -0,0 +1 @@
+havoc
diff --git a/custom_mutators/afl/Makefile b/custom_mutators/afl/Makefile
new file mode 100644
index 00000000..fc656e8e
--- /dev/null
+++ b/custom_mutators/afl/Makefile
@@ -0,0 +1,28 @@
+PREFIX ?= /usr/local
+BIN_PATH = $(PREFIX)/bin
+HELPER_PATH = $(PREFIX)/lib/afl
+DOC_PATH = $(PREFIX)/share/doc/afl
+
+CFLAGS ?= -O3 -funroll-loops
+CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign \
+ -DAFL_PATH=\"$(HELPER_PATH)\" -DDOC_PATH=\"$(DOC_PATH)\" \
+ -DBIN_PATH=\"$(BIN_PATH)\"
+
+ifneq "$(filter Linux GNU%,$(shell uname))" ""
+ LDFLAGS += -ldl -lm
+endif
+
+COMM_HDR = alloc-inl.h config.h debug.h types.h
+
+all: afl-mutator.so havoc
+clean:
+ rm -rf *.o *.so havoc
+
+afl-mutator.so.o: afl-mutator.so.c $(COMM_HDR)
+ $(CC) -fPIC $(CFLAGS) -c afl-mutator.so.c -o $@
+
+afl-mutator.so: afl-mutator.so.o
+ $(CC) -shared -fPIC $(CFLAGS) $@.o -o $@ $(LDFLAGS)
+
+havoc: havoc.c $(COMM_HDR) afl-mutator.so.o
+	$(CC) $(CFLAGS) $@.c afl-mutator.so.o -o $@ $(LDFLAGS)
\ No newline at end of file
diff --git a/custom_mutators/afl/afl-mutator.so.c b/custom_mutators/afl/afl-mutator.so.c
new file mode 100644
index 00000000..8d012160
--- /dev/null
+++ b/custom_mutators/afl/afl-mutator.so.c
@@ -0,0 +1,1317 @@
+#include "types.h"
+#include "config.h"
+#include "debug.h"
+#include "alloc-inl.h"
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <errno.h>
+#include <signal.h>
+#include <dirent.h>
+#include <ctype.h>
+#include <fcntl.h>
+#include <termios.h>
+#include <dlfcn.h>
+#include <sched.h>
+#include <stdbool.h>
+
+#include <sys/wait.h>
+#include <sys/time.h>
+#include <sys/shm.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <sys/file.h>
+
+static s32 dev_urandom_fd = -1;
+static u32 rand_cnt;
+
+struct extra_data {
+ u8* data; /* Dictionary token data */
+ u32 len; /* Dictionary token length */
+ u32 hit_cnt; /* Use count in the corpus */
+};
+
+static struct extra_data* extras; /* Extra tokens to fuzz with */
+static u32 extras_cnt; /* Total number of tokens read */
+
+static struct extra_data* a_extras; /* Automatically selected extras */
+static u32 a_extras_cnt; /* Total number of tokens available */
+
+static u8* fuzz_buf;
+static u8** splice_bufs;
+static u32* splice_buf_sizes;
+static u32 num_splice_bufs = 0;
+static u64 cycle_time;
+
+/* Interesting values, as per config.h */
+
+static s8 interesting_8[] = { INTERESTING_8 };
+static s16 interesting_16[] = { INTERESTING_8, INTERESTING_16 };
+static s32 interesting_32[] = { INTERESTING_8, INTERESTING_16, INTERESTING_32 };
+
+static u64 init_time;
+
+static u8 no_splicing = 0;
+
+/************ MOpt Starts ************/
+static u64 limit_time_puppet = 0;
+static u64 orig_hit_cnt_puppet = 0;
+static u64 last_limit_time_start = 0;
+static u64 tmp_pilot_time = 0;
+static u64 total_pacemaker_time = 0;
+static u64 total_puppet_find = 0;
+static u64 temp_puppet_find = 0;
+static u64 most_time_key = 0;
+static u64 most_time_puppet = 0;
+static u64 old_hit_count = 0;
+static int SPLICE_CYCLES_puppet;
+static int limit_time_sig = 0;
+static int key_puppet = 0;
+static int key_module = 0;
+static double w_init = 0.9;
+static double w_end = 0.3;
+static double w_now;
+static int g_now = 0;
+static int g_max = 5000;
+#define operator_num 18
+#define swarm_num 5
+#define period_core 500000
+static u64 tmp_core_time = 0;
+static int swarm_now = 0 ;
+static double x_now[swarm_num][operator_num],
+ L_best[swarm_num][operator_num],
+ eff_best[swarm_num][operator_num],
+ G_best[operator_num],
+ v_now[swarm_num][operator_num],
+ probability_now[swarm_num][operator_num],
+ swarm_fitness[swarm_num];
+
+static u64 stage_finds_puppet[swarm_num][operator_num],
+ /* Patterns found per fuzz stage */
+ stage_finds_puppet_v2[swarm_num][operator_num],
+ stage_cycles_puppet_v2[swarm_num][operator_num],
+ stage_cycles_puppet_v3[swarm_num][operator_num],
+ stage_cycles_puppet[swarm_num][operator_num],
+ operator_finds_puppet[operator_num],
+ core_operator_finds_puppet[operator_num],
+ core_operator_finds_puppet_v2[operator_num],
+ core_operator_cycles_puppet[operator_num],
+ core_operator_cycles_puppet_v2[operator_num],
+ core_operator_cycles_puppet_v3[operator_num];
+ /* Execs per fuzz stage */
+
+
+#define RAND_C (rand()%1000*0.001)
+#define v_max 1
+#define v_min 0.05
+#define limit_time_bound 1.1
+#define SPLICE_CYCLES_puppet_up 25
+#define SPLICE_CYCLES_puppet_low 5
+#define STAGE_RANDOMBYTE 12
+#define STAGE_DELETEBYTE 13
+#define STAGE_Clone75 14
+#define STAGE_OverWrite75 15
+#define STAGE_OverWriteExtra 16
+#define STAGE_InsertExtra 17
+
+#define period_pilot 50000
+static double period_pilot_tmp = 5000.0;
+
+static int key_lv = 0;
+
+static int select_algorithm(int extras) {
+
+ int i_puppet, j_puppet;
+ //double total_puppet = 0.0;
+ //srandom(time(NULL));
+
+ u32 seed[2];
+
+ ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom");
+
+ srandom(seed[0]);
+
+ //double sele = ((double)(random()%10000)*0.0001);
+ //SAYF("select : %f\n",sele);
+ j_puppet = 0;
+ int operator_number = operator_num;
+ if (extras < 2) operator_number = operator_number - 2;
+ double range_sele = (double)probability_now[swarm_now][operator_number - 1];
+ double sele = ((double)(random() % 10000) * 0.0001 * range_sele);
+
+ for (i_puppet = 0; i_puppet < operator_number; i_puppet++)
+ {
+ if (unlikely(i_puppet == 0))
+ {
+ if (sele < probability_now[swarm_now][i_puppet])
+ break;
+ }
+ else
+ {
+ if (sele < probability_now[swarm_now][i_puppet])
+ {
+ j_puppet =1;
+ break;
+ }
+ }
+ }
+ if ((j_puppet ==1 && sele < probability_now[swarm_now][i_puppet-1]) ||
+ (i_puppet + 1 < operator_num &&
+ sele > probability_now[swarm_now][i_puppet + 1]))
+ FATAL("error select_algorithm");
+ return i_puppet;
+}
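The loop above is a roulette-wheel draw over the cumulative per-swarm operator probabilities; a minimal standalone sketch of the same selection, using the hypothetical names pick_operator and cdf (cdf standing in for probability_now[swarm_now]):

    /* Scale a uniform draw into [0, cdf[n - 1]) and return the first bucket
       whose cumulative mass exceeds it. */
    static int pick_operator(const double* cdf, int n) {
      double r = (double)(random() % 10000) * 0.0001 * cdf[n - 1];
      for (int i = 0; i < n; i++)
        if (r < cdf[i]) return i;
      return n - 1;
    }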
+
+static void show_mopt_stats(void) {
+ if (limit_time_sig == 1)
+ {
+ if (key_puppet == 0)
+ {
+ if (key_module == 0)
+ {
+ printf("%s", "MOpt-AFL (pilot_fuzzing)");
+ }
+ else if (key_module == 1)
+ {
+ printf("%s", "MOpt-AFL (core_fuzzing)");
+ }
+ else if (key_module == 2)
+ {
+ printf("%s", "MOpt-AFL (pso_updating)");
+ }
+ }
+ else
+ {
+ if (key_module == 0)
+ {
+ printf("%s", "MOpt-AFL + pacemaker (pilot_fuzzing)");
+ }
+ else if (key_module == 1)
+ {
+ printf("%s", "MOpt-AFL + pacemaker (core_fuzzing)");
+ }
+ else if (key_module == 2)
+ {
+ printf("%s", "MOpt-AFL + pacemaker (pso_updating)");
+ }
+ }
+ }
+ else
+ {
+ printf("%s", "AFL");
+ }
+}
+
+static void pso_updating(void) {
+
+ g_now += 1;
+ if (g_now > g_max) g_now = 0;
+ w_now = (w_init - w_end)*(g_max - g_now) / (g_max)+w_end;
+ int tmp_swarm, i, j;
+ u64 temp_operator_finds_puppet = 0;
+ for (i = 0; i < operator_num; i++)
+ {
+ operator_finds_puppet[i] = core_operator_finds_puppet[i];
+
+ for (j = 0; j < swarm_num; j++)
+ {
+ operator_finds_puppet[i] =
+ operator_finds_puppet[i] + stage_finds_puppet[j][i];
+ }
+ temp_operator_finds_puppet =
+ temp_operator_finds_puppet + operator_finds_puppet[i];
+ }
+
+ for (i = 0; i < operator_num; i++)
+ {
+ if (operator_finds_puppet[i])
+ G_best[i] = (double)((double)(operator_finds_puppet[i]) /
+ (double)(temp_operator_finds_puppet));
+ }
+
+ for (tmp_swarm = 0; tmp_swarm < swarm_num; tmp_swarm++)
+ {
+ double x_temp = 0.0;
+ for (i = 0; i < operator_num; i++)
+ {
+ probability_now[tmp_swarm][i] = 0.0;
+ v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] +
+ RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) +
+ RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
+ x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
+ if (x_now[tmp_swarm][i] > v_max)
+ x_now[tmp_swarm][i] = v_max;
+ else if (x_now[tmp_swarm][i] < v_min)
+ x_now[tmp_swarm][i] = v_min;
+ x_temp += x_now[tmp_swarm][i];
+ }
+
+ for (i = 0; i < operator_num; i++)
+ {
+ x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
+ if (likely(i != 0))
+ probability_now[tmp_swarm][i] =
+ probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
+ else
+ probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
+ }
+ if (probability_now[tmp_swarm][operator_num - 1] < 0.99 ||
+ probability_now[tmp_swarm][operator_num - 1] > 1.01)
+ FATAL("ERROR probability");
+ }
+ swarm_now = 0;
+ key_module = 0;
+}
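For reference, the per-operator update performed above is the standard PSO step; restated in condensed form (s is the swarm index, i the operator index, and the two RAND_C draws are independent):

    /* Velocity: inertia plus pulls towards the local and global bests. */
    v_now[s][i] = w_now * v_now[s][i]
                  + RAND_C * (L_best[s][i] - x_now[s][i])
                  + RAND_C * (G_best[i] - x_now[s][i]);

    /* Position: move, clamp to [v_min, v_max], then normalize the row into the
       cumulative probability vector probability_now[s]. */
    x_now[s][i] += v_now[s][i];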
+
+
+/* TODO:
+static u8 fuzz_one(char** argv) {
+ int key_val_lv = 0;
+ if (limit_time_sig == 0)
+ key_val_lv = normal_fuzz_one(argv);
+ else
+ {
+ if (key_module == 0)
+ key_val_lv = pilot_fuzzing(argv);
+ else if (key_module == 1)
+ key_val_lv = core_fuzzing(argv);
+ else if (key_module == 2)
+ pso_updating();
+ }
+
+ return key_val_lv;
+}
+
+TODO: add at initialization
+ { //default L
+ limit_time_sig = 1;
+ limit_time_puppet = 1;
+ u64 limit_time_puppet2 = limit_time_puppet * 60 * 1000;
+ if (limit_time_puppet2 < limit_time_puppet ) FATAL("limit_time overflow");
+ limit_time_puppet = limit_time_puppet2;
+ SAYF("default limit_time_puppet %llu\n",limit_time_puppet);
+ }
+
+srandom(time(NULL));
+
+case 'V':{
+ most_time_key = 1;
+ if (sscanf(optarg, "%llu", &most_time_puppet) < 1 ||
+ optarg[0] == '-') FATAL("Bad syntax used for -V");
+ }
+break;
+
+
+ case 'L': {
+
+ //if (limit_time_sig) FATAL("Multiple -L options not supported");
+ limit_time_sig = 1;
+
+ if (sscanf(optarg, "%llu", &limit_time_puppet) < 1 ||
+ optarg[0] == '-') FATAL("Bad syntax used for -L");
+
+ u64 limit_time_puppet2 = limit_time_puppet * 60 * 1000;
+
+ if (limit_time_puppet2 < limit_time_puppet ) FATAL("limit_time overflow");
+ limit_time_puppet = limit_time_puppet2;
+
+ SAYF("limit_time_puppet %llu\n",limit_time_puppet);
+
+ if (limit_time_puppet == 0 )
+ key_puppet = 1;
+
+
+ }
+ break;
+
+
+{ //initialize swarms
+ int i;
+ int tmp_swarm = 0;
+ swarm_now = 0;
+
+ if (g_now > g_max) g_now = 0;
+ w_now = (w_init - w_end)*(g_max - g_now) / (g_max)+w_end;
+
+ for (tmp_swarm = 0; tmp_swarm < swarm_num; tmp_swarm++)
+ {
+ double total_puppet_temp = 0.0;
+ swarm_fitness[tmp_swarm] = 0.0;
+
+ for (i = 0; i < operator_num; i++)
+ {
+ stage_finds_puppet[tmp_swarm][i] = 0;
+ probability_now[tmp_swarm][i] = 0.0;
+ x_now[tmp_swarm][i] = ((double)(random() % 7000)*0.0001 + 0.1);
+ total_puppet_temp += x_now[tmp_swarm][i];
+ v_now[tmp_swarm][i] = 0.1;
+ L_best[tmp_swarm][i] = 0.5;
+ G_best[i] = 0.5;
+ eff_best[tmp_swarm][i] = 0.0;
+
+ }
+
+
+ for (i = 0; i < operator_num; i++) {
+ stage_cycles_puppet_v2[tmp_swarm][i] = stage_cycles_puppet[tmp_swarm][i];
+ stage_finds_puppet_v2[tmp_swarm][i] = stage_finds_puppet[tmp_swarm][i];
+ x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / total_puppet_temp;
+ }
+
+ double x_temp = 0.0;
+
+ for (i = 0; i < operator_num; i++)
+ {
+ probability_now[tmp_swarm][i] = 0.0;
+ v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
+
+ x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
+
+ if (x_now[tmp_swarm][i] > v_max)
+ x_now[tmp_swarm][i] = v_max;
+ else if (x_now[tmp_swarm][i] < v_min)
+ x_now[tmp_swarm][i] = v_min;
+
+ x_temp += x_now[tmp_swarm][i];
+ }
+
+ for (i = 0; i < operator_num; i++)
+ {
+ x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
+ if (likely(i != 0))
+ probability_now[tmp_swarm][i] = probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
+ else
+ probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
+ }
+ if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || probability_now[tmp_swarm][operator_num - 1] > 1.01)
+ FATAL("ERROR probability");
+
+
+
+
+
+ }
+
+ for (i = 0; i < operator_num; i++)
+ {
+ core_operator_finds_puppet[i] = 0;
+ core_operator_finds_puppet_v2[i] = 0;
+ core_operator_cycles_puppet[i] = 0;
+ core_operator_cycles_puppet_v2[i] = 0;
+ core_operator_cycles_puppet_v3[i] = 0;
+ }
+
+ }
+
+*/
+
+/* TODO: fuzzing loop
+
+ u64 cur_ms_lv = get_cur_time();
+if(most_time_key ==1)
+{
+ if( most_time_puppet * 1000 < cur_ms_lv - start_time)
+ break;
+}
+
+*/
+
+/************ MOpt Ends ************/
+
+static inline u32 UR(u32 limit) {
+
+ if (unlikely(!rand_cnt--)) {
+
+ u32 seed[2];
+
+ ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom");
+
+ srandom(seed[0]);
+ rand_cnt = (RESEED_RNG / 2) + (seed[1] % RESEED_RNG);
+
+ }
+
+ return random() % limit;
+
+}
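UR(n) yields a uniform value in [0, n - 1], reseeding the libc PRNG from /dev/urandom roughly every RESEED_RNG calls; typical uses elsewhere in this file:

    u32 byte_pos = UR(temp_len);       /* random byte offset in the buffer */
    u32 bit_pos  = UR(temp_len << 3);  /* random bit offset in the buffer  */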
+
+#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \
+ if (val < (_divisor) * (_limit_mult)) { \
+ sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \
+ return tmp[cur]; \
+ } \
+ } while (0)
+
+#define FLIP_BIT(_ar, _b) do { \
+ u8* _arf = (u8*)(_ar); \
+ u32 _bf = (_b); \
+ _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
+ } while (0)
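As a worked example of the bit indexing, FLIP_BIT(buf, 13) addresses byte 13 >> 3 = 1 with mask 128 >> (13 & 7) = 0x04:

    u8 buf[2] = { 0x00, 0x00 };
    FLIP_BIT(buf, 13);   /* buf[1] ^= 0x04, so buf becomes { 0x00, 0x04 } */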
+
+
+/* Get unix time in milliseconds */
+
+static u64 get_cur_time(void) {
+
+ struct timeval tv;
+ struct timezone tz;
+
+ gettimeofday(&tv, &tz);
+
+ return (tv.tv_sec * 1000ULL) + (tv.tv_usec / 1000);
+
+}
+
+/* Helper to choose random block len for block operations in fuzz_one().
+   Doesn't return zero, provided that limit is > 0. */
+
+static u32 choose_block_len(u32 limit) {
+
+ u32 min_value, max_value;
+ u32 rlim = MIN((get_cur_time() - init_time) / 1000 / cycle_time + 1, 3);
+
+ switch (UR(rlim)) {
+
+ case 0: min_value = 1;
+ max_value = HAVOC_BLK_SMALL;
+ break;
+
+ case 1: min_value = HAVOC_BLK_SMALL;
+ max_value = HAVOC_BLK_MEDIUM;
+ break;
+
+ default:
+
+ if (UR(10)) {
+
+ min_value = HAVOC_BLK_MEDIUM;
+ max_value = HAVOC_BLK_LARGE;
+
+ } else {
+
+ min_value = HAVOC_BLK_LARGE;
+ max_value = HAVOC_BLK_XL;
+
+ }
+
+ }
+
+ if (min_value >= limit) min_value = 1;
+
+ return min_value + UR(MIN(max_value, limit) - min_value + 1);
+
+}
+
+/* Describe integer as memory size. */
+
+static u8* DMS(u64 val) {
+
+ static u8 tmp[12][16];
+ static u8 cur;
+
+ cur = (cur + 1) % 12;
+
+ /* 0-9999 */
+ CHK_FORMAT(1, 10000, "%llu B", u64);
+
+ /* 10.0k - 99.9k */
+ CHK_FORMAT(1024, 99.95, "%0.01f kB", double);
+
+ /* 100k - 999k */
+ CHK_FORMAT(1024, 1000, "%llu kB", u64);
+
+ /* 1.00M - 9.99M */
+ CHK_FORMAT(1024 * 1024, 9.995, "%0.02f MB", double);
+
+ /* 10.0M - 99.9M */
+ CHK_FORMAT(1024 * 1024, 99.95, "%0.01f MB", double);
+
+ /* 100M - 999M */
+ CHK_FORMAT(1024 * 1024, 1000, "%llu MB", u64);
+
+ /* 1.00G - 9.99G */
+ CHK_FORMAT(1024LL * 1024 * 1024, 9.995, "%0.02f GB", double);
+
+ /* 10.0G - 99.9G */
+ CHK_FORMAT(1024LL * 1024 * 1024, 99.95, "%0.01f GB", double);
+
+ /* 100G - 999G */
+ CHK_FORMAT(1024LL * 1024 * 1024, 1000, "%llu GB", u64);
+
+  /* 1.00T - 9.99T */
+ CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 9.995, "%0.02f TB", double);
+
+ /* 10.0T - 99.9T */
+ CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 99.95, "%0.01f TB", double);
+
+#undef CHK_FORMAT
+
+ /* 100T+ */
+ strcpy(tmp[cur], "infty");
+ return tmp[cur];
+
+}
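A quick illustration of the ranges above (values computed from the CHK_FORMAT thresholds):

    SAYF("%s / %s\n", DMS(2048), DMS(50000));   /* prints "2048 B / 48.8 kB" */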
+
+/* Helper function for load_extras. */
+
+static int compare_extras_len(const void* p1, const void* p2) {
+ struct extra_data *e1 = (struct extra_data*)p1,
+ *e2 = (struct extra_data*)p2;
+
+ return e1->len - e2->len;
+}
+
+/* Read extras from a file, sort by size. */
+
+static void load_extras_file(u8* fname, u32* min_len, u32* max_len,
+ u32 dict_level) {
+
+ FILE* f;
+ u8 buf[MAX_LINE];
+ u8 *lptr;
+ u32 cur_line = 0;
+
+ f = fopen(fname, "r");
+
+ if (!f) PFATAL("Unable to open '%s'", fname);
+
+ while ((lptr = fgets(buf, MAX_LINE, f))) {
+
+ u8 *rptr, *wptr;
+ u32 klen = 0;
+
+ cur_line++;
+
+ /* Trim on left and right. */
+
+ while (isspace(*lptr)) lptr++;
+
+ rptr = lptr + strlen(lptr) - 1;
+ while (rptr >= lptr && isspace(*rptr)) rptr--;
+ rptr++;
+ *rptr = 0;
+
+ /* Skip empty lines and comments. */
+
+ if (!*lptr || *lptr == '#') continue;
+
+ /* All other lines must end with '"', which we can consume. */
+
+ rptr--;
+
+ if (rptr < lptr || *rptr != '"')
+ FATAL("Malformed name=\"value\" pair in line %u.", cur_line);
+
+ *rptr = 0;
+
+ /* Skip alphanumerics and dashes (label). */
+
+ while (isalnum(*lptr) || *lptr == '_') lptr++;
+
+ /* If @number follows, parse that. */
+
+ if (*lptr == '@') {
+
+ lptr++;
+ if (atoi(lptr) > dict_level) continue;
+ while (isdigit(*lptr)) lptr++;
+
+ }
+
+ /* Skip whitespace and = signs. */
+
+ while (isspace(*lptr) || *lptr == '=') lptr++;
+
+ /* Consume opening '"'. */
+
+ if (*lptr != '"')
+ FATAL("Malformed name=\"keyword\" pair in line %u.", cur_line);
+
+ lptr++;
+
+ if (!*lptr) FATAL("Empty keyword in line %u.", cur_line);
+
+ /* Okay, let's allocate memory and copy data between "...", handling
+ \xNN escaping, \\, and \". */
+
+ extras = ck_realloc_block(extras, (extras_cnt + 1) *
+ sizeof(struct extra_data));
+
+ wptr = extras[extras_cnt].data = ck_alloc(rptr - lptr);
+
+ while (*lptr) {
+
+ char* hexdigits = "0123456789abcdef";
+
+ switch (*lptr) {
+
+ case 1 ... 31:
+ case 128 ... 255:
+ FATAL("Non-printable characters in line %u.", cur_line);
+
+ case '\\':
+
+ lptr++;
+
+ if (*lptr == '\\' || *lptr == '"') {
+ *(wptr++) = *(lptr++);
+ klen++;
+ break;
+ }
+
+ if (*lptr != 'x' || !isxdigit(lptr[1]) || !isxdigit(lptr[2]))
+ FATAL("Invalid escaping (not \\xNN) in line %u.", cur_line);
+
+ *(wptr++) =
+ ((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) |
+ (strchr(hexdigits, tolower(lptr[2])) - hexdigits);
+
+ lptr += 3;
+ klen++;
+
+ break;
+
+ default:
+
+ *(wptr++) = *(lptr++);
+ klen++;
+
+ }
+
+ }
+
+ extras[extras_cnt].len = klen;
+
+ if (extras[extras_cnt].len > MAX_DICT_FILE)
+ FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line,
+ DMS(klen), DMS(MAX_DICT_FILE));
+
+ if (*min_len > klen) *min_len = klen;
+ if (*max_len < klen) *max_len = klen;
+
+ extras_cnt++;
+
+ }
+
+ fclose(f);
+
+}
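For illustration, the parser above accepts AFL-style dictionary lines of the form name="value", with an optional @level suffix and \xNN, \\ and \" escapes; a hypothetical input file could look like:

    # Comments and blank lines are skipped.
    header_png="\x89PNG\x0d\x0a\x1a\x0a"
    keyword_xml="<?xml version=\"1.0\"?>"
    rare_token@2="only loaded when the dictionary level is at least 2"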
+
+
+/* Read extras from the extras directory and sort them by size. */
+
+static void load_extras(u8* dir) {
+
+ DIR* d;
+ struct dirent* de;
+ u32 min_len = MAX_DICT_FILE, max_len = 0, dict_level = 0;
+ u8* x;
+
+ /* If the name ends with @, extract level and continue. */
+
+ if ((x = strchr(dir, '@'))) {
+
+ *x = 0;
+ dict_level = atoi(x + 1);
+
+ }
+
+ ACTF("Loading extra dict for AFL from '%s' (level %u)...", dir, dict_level);
+
+ d = opendir(dir);
+
+ if (!d) {
+
+ if (errno == ENOTDIR) {
+ load_extras_file(dir, &min_len, &max_len, dict_level);
+ goto check_and_sort;
+ }
+
+ PFATAL("Unable to open '%s'", dir);
+
+ }
+
+ if (x) FATAL("Dictionary levels not supported for directories.");
+
+ while ((de = readdir(d))) {
+
+ struct stat st;
+ u8* fn = alloc_printf("%s/%s", dir, de->d_name);
+ s32 fd;
+
+ if (lstat(fn, &st) || access(fn, R_OK))
+ PFATAL("Unable to access '%s'", fn);
+
+ /* This also takes care of . and .. */
+ if (!S_ISREG(st.st_mode) || !st.st_size) {
+
+ ck_free(fn);
+ continue;
+
+ }
+
+ if (st.st_size > MAX_DICT_FILE)
+ FATAL("Extra '%s' is too big (%s, limit is %s)", fn,
+ DMS(st.st_size), DMS(MAX_DICT_FILE));
+
+ if (min_len > st.st_size) min_len = st.st_size;
+ if (max_len < st.st_size) max_len = st.st_size;
+
+ extras = ck_realloc_block(extras, (extras_cnt + 1) *
+ sizeof(struct extra_data));
+
+ extras[extras_cnt].data = ck_alloc(st.st_size);
+ extras[extras_cnt].len = st.st_size;
+
+ fd = open(fn, O_RDONLY);
+
+ if (fd < 0) PFATAL("Unable to open '%s'", fn);
+
+ ck_read(fd, extras[extras_cnt].data, st.st_size, fn);
+
+ close(fd);
+ ck_free(fn);
+
+ extras_cnt++;
+
+ }
+
+ closedir(d);
+
+check_and_sort:
+
+ if (!extras_cnt) FATAL("No usable files in '%s'", dir);
+
+ qsort(extras, extras_cnt, sizeof(struct extra_data), compare_extras_len);
+
+ OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt,
+ DMS(min_len), DMS(max_len));
+
+ if (max_len > 32)
+ WARNF("Some tokens are relatively large (%s) - consider trimming.",
+ DMS(max_len));
+
+ if (extras_cnt > MAX_DET_EXTRAS)
+ WARNF("More than %u tokens - will use them probabilistically.",
+ MAX_DET_EXTRAS);
+
+}
+
+// Takes *p_buf of size *p_len; returns the mutated buffer and its new size through the same pointers.
+static void afl_havoc(u8** p_buf, s32* p_len, u32 max_seed_size) {
+
+ s32 temp_len = *p_len;
+ u8* out_buf = *p_buf; // Note this should be allocated from AFL heap API
+
+ u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
+
+ for (s32 i = 0; i < use_stacking; i++) {
+
+ switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) {
+
+ case 0:
+
+ /* Flip a single bit somewhere. Spooky! */
+
+ FLIP_BIT(out_buf, UR(temp_len << 3));
+ break;
+
+ case 1:
+
+ /* Set byte to interesting value. */
+
+ out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
+ break;
+
+ case 2:
+
+ /* Set word to interesting value, randomly choosing endian. */
+
+ if (temp_len < 2) break;
+
+ if (UR(2)) {
+
+ *(u16*)(out_buf + UR(temp_len - 1)) =
+ interesting_16[UR(sizeof(interesting_16) >> 1)];
+
+ } else {
+
+ *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16(
+ interesting_16[UR(sizeof(interesting_16) >> 1)]);
+
+ }
+
+ break;
+
+ case 3:
+
+ /* Set dword to interesting value, randomly choosing endian. */
+
+ if (temp_len < 4) break;
+
+ if (UR(2)) {
+
+ *(u32*)(out_buf + UR(temp_len - 3)) =
+ interesting_32[UR(sizeof(interesting_32) >> 2)];
+
+ } else {
+
+ *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32(
+ interesting_32[UR(sizeof(interesting_32) >> 2)]);
+
+ }
+
+ break;
+
+ case 4:
+
+ /* Randomly subtract from byte. */
+
+ out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
+ break;
+
+ case 5:
+
+ /* Randomly add to byte. */
+
+ out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
+ break;
+
+ case 6:
+
+ /* Randomly subtract from word, random endian. */
+
+ if (temp_len < 2) break;
+
+ if (UR(2)) {
+
+ u32 pos = UR(temp_len - 1);
+
+ *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+
+ } else {
+
+ u32 pos = UR(temp_len - 1);
+ u16 num = 1 + UR(ARITH_MAX);
+
+ *(u16*)(out_buf + pos) =
+ SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
+
+ }
+
+ break;
+
+ case 7:
+
+ /* Randomly add to word, random endian. */
+
+ if (temp_len < 2) break;
+
+ if (UR(2)) {
+
+ u32 pos = UR(temp_len - 1);
+
+ *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
+
+ } else {
+
+ u32 pos = UR(temp_len - 1);
+ u16 num = 1 + UR(ARITH_MAX);
+
+ *(u16*)(out_buf + pos) =
+ SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
+
+ }
+
+ break;
+
+ case 8:
+
+ /* Randomly subtract from dword, random endian. */
+
+ if (temp_len < 4) break;
+
+ if (UR(2)) {
+
+ u32 pos = UR(temp_len - 3);
+
+ *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+
+ } else {
+
+ u32 pos = UR(temp_len - 3);
+ u32 num = 1 + UR(ARITH_MAX);
+
+ *(u32*)(out_buf + pos) =
+ SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
+
+ }
+
+ break;
+
+ case 9:
+
+ /* Randomly add to dword, random endian. */
+
+ if (temp_len < 4) break;
+
+ if (UR(2)) {
+
+ u32 pos = UR(temp_len - 3);
+
+ *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
+
+ } else {
+
+ u32 pos = UR(temp_len - 3);
+ u32 num = 1 + UR(ARITH_MAX);
+
+ *(u32*)(out_buf + pos) =
+ SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
+
+ }
+
+ break;
+
+ case 10:
+
+ /* Just set a random byte to a random value. Because,
+ why not. We use XOR with 1-255 to eliminate the
+ possibility of a no-op. */
+
+ out_buf[UR(temp_len)] ^= 1 + UR(255);
+ break;
+
+ case 11 ... 12: {
+
+ /* Delete bytes. We're making this a bit more likely
+ than insertion (the next option) in hopes of keeping
+ files reasonably small. */
+
+ u32 del_from, del_len;
+
+ if (temp_len < 2) break;
+
+ /* Don't delete too much. */
+
+ del_len = choose_block_len(temp_len - 1);
+
+ del_from = UR(temp_len - del_len + 1);
+
+ memmove(out_buf + del_from, out_buf + del_from + del_len,
+ temp_len - del_from - del_len);
+
+ temp_len -= del_len;
+
+ break;
+
+ }
+
+ case 13:
+
+ if (temp_len + HAVOC_BLK_XL < max_seed_size) {
+
+ /* Clone bytes (75%) or insert a block of constant bytes (25%). */
+
+ u8 actually_clone = UR(4);
+ u32 clone_from, clone_to, clone_len;
+ u8* new_buf;
+
+ if (actually_clone) {
+
+ clone_len = choose_block_len(temp_len);
+ clone_from = UR(temp_len - clone_len + 1);
+
+ } else {
+
+ clone_len = choose_block_len(HAVOC_BLK_XL);
+ clone_from = 0;
+
+ }
+
+ clone_to = UR(temp_len);
+
+ new_buf = ck_alloc_nozero(temp_len + clone_len);
+
+ /* Head */
+
+ memcpy(new_buf, out_buf, clone_to);
+
+ /* Inserted part */
+
+ if (actually_clone)
+ memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
+ else
+ memset(new_buf + clone_to,
+ UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
+
+ /* Tail */
+ memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
+ temp_len - clone_to);
+
+ ck_free(out_buf);
+ out_buf = new_buf;
+ temp_len += clone_len;
+
+ }
+
+ break;
+
+ case 14: {
+
+ /* Overwrite bytes with a randomly selected chunk (75%) or fixed
+ bytes (25%). */
+
+ u32 copy_from, copy_to, copy_len;
+
+ if (temp_len < 2) break;
+
+ copy_len = choose_block_len(temp_len - 1);
+
+ copy_from = UR(temp_len - copy_len + 1);
+ copy_to = UR(temp_len - copy_len + 1);
+
+ if (UR(4)) {
+
+ if (copy_from != copy_to)
+ memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
+
+ } else memset(out_buf + copy_to,
+ UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
+
+ break;
+
+ }
+
+ /* Values 15 and 16 can be selected only if there are any extras
+ present in the dictionaries. */
+
+ case 15: {
+
+ /* Overwrite bytes with an extra. */
+
+ if (!extras_cnt || (a_extras_cnt && UR(2))) {
+
+ /* No user-specified extras or odds in our favor. Let's use an
+ auto-detected one. */
+
+ u32 use_extra = UR(a_extras_cnt);
+ u32 extra_len = a_extras[use_extra].len;
+ u32 insert_at;
+
+ if (extra_len > temp_len) break;
+
+ insert_at = UR(temp_len - extra_len + 1);
+ memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len);
+
+ } else {
+
+ /* No auto extras or odds in our favor. Use the dictionary. */
+
+ u32 use_extra = UR(extras_cnt);
+ u32 extra_len = extras[use_extra].len;
+ u32 insert_at;
+
+ if (extra_len > temp_len) break;
+
+ insert_at = UR(temp_len - extra_len + 1);
+ memcpy(out_buf + insert_at, extras[use_extra].data, extra_len);
+
+ }
+
+ break;
+
+ }
+
+ case 16: {
+
+ u32 use_extra, extra_len, insert_at = UR(temp_len + 1);
+ u8* new_buf;
+
+ /* Insert an extra. Do the same dice-rolling stuff as for the
+ previous case. */
+
+ if (!extras_cnt || (a_extras_cnt && UR(2))) {
+
+ use_extra = UR(a_extras_cnt);
+ extra_len = a_extras[use_extra].len;
+
+ if (temp_len + extra_len >= max_seed_size) break;
+
+ new_buf = ck_alloc_nozero(temp_len + extra_len);
+
+ /* Head */
+ memcpy(new_buf, out_buf, insert_at);
+
+ /* Inserted part */
+ memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len);
+
+ } else {
+
+ use_extra = UR(extras_cnt);
+ extra_len = extras[use_extra].len;
+
+ if (temp_len + extra_len >= max_seed_size) break;
+
+ new_buf = ck_alloc_nozero(temp_len + extra_len);
+
+ /* Head */
+ memcpy(new_buf, out_buf, insert_at);
+
+ /* Inserted part */
+ memcpy(new_buf + insert_at, extras[use_extra].data, extra_len);
+
+ }
+
+ /* Tail */
+ memcpy(new_buf + insert_at + extra_len, out_buf + insert_at,
+ temp_len - insert_at);
+
+ ck_free(out_buf);
+ out_buf = new_buf;
+ temp_len += extra_len;
+
+ break;
+
+ }
+
+ }
+
+ }
+
+ *p_buf = out_buf;
+ *p_len = temp_len;
+
+}
+
+static void locate_diffs(
+ const u8* ptr1, const u8* ptr2, u32 len, s32* first, s32* last) {
+
+ s32 f_loc = -1;
+ s32 l_loc = -1;
+ u32 pos;
+
+ for (pos = 0; pos < len; pos++) {
+
+ if (*(ptr1++) != *(ptr2++)) {
+
+ if (f_loc == -1) f_loc = pos;
+ l_loc = pos;
+
+ }
+
+ }
+
+ *first = f_loc;
+ *last = l_loc;
+
+ return;
+
+}
+
+static void generate_splice(
+ u8 *in_buf, size_t len, u8 *add_buf, size_t add_buf_size) {
+
+ if (likely(num_splice_bufs >= SPLICE_CYCLES))
+ return;
+
+ u8* new_buf = ck_alloc_nozero(add_buf_size);
+ memcpy(new_buf, add_buf, add_buf_size);
+
+ /* Find a suitable splicing location, somewhere between the first and
+ the last differing byte. Bail out if the difference is just a single
+ byte or so. */
+
+ s32 f_diff, l_diff;
+ locate_diffs(in_buf, new_buf, MIN(len, add_buf_size), &f_diff, &l_diff);
+
+ if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
+
+ ck_free(new_buf);
+ return;
+
+ }
+
+ /* Split somewhere between the first and last differing byte. */
+
+ u32 split_at = f_diff + UR(l_diff - f_diff);
+
+ /* Do the thing. */
+
+ splice_buf_sizes[num_splice_bufs] = add_buf_size;
+ memcpy(new_buf, in_buf, split_at);
+ splice_bufs[num_splice_bufs++] = new_buf;
+
+}
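The net effect is that each saved splice buffer keeps the head of the current test case and the tail of the donor input; schematically, splice_bufs[n] (add_buf_size bytes total) is laid out as:

    /* [0, split_at)              copied from in_buf  (current test case)
       [split_at, add_buf_size)   copied from add_buf (donor test case)   */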
+
+void *afl_custom_init(void* p, unsigned int s) {
+ dev_urandom_fd = open("/dev/urandom", O_RDONLY);
+ if (dev_urandom_fd < 0) PFATAL("Unable to open /dev/urandom");
+ u8 *extras_dir = getenv("AFL_DICT"); // TODO: parse /proc/self/cmdline instead
+ if (extras_dir) load_extras(extras_dir);
+ splice_bufs = ck_alloc(SPLICE_CYCLES * sizeof(u8*));
+ splice_buf_sizes = ck_alloc_nozero(SPLICE_CYCLES * sizeof(u32));
+ const char* s_cycle_time = getenv("AFLRUN_CYCLE_TIME");
+ cycle_time = (s_cycle_time == NULL) ? 600 : strtoull(s_cycle_time, NULL, 10);
+ if (cycle_time == 0) cycle_time = 600;
+ init_time = get_cur_time();
+ no_splicing = getenv("NO_SPLICING") != NULL;
+ return (void*)1;
+}
+
+void afl_custom_deinit(void* p) {
+ close(dev_urandom_fd);
+}
+
+// Release the splice input buffers of the previous queue entry; new ones are generated by generate_splice() in afl_custom_fuzz()
+u32 afl_custom_fuzz_count(
+ void *data, const u8 *in_buf, size_t len, u32 saved_max) {
+
+ for (u32 i = 0; i < num_splice_bufs; ++i)
+ ck_free(splice_bufs[i]);
+
+ num_splice_bufs = 0;
+
+ // AFLRun will ignore this anyway
+ return saved_max;
+
+}
+
+size_t afl_custom_fuzz(void *data, u8 *buf, size_t buf_size, u8 **out_buf,
+ u8 *add_buf, size_t add_buf_size, size_t max_size) {
+
+ u8* input_buf; s32 temp_len;
+
+ if (!no_splicing)
+ generate_splice(buf, buf_size, add_buf, add_buf_size);
+
+  // Execute HAVOC and SPLICE in an interleaved fashion, with the same expected ratio as AFL.
+ // The bias exists but is negligible.
+ if (buf_size <= 1 || num_splice_bufs == 0 ||
+ UR(HAVOC_CYCLES + SPLICE_HAVOC * SPLICE_CYCLES) < HAVOC_CYCLES) {
+ // HAVOC
+
+ input_buf = buf;
+ temp_len = buf_size;
+
+ } else {
+
+ u32 idx = UR(num_splice_bufs);
+ input_buf = splice_bufs[idx];
+ temp_len = splice_buf_sizes[idx];
+
+ }
+
+ fuzz_buf = ck_realloc(fuzz_buf, temp_len);
+ memcpy(fuzz_buf, input_buf, temp_len);
+ afl_havoc(&fuzz_buf, &temp_len, max_size);
+ *out_buf = fuzz_buf;
+ return temp_len;
+
+}
\ No newline at end of file
diff --git a/custom_mutators/afl/alloc-inl.h b/custom_mutators/afl/alloc-inl.h
new file mode 100644
index 00000000..d3c125fb
--- /dev/null
+++ b/custom_mutators/afl/alloc-inl.h
@@ -0,0 +1,570 @@
+/*
+ american fuzzy lop - error-checking, memory-zeroing alloc routines
+ ------------------------------------------------------------------
+
+ Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+ Copyright 2013, 2014, 2015 Google Inc. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at:
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ This allocator is not designed to resist malicious attackers (the canaries
+ are small and predictable), but provides a robust and portable way to detect
+ use-after-free, off-by-one writes, stale pointers, and so on.
+
+ */
+
+#ifndef _HAVE_ALLOC_INL_H
+#define _HAVE_ALLOC_INL_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "config.h"
+#include "types.h"
+#include "debug.h"
+
+/* User-facing macro to sprintf() to a dynamically allocated buffer. */
+
+#define alloc_printf(_str...) ({ \
+ u8* _tmp; \
+ s32 _len = snprintf(NULL, 0, _str); \
+ if (_len < 0) FATAL("Whoa, snprintf() fails?!"); \
+ _tmp = ck_alloc(_len + 1); \
+ snprintf((char*)_tmp, _len + 1, _str); \
+ _tmp; \
+ })
+
+/* Macro to enforce allocation limits as a last-resort defense against
+ integer overflows. */
+
+#define ALLOC_CHECK_SIZE(_s) do { \
+ if ((_s) > MAX_ALLOC) \
+ ABORT("Bad alloc request: %u bytes", (_s)); \
+ } while (0)
+
+/* Macro to check malloc() failures and the like. */
+
+#define ALLOC_CHECK_RESULT(_r, _s) do { \
+ if (!(_r)) \
+ ABORT("Out of memory: can't allocate %u bytes", (_s)); \
+ } while (0)
+
+/* Magic tokens used to mark used / freed chunks. */
+
+#define ALLOC_MAGIC_C1 0xFF00FF00 /* Used head (dword) */
+#define ALLOC_MAGIC_F 0xFE00FE00 /* Freed head (dword) */
+#define ALLOC_MAGIC_C2 0xF0 /* Used tail (byte) */
+
+/* Positions of guard tokens in relation to the user-visible pointer. */
+
+#define ALLOC_C1(_ptr) (((u32*)(_ptr))[-2])
+#define ALLOC_S(_ptr) (((u32*)(_ptr))[-1])
+#define ALLOC_C2(_ptr) (((u8*)(_ptr))[ALLOC_S(_ptr)])
+
+#define ALLOC_OFF_HEAD 8
+#define ALLOC_OFF_TOTAL (ALLOC_OFF_HEAD + 1)
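Putting these macros together, a chunk returned by ck_alloc() carries an 8-byte header and a 1-byte trailer around the user data; relative to the user-visible pointer ptr the layout is:

    /* ptr - 8    : u32 ALLOC_MAGIC_C1   head canary       (ALLOC_C1)
       ptr - 4    : u32 size             requested length  (ALLOC_S)
       ptr + 0    : user data, `size` bytes
       ptr + size : u8  ALLOC_MAGIC_C2   tail canary       (ALLOC_C2) */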
+
+/* Allocator increments for ck_realloc_block(). */
+
+#define ALLOC_BLK_INC 256
+
+/* Sanity-checking macros for pointers. */
+
+#define CHECK_PTR(_p) do { \
+ if (_p) { \
+ if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) {\
+ if (ALLOC_C1(_p) == ALLOC_MAGIC_F) \
+ ABORT("Use after free."); \
+ else ABORT("Corrupted head alloc canary."); \
+ } \
+ if (ALLOC_C2(_p) ^ ALLOC_MAGIC_C2) \
+ ABORT("Corrupted tail alloc canary."); \
+ } \
+ } while (0)
+
+#define CHECK_PTR_EXPR(_p) ({ \
+ typeof (_p) _tmp = (_p); \
+ CHECK_PTR(_tmp); \
+ _tmp; \
+ })
+
+
+/* Allocate a buffer, explicitly not zeroing it. Returns NULL for zero-sized
+ requests. */
+
+static inline void* DFL_ck_alloc_nozero(u32 size) {
+
+ void* ret;
+
+ if (!size) return NULL;
+
+ ALLOC_CHECK_SIZE(size);
+ ret = malloc(size + ALLOC_OFF_TOTAL);
+ ALLOC_CHECK_RESULT(ret, size);
+
+ ret += ALLOC_OFF_HEAD;
+
+ ALLOC_C1(ret) = ALLOC_MAGIC_C1;
+ ALLOC_S(ret) = size;
+ ALLOC_C2(ret) = ALLOC_MAGIC_C2;
+
+ return ret;
+
+}
+
+
+/* Allocate a buffer, returning zeroed memory. */
+
+static inline void* DFL_ck_alloc(u32 size) {
+
+ void* mem;
+
+ if (!size) return NULL;
+ mem = DFL_ck_alloc_nozero(size);
+
+ return memset(mem, 0, size);
+
+}
+
+
+/* Free memory, checking for double free and corrupted heap. When DEBUG_BUILD
+   is set, the old memory will also be clobbered with 0xFF. */
+
+static inline void DFL_ck_free(void* mem) {
+
+ if (!mem) return;
+
+ CHECK_PTR(mem);
+
+#ifdef DEBUG_BUILD
+
+ /* Catch pointer issues sooner. */
+ memset(mem, 0xFF, ALLOC_S(mem));
+
+#endif /* DEBUG_BUILD */
+
+ ALLOC_C1(mem) = ALLOC_MAGIC_F;
+
+ free(mem - ALLOC_OFF_HEAD);
+
+}
+
+
+/* Re-allocate a buffer, checking for issues and zeroing any newly-added tail.
+   With DEBUG_BUILD, the buffer is always reallocated to a new address and the
+ old memory is clobbered with 0xFF. */
+
+static inline void* DFL_ck_realloc(void* orig, u32 size) {
+
+ void* ret;
+ u32 old_size = 0;
+
+ if (!size) {
+
+ DFL_ck_free(orig);
+ return NULL;
+
+ }
+
+ if (orig) {
+
+ CHECK_PTR(orig);
+
+#ifndef DEBUG_BUILD
+ ALLOC_C1(orig) = ALLOC_MAGIC_F;
+#endif /* !DEBUG_BUILD */
+
+ old_size = ALLOC_S(orig);
+ orig -= ALLOC_OFF_HEAD;
+
+ ALLOC_CHECK_SIZE(old_size);
+
+ }
+
+ ALLOC_CHECK_SIZE(size);
+
+#ifndef DEBUG_BUILD
+
+ ret = realloc(orig, size + ALLOC_OFF_TOTAL);
+ ALLOC_CHECK_RESULT(ret, size);
+
+#else
+
+ /* Catch pointer issues sooner: force relocation and make sure that the
+ original buffer is wiped. */
+
+ ret = malloc(size + ALLOC_OFF_TOTAL);
+ ALLOC_CHECK_RESULT(ret, size);
+
+ if (orig) {
+
+ memcpy(ret + ALLOC_OFF_HEAD, orig + ALLOC_OFF_HEAD, MIN(size, old_size));
+ memset(orig + ALLOC_OFF_HEAD, 0xFF, old_size);
+
+ ALLOC_C1(orig + ALLOC_OFF_HEAD) = ALLOC_MAGIC_F;
+
+ free(orig);
+
+ }
+
+#endif /* ^!DEBUG_BUILD */
+
+ ret += ALLOC_OFF_HEAD;
+
+ ALLOC_C1(ret) = ALLOC_MAGIC_C1;
+ ALLOC_S(ret) = size;
+ ALLOC_C2(ret) = ALLOC_MAGIC_C2;
+
+ if (size > old_size)
+ memset(ret + old_size, 0, size - old_size);
+
+ return ret;
+
+}
+
+
+/* Re-allocate a buffer with ALLOC_BLK_INC increments (used to speed up
+ repeated small reallocs without complicating the user code). */
+
+static inline void* DFL_ck_realloc_block(void* orig, u32 size) {
+
+#ifndef DEBUG_BUILD
+
+ if (orig) {
+
+ CHECK_PTR(orig);
+
+ if (ALLOC_S(orig) >= size) return orig;
+
+ size += ALLOC_BLK_INC;
+
+ }
+
+#endif /* !DEBUG_BUILD */
+
+ return DFL_ck_realloc(orig, size);
+
+}
+
+
+/* Create a buffer with a copy of a string. Returns NULL for NULL inputs. */
+
+static inline u8* DFL_ck_strdup(u8* str) {
+
+ void* ret;
+ u32 size;
+
+ if (!str) return NULL;
+
+ size = strlen((char*)str) + 1;
+
+ ALLOC_CHECK_SIZE(size);
+ ret = malloc(size + ALLOC_OFF_TOTAL);
+ ALLOC_CHECK_RESULT(ret, size);
+
+ ret += ALLOC_OFF_HEAD;
+
+ ALLOC_C1(ret) = ALLOC_MAGIC_C1;
+ ALLOC_S(ret) = size;
+ ALLOC_C2(ret) = ALLOC_MAGIC_C2;
+
+ return memcpy(ret, str, size);
+
+}
+
+
+/* Create a buffer with a copy of a memory block. Returns NULL for zero-sized
+ or NULL inputs. */
+
+static inline void* DFL_ck_memdup(void* mem, u32 size) {
+
+ void* ret;
+
+ if (!mem || !size) return NULL;
+
+ ALLOC_CHECK_SIZE(size);
+ ret = malloc(size + ALLOC_OFF_TOTAL);
+ ALLOC_CHECK_RESULT(ret, size);
+
+ ret += ALLOC_OFF_HEAD;
+
+ ALLOC_C1(ret) = ALLOC_MAGIC_C1;
+ ALLOC_S(ret) = size;
+ ALLOC_C2(ret) = ALLOC_MAGIC_C2;
+
+ return memcpy(ret, mem, size);
+
+}
+
+
+/* Create a buffer with a block of text, appending a NUL terminator at the end.
+ Returns NULL for zero-sized or NULL inputs. */
+
+static inline u8* DFL_ck_memdup_str(u8* mem, u32 size) {
+
+ u8* ret;
+
+ if (!mem || !size) return NULL;
+
+ ALLOC_CHECK_SIZE(size);
+ ret = malloc(size + ALLOC_OFF_TOTAL + 1);
+ ALLOC_CHECK_RESULT(ret, size);
+
+ ret += ALLOC_OFF_HEAD;
+
+ ALLOC_C1(ret) = ALLOC_MAGIC_C1;
+ ALLOC_S(ret) = size;
+ ALLOC_C2(ret) = ALLOC_MAGIC_C2;
+
+ memcpy(ret, mem, size);
+ ret[size] = 0;
+
+ return ret;
+
+}
+
+
+#ifndef DEBUG_BUILD
+
+/* In non-debug mode, we just do straightforward aliasing of the above functions
+ to user-visible names such as ck_alloc(). */
+
+#define ck_alloc DFL_ck_alloc
+#define ck_alloc_nozero DFL_ck_alloc_nozero
+#define ck_realloc DFL_ck_realloc
+#define ck_realloc_block DFL_ck_realloc_block
+#define ck_strdup DFL_ck_strdup
+#define ck_memdup DFL_ck_memdup
+#define ck_memdup_str DFL_ck_memdup_str
+#define ck_free DFL_ck_free
+
+#define alloc_report()
+
+#else
+
+/* In debugging mode, we also track allocations to detect memory leaks, and the
+ flow goes through one more layer of indirection. */
+
+/* Alloc tracking data structures: */
+
+#define ALLOC_BUCKETS 4096
+
+struct TRK_obj {
+ void *ptr;
+ char *file, *func;
+ u32 line;
+};
+
+#ifdef AFL_MAIN
+
+struct TRK_obj* TRK[ALLOC_BUCKETS];
+u32 TRK_cnt[ALLOC_BUCKETS];
+
+# define alloc_report() TRK_report()
+
+#else
+
+extern struct TRK_obj* TRK[ALLOC_BUCKETS];
+extern u32 TRK_cnt[ALLOC_BUCKETS];
+
+# define alloc_report()
+
+#endif /* ^AFL_MAIN */
+
+/* Bucket-assigning function for a given pointer: */
+
+#define TRKH(_ptr) (((((u32)(_ptr)) >> 16) ^ ((u32)(_ptr))) % ALLOC_BUCKETS)
+
+
+/* Add a new entry to the list of allocated objects. */
+
+static inline void TRK_alloc_buf(void* ptr, const char* file, const char* func,
+ u32 line) {
+
+ u32 i, bucket;
+
+ if (!ptr) return;
+
+ bucket = TRKH(ptr);
+
+ /* Find a free slot in the list of entries for that bucket. */
+
+ for (i = 0; i < TRK_cnt[bucket]; i++)
+
+ if (!TRK[bucket][i].ptr) {
+
+ TRK[bucket][i].ptr = ptr;
+ TRK[bucket][i].file = (char*)file;
+ TRK[bucket][i].func = (char*)func;
+ TRK[bucket][i].line = line;
+ return;
+
+ }
+
+ /* No space available - allocate more. */
+
+ TRK[bucket] = DFL_ck_realloc_block(TRK[bucket],
+ (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj));
+
+ TRK[bucket][i].ptr = ptr;
+ TRK[bucket][i].file = (char*)file;
+ TRK[bucket][i].func = (char*)func;
+ TRK[bucket][i].line = line;
+
+ TRK_cnt[bucket]++;
+
+}
+
+
+/* Remove entry from the list of allocated objects. */
+
+static inline void TRK_free_buf(void* ptr, const char* file, const char* func,
+ u32 line) {
+
+ u32 i, bucket;
+
+ if (!ptr) return;
+
+ bucket = TRKH(ptr);
+
+ /* Find the element on the list... */
+
+ for (i = 0; i < TRK_cnt[bucket]; i++)
+
+ if (TRK[bucket][i].ptr == ptr) {
+
+ TRK[bucket][i].ptr = 0;
+ return;
+
+ }
+
+ WARNF("ALLOC: Attempt to free non-allocated memory in %s (%s:%u)",
+ func, file, line);
+
+}
+
+
+/* Do a final report on all non-deallocated objects. */
+
+static inline void TRK_report(void) {
+
+ u32 i, bucket;
+
+ fflush(0);
+
+ for (bucket = 0; bucket < ALLOC_BUCKETS; bucket++)
+ for (i = 0; i < TRK_cnt[bucket]; i++)
+ if (TRK[bucket][i].ptr)
+ WARNF("ALLOC: Memory never freed, created in %s (%s:%u)",
+ TRK[bucket][i].func, TRK[bucket][i].file, TRK[bucket][i].line);
+
+}
+
+
+/* Simple wrappers for non-debugging functions: */
+
+static inline void* TRK_ck_alloc(u32 size, const char* file, const char* func,
+ u32 line) {
+
+ void* ret = DFL_ck_alloc(size);
+ TRK_alloc_buf(ret, file, func, line);
+ return ret;
+
+}
+
+
+static inline void* TRK_ck_realloc(void* orig, u32 size, const char* file,
+ const char* func, u32 line) {
+
+ void* ret = DFL_ck_realloc(orig, size);
+ TRK_free_buf(orig, file, func, line);
+ TRK_alloc_buf(ret, file, func, line);
+ return ret;
+
+}
+
+
+static inline void* TRK_ck_realloc_block(void* orig, u32 size, const char* file,
+ const char* func, u32 line) {
+
+ void* ret = DFL_ck_realloc_block(orig, size);
+ TRK_free_buf(orig, file, func, line);
+ TRK_alloc_buf(ret, file, func, line);
+ return ret;
+
+}
+
+
+static inline void* TRK_ck_strdup(u8* str, const char* file, const char* func,
+ u32 line) {
+
+ void* ret = DFL_ck_strdup(str);
+ TRK_alloc_buf(ret, file, func, line);
+ return ret;
+
+}
+
+
+static inline void* TRK_ck_memdup(void* mem, u32 size, const char* file,
+ const char* func, u32 line) {
+
+ void* ret = DFL_ck_memdup(mem, size);
+ TRK_alloc_buf(ret, file, func, line);
+ return ret;
+
+}
+
+
+static inline void* TRK_ck_memdup_str(void* mem, u32 size, const char* file,
+ const char* func, u32 line) {
+
+ void* ret = DFL_ck_memdup_str(mem, size);
+ TRK_alloc_buf(ret, file, func, line);
+ return ret;
+
+}
+
+
+static inline void TRK_ck_free(void* ptr, const char* file,
+ const char* func, u32 line) {
+
+ TRK_free_buf(ptr, file, func, line);
+ DFL_ck_free(ptr);
+
+}
+
+/* Aliasing user-facing names to tracking functions: */
+
+#define ck_alloc(_p1) \
+ TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__)
+
+#define ck_alloc_nozero(_p1) \
+ TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__)
+
+#define ck_realloc(_p1, _p2) \
+ TRK_ck_realloc(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
+
+#define ck_realloc_block(_p1, _p2) \
+ TRK_ck_realloc_block(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
+
+#define ck_strdup(_p1) \
+ TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__)
+
+#define ck_memdup(_p1, _p2) \
+ TRK_ck_memdup(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
+
+#define ck_memdup_str(_p1, _p2) \
+ TRK_ck_memdup_str(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
+
+#define ck_free(_p1) \
+ TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__)
+
+#endif /* ^!DEBUG_BUILD */
+
+#endif /* ! _HAVE_ALLOC_INL_H */
diff --git a/custom_mutators/afl/config.h b/custom_mutators/afl/config.h
new file mode 100644
index 00000000..c452db60
--- /dev/null
+++ b/custom_mutators/afl/config.h
@@ -0,0 +1,361 @@
+/*
+ american fuzzy lop - vaguely configurable bits
+ ----------------------------------------------
+
+ Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+ Copyright 2013, 2014, 2015, 2016 Google Inc. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at:
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ */
+
+#ifndef _HAVE_CONFIG_H
+#define _HAVE_CONFIG_H
+
+#include "types.h"
+
+/* Version string: */
+
+#define VERSION "2.52b"
+
+/******************************************************
+ * *
+ * Settings that may be of interest to power users: *
+ * *
+ ******************************************************/
+
+/* Comment out to disable terminal colors (note that this makes afl-analyze
+ a lot less nice): */
+
+#define USE_COLOR
+
+/* Comment out to disable fancy ANSI boxes and use poor man's 7-bit UI: */
+
+#define FANCY_BOXES
+
+/* Default timeout for fuzzed code (milliseconds). This is the upper bound,
+ also used for detecting hangs; the actual value is auto-scaled: */
+
+#define EXEC_TIMEOUT 1000
+
+/* Timeout rounding factor when auto-scaling (milliseconds): */
+
+#define EXEC_TM_ROUND 20
+
+/* Default memory limit for child process (MB): */
+
+#ifndef __x86_64__
+# define MEM_LIMIT 25
+#else
+# define MEM_LIMIT 50
+#endif /* ^!__x86_64__ */
+
+/* Default memory limit when running in QEMU mode (MB): */
+
+#define MEM_LIMIT_QEMU 200
+
+/* Number of calibration cycles per every new test case (and for test
+ cases that show variable behavior): */
+
+#define CAL_CYCLES 8
+#define CAL_CYCLES_LONG 40
+
+/* Number of subsequent timeouts before abandoning an input file: */
+
+#define TMOUT_LIMIT 250
+
+/* Maximum number of unique hangs or crashes to record: */
+
+#define KEEP_UNIQUE_HANG 500
+#define KEEP_UNIQUE_CRASH 5000
+
+/* Baseline number of random tweaks during a single 'havoc' stage: */
+
+#define HAVOC_CYCLES 256
+#define HAVOC_CYCLES_INIT 1024
+
+/* Maximum multiplier for the above (should be a power of two, beware
+ of 32-bit int overflows): */
+
+#define HAVOC_MAX_MULT 16
+
+/* Absolute minimum number of havoc cycles (after all adjustments): */
+
+#define HAVOC_MIN 16
+
+/* Maximum stacking for havoc-stage tweaks. The actual value is calculated
+ like this:
+
+ n = random between 1 and HAVOC_STACK_POW2
+ stacking = 2^n
+
+ In other words, the default (n = 7) produces 2, 4, 8, 16, 32, 64, or
+ 128 stacked tweaks: */
+
+#define HAVOC_STACK_POW2 7
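The custom mutator in afl-mutator.so.c derives its stacking count directly from this constant:

    /* 1 + UR(HAVOC_STACK_POW2) is uniform in [1, 7], so use_stacking is one of
       2, 4, 8, 16, 32, 64 or 128 stacked tweaks per havoc invocation. */
    u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));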
+
+/* Caps on block sizes for cloning and deletion operations. Each of these
+ ranges has a 33% probability of getting picked, except for the first
+ two cycles where smaller blocks are favored: */
+
+#define HAVOC_BLK_SMALL 32
+#define HAVOC_BLK_MEDIUM 128
+#define HAVOC_BLK_LARGE 1500
+
+/* Extra-large blocks, selected very rarely (<5% of the time): */
+
+#define HAVOC_BLK_XL 32768
+
+/* Probabilities of skipping non-favored entries in the queue, expressed as
+ percentages: */
+
+#define SKIP_TO_NEW_PROB 99 /* ...when there are new, pending favorites */
+#define SKIP_NFAV_OLD_PROB 95 /* ...no new favs, cur entry already fuzzed */
+#define SKIP_NFAV_NEW_PROB 75 /* ...no new favs, cur entry not fuzzed yet */
+
+/* Splicing cycle count: */
+
+#define SPLICE_CYCLES 15
+
+/* Nominal per-splice havoc cycle length: */
+
+#define SPLICE_HAVOC 32
+
+/* Maximum offset for integer addition / subtraction stages: */
+
+#define ARITH_MAX 35
+
+/* Limits for the test case trimmer. The absolute minimum chunk size; and
+ the starting and ending divisors for chopping up the input file: */
+
+#define TRIM_MIN_BYTES 4
+#define TRIM_START_STEPS 16
+#define TRIM_END_STEPS 1024
+
+/* Maximum size of input file, in bytes (keep under 100MB): */
+
+#define MAX_FILE (1 * 1024 * 1024)
+
+/* The same, for the test case minimizer: */
+
+#define TMIN_MAX_FILE (10 * 1024 * 1024)
+
+/* Block normalization steps for afl-tmin: */
+
+#define TMIN_SET_MIN_SIZE 4
+#define TMIN_SET_STEPS 128
+
+/* Maximum dictionary token size (-x), in bytes: */
+
+#define MAX_DICT_FILE 128
+
+/* Length limits for auto-detected dictionary tokens: */
+
+#define MIN_AUTO_EXTRA 3
+#define MAX_AUTO_EXTRA 32
+
+/* Maximum number of user-specified dictionary tokens to use in deterministic
+   steps; past this point, the "extras/user" step will still be carried out,
+ but with proportionally lower odds: */
+
+#define MAX_DET_EXTRAS 200
+
+/* Maximum number of auto-extracted dictionary tokens to actually use in fuzzing
+ (first value), and to keep in memory as candidates. The latter should be much
+ higher than the former. */
+
+#define USE_AUTO_EXTRAS 50
+#define MAX_AUTO_EXTRAS (USE_AUTO_EXTRAS * 10)
+
+/* Scaling factor for the effector map used to skip some of the more
+ expensive deterministic steps. The actual divisor is set to
+ 2^EFF_MAP_SCALE2 bytes: */
+
+#define EFF_MAP_SCALE2 3
+
+/* Minimum input file length at which the effector logic kicks in: */
+
+#define EFF_MIN_LEN 128
+
+/* Maximum effector density past which everything is just fuzzed
+ unconditionally (%): */
+
+#define EFF_MAX_PERC 90
+
+/* UI refresh frequency (Hz): */
+
+#define UI_TARGET_HZ 5
+
+/* Fuzzer stats file and plot update intervals (sec): */
+
+#define STATS_UPDATE_SEC 60
+#define PLOT_UPDATE_SEC 5
+
+/* Smoothing divisor for CPU load and exec speed stats (1 - no smoothing). */
+
+#define AVG_SMOOTHING 16
+
+/* Sync interval (every n havoc cycles): */
+
+#define SYNC_INTERVAL 5
+
+/* Output directory reuse grace period (minutes): */
+
+#define OUTPUT_GRACE 25
+
+/* Uncomment to use simple file names (id_NNNNNN): */
+
+// #define SIMPLE_FILES
+
+/* List of interesting values to use in fuzzing. */
+
+#define INTERESTING_8 \
+ -128, /* Overflow signed 8-bit when decremented */ \
+ -1, /* */ \
+ 0, /* */ \
+ 1, /* */ \
+ 16, /* One-off with common buffer size */ \
+ 32, /* One-off with common buffer size */ \
+ 64, /* One-off with common buffer size */ \
+ 100, /* One-off with common buffer size */ \
+ 127 /* Overflow signed 8-bit when incremented */
+
+#define INTERESTING_16 \
+ -32768, /* Overflow signed 16-bit when decremented */ \
+ -129, /* Overflow signed 8-bit */ \
+ 128, /* Overflow signed 8-bit */ \
+ 255, /* Overflow unsig 8-bit when incremented */ \
+ 256, /* Overflow unsig 8-bit */ \
+ 512, /* One-off with common buffer size */ \
+ 1000, /* One-off with common buffer size */ \
+ 1024, /* One-off with common buffer size */ \
+ 4096, /* One-off with common buffer size */ \
+ 32767 /* Overflow signed 16-bit when incremented */
+
+#define INTERESTING_32 \
+ -2147483648LL, /* Overflow signed 32-bit when decremented */ \
+ -100663046, /* Large negative number (endian-agnostic) */ \
+ -32769, /* Overflow signed 16-bit */ \
+ 32768, /* Overflow signed 16-bit */ \
+ 65535, /* Overflow unsig 16-bit when incremented */ \
+ 65536, /* Overflow unsig 16 bit */ \
+ 100663045, /* Large positive number (endian-agnostic) */ \
+ 2147483647 /* Overflow signed 32-bit when incremented */
+
+/***********************************************************
+ * *
+ * Really exotic stuff you probably don't want to touch: *
+ * *
+ ***********************************************************/
+
+/* Call count interval between reseeding the libc PRNG from /dev/urandom: */
+
+#define RESEED_RNG 10000
+
+/* Maximum line length passed from GCC to 'as' and used for parsing
+ configuration files: */
+
+#define MAX_LINE 8192
+
+/* Environment variable used to pass SHM ID to the called program. */
+
+#define SHM_ENV_VAR "__AFL_SHM_ID"
+
+/* Other less interesting, internal-only variables. */
+
+#define CLANG_ENV_VAR "__AFL_CLANG_MODE"
+#define AS_LOOP_ENV_VAR "__AFL_AS_LOOPCHECK"
+#define PERSIST_ENV_VAR "__AFL_PERSISTENT"
+#define DEFER_ENV_VAR "__AFL_DEFER_FORKSRV"
+
+/* In-code signatures for deferred and persistent mode. */
+
+#define PERSIST_SIG "##SIG_AFL_PERSISTENT##"
+#define DEFER_SIG "##SIG_AFL_DEFER_FORKSRV##"
+
+/* Distinctive bitmap signature used to indicate failed execution: */
+
+#define EXEC_FAIL_SIG 0xfee1dead
+
+/* Distinctive exit code used to indicate MSAN trip condition: */
+
+#define MSAN_ERROR 86
+
+/* Designated file descriptors for forkserver commands (the application will
+ use FORKSRV_FD and FORKSRV_FD + 1): */
+
+#define FORKSRV_FD 198
+
+/* Fork server init timeout multiplier: we'll wait the user-selected
+ timeout plus this much for the fork server to spin up. */
+
+#define FORK_WAIT_MULT 10
+
+/* Calibration timeout adjustments, to be a bit more generous when resuming
+ fuzzing sessions or trying to calibrate already-added internal finds.
+ The first value is a percentage, the other is in milliseconds: */
+
+#define CAL_TMOUT_PERC 125
+#define CAL_TMOUT_ADD 50
+
+/* Number of chances to calibrate a case before giving up: */
+
+#define CAL_CHANCES 3
+
+/* Map size for the traced binary (2^MAP_SIZE_POW2). Must be greater than
+ 2; you probably want to keep it under 18 or so for performance reasons
+ (adjusting AFL_INST_RATIO when compiling is probably a better way to solve
+ problems with complex programs). You need to recompile the target binary
+ after changing this - otherwise, SEGVs may ensue. */
+
+#define MAP_SIZE_POW2 16
+#define MAP_SIZE (1 << MAP_SIZE_POW2)
+
+/* Maximum allocator request size (keep well under INT_MAX): */
+
+#define MAX_ALLOC 0x40000000
+
+/* A made-up hashing seed: */
+
+#define HASH_CONST 0xa5b35705
+
+/* Constants for afl-gotcpu to control busy loop timing: */
+
+#define CTEST_TARGET_MS 5000
+#define CTEST_CORE_TRG_MS 1000
+#define CTEST_BUSY_CYCLES (10 * 1000 * 1000)
+
+/* Uncomment this to use inferior block-coverage-based instrumentation. Note
+ that you need to recompile the target binary for this to have any effect: */
+
+// #define COVERAGE_ONLY
+
+/* Uncomment this to ignore hit counts and output just one bit per tuple.
+ As with the previous setting, you will need to recompile the target
+ binary: */
+
+// #define SKIP_COUNTS
+
+/* Uncomment this to use instrumentation data to record newly discovered paths,
+ but do not use them as seeds for fuzzing. This is useful for conveniently
+ measuring coverage that could be attained by a "dumb" fuzzing algorithm: */
+
+// #define IGNORE_FINDS
+
+#define MAX_FACTOR 32
+
+/* This enables tracing of the executed CG and CFG edges. In some cases,
+   the CG and CFGs that LLVM produces are incomplete due to register-
+   indirect jumps or calls. To add edges, use script/add_edges.py. Don't
+ forget to set environment variable AFLGO_PROFILER_FILE.
+
+ $ export AFLGO_PROFILER_FILE=<your-file> */
+
+//#define AFLGO_TRACING
+
+#endif /* ! _HAVE_CONFIG_H */
diff --git a/custom_mutators/afl/debug.h b/custom_mutators/afl/debug.h
new file mode 100644
index 00000000..a943a573
--- /dev/null
+++ b/custom_mutators/afl/debug.h
@@ -0,0 +1,251 @@
+/*
+ american fuzzy lop - debug / error handling macros
+ --------------------------------------------------
+
+ Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+ Copyright 2013, 2014, 2015, 2016 Google Inc. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at:
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ */
+
+#ifndef _HAVE_DEBUG_H
+#define _HAVE_DEBUG_H
+
+#include <errno.h>
+
+#include "types.h"
+#include "config.h"
+
+/*******************
+ * Terminal colors *
+ *******************/
+
+#ifdef USE_COLOR
+
+# define cBLK "\x1b[0;30m"
+# define cRED "\x1b[0;31m"
+# define cGRN "\x1b[0;32m"
+# define cBRN "\x1b[0;33m"
+# define cBLU "\x1b[0;34m"
+# define cMGN "\x1b[0;35m"
+# define cCYA "\x1b[0;36m"
+# define cLGR "\x1b[0;37m"
+# define cGRA "\x1b[1;90m"
+# define cLRD "\x1b[1;91m"
+# define cLGN "\x1b[1;92m"
+# define cYEL "\x1b[1;93m"
+# define cLBL "\x1b[1;94m"
+# define cPIN "\x1b[1;95m"
+# define cLCY "\x1b[1;96m"
+# define cBRI "\x1b[1;97m"
+# define cRST "\x1b[0m"
+
+# define bgBLK "\x1b[40m"
+# define bgRED "\x1b[41m"
+# define bgGRN "\x1b[42m"
+# define bgBRN "\x1b[43m"
+# define bgBLU "\x1b[44m"
+# define bgMGN "\x1b[45m"
+# define bgCYA "\x1b[46m"
+# define bgLGR "\x1b[47m"
+# define bgGRA "\x1b[100m"
+# define bgLRD "\x1b[101m"
+# define bgLGN "\x1b[102m"
+# define bgYEL "\x1b[103m"
+# define bgLBL "\x1b[104m"
+# define bgPIN "\x1b[105m"
+# define bgLCY "\x1b[106m"
+# define bgBRI "\x1b[107m"
+
+#else
+
+# define cBLK ""
+# define cRED ""
+# define cGRN ""
+# define cBRN ""
+# define cBLU ""
+# define cMGN ""
+# define cCYA ""
+# define cLGR ""
+# define cGRA ""
+# define cLRD ""
+# define cLGN ""
+# define cYEL ""
+# define cLBL ""
+# define cPIN ""
+# define cLCY ""
+# define cBRI ""
+# define cRST ""
+
+# define bgBLK ""
+# define bgRED ""
+# define bgGRN ""
+# define bgBRN ""
+# define bgBLU ""
+# define bgMGN ""
+# define bgCYA ""
+# define bgLGR ""
+# define bgGRA ""
+# define bgLRD ""
+# define bgLGN ""
+# define bgYEL ""
+# define bgLBL ""
+# define bgPIN ""
+# define bgLCY ""
+# define bgBRI ""
+
+#endif /* ^USE_COLOR */
+
+/*************************
+ * Box drawing sequences *
+ *************************/
+
+#ifdef FANCY_BOXES
+
+# define SET_G1 "\x1b)0" /* Set G1 for box drawing */
+# define RESET_G1 "\x1b)B" /* Reset G1 to ASCII */
+# define bSTART "\x0e" /* Enter G1 drawing mode */
+# define bSTOP "\x0f" /* Leave G1 drawing mode */
+# define bH "q" /* Horizontal line */
+# define bV "x" /* Vertical line */
+# define bLT "l" /* Left top corner */
+# define bRT "k" /* Right top corner */
+# define bLB "m" /* Left bottom corner */
+# define bRB "j" /* Right bottom corner */
+# define bX "n" /* Cross */
+# define bVR "t" /* Vertical, branch right */
+# define bVL "u" /* Vertical, branch left */
+# define bHT "v" /* Horizontal, branch top */
+# define bHB "w" /* Horizontal, branch bottom */
+
+#else
+
+# define SET_G1 ""
+# define RESET_G1 ""
+# define bSTART ""
+# define bSTOP ""
+# define bH "-"
+# define bV "|"
+# define bLT "+"
+# define bRT "+"
+# define bLB "+"
+# define bRB "+"
+# define bX "+"
+# define bVR "+"
+# define bVL "+"
+# define bHT "+"
+# define bHB "+"
+
+#endif /* ^FANCY_BOXES */
+
+/***********************
+ * Misc terminal codes *
+ ***********************/
+
+#define TERM_HOME "\x1b[H"
+#define TERM_CLEAR TERM_HOME "\x1b[2J"
+#define cEOL "\x1b[0K"
+#define CURSOR_HIDE "\x1b[?25l"
+#define CURSOR_SHOW "\x1b[?25h"
+
+/************************
+ * Debug & error macros *
+ ************************/
+
+/* Just print stuff to the appropriate stream. */
+
+#ifdef MESSAGES_TO_STDOUT
+# define SAYF(x...) printf(x)
+#else
+# define SAYF(x...) fprintf(stderr, x)
+#endif /* ^MESSAGES_TO_STDOUT */
+
+/* Show a prefixed warning. */
+
+#define WARNF(x...) do { \
+ SAYF(cYEL "[!] " cBRI "WARNING: " cRST x); \
+ SAYF(cRST "\n"); \
+ } while (0)
+
+/* Show a prefixed "doing something" message. */
+
+#define ACTF(x...) do { \
+ SAYF(cLBL "[*] " cRST x); \
+ SAYF(cRST "\n"); \
+ } while (0)
+
+/* Show a prefixed "success" message. */
+
+#define OKF(x...) do { \
+ SAYF(cLGN "[+] " cRST x); \
+ SAYF(cRST "\n"); \
+ } while (0)
+
+/* Show a prefixed fatal error message (not used in afl). */
+
+#define BADF(x...) do { \
+ SAYF(cLRD "\n[-] " cRST x); \
+ SAYF(cRST "\n"); \
+ } while (0)
+
+/* Die with a verbose non-OS fatal error message. */
+
+#define FATAL(x...) do { \
+ SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \
+ cBRI x); \
+ SAYF(cLRD "\n Location : " cRST "%s(), %s:%u\n\n", \
+ __FUNCTION__, __FILE__, __LINE__); \
+ exit(1); \
+ } while (0)
+
+/* Die by calling abort() to provide a core dump. */
+
+#define ABORT(x...) do { \
+ SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \
+ cBRI x); \
+ SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n\n", \
+ __FUNCTION__, __FILE__, __LINE__); \
+ abort(); \
+ } while (0)
+
+/* Die while also including the output of perror(). */
+
+#define PFATAL(x...) do { \
+ fflush(stdout); \
+ SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] SYSTEM ERROR : " \
+ cBRI x); \
+ SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n", \
+ __FUNCTION__, __FILE__, __LINE__); \
+ SAYF(cLRD " OS message : " cRST "%s\n", strerror(errno)); \
+ exit(1); \
+ } while (0)
+
+/* Die with FATAL() or PFATAL() depending on the value of res (used to
+ interpret different failure modes for read(), write(), etc). */
+
+#define RPFATAL(res, x...) do { \
+ if (res < 0) PFATAL(x); else FATAL(x); \
+ } while (0)
+
+/* Error-checking versions of read() and write() that call RPFATAL() as
+ appropriate. */
+
+#define ck_write(fd, buf, len, fn) do { \
+ u32 _len = (len); \
+ s32 _res = write(fd, buf, _len); \
+ if (_res != _len) RPFATAL(_res, "Short write to %s", fn); \
+ } while (0)
+
+#define ck_read(fd, buf, len, fn) do { \
+ u32 _len = (len); \
+ s32 _res = read(fd, buf, _len); \
+ if (_res != _len) RPFATAL(_res, "Short read from %s", fn); \
+ } while (0)
+
+#endif /* ! _HAVE_DEBUG_H */
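For orientation, the sketch below shows how the message and error-checking
macros above are typically combined. It is illustrative only and not part of
this patch; the output file name is made up, and the extra includes are needed
because the macro expansions rely on fprintf() and strerror():

    #include <stdio.h>   /* SAYF() expands to printf()/fprintf() */
    #include <string.h>  /* PFATAL() uses strerror(errno)        */
    #include <fcntl.h>
    #include <unistd.h>

    #include "debug.h"

    static void save_buf(const u8 *buf, u32 len) {

      s32 fd = open("out.bin", O_WRONLY | O_CREAT | O_TRUNC, 0600);
      if (fd < 0) PFATAL("Unable to create 'out.bin'");

      /* A short write triggers RPFATAL(), i.e. FATAL() or PFATAL(). */
      ck_write(fd, buf, len, "out.bin");
      close(fd);

      OKF("Saved %u bytes to out.bin.", len);

    }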
diff --git a/custom_mutators/afl/havoc.c b/custom_mutators/afl/havoc.c
new file mode 100644
index 00000000..c95e805b
--- /dev/null
+++ b/custom_mutators/afl/havoc.c
@@ -0,0 +1,89 @@
+#include "types.h"
+#include "config.h"
+#include "debug.h"
+#include "alloc-inl.h"
+
+#include <stdio.h>
+#include <stdbool.h>
+
+void *afl_custom_init(void* p, unsigned int s);
+void afl_custom_deinit(void* p);
+size_t afl_custom_fuzz(void *data, u8 *buf, size_t buf_size, u8 **out_buf,
+ u8 *add_buf, size_t add_buf_size, size_t max_size);
+
+u8* buf; long size;
+bool read_seed(const char* file)
+{
+ FILE* f = fopen(file, "rb");
+ if (f == NULL)
+ {
+ perror("fopen failed");
+ return false;
+ }
+ int r = fseek(f, 0, SEEK_END);
+ if (r != 0)
+ {
+ perror("fseek failed");
+ return false;
+ }
+ size = ftell(f);
+ if (size < 0)
+ {
+ perror("ftell failed");
+ return false;
+ }
+ r = fseek(f, 0, SEEK_SET);
+ if (r != 0)
+ {
+ perror("fseek failed");
+ return false;
+ }
+ buf = malloc(size);
+ if (fread(buf, 1, size, f) != size)
+ {
+ perror("fread failed");
+ return false;
+ }
+ fclose(f);
+ return true;
+}
+
+int main(int argc, char const *argv[])
+{
+ if (argc < 4)
+ {
+ fprintf(stderr, "Usage: havoc seed times outdir [-x dict]\n");
+ return 1;
+ }
+ afl_custom_init(NULL, 0);
+ if (!read_seed(argv[1]))
+ return 1;
+ size_t times = strtoul(argv[2], NULL, 10);
+ for (size_t i = 0; i < times; ++i)
+ {
+ u8* out_buf;
+ size_t out_len = afl_custom_fuzz(
+ NULL, buf, size, &out_buf, buf, size, MAX_FILE);
+ u8* out_file = alloc_printf("%s/id:%.6lu.bin", argv[3], i);
+
+ FILE* f = fopen(out_file, "wb");
+ if (f == NULL)
+ {
+ perror("fopen failed");
+ return 1;
+ }
+ if (fwrite(out_buf, 1, out_len, f) != out_len)
+ {
+ perror("fwrite failed");
+ return 1;
+ }
+ if (fclose(f))
+ {
+ perror("fclose failed");
+ return 1;
+ }
+ ck_free(out_file);
+ }
+ afl_custom_deinit(NULL);
+ return 0;
+} \ No newline at end of file
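For reference, the two build products are used differently: the standalone
havoc driver writes a given number of mutated copies of one seed into an output
directory (which must already exist), while the shared object is meant to be
loaded by afl-fuzz through AFL++'s custom mutator interface. The paths and the
iteration count below are only examples:

    $ mkdir -p out
    $ ./havoc testcase.bin 1000 out
    $ AFL_CUSTOM_MUTATOR_LIBRARY=$PWD/afl-mutator.so afl-fuzz -i in -o findings -- ./target @@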
diff --git a/custom_mutators/afl/types.h b/custom_mutators/afl/types.h
new file mode 100644
index 00000000..784d3a7a
--- /dev/null
+++ b/custom_mutators/afl/types.h
@@ -0,0 +1,86 @@
+/*
+ american fuzzy lop - type definitions and minor macros
+ ------------------------------------------------------
+
+ Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+ Copyright 2013, 2014, 2015 Google Inc. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at:
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ */
+
+#ifndef _HAVE_TYPES_H
+#define _HAVE_TYPES_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+
+/*
+
+ Ugh. There is an unintended compiler / glibc #include glitch caused by
+  combining the u64 type and %llu in format strings, necessitating a workaround.
+
+ In essence, the compiler is always looking for 'unsigned long long' for %llu.
+ On 32-bit systems, the u64 type (aliased to uint64_t) is expanded to
+ 'unsigned long long' in <bits/types.h>, so everything checks out.
+
+ But on 64-bit systems, it is #ifdef'ed in the same file as 'unsigned long'.
+ Now, it only happens in circumstances where the type happens to have the
+ expected bit width, *but* the compiler does not know that... and complains
+ about 'unsigned long' being unsafe to pass to %llu.
+
+ */
+
+#ifdef __x86_64__
+typedef unsigned long long u64;
+#else
+typedef uint64_t u64;
+#endif /* ^__x86_64__ */
+
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+
+#ifndef MIN
+# define MIN(_a,_b) ((_a) > (_b) ? (_b) : (_a))
+# define MAX(_a,_b) ((_a) > (_b) ? (_a) : (_b))
+#endif /* !MIN */
+
+#define SWAP16(_x) ({ \
+ u16 _ret = (_x); \
+ (u16)((_ret << 8) | (_ret >> 8)); \
+ })
+
+#define SWAP32(_x) ({ \
+ u32 _ret = (_x); \
+ (u32)((_ret << 24) | (_ret >> 24) | \
+ ((_ret << 8) & 0x00FF0000) | \
+ ((_ret >> 8) & 0x0000FF00)); \
+ })
+
+#ifdef AFL_LLVM_PASS
+# define AFL_R(x) (random() % (x))
+#else
+# define R(x) (random() % (x))
+#endif /* ^AFL_LLVM_PASS */
+
+#define STRINGIFY_INTERNAL(x) #x
+#define STRINGIFY(x) STRINGIFY_INTERNAL(x)
+
+#define MEM_BARRIER() \
+ asm volatile("" ::: "memory")
+
+#define likely(_x) __builtin_expect(!!(_x), 1)
+#define unlikely(_x) __builtin_expect(!!(_x), 0)
+
+#endif /* ! _HAVE_TYPES_H */
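The u64/%llu discussion above is easiest to see with a format string, and the
SWAP* helpers are easiest to check against a known constant. A small
illustrative program (not part of this patch):

    #include <stdio.h>

    #include "types.h"

    int main(void) {

      u64 total = 1ULL << 40;
      printf("total  = %llu\n", total);                 /* OK on x86_64 thanks to
                                                           the typedef above     */
      printf("swap16 = 0x%04x\n", SWAP16(0x1234));      /* prints 0x3412         */
      printf("swap32 = 0x%08x\n", SWAP32(0x11223344u)); /* prints 0x44332211     */
      return 0;

    }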