author     Dominik Maier <domenukk@gmail.com>    2020-03-09 11:24:10 +0100
committer  GitHub <noreply@github.com>           2020-03-09 11:24:10 +0100
commit     dba3595c0ae26795a78753ea33ff0c3edf9d6328 (patch)
tree       03bcaf132705d6de222ef8d6eff9b5bc2f03ce54
parent     c159b872ef17d4c09238f99ac11021e12975cb3a (diff)
download   afl++-dba3595c0ae26795a78753ea33ff0c3edf9d6328.tar.gz
AFL without globals (#220)
* moved globals to afl, shm and fsrv (see the sketch after this list)
* moved argv to afl state, fewer bugs
* fixed unicorn documentation
* lists everywhere
* merged custom mutators
* fixed leaks in afl-fuzz
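
Editorial note: the refactor these bullets describe is mechanical. Every former global becomes a field of afl_state_t (or of afl_forkserver_t / sharedmem_t), and every function that used to reach for a global now receives the state as its first parameter. A representative before/after sketch, assuming afl-fuzz.h from the diff below is included; the parameter names afl and virgin_map are added here only for readability:

  /* before: the fuzzer state was implicit in globals    */
  /*   u8   has_new_bits(u8 *virgin_map);                */
  /*   void cull_queue(void);                            */

  /* after: the state is threaded through explicitly     */
  u8   has_new_bits(afl_state_t *afl, u8 *virgin_map);
  void cull_queue(afl_state_t *afl);

  /* a call site then reads roughly:                     */
  /*   if (has_new_bits(afl, afl->virgin_bits)) ...      */
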
-rw-r--r--  include/afl-fuzz.h                    761
-rw-r--r--  include/afl-prealloc.h                101
-rw-r--r--  include/common.h                       11
-rw-r--r--  include/forkserver.h                   41
-rw-r--r--  include/list.h                        133
-rw-r--r--  include/sharedmem.h                    29
-rw-r--r--  src/afl-analyze.c                      14
-rw-r--r--  src/afl-common.c                       36
-rw-r--r--  src/afl-forkserver.c                  133
-rw-r--r--  src/afl-fuzz-bitmap.c                 148
-rw-r--r--  src/afl-fuzz-cmplog.c                 212
-rw-r--r--  src/afl-fuzz-extras.c                 118
-rw-r--r--  src/afl-fuzz-globals.c                312
-rw-r--r--  src/afl-fuzz-init.c                   478
-rw-r--r--  src/afl-fuzz-mutators.c               184
-rw-r--r--  src/afl-fuzz-one.c                   2077
-rw-r--r--  src/afl-fuzz-python.c                  68
-rw-r--r--  src/afl-fuzz-queue.c                  128
-rw-r--r--  src/afl-fuzz-redqueen.c               175
-rw-r--r--  src/afl-fuzz-run.c                    348
-rw-r--r--  src/afl-fuzz-stats.c                  398
-rw-r--r--  src/afl-fuzz.c                        571
-rw-r--r--  src/afl-sharedmem.c                   131
-rw-r--r--  src/afl-showmap.c                     253
-rw-r--r--  src/afl-tmin.c                        199
-rw-r--r--  unicorn_mode/samples/c/harness.c        4
26 files changed, 3610 insertions, 3453 deletions
diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h
index d5ad4653..c773d085 100644
--- a/include/afl-fuzz.h
+++ b/include/afl-fuzz.h
@@ -46,6 +46,7 @@
 #include "sharedmem.h"
 #include "forkserver.h"
 #include "common.h"
+#include "list.h"
 
 #include <stdio.h>
 #include <unistd.h>
@@ -106,6 +107,11 @@
 #define CASE_PREFIX "id_"
 #endif                                                    /* ^!SIMPLE_FILES */
 
+extern s8  interesting_8[INTERESTING_8_LEN];
+extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN];
+extern s32 interesting_32[INTERESTING_8_LEN + INTERESTING_16_LEN + INTERESTING_32_LEN];
+
+
 struct queue_entry {
 
   u8* fname;                            /* File name for the test case      */
@@ -198,45 +204,10 @@ enum {
 
 };
 
-/* MOpt:
-   Lots of globals, but mostly for the status UI and other things where it
-   really makes no sense to haul them around as function parameters. */
-extern u64 limit_time_puppet, orig_hit_cnt_puppet, last_limit_time_start,
-    tmp_pilot_time, total_pacemaker_time, total_puppet_find, temp_puppet_find,
-    most_time_key, most_time, most_execs_key, most_execs, old_hit_count;
-
-extern s32 SPLICE_CYCLES_puppet, limit_time_sig, key_puppet, key_module;
-
-extern double w_init, w_end, w_now;
-
-extern s32 g_now;
-extern s32 g_max;
-
 #define operator_num 16
 #define swarm_num 5
 #define period_core 500000
 
-extern u64 tmp_core_time;
-extern s32 swarm_now;
-
-extern double x_now[swarm_num][operator_num], L_best[swarm_num][operator_num],
-    eff_best[swarm_num][operator_num], G_best[operator_num],
-    v_now[swarm_num][operator_num], probability_now[swarm_num][operator_num],
-    swarm_fitness[swarm_num];
-
-extern u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per
-                                                           fuzz stage    */
-    stage_finds_puppet_v2[swarm_num][operator_num],
-    stage_cycles_puppet_v2[swarm_num][operator_num],
-    stage_cycles_puppet_v3[swarm_num][operator_num],
-    stage_cycles_puppet[swarm_num][operator_num],
-    operator_finds_puppet[operator_num],
-    core_operator_finds_puppet[operator_num],
-    core_operator_finds_puppet_v2[operator_num],
-    core_operator_cycles_puppet[operator_num],
-    core_operator_cycles_puppet_v2[operator_num],
-    core_operator_cycles_puppet_v3[operator_num];   /* Execs per fuzz stage */
-
 #define RAND_C (rand() % 1000 * 0.001)
 #define v_max 1
 #define v_min 0.05
@@ -249,37 +220,6 @@ extern u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per
 #define STAGE_OverWrite75 15
 #define period_pilot 50000
 
-extern double period_pilot_tmp;
-extern s32    key_lv;
-
-extern u8 *in_dir,                      /* Input directory with test cases  */
-    *out_dir,                           /* Working & output directory       */
-    *tmp_dir,                           /* Temporary directory for input    */
-    *sync_dir,                          /* Synchronization directory        */
-    *sync_id,                           /* Fuzzer ID                        */
-    *power_name,                        /* Power schedule name              */
-    *use_banner,                        /* Display banner                   */
-    *in_bitmap,                         /* Input bitmap                     */
-    *file_extension,                    /* File extension                   */
-    *orig_cmdline,                      /* Original command line            */
-    *doc_path,                          /* Path to documentation dir        */
-    *infoexec,                         /* Command to execute on a new crash */
-    *out_file;                          /* File to fuzz, if any             */
-
-extern u32 exec_tmout;                  /* Configurable exec timeout (ms)   */
-extern u32 hang_tmout;                  /* Timeout used for hang det (ms)   */
-
-extern u64 mem_limit;                   /* Memory cap for child (MB)        */
-
-extern u8 cal_cycles,                   /* Calibration cycles defaults      */
-    cal_cycles_long,                    /* Calibration cycles defaults      */
-    no_unlink,                          /* do not unlink cur_input          */
-    use_stdin,                          /* use stdin for sending data       */
-    debug,                              /* Debug mode                       */
-    custom_only;                        /* Custom mutator only mode         */
-
-extern u32 stats_update_freq;           /* Stats update frequency (execs)   */
-
 enum {
 
   /* 00 */ EXPLORE, /* AFL default, Exploration-based constant schedule */
@@ -293,172 +233,316 @@ enum {
 
 };
 
-extern char* power_names[POWER_SCHEDULES_NUM];
+extern u8 *doc_path;                  /* Path to documentation dir        */
+
+/* Python stuff */
+#ifdef USE_PYTHON
 
-extern u8 schedule;                     /* Power schedule (default: EXPLORE)*/
-extern u8 havoc_max_mult;
-
-extern u8 use_radamsa;
-extern size_t (*radamsa_mutate_ptr)(u8*, size_t, u8*, size_t, u32);
-
-extern u8 skip_deterministic,           /* Skip deterministic stages?       */
-    force_deterministic,                /* Force deterministic stages?      */
-    use_splicing,                       /* Recombine input files?           */
-    dumb_mode,                          /* Run in non-instrumented mode?    */
-    score_changed,                      /* Scoring for favorites changed?   */
-    kill_signal,                        /* Signal that killed the child     */
-    resuming_fuzz,                      /* Resuming an older fuzzing job?   */
-    timeout_given,                      /* Specific timeout given?          */
-    not_on_tty,                         /* stdout is not a tty              */
-    term_too_small,                     /* terminal dimensions too small    */
-    no_forkserver,                      /* Disable forkserver?              */
-    crash_mode,                         /* Crash mode! Yeah!                */
-    in_place_resume,                    /* Attempt in-place resume?         */
-    autoresume,                         /* Resume if out_dir exists?        */
-    auto_changed,                       /* Auto-generated tokens changed?   */
-    no_cpu_meter_red,                   /* Feng shui on the status screen   */
-    no_arith,                           /* Skip most arithmetic ops         */
-    shuffle_queue,                      /* Shuffle input queue?             */
-    bitmap_changed,                     /* Time to update bitmap?           */
-    qemu_mode,                          /* Running in QEMU mode?            */
-    unicorn_mode,                       /* Running in Unicorn mode?         */
-    use_wine,                           /* Use WINE with QEMU mode          */
-    skip_requested,                     /* Skip request, via SIGUSR1        */
-    run_over10m,                        /* Run time over 10 minutes?        */
-    persistent_mode,                    /* Running in persistent mode?      */
-    deferred_mode,                      /* Deferred forkserver mode?        */
-    fixed_seed,                         /* do not reseed                    */
-    fast_cal,                           /* Try to calibrate faster?         */
-    uses_asan,                          /* Target uses ASAN?                */
-    disable_trim;                       /* Never trim in fuzz_one           */
-
-extern s32 out_fd,                      /* Persistent fd for out_file       */
-#ifndef HAVE_ARC4RANDOM
-    dev_urandom_fd,                     /* Persistent fd for /dev/urandom   */
+// because Python sets stuff it should not ...
+#ifdef _POSIX_C_SOURCE
+#define _SAVE_POSIX_C_SOURCE _POSIX_C_SOURCE
+#undef _POSIX_C_SOURCE
 #endif
-    dev_null_fd,                        /* Persistent fd for /dev/null      */
-    fsrv_ctl_fd,                        /* Fork server control pipe (write) */
-    fsrv_st_fd;                         /* Fork server status pipe (read)   */
-
-extern s32 forksrv_pid,                 /* PID of the fork server           */
-    child_pid,                          /* PID of the fuzzed program        */
-    out_dir_fd;                         /* FD of the lock file              */
-
-extern u8* trace_bits;                  /* SHM with instrumentation bitmap  */
-
-extern u8 virgin_bits[MAP_SIZE],        /* Regions yet untouched by fuzzing */
-    virgin_tmout[MAP_SIZE],             /* Bits we haven't seen in tmouts   */
-    virgin_crash[MAP_SIZE];             /* Bits we haven't seen in crashes  */
-
-extern u8 var_bytes[MAP_SIZE];          /* Bytes that appear to be variable */
-
-extern volatile u8 stop_soon,           /* Ctrl-C pressed?                  */
-    clear_screen,                       /* Window resized?                  */
-    child_timed_out;                    /* Traced process timed out?        */
-
-extern u32 queued_paths,                /* Total number of queued testcases */
-    queued_variable,                    /* Testcases with variable behavior */
-    queued_at_start,                    /* Total number of initial inputs   */
-    queued_discovered,                  /* Items discovered during this run */
-    queued_imported,                    /* Items imported via -S            */
-    queued_favored,                     /* Paths deemed favorable           */
-    queued_with_cov,                    /* Paths with new coverage bytes    */
-    pending_not_fuzzed,                 /* Queued but not done yet          */
-    pending_favored,                    /* Pending favored paths            */
-    cur_skipped_paths,                  /* Abandoned inputs in cur cycle    */
-    cur_depth,                          /* Current path depth               */
-    max_depth,                          /* Max path depth                   */
-    useless_at_start,                   /* Number of useless starting paths */
-    var_byte_count,                     /* Bitmap bytes with var behavior   */
-    current_entry,                      /* Current queue entry ID           */
-    havoc_div;                          /* Cycle count divisor for havoc    */
-
-extern u64 total_crashes,               /* Total number of crashes          */
-    unique_crashes,                     /* Crashes with unique signatures   */
-    total_tmouts,                       /* Total number of timeouts         */
-    unique_tmouts,                      /* Timeouts with unique signatures  */
-    unique_hangs,                       /* Hangs with unique signatures     */
-    total_execs,                        /* Total execve() calls             */
-    slowest_exec_ms,                    /* Slowest testcase non hang in ms  */
-    start_time,                         /* Unix start time (ms)             */
-    last_path_time,                     /* Time for most recent path (ms)   */
-    last_crash_time,                    /* Time for most recent crash (ms)  */
-    last_hang_time,                     /* Time for most recent hang (ms)   */
-    last_crash_execs,                   /* Exec counter at last crash       */
-    queue_cycle,                        /* Queue round counter              */
-    cycles_wo_finds,                    /* Cycles without any new paths     */
-    trim_execs,                         /* Execs done to trim input files   */
-    bytes_trim_in,                      /* Bytes coming into the trimmer    */
-    bytes_trim_out,                     /* Bytes coming outa the trimmer    */
-    blocks_eff_total,                   /* Blocks subject to effector maps  */
-    blocks_eff_select;                  /* Blocks selected as fuzzable      */
-
-extern u32 subseq_tmouts;               /* Number of timeouts in a row      */
-
-extern u8 *stage_name,                  /* Name of the current fuzz stage   */
-    *stage_short,                       /* Short stage name                 */
-    *syncing_party;                     /* Currently syncing with...        */
-
-extern s32 stage_cur, stage_max;        /* Stage progression                */
-extern s32 splicing_with;               /* Splicing with which test case?   */
-
-extern u32 master_id, master_max;       /* Master instance job splitting    */
-
-extern u32 syncing_case;                /* Syncing with case #...           */
-
-extern s32 stage_cur_byte,              /* Byte offset of current stage op  */
-    stage_cur_val;                      /* Value used for stage op          */
-
-extern u8 stage_val_type;               /* Value type (STAGE_VAL_*)         */
-
-extern u64 stage_finds[32],             /* Patterns found per fuzz stage    */
-    stage_cycles[32];                   /* Execs per fuzz stage             */
-
-#ifndef HAVE_ARC4RANDOM
-extern u32 rand_cnt;                    /* Random number counter            */
+#ifdef _XOPEN_SOURCE
+#define _SAVE_XOPEN_SOURCE _XOPEN_SOURCE
+#undef _XOPEN_SOURCE
 #endif
 
-extern u32 rand_seed[2];
-extern s64 init_seed;
+#include <Python.h>
 
-extern u64 total_cal_us,                /* Total calibration time (us)      */
-    total_cal_cycles;                   /* Total calibration cycles         */
+#ifdef _SAVE_POSIX_C_SOURCE
+#ifdef _POSIX_C_SOURCE
+#undef _POSIX_C_SOURCE
+#endif
+#define _POSIX_C_SOURCE _SAVE_POSIX_C_SOURCE
+#endif
+#ifdef _SAVE_XOPEN_SOURCE
+#ifdef _XOPEN_SOURCE
+#undef _XOPEN_SOURCE
+#endif
+#define _XOPEN_SOURCE _SAVE_XOPEN_SOURCE
+#endif
 
-extern u64 total_bitmap_size,           /* Total bit count for all bitmaps  */
-    total_bitmap_entries;               /* Number of bitmaps counted        */
 
-extern s32 cpu_core_count;              /* CPU core count                   */
+enum {
+
+  /* 00 */ PY_FUNC_INIT,
+  /* 01 */ PY_FUNC_FUZZ,
+  /* 02 */ PY_FUNC_PRE_SAVE,
+  /* 03 */ PY_FUNC_INIT_TRIM,
+  /* 04 */ PY_FUNC_POST_TRIM,
+  /* 05 */ PY_FUNC_TRIM,
+  /* 06 */ PY_FUNC_HAVOC_MUTATION,
+  /* 07 */ PY_FUNC_HAVOC_MUTATION_PROBABILITY,
+  /* 08 */ PY_FUNC_QUEUE_GET,
+  /* 09 */ PY_FUNC_QUEUE_NEW_ENTRY,
+  PY_FUNC_COUNT
+
+};
+
+#endif
+
+typedef struct MOpt_globals {
+
+  u64*      finds;
+  u64*      finds_v2;
+  u64*      cycles;
+  u64*      cycles_v2;
+  u64*      cycles_v3;
+  u32       is_pilot_mode;
+  u64*      pTime;
+  u64       period;
+  char*     havoc_stagename;
+  char*     splice_stageformat;
+  char*     havoc_stagenameshort;
+  char*     splice_stagenameshort;
+
+} MOpt_globals_t;
+
+extern char* power_names[POWER_SCHEDULES_NUM];
+
+typedef struct afl_state {
+
+  /* Position of this state in the global states list */
+  u32 _id;
+
+  afl_forkserver_t fsrv;
+  sharedmem_t shm;
+
+  char** argv;                              /* argv if needed */
+
+  /* MOpt:
+    Lots of globals, but mostly for the status UI and other things where it
+    really makes no sense to haul them around as function parameters. */
+  u64 limit_time_puppet, orig_hit_cnt_puppet, last_limit_time_start,
+      tmp_pilot_time, total_pacemaker_time, total_puppet_find, temp_puppet_find,
+      most_time_key, most_time, most_execs_key, most_execs, old_hit_count;
+
+  MOpt_globals_t mopt_globals_core, mopt_globals_pilot;
+
+  s32 SPLICE_CYCLES_puppet, limit_time_sig, key_puppet, key_module;
+
+  double w_init, w_end, w_now;
+
+  s32 g_now;
+  s32 g_max;
+
+  u64 tmp_core_time;
+  s32 swarm_now;
+
+  double x_now[swarm_num][operator_num], L_best[swarm_num][operator_num],
+      eff_best[swarm_num][operator_num], G_best[operator_num],
+      v_now[swarm_num][operator_num], probability_now[swarm_num][operator_num],
+      swarm_fitness[swarm_num];
+
+  u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per
+                                                            fuzz stage    */
+      stage_finds_puppet_v2[swarm_num][operator_num],
+      stage_cycles_puppet_v2[swarm_num][operator_num],
+      stage_cycles_puppet_v3[swarm_num][operator_num],
+      stage_cycles_puppet[swarm_num][operator_num],
+      operator_finds_puppet[operator_num],
+      core_operator_finds_puppet[operator_num],
+      core_operator_finds_puppet_v2[operator_num],
+      core_operator_cycles_puppet[operator_num],
+      core_operator_cycles_puppet_v2[operator_num],
+      core_operator_cycles_puppet_v3[operator_num];   /* Execs per fuzz stage */
+
+  double period_pilot_tmp;
+  s32    key_lv;
+
+  u8 *in_dir,                      /* Input directory with test cases  */
+      *out_dir,                    /* Working & output directory       */
+      *tmp_dir,                    /* Temporary directory for input    */
+      *sync_dir,                   /* Synchronization directory        */
+      *sync_id,                    /* Fuzzer ID                        */
+      *power_name,                 /* Power schedule name              */
+      *use_banner,                 /* Display banner                   */
+      *in_bitmap,                  /* Input bitmap                     */
+      *file_extension,             /* File extension                   */
+      *orig_cmdline,               /* Original command line            */
+      *infoexec;                   /* Command to execute on a new crash */
+
+  u32 hang_tmout;                  /* Timeout used for hang det (ms)   */
+
+  u8 cal_cycles,                   /* Calibration cycles defaults      */
+      cal_cycles_long,             /* Calibration cycles defaults      */
+      no_unlink,                   /* do not unlink cur_input          */
+      debug,                       /* Debug mode                       */
+      custom_only,                 /* Custom mutator only mode         */
+      python_only;                 /* Python-only mode                 */
+
+  u32 stats_update_freq;           /* Stats update frequency (execs)   */
+
+  u8 schedule;                     /* Power schedule (default: EXPLORE)*/
+  u8 havoc_max_mult;
+
+  u8 use_radamsa;
+  size_t (*radamsa_mutate_ptr)(u8*, size_t, u8*, size_t, u32);
+
+  u8 skip_deterministic,           /* Skip deterministic stages?       */
+      force_deterministic,         /* Force deterministic stages?      */
+      use_splicing,                /* Recombine input files?           */
+      dumb_mode,                   /* Run in non-instrumented mode?    */
+      score_changed,               /* Scoring for favorites changed?   */
+      kill_signal,                 /* Signal that killed the child     */
+      resuming_fuzz,               /* Resuming an older fuzzing job?   */
+      timeout_given,               /* Specific timeout given?          */
+      not_on_tty,                  /* stdout is not a tty              */
+      term_too_small,              /* terminal dimensions too small    */
+      no_forkserver,               /* Disable forkserver?              */
+      crash_mode,                  /* Crash mode! Yeah!                */
+      in_place_resume,             /* Attempt in-place resume?         */
+      autoresume,                  /* Resume if afl->out_dir exists?   */
+      auto_changed,                /* Auto-generated tokens changed?   */
+      no_cpu_meter_red,            /* Feng shui on the status screen   */
+      no_arith,                    /* Skip most arithmetic ops         */
+      shuffle_queue,               /* Shuffle input queue?             */
+      bitmap_changed,              /* Time to update bitmap?           */
+      qemu_mode,                   /* Running in QEMU mode?            */
+      unicorn_mode,                /* Running in Unicorn mode?         */
+      use_wine,                    /* Use WINE with QEMU mode          */
+      skip_requested,              /* Skip request, via SIGUSR1        */
+      run_over10m,                 /* Run time over 10 minutes?        */
+      persistent_mode,             /* Running in persistent mode?      */
+      deferred_mode,               /* Deferred forkserver mode?        */
+      fixed_seed,                  /* do not reseed                    */
+      fast_cal,                    /* Try to calibrate faster?         */
+      disable_trim;                /* Never trim in fuzz_one           */
+
+  u8 virgin_bits[MAP_SIZE],        /* Regions yet untouched by fuzzing */
+      virgin_tmout[MAP_SIZE],      /* Bits we haven't seen in tmouts   */
+      virgin_crash[MAP_SIZE];      /* Bits we haven't seen in crashes  */
+
+  u8 var_bytes[MAP_SIZE];          /* Bytes that appear to be variable */
+
+  volatile u8 stop_soon,           /* Ctrl-C pressed?                  */
+      clear_screen;                /* Window resized?                  */
+
+  u32 queued_paths,                /* Total number of queued testcases */
+      queued_variable,             /* Testcases with variable behavior */
+      queued_at_start,             /* Total number of initial inputs   */
+      queued_discovered,           /* Items discovered during this run */
+      queued_imported,             /* Items imported via -S            */
+      queued_favored,              /* Paths deemed favorable           */
+      queued_with_cov,             /* Paths with new coverage bytes    */
+      pending_not_fuzzed,          /* Queued but not done yet          */
+      pending_favored,             /* Pending favored paths            */
+      cur_skipped_paths,           /* Abandoned inputs in cur cycle    */
+      cur_depth,                   /* Current path depth               */
+      max_depth,                   /* Max path depth                   */
+      useless_at_start,            /* Number of useless starting paths */
+      var_byte_count,              /* Bitmap bytes with var behavior   */
+      current_entry,               /* Current queue entry ID           */
+      havoc_div;                   /* Cycle count divisor for havoc    */
+
+  u64 total_crashes,               /* Total number of crashes          */
+      unique_crashes,              /* Crashes with unique signatures   */
+      total_tmouts,                /* Total number of timeouts         */
+      unique_tmouts,               /* Timeouts with unique signatures  */
+      unique_hangs,                /* Hangs with unique signatures     */
+      total_execs,                 /* Total execve() calls             */
+      slowest_exec_ms,             /* Slowest testcase non hang in ms  */
+      start_time,                  /* Unix start time (ms)             */
+      last_path_time,              /* Time for most recent path (ms)   */
+      last_crash_time,             /* Time for most recent crash (ms)  */
+      last_hang_time,              /* Time for most recent hang (ms)   */
+      last_crash_execs,            /* Exec counter at last crash       */
+      queue_cycle,                 /* Queue round counter              */
+      cycles_wo_finds,             /* Cycles without any new paths     */
+      trim_execs,                  /* Execs done to trim input files   */
+      bytes_trim_in,               /* Bytes coming into the trimmer    */
+      bytes_trim_out,              /* Bytes coming outa the trimmer    */
+      blocks_eff_total,            /* Blocks subject to effector maps  */
+      blocks_eff_select;           /* Blocks selected as fuzzable      */
+
+  u32 subseq_tmouts;               /* Number of timeouts in a row      */
+
+  u8 *stage_name,                  /* Name of the current fuzz stage   */
+      *stage_short,                /* Short stage name                 */
+      *syncing_party;              /* Currently syncing with...        */
+
+  u8 stage_name_buf64[64];         /* A 64-byte buffer for stage names  */
+
+  s32 stage_cur, stage_max;        /* Stage progression                */
+  s32 splicing_with;               /* Splicing with which test case?   */
+
+  u32 master_id, master_max;       /* Master instance job splitting    */
+
+  u32 syncing_case;                /* Syncing with case #...           */
+
+  s32 stage_cur_byte,              /* Byte offset of current stage op  */
+      stage_cur_val;               /* Value used for stage op          */
+
+  u8 stage_val_type;               /* Value type (STAGE_VAL_*)         */
+
+  u64 stage_finds[32],             /* Patterns found per fuzz stage    */
+      stage_cycles[32];            /* Execs per fuzz stage             */
+
+  #ifndef HAVE_ARC4RANDOM
+  u32 rand_cnt;                    /* Random number counter            */
+  #endif
+
+  u32 rand_seed[2];
+  s64 init_seed;
+
+  u64 total_cal_us,                /* Total calibration time (us)      */
+      total_cal_cycles;            /* Total calibration cycles         */
+
+  u64 total_bitmap_size,           /* Total bit count for all bitmaps  */
+      total_bitmap_entries;        /* Number of bitmaps counted        */
+
+  s32 cpu_core_count;              /* CPU core count                   */
 
 #ifdef HAVE_AFFINITY
+  s32 cpu_aff;                     /* Selected CPU core                */
+#endif                                              /* HAVE_AFFINITY */
 
-extern s32 cpu_aff;                     /* Selected CPU core                */
+  struct queue_entry *queue,       /* Fuzzing queue (linked list)      */
+      *queue_cur,                  /* Current offset within the queue  */
+      *queue_top,                  /* Top of the list                  */
+      *q_prev100;                  /* Previous 100 marker              */
 
-#endif                                                     /* HAVE_AFFINITY */
+  struct queue_entry*
+      top_rated[MAP_SIZE];         /* Top entries for bitmap bytes     */
 
-extern FILE* plot_file;                 /* Gnuplot output file              */
+  struct extra_data* extras;       /* Extra tokens to fuzz with        */
+  u32                extras_cnt;   /* Total number of tokens read      */
 
-extern struct queue_entry *queue,       /* Fuzzing queue (linked list)      */
-    *queue_cur,                         /* Current offset within the queue  */
-    *queue_top,                         /* Top of the list                  */
-    *q_prev100;                         /* Previous 100 marker              */
+  struct extra_data* a_extras;     /* Automatically selected extras    */
+  u32                a_extras_cnt; /* Total number of tokens available */
 
-extern struct queue_entry*
-    top_rated[MAP_SIZE];                /* Top entries for bitmap bytes     */
+  u8* (*post_handler)(u8* buf, u32* len);
 
-extern struct extra_data* extras;       /* Extra tokens to fuzz with        */
-extern u32                extras_cnt;   /* Total number of tokens read      */
+  /* CmpLog */
 
-extern struct extra_data* a_extras;     /* Automatically selected extras    */
-extern u32                a_extras_cnt; /* Total number of tokens available */
+  u8* cmplog_binary;
+  s32 cmplog_child_pid, cmplog_fsrv_pid;
 
-u8* (*post_handler)(u8* buf, u32* len);
+  /* Custom mutators */
+  struct custom_mutator* mutator;
 
-/* CmpLog */
+  /* cmplog forkserver ids */
+  s32 cmplog_fsrv_ctl_fd, cmplog_fsrv_st_fd;
 
-extern u8* cmplog_binary;
-extern s32 cmplog_child_pid, cmplog_forksrv_pid;
+  u8 describe_op_buf_256[256];     /* describe_op() will use this to return a string of up to 256 bytes */
 
-/* Custom mutators */
+#ifdef USE_PYTHON
+  /* Python Mutators */
+  PyObject *py_module;
+  PyObject *py_functions[PY_FUNC_COUNT];
+#endif
+
+#ifdef _AFL_DOCUMENT_MUTATIONS
+  u8  do_document;
+  u32 document_counter;
+#endif
+
+} afl_state_t;
+
+/* A global pointer to all instances is needed (for now) for signals to arrive */
+
+extern list_t afl_states;
 
 struct custom_mutator {
 
@@ -474,7 +558,7 @@ struct custom_mutator {
    *
    * @param seed Seed used for the mutation.
    */
-  void (*afl_custom_init)(unsigned int seed);
+  void (*afl_custom_init)(afl_state_t *afl, unsigned int seed);
 
   /**
    * Perform custom mutations on a given input
@@ -490,7 +574,7 @@ struct custom_mutator {
    * not produce data larger than max_size.
    * @return Size of the mutated output.
    */
-  size_t (*afl_custom_fuzz)(u8** buf, size_t buf_size, u8* add_buf,
+  size_t (*afl_custom_fuzz)(afl_state_t *afl, u8** buf, size_t buf_size, u8* add_buf,
                             size_t add_buf_size, size_t max_size);
 
   /**
@@ -507,7 +591,7 @@ struct custom_mutator {
    *     will release the memory after saving the test case.
    * @return Size of the output buffer after processing
    */
-  size_t (*afl_custom_pre_save)(u8* buf, size_t buf_size, u8** out_buf);
+  size_t (*afl_custom_pre_save)(afl_state_t *afl, u8* buf, size_t buf_size, u8** out_buf);
 
   /**
    * This method is called at the start of each trimming operation and receives
@@ -529,7 +613,7 @@ struct custom_mutator {
    * @param buf_size Size of the test case
    * @return The amount of possible iteration steps to trim the input
    */
-  u32 (*afl_custom_init_trim)(u8* buf, size_t buf_size);
+  u32 (*afl_custom_init_trim)(afl_state_t *afl, u8* buf, size_t buf_size);
 
   /**
    * This method is called for each trimming operation. It doesn't have any
@@ -547,7 +631,7 @@ struct custom_mutator {
    *     the memory after saving the test case.
    * @param[out] out_buf_size Pointer to the size of the trimmed test case
    */
-  void (*afl_custom_trim)(u8** out_buf, size_t* out_buf_size);
+  void (*afl_custom_trim)(afl_state_t *afl, u8** out_buf, size_t* out_buf_size);
 
   /**
    * This method is called after each trim operation to inform you if your
@@ -560,8 +644,8 @@ struct custom_mutator {
    * @return The next trim iteration index (from 0 to the maximum amount of
    *     steps returned in init_trim)
    */
-  u32 (*afl_custom_post_trim)(u8 success);
-
+  u32 (*afl_custom_post_trim)(afl_state_t *afl, u8 success);
+  
   /**
    * Perform a single custom mutation on a given input.
   * This mutation is stacked with the other mutations in havoc.
@@ -575,9 +659,8 @@ struct custom_mutator {
    *     not produce data larger than max_size.
    * @return Size of the mutated output.
    */
-  size_t (*afl_custom_havoc_mutation)(u8** buf, size_t buf_size,
-                                      size_t max_size);
-
+  size_t (*afl_custom_havoc_mutation)(afl_state_t *afl, u8** buf, size_t buf_size, size_t max_size);
+  
   /**
    * Return the probability (in percentage) that afl_custom_havoc_mutation
    * is called in havoc. By default it is 6 %.
@@ -586,7 +669,7 @@ struct custom_mutator {
    *
    * @return The probability (0-100).
    */
-  u8 (*afl_custom_havoc_mutation_probability)(void);
+  u8 (*afl_custom_havoc_mutation_probability)(afl_state_t *afl);
 
   /**
    * Determine whether the fuzzer should fuzz the current queue entry or not.
@@ -597,7 +680,7 @@ struct custom_mutator {
    * @return Return True(1) if the fuzzer will fuzz the queue entry, and
    *     False(0) otherwise.
    */
-  u8 (*afl_custom_queue_get)(const u8* filename);
+  u8 (*afl_custom_queue_get)(afl_state_t *afl, const u8* filename);
 
   /**
    * Allow for additional analysis (e.g. calling a different tool that does a
@@ -609,112 +692,57 @@ struct custom_mutator {
    * @param filename_orig_queue File name of the original queue entry. This
    *     argument can be NULL while initializing the fuzzer
    */
-  void (*afl_custom_queue_new_entry)(const u8* filename_new_queue,
+  void (*afl_custom_queue_new_entry)(afl_state_t *afl, const u8* filename_new_queue,
                                      const u8* filename_orig_queue);
 
 };
 
-extern struct custom_mutator* mutator;
-
-/* Interesting values, as per config.h */
-
-extern s8  interesting_8[INTERESTING_8_LEN];
-extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN];
-extern s32
-    interesting_32[INTERESTING_8_LEN + INTERESTING_16_LEN + INTERESTING_32_LEN];
-
-/* Python stuff */
-#ifdef USE_PYTHON
-
-// because Python sets stuff it should not ...
-#ifdef _POSIX_C_SOURCE
-#define _SAVE_POSIX_C_SOURCE _POSIX_C_SOURCE
-#undef _POSIX_C_SOURCE
-#endif
-#ifdef _XOPEN_SOURCE
-#define _SAVE_XOPEN_SOURCE _XOPEN_SOURCE
-#undef _XOPEN_SOURCE
-#endif
-
-#include <Python.h>
-
-#ifdef _SAVE_POSIX_C_SOURCE
-#ifdef _POSIX_C_SOURCE
-#undef _POSIX_C_SOURCE
-#endif
-#define _POSIX_C_SOURCE _SAVE_POSIX_C_SOURCE
-#endif
-#ifdef _SAVE_XOPEN_SOURCE
-#ifdef _XOPEN_SOURCE
-#undef _XOPEN_SOURCE
-#endif
-#define _XOPEN_SOURCE _SAVE_XOPEN_SOURCE
-#endif
-
-extern PyObject* py_module;
 
-enum {
 
-  /* 00 */ PY_FUNC_INIT,
-  /* 01 */ PY_FUNC_FUZZ,
-  /* 02 */ PY_FUNC_PRE_SAVE,
-  /* 03 */ PY_FUNC_INIT_TRIM,
-  /* 04 */ PY_FUNC_POST_TRIM,
-  /* 05 */ PY_FUNC_TRIM,
-  /* 06 */ PY_FUNC_HAVOC_MUTATION,
-  /* 07 */ PY_FUNC_HAVOC_MUTATION_PROBABILITY,
-  /* 08 */ PY_FUNC_QUEUE_GET,
-  /* 09 */ PY_FUNC_QUEUE_NEW_ENTRY,
-  PY_FUNC_COUNT
-
-};
-
-extern PyObject* py_functions[PY_FUNC_COUNT];
-
-#endif
+void afl_state_init(afl_state_t *);
+void afl_state_deinit(afl_state_t*);
 
 /**** Prototypes ****/
 
 /* Custom mutators */
-void setup_custom_mutator(void);
-void destroy_custom_mutator(void);
-u8   trim_case_custom(char** argv, struct queue_entry* q, u8* in_buf);
+void setup_custom_mutator(afl_state_t*);
+void destroy_custom_mutator(afl_state_t*);
+u8   trim_case_custom(afl_state_t *, struct queue_entry* q, u8* in_buf);
 
 /* Python */
 #ifdef USE_PYTHON
 
-int  init_py_module(u8*);
-void finalize_py_module();
+int    init_py_module(afl_state_t*, u8*);
+void   finalize_py_module(afl_state_t*);
 
-void   init_py(unsigned int);
-size_t fuzz_py(u8**, size_t, u8*, size_t, size_t);
-size_t pre_save_py(u8*, size_t, u8**);
-u32    init_trim_py(u8*, size_t);
-u32    post_trim_py(u8);
-void   trim_py(u8**, size_t*);
-size_t havoc_mutation_py(u8**, size_t, size_t);
-u8     havoc_mutation_probability_py(void);
-u8     queue_get_py(const u8*);
-void   queue_new_entry_py(const u8*, const u8*);
+void   init_py(afl_state_t*, unsigned int);
+size_t fuzz_py(afl_state_t*, u8**, size_t, u8*, size_t, size_t);
+size_t pre_save_py(afl_state_t*, u8*, size_t, u8**);
+u32    init_trim_py(afl_state_t*, u8*, size_t);
+u32    post_trim_py(afl_state_t*, u8);
+void   trim_py(afl_state_t*, u8**, size_t*);
+size_t havoc_mutation_py(afl_state_t *, u8**, size_t, size_t);
+u8     havoc_mutation_probability_py(afl_state_t*);
+u8     queue_get_py(afl_state_t*, const u8*);
+void   queue_new_entry_py(afl_state_t*, const u8*, const u8*);
 
 #endif
 
 /* Queue */
 
-void mark_as_det_done(struct queue_entry*);
-void mark_as_variable(struct queue_entry*);
-void mark_as_redundant(struct queue_entry*, u8);
-void add_to_queue(u8*, u32, u8);
-void destroy_queue(void);
-void update_bitmap_score(struct queue_entry*);
-void cull_queue(void);
-u32  calculate_score(struct queue_entry*);
+void mark_as_det_done(afl_state_t*, struct queue_entry*);
+void mark_as_variable(afl_state_t*, struct queue_entry*);
+void mark_as_redundant(afl_state_t*, struct queue_entry*, u8);
+void add_to_queue(afl_state_t*, u8*, u32, u8);
+void destroy_queue(afl_state_t*);
+void update_bitmap_score(afl_state_t*, struct queue_entry*);
+void cull_queue(afl_state_t*);
+u32  calculate_score(afl_state_t*, struct queue_entry*);
 
 /* Bitmap */
 
-void write_bitmap(void);
-void read_bitmap(u8*);
-u8   has_new_bits(u8*);
+void read_bitmap(afl_state_t*, u8*);
+void write_bitmap(afl_state_t*);
 u32  count_bits(u8*);
 u32  count_bytes(u8*);
 u32  count_non_255_bytes(u8*);
@@ -728,9 +756,10 @@ void classify_counts(u32*);
 void init_count_class16(void);
 void minimize_bits(u8*, u8*);
 #ifndef SIMPLE_FILES
-u8* describe_op(u8);
+u8* describe_op(afl_state_t*, u8);
 #endif
-u8 save_if_interesting(char**, void*, u32, u8);
+u8 save_if_interesting(afl_state_t*, void*, u32, u8);
+u8 has_new_bits(afl_state_t *, u8*);
 
 /* Misc */
 
@@ -741,75 +770,71 @@ u8* DTD(u64, u64);
 
 /* Extras */
 
-void load_extras_file(u8*, u32*, u32*, u32);
-void load_extras(u8*);
-void maybe_add_auto(u8*, u32);
-void save_auto(void);
-void load_auto(void);
-void destroy_extras(void);
+void load_extras_file(afl_state_t*, u8*, u32*, u32*, u32);
+void load_extras(afl_state_t*, u8*);
+void maybe_add_auto(afl_state_t*, u8*, u32);
+void save_auto(afl_state_t*);
+void load_auto(afl_state_t*);
+void destroy_extras(afl_state_t*);
 
 /* Stats */
 
-void write_stats_file(double, double, double);
-void maybe_update_plot_file(double, double);
-void show_stats(void);
-void show_init_stats(void);
+void write_stats_file(afl_state_t*, double, double, double);
+void maybe_update_plot_file(afl_state_t*, double, double);
+void show_stats(afl_state_t*);
+void show_init_stats(afl_state_t*);
 
 /* Run */
 
-u8   run_target(char**, u32);
-void write_to_testcase(void*, u32);
-void write_with_gap(void*, u32, u32, u32);
-u8   calibrate_case(char**, struct queue_entry*, u8*, u32, u8);
-void sync_fuzzers(char**);
-u8   trim_case(char**, struct queue_entry*, u8*);
-u8   common_fuzz_stuff(char**, u8*, u32);
+u8   run_target(afl_state_t*, u32);
+void write_to_testcase(afl_state_t*, void*, u32);
+u8   calibrate_case(afl_state_t*, struct queue_entry*, u8*, u32, u8);
+void sync_fuzzers(afl_state_t*);
+u8   trim_case(afl_state_t*, struct queue_entry*, u8*);
+u8   common_fuzz_stuff(afl_state_t*, u8*, u32);
 
 /* Fuzz one */
 
-u8   fuzz_one_original(char**);
-u8   pilot_fuzzing(char**);
-u8   core_fuzzing(char**);
-void pso_updating(void);
-u8   fuzz_one(char**);
+u8   fuzz_one_original(afl_state_t*);
+u8   pilot_fuzzing(afl_state_t*);
+u8   core_fuzzing(afl_state_t*);
+void pso_updating(afl_state_t*);
+u8   fuzz_one(afl_state_t*);
 
 /* Init */
 
 #ifdef HAVE_AFFINITY
-void bind_to_free_cpu(void);
+void bind_to_free_cpu(afl_state_t*);
 #endif
-void   setup_post(void);
-void   read_testcases(void);
-void   perform_dry_run(char**);
-void   pivot_inputs(void);
-u32    find_start_position(void);
-void   find_timeout(void);
+void   setup_post(afl_state_t*);
+void   read_testcases(afl_state_t*);
+void   perform_dry_run(afl_state_t*);
+void   pivot_inputs(afl_state_t*);
+u32    find_start_position(afl_state_t*);
+void   find_timeout(afl_state_t*);
 double get_runnable_processes(void);
-void   nuke_resume_dir(void);
-void   setup_dirs_fds(void);
-void   setup_cmdline_file(char**);
-void   setup_stdio_file(void);
+void   nuke_resume_dir(afl_state_t*);
+void   setup_dirs_fds(afl_state_t*);
+void   setup_cmdline_file(afl_state_t*, char**);
+void   setup_stdio_file(afl_state_t*);
 void   check_crash_handling(void);
-void   check_cpu_governor(void);
-void   get_core_count(void);
-void   fix_up_sync(void);
+void   check_cpu_governor(afl_state_t*);
+void   get_core_count(afl_state_t*);
+void   fix_up_sync(afl_state_t*);
 void   check_asan_opts(void);
-void   check_binary(u8*);
-void   fix_up_banner(u8*);
-void   check_if_tty(void);
+void   check_binary(afl_state_t*, u8*);
+void   fix_up_banner(afl_state_t*, u8*);
+void   check_if_tty(afl_state_t*);
 void   setup_signal_handlers(void);
-char** get_qemu_argv(u8*, char**, int);
-char** get_wine_argv(u8*, char**, int);
-void   save_cmdline(u32, char**);
+void   save_cmdline(afl_state_t*, u32, char**);
 
 /* CmpLog */
 
-void init_cmplog_forkserver(char** argv);
-u8   common_fuzz_cmplog_stuff(char** argv, u8* out_buf, u32 len);
+void init_cmplog_forkserver(afl_state_t *afl);
+u8   common_fuzz_cmplog_stuff(afl_state_t *afl, u8 *out_buf, u32 len);
 
 /* RedQueen */
-
-u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len,
+u8 input_to_state_stage(afl_state_t *afl, u8* orig_buf, u8* buf, u32 len,
                         u32 exec_cksum);
 
 /**** Inline routines ****/
@@ -817,20 +842,20 @@ u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len,
 /* Generate a random number (from 0 to limit - 1). This may
    have slight bias. */
 
-static inline u32 UR(u32 limit) {
+static inline u32 UR(afl_state_t *afl, u32 limit) {
 
 #ifdef HAVE_ARC4RANDOM
-  if (fixed_seed) { return random() % limit; }
+  if (afl->fixed_seed) { return random() % limit; }
 
   /* The boundary not being necessarily a power of 2,
      we need to ensure the result uniformity. */
   return arc4random_uniform(limit);
 #else
-  if (!fixed_seed && unlikely(!rand_cnt--)) {
+  if (!afl->fixed_seed && unlikely(!afl->rand_cnt--)) {
 
-    ck_read(dev_urandom_fd, &rand_seed, sizeof(rand_seed), "/dev/urandom");
-    srandom(rand_seed[0]);
-    rand_cnt = (RESEED_RNG / 2) + (rand_seed[1] % RESEED_RNG);
+    ck_read(afl->fsrv.dev_urandom_fd, &afl->rand_seed, sizeof(afl->rand_seed), "/dev/urandom");
+    srandom(afl->rand_seed[0]);
+    afl->rand_cnt = (RESEED_RNG / 2) + (afl->rand_seed[1] % RESEED_RNG);
 
   }
 
@@ -839,10 +864,10 @@ static inline u32 UR(u32 limit) {
 
 }
 
-static inline u32 get_rand_seed() {
+static inline u32 get_rand_seed(afl_state_t *afl) {
 
-  if (fixed_seed) return (u32)init_seed;
-  return rand_seed[0];
+  if (afl->fixed_seed) return (u32)afl->init_seed;
+  return afl->rand_seed[0];
 
 }
 
@@ -858,10 +883,4 @@ static u64 next_p2(u64 val) {
 
 }
 
-#ifdef _AFL_DOCUMENT_MUTATIONS
-extern u8  do_document;
-extern u32 document_counter;
-#endif
-
-#endif
-
+#endif
\ No newline at end of file
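
Editorial note: taken together, the afl-fuzz.h changes above replace the old globals with a single afl_state_t plus afl_state_init()/afl_state_deinit(), and both the helper prototypes and the custom-mutator callbacks now take that state. A minimal caller-side sketch, assuming only the declarations above; run_fuzzer and the assignments are illustrative, not part of the commit:

  #include "afl-fuzz.h"

  static afl_state_t afl;                /* all former globals live in here   */

  int run_fuzzer(int argc, char **argv) {

    (void)argc;

    afl_state_init(&afl);                /* set up defaults for this state    */
    afl.argv = argv;                     /* argv is now carried in the state  */

    /* ... forkserver + shared-map setup, queue reading, fuzzing loop ... */

    afl_state_deinit(&afl);
    return 0;

  }

The inline helpers change the same way: UR() and get_rand_seed() above now take afl_state_t* and read afl->rand_seed / afl->fixed_seed instead of file-scope variables, and the global afl_states list presumably exists so that signal handlers can still reach every live instance.
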
diff --git a/include/afl-prealloc.h b/include/afl-prealloc.h
new file mode 100644
index 00000000..712cdec6
--- /dev/null
+++ b/include/afl-prealloc.h
@@ -0,0 +1,101 @@
+/* If we know we'll reuse small elements often, we'll just preallocate a buffer, then fall back to malloc */
+// TODO: Replace free status check with bitmask+CLZ
+
+#ifndef AFL_PREALLOC_H
+#define AFL_PREALLOC_H
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "debug.h"
+
+typedef enum prealloc_status {
+  PRE_STATUS_UNUSED = 0,/* free in buf */
+  PRE_STATUS_USED,      /* used in buf */
+  PRE_STATUS_MALLOC     /* system malloc */
+} pre_status_t;
+
+
+/* Adds the entry used for prealloc bookkeeping to this struct */
+
+#define PREALLOCABLE ;pre_status_t pre_status; /* prealloc status of this instance */
+
+
+/* Allocate an element of type *el_ptr into this variable.
+    Uses (and reuses) the given prealloc_buf before falling back to libc's malloc.
+    prealloc_buf must be a pointer to an array of the element type.
+    That type must use PREALLOCABLE (i.e. have a pre_status_t pre_status member).
+    prealloc_size must be the size of that array.
+    prealloc_counter must be a variable (of any name) initialized to 0.
+    */
+
+#define PRE_ALLOC(el_ptr, prealloc_buf, prealloc_size, prealloc_counter) do {   \
+                                                                                \
+  if ((prealloc_counter) >= (prealloc_size)) {                                  \
+                                                                                \
+    el_ptr = malloc(sizeof(*el_ptr));                                           \
+    el_ptr->pre_status = PRE_STATUS_MALLOC;                                     \
+                                                                                \
+  } else {                                                                      \
+                                                                                \
+    /* Find one of our preallocated elements */                                 \
+    u32 i;                                                                      \
+    for (i = 0; i < (prealloc_size); i++) {                                     \
+                                                                                \
+      el_ptr = &((prealloc_buf)[i]);                                            \
+      if (el_ptr->pre_status == PRE_STATUS_UNUSED) {                            \
+                                                                                \
+        (prealloc_counter)++;                                                   \
+        el_ptr->pre_status = PRE_STATUS_USED;                                   \
+        break;                                                                  \
+                                                                                \
+      }                                                                         \
+    }                                                                           \
+  }                                                                             \
+                                                                                \
+  if(!el_ptr) {                                                                 \
+    FATAL("BUG in list.h -> no element found or allocated!");                   \
+  }                                                                             \
+} while(0);
+
+
+/* Take a chosen (free) element from the prealloc_buf directly */
+
+#define PRE_ALLOC_FORCE(el_ptr, prealloc_counter) do {  \
+  if ((el_ptr)->pre_status != PRE_STATUS_UNUSED) {      \
+    FATAL("PRE_ALLOC_FORCE element already allocated"); \
+  }                                                     \
+  (el_ptr)->pre_status = PRE_STATUS_USED;               \
+  (prealloc_counter)++;                                 \
+} while(0);
+
+
+/* Free a preallocated element */
+
+#define PRE_FREE(el_ptr, prealloc_counter) do {     \
+                                                    \
+  switch ((el_ptr)->pre_status) {                   \
+                                                    \
+    case PRE_STATUS_USED: {                         \
+      (el_ptr)->pre_status = PRE_STATUS_UNUSED;     \
+      (prealloc_counter)--;                         \
+      if ((prealloc_counter) < 0) {                 \
+        FATAL("Inconsistent data in PRE_FREE");     \
+      }                                             \
+      break;                                        \
+    }                                               \
+    case PRE_STATUS_MALLOC: {                       \
+      (el_ptr)->pre_status = PRE_STATUS_UNUSED;     \
+      free((el_ptr));                               \
+      break;                                        \
+    }                                               \
+    default: {                                      \
+      FATAL("Double Free Detected");                \
+      break;                                        \
+    }                                               \
+                                                    \
+  }                                                 \
+} while(0);
+
+#endif
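
Editorial note: a quick usage sketch for the new prealloc helpers, under the assumption that types.h provides u32 as elsewhere in the tree; the node_t type, the buffer size 32 and the value 42 are made up for illustration:

  #include "types.h"            /* u32 */
  #include "afl-prealloc.h"

  typedef struct node { PREALLOCABLE; u32 value; } node_t;    /* hypothetical */

  static node_t node_buf[32];   /* zero-initialized, so every slot is UNUSED  */
  static u32    node_count;     /* bookkeeping counter, starts at 0           */

  void node_example(void) {

    node_t *n = NULL;
    PRE_ALLOC(n, node_buf, 32, node_count);  /* slot from buf, else malloc()  */
    n->value = 42;
    PRE_FREE(n, node_count);                 /* returns the slot or free()s   */

  }
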
diff --git a/include/common.h b/include/common.h
index 0d7f4f0b..780e083b 100644
--- a/include/common.h
+++ b/include/common.h
@@ -28,16 +28,14 @@
 
 #include <sys/time.h>
 #include "types.h"
+#include "stdbool.h"
 
-extern u8* target_path;                 /* Path to target binary            */
-
-void detect_file_args(char** argv, u8* prog_in);
+void detect_file_args(char** argv, u8* prog_in, u8 use_stdin);
 void check_environment_vars(char** env);
 
-char** get_qemu_argv(u8* own_loc, char** argv, int argc);
-char** get_wine_argv(u8* own_loc, char** argv, int argc);
+char** get_qemu_argv(u8* own_loc, u8 **target_path_p, int argc, char **argv);
+char** get_wine_argv(u8* own_loc, u8 **target_path_p, int argc, char **argv);
 char*  get_afl_env(char* env);
-#endif
 
 /* Get unix time in milliseconds */
 
@@ -65,3 +63,4 @@ static u64 get_cur_time_us(void) {
 
 }
 
+#endif
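
Editorial note: common.h no longer exports a target_path global; each tool now owns that pointer and hands its address to get_qemu_argv()/get_wine_argv(), which set it to the resolved afl-qemu-trace binary. A minimal sketch assuming types.h and the prototypes above (build_qemu_argv is an illustrative wrapper, not part of the commit); the afl-analyze.c hunk further down makes the same call:

  #include "types.h"
  #include "common.h"

  static u8 *target_path;      /* owned by the tool now, not by afl-common.c */

  char **build_qemu_argv(char *own_loc, int argc, char **argv) {

    /* rewrites argv for QEMU and repoints target_path at afl-qemu-trace */
    return get_qemu_argv((u8 *)own_loc, &target_path, argc, argv);

  }
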
diff --git a/include/forkserver.h b/include/forkserver.h
index 0fdcba48..3587427b 100644
--- a/include/forkserver.h
+++ b/include/forkserver.h
@@ -27,8 +27,47 @@
 #ifndef __AFL_FORKSERVER_H
 #define __AFL_FORKSERVER_H
 
+#include <stdio.h>
+
+typedef struct afl_forkserver {
+
+  /* a program that includes afl-forkserver needs to define these */
+
+  u8 uses_asan;                  /* Target uses ASAN?                */
+  u8* trace_bits;                /* SHM with instrumentation bitmap  */
+  u8 use_stdin;                  /* use stdin for sending data       */
+
+  s32 fsrv_pid,                  /* PID of the fork server           */
+      child_pid,                 /* PID of the fuzzed program        */
+      out_dir_fd;                /* FD of the lock file              */
+
+  s32 out_fd,                    /* Persistent fd for out_file       */
+#ifndef HAVE_ARC4RANDOM
+      dev_urandom_fd,            /* Persistent fd for /dev/urandom   */
+#endif
+      dev_null_fd,               /* Persistent fd for /dev/null      */
+      fsrv_ctl_fd,               /* Fork server control pipe (write) */
+      fsrv_st_fd;                /* Fork server status pipe (read)   */
+
+  u32 exec_tmout;                  /* Configurable exec timeout (ms)   */
+  u64 mem_limit;                   /* Memory cap for child (MB)        */
+
+  u8 *out_file,                    /* File to fuzz, if any             */
+     *target_path;                 /* Path of the target */
+
+  FILE* plot_file;                 /* Gnuplot output file              */
+
+  u8  child_timed_out;             /* Traced process timed out?        */
+
+} afl_forkserver_t;
+
+
+
 void handle_timeout(int sig);
-void init_forkserver(char **argv);
+void afl_fsrv_init(afl_forkserver_t *fsrv);
+void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv);
+void afl_fsrv_deinit(afl_forkserver_t *fsrv);
+void afl_fsrv_killall();
 
 #ifdef __APPLE__
 #define MSG_FORK_ON_APPLE                                                    \
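
Editorial note: forkserver.h now bundles the former fork-server globals into afl_forkserver_t with an explicit init/start/deinit lifecycle. A rough sketch of the intended use, assuming types.h for the integer types; the timeout, memory limit and stdin choice are placeholder values, and attaching the shared coverage map is left to the sharedmem API further below:

  #include "types.h"
  #include "forkserver.h"

  void fsrv_example(char **target_argv) {

    afl_forkserver_t fsrv;

    afl_fsrv_init(&fsrv);                      /* presumably fills in defaults */
    fsrv.target_path = (u8 *)target_argv[0];   /* path of the target binary    */
    fsrv.exec_tmout  = 1000;                   /* exec timeout in ms           */
    fsrv.mem_limit   = 50;                     /* child memory cap in MB       */
    fsrv.use_stdin   = 1;                      /* feed the test case via stdin */

    afl_fsrv_start(&fsrv, target_argv);        /* spawn the fork server        */

    /* ... drive runs through fsrv_ctl_fd / fsrv_st_fd ... */

    afl_fsrv_deinit(&fsrv);

  }

afl_fsrv_killall() above appears intended for signal handlers that need to tear down every registered fork server at once.
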
diff --git a/include/list.h b/include/list.h
new file mode 100644
index 00000000..7184850f
--- /dev/null
+++ b/include/list.h
@@ -0,0 +1,133 @@
+#ifndef AFL_LIST
+#define AFL_LIST
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "debug.h"
+#include "afl-prealloc.h"
+
+#define LIST_PREALLOC_SIZE (64)  /* How many elements to allocate before malloc is needed */
+
+typedef struct list_element {
+  PREALLOCABLE;
+
+  struct list_element *prev;
+  struct list_element *next;
+  void *data;
+
+} element_t;
+
+typedef struct list {
+
+  element_t element_prealloc_buf[LIST_PREALLOC_SIZE];
+  u32 element_prealloc_count;
+
+} list_t;
+
+static inline element_t *get_head(list_t *list) {
+
+  return &list->element_prealloc_buf[0];
+
+}
+
+static void list_free_el(list_t *list, element_t *el) {
+
+  PRE_FREE(el, list->element_prealloc_count);
+
+}
+
+static void list_append(list_t *list, void *el) {
+
+  element_t *head = get_head(list);
+  if (!head->next) {
+
+    /* initialize */
+
+    memset(list, 0, sizeof(list_t));
+    PRE_ALLOC_FORCE(head, list->element_prealloc_count);
+    head->next = head->prev = head;
+
+  }
+
+  element_t *el_box = NULL;
+  PRE_ALLOC(el_box, list->element_prealloc_buf, LIST_PREALLOC_SIZE, list->element_prealloc_count);
+  if (!el_box) FATAL("failed to allocate list element");
+  el_box->data = el;
+  el_box->next = head;
+  el_box->prev = head->prev;
+  head->prev->next = el_box;
+  head->prev = el_box;
+
+}
+
+/* Simple foreach.
+   A pointer to the current element is available as `el`,
+   cast to a pointer to the given `type`.
+   A return from this block returns from the calling function.
+*/
+
+#define LIST_FOREACH(list, type, block) do { \
+  list_t *li = (list);                       \
+  element_t *head = get_head((li));          \
+  element_t *el_box = (head)->next;          \
+  if (!el_box)                               \
+    FATAL("foreach over uninitialized list");\
+  while(el_box != head) {                    \
+    type *el = (type *)((el_box)->data);     \
+    /* get next so el_box can be unlinked */ \
+    element_t *next = el_box->next;          \
+    {block};                                 \
+    el_box = next;                           \
+  }                                          \
+} while(0);
+
+/* In foreach: remove the current el from the list */
+
+#define LIST_REMOVE_CURRENT_EL_IN_FOREACH() do { \
+    el_box->prev->next = next;                   \
+    el_box->next->prev = el_box->prev;           \
+    list_free_el(li, el_box);                    \
+} while(0);
+
+/* Same as foreach, but will clear list in the process */
+
+#define LIST_FOREACH_CLEAR(list, type, block) do { \
+  LIST_FOREACH((list), type, {                     \
+    {block};                                       \
+    LIST_REMOVE_CURRENT_EL_IN_FOREACH();           \
+  });                                              \
+} while(0);
+
+/* remove an item from the list */
+
+static void list_remove(list_t *list, void *remove_me) {
+
+  LIST_FOREACH(list, void, {
+    if (el == remove_me) {
+      el_box->prev->next = el_box->next;
+      el_box->next->prev = el_box->prev;
+      el_box->data = NULL;
+      list_free_el(list, el_box);
+      return;
+    }
+  });
+
+  FATAL ("List item to be removed not in list");
+
+}
+
+/* Returns true if el is in list */
+
+static bool list_contains(list_t *list, void *contains_me) {
+
+  LIST_FOREACH(list, void, {
+    if (el == contains_me) return true;
+  });
+
+  return false;
+
+}
+
+#endif
\ No newline at end of file
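
Editorial note: list.h builds a small doubly linked list whose element boxes come from the prealloc buffer; element 0 of the buffer serves as the head sentinel. A usage sketch, assuming types.h for u32; worker_t and the id values are invented for the example:

  #include "types.h"
  #include "list.h"

  typedef struct worker { u32 id; } worker_t;      /* hypothetical payload     */

  static list_t workers = {0};                     /* must start zeroed        */

  void list_example(void) {

    worker_t a = {1}, b = {2};

    list_append(&workers, &a);                     /* stores the pointer only  */
    list_append(&workers, &b);

    LIST_FOREACH(&workers, worker_t, {             /* `el` is a worker_t* here */
      printf("worker %u\n", el->id);
    });

    if (list_contains(&workers, &a)) list_remove(&workers, &a);

  }
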
diff --git a/include/sharedmem.h b/include/sharedmem.h
index 8c1c2b20..b90f7d87 100644
--- a/include/sharedmem.h
+++ b/include/sharedmem.h
@@ -27,11 +27,32 @@
 #ifndef __AFL_SHAREDMEM_H
 #define __AFL_SHAREDMEM_H
 
-void setup_shm(unsigned char dumb_mode);
-void remove_shm(void);
+typedef struct sharedmem {
 
-extern int             cmplog_mode;
-extern struct cmp_map* cmp_map;
+  //extern unsigned char *trace_bits;
+
+  #ifdef USEMMAP
+  /* ================ Proteas ================ */
+  int            g_shm_fd;
+  char           g_shm_file_path[L_tmpnam];
+  /* ========================================= */
+  #else
+  s32 shm_id;                     /* ID of the SHM region              */
+  s32 cmplog_shm_id;
+  #endif
+
+  u8 *map;                   /* shared memory region */
+
+  size_t         size_alloc; /* actual allocated size */
+  size_t         size_used;  /* in use by shmem app */
+
+  int             cmplog_mode;
+  struct cmp_map *cmp_map;
+
+} sharedmem_t;
+
+u8 *afl_shm_init(sharedmem_t*, size_t, unsigned char dumb_mode);
+void afl_shm_deinit(sharedmem_t*);
 
 #endif
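
Editorial note: the same treatment for the SHM code: the map and its ids move into sharedmem_t, and setup_shm()/remove_shm() become afl_shm_init()/afl_shm_deinit(). A minimal sketch (MAP_SIZE is taken from config.h, as in the afl-analyze.c hunk right below, which uses the identical pattern):

  #include "config.h"                    /* MAP_SIZE                          */
  #include "types.h"
  #include "sharedmem.h"

  void shm_example(void) {

    sharedmem_t shm = {0};
    u8 *map = afl_shm_init(&shm, MAP_SIZE, 0);   /* size, dumb_mode = 0        */

    /* ... point the forkserver's trace_bits at `map`, run targets ... */

    (void)map;
    afl_shm_deinit(&shm);                /* detach and remove the region       */

  }
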
 
diff --git a/src/afl-analyze.c b/src/afl-analyze.c
index 94c055a6..9e64a7a5 100644
--- a/src/afl-analyze.c
+++ b/src/afl-analyze.c
@@ -84,6 +84,8 @@ static volatile u8 stop_soon,          /* Ctrl-C pressed?                   */
 
 static u8 qemu_mode;
 
+static u8 *target_path;
+
 /* Constants used for describing byte behavior. */
 
 #define RESP_NONE 0x00                 /* Changing byte is a no-op.         */
@@ -998,21 +1000,23 @@ int main(int argc, char** argv, char** envp) {
   use_hex_offsets = !!get_afl_env("AFL_ANALYZE_HEX");
 
   check_environment_vars(envp);
-  setup_shm(0);
+
+  sharedmem_t shm = {0};
+  trace_bits = afl_shm_init(&shm, MAP_SIZE, 0);
   atexit(at_exit_handler);
   setup_signal_handlers();
 
   set_up_environment();
 
   find_binary(argv[optind]);
-  detect_file_args(argv + optind, prog_in);
+  detect_file_args(argv + optind, prog_in, use_stdin);
 
   if (qemu_mode) {
 
     if (use_wine)
-      use_argv = get_wine_argv(argv[0], argv + optind, argc - optind);
+      use_argv = get_wine_argv(argv[0], &target_path, argc - optind, argv + optind);
     else
-      use_argv = get_qemu_argv(argv[0], argv + optind, argc - optind);
+      use_argv = get_qemu_argv(argv[0], &target_path, argc - optind, argv + optind);
 
   } else
 
@@ -1037,6 +1041,8 @@ int main(int argc, char** argv, char** envp) {
 
   OKF("We're done here. Have a nice day!\n");
 
+  afl_shm_deinit(&shm);
+
   exit(0);
 
 }
diff --git a/src/afl-common.c b/src/afl-common.c
index 9d6e52b1..fc495b60 100644
--- a/src/afl-common.c
+++ b/src/afl-common.c
@@ -36,11 +36,9 @@
 #include <unistd.h>
 #endif
 
-u8*       target_path;                  /* Path to target binary            */
-extern u8 use_stdin;
 extern u8 be_quiet;
 
-void detect_file_args(char** argv, u8* prog_in) {
+void detect_file_args(char **argv, u8 *prog_in, u8 use_stdin) {
 
   u32 i = 0;
 #ifdef __GLIBC__
@@ -64,6 +62,8 @@ void detect_file_args(char** argv, u8* prog_in) {
 
   if (!cwd) PFATAL("getcwd() failed");
 
+  // TODO: free allocs below... somewhere.
+
   while (argv[i]) {
 
     u8* aa_loc = strstr(argv[i], "@@");
@@ -87,6 +87,8 @@ void detect_file_args(char** argv, u8* prog_in) {
 
         /* Construct a replacement argv value. */
 
+        // TODO: n_arg is never freed
+
         *aa_loc = 0;
         n_arg = alloc_printf("%s%s%s", argv[i], aa_subst, aa_loc + 2);
         argv[i] = n_arg;
@@ -108,14 +110,14 @@ void detect_file_args(char** argv, u8* prog_in) {
 
 /* Rewrite argv for QEMU. */
 
-char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
+char** get_qemu_argv(u8* own_loc, u8 **target_path_p, int argc, char **argv) {
 
   char** new_argv = ck_alloc(sizeof(char*) * (argc + 4));
   u8 *   tmp, *cp = NULL, *rsl, *own_copy;
 
   memcpy(new_argv + 3, argv + 1, (int)(sizeof(char*)) * argc);
 
-  new_argv[2] = target_path;
+  new_argv[2] = *target_path_p;
   new_argv[1] = "--";
 
   /* Now we need to actually find the QEMU binary to put in argv[0]. */
@@ -128,7 +130,7 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
     if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp);
 
-    target_path = new_argv[0] = cp;
+    *target_path_p = new_argv[0] = cp;
     return new_argv;
 
   }
@@ -145,7 +147,7 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
     if (!access(cp, X_OK)) {
 
-      target_path = new_argv[0] = cp;
+      *target_path_p = new_argv[0] = cp;
       return new_argv;
 
     }
@@ -156,8 +158,9 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
   if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) {
 
-    if (cp != NULL) ck_free(cp);
-    target_path = new_argv[0] = ck_strdup(BIN_PATH "/afl-qemu-trace");
+    if (cp) ck_free(cp);
+    *target_path_p = new_argv[0] = ck_strdup(BIN_PATH "/afl-qemu-trace");
+
     return new_argv;
 
   }
@@ -165,7 +168,7 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
   SAYF("\n" cLRD "[-] " cRST
        "Oops, unable to find the 'afl-qemu-trace' binary. The binary must be "
        "built\n"
-       "    separately by following the instructions in qemu_mode/README.md. "
+       "    separately by following the instructions in qemu_mode/README.md. "
        "If you\n"
        "    already have the binary installed, you may need to specify "
        "AFL_PATH in the\n"
@@ -184,14 +187,14 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
 
 /* Rewrite argv for Wine+QEMU. */
 
-char** get_wine_argv(u8* own_loc, char** argv, int argc) {
+char** get_wine_argv(u8* own_loc, u8 **target_path_p, int argc, char **argv) {
 
   char** new_argv = ck_alloc(sizeof(char*) * (argc + 3));
   u8 *   tmp, *cp = NULL, *rsl, *own_copy;
 
   memcpy(new_argv + 2, argv + 1, (int)(sizeof(char*)) * argc);
 
-  new_argv[1] = target_path;
+  new_argv[1] = *target_path_p;
 
   /* Now we need to actually find the QEMU binary to put in argv[0]. */
 
@@ -209,7 +212,7 @@ char** get_wine_argv(u8* own_loc, char** argv, int argc) {
 
     if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp);
 
-    target_path = new_argv[0] = cp;
+    *target_path_p = new_argv[0] = cp;
     return new_argv;
 
   }
@@ -232,7 +235,7 @@ char** get_wine_argv(u8* own_loc, char** argv, int argc) {
 
       if (!access(cp, X_OK)) {
 
-        target_path = new_argv[0] = cp;
+        *target_path_p = new_argv[0] = cp;
         return new_argv;
 
       }
@@ -251,7 +254,7 @@ char** get_wine_argv(u8* own_loc, char** argv, int argc) {
 
     if (!access(ncp, X_OK)) {
 
-      target_path = new_argv[0] = ck_strdup(ncp);
+      *target_path_p = new_argv[0] = ck_strdup(ncp);
       return new_argv;
 
     }
@@ -261,7 +264,7 @@ char** get_wine_argv(u8* own_loc, char** argv, int argc) {
   SAYF("\n" cLRD "[-] " cRST
        "Oops, unable to find the '%s' binary. The binary must be "
        "built\n"
-       "    separately by following the instructions in qemu_mode/README.md. "
+       "    separately by following the instructions in qemu_mode/README.md. "
        "If you\n"
        "    already have the binary installed, you may need to specify "
        "AFL_PATH in the\n"
@@ -326,4 +329,3 @@ char* get_afl_env(char* env) {
   return val;
 
 }
-
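With target_path no longer a global, these helpers fill it in through an out-parameter; the call pattern, as used in afl-analyze.c above, becomes:

    u8    *target_path = NULL;
    char **use_argv;

    if (use_wine)
      use_argv = get_wine_argv(argv[0], &target_path, argc - optind, argv + optind);
    else
      use_argv = get_qemu_argv(argv[0], &target_path, argc - optind, argv + optind);

    /* target_path now points at the afl-qemu-trace binary and
       use_argv is the rewritten argument vector for execv() */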
diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c
index bec31c65..7edcde5e 100644
--- a/src/afl-forkserver.c
+++ b/src/afl-forkserver.c
@@ -28,6 +28,7 @@
 #include "types.h"
 #include "debug.h"
 #include "common.h"
+#include "list.h"
 #include "forkserver.h"
 
 #include <stdio.h>
@@ -41,27 +42,10 @@
 #include <sys/wait.h>
 #include <sys/resource.h>
 
-/* a program that includes afl-forkserver needs to define these */
-extern u8  uses_asan;
-extern u8 *trace_bits;
-extern u8  use_stdin;
-
-extern s32 forksrv_pid, child_pid, fsrv_ctl_fd, fsrv_st_fd;
-extern s32 out_fd, out_dir_fd, dev_null_fd;     /* initialize these with -1 */
-#ifndef HAVE_ARC4RANDOM
-extern s32 dev_urandom_fd;
-#endif
-extern u32   exec_tmout;
-extern u64   mem_limit;
-extern u8 *  out_file, *target_path, *doc_path;
-extern FILE *plot_file;
-
-/* we need this internally but can be defined and read extern in the main source
- */
-u8 child_timed_out;
-
 /* Describe integer as memory size. */
 
+extern u8 *doc_path;
+
 u8 *forkserver_DMS(u64 val) {
 
   static u8 tmp[12][16];
@@ -122,25 +106,40 @@ u8 *forkserver_DMS(u64 val) {
 
 }
 
+list_t fsrv_list = {0};
+
 /* the timeout handler */
 
 void handle_timeout(int sig) {
 
-  if (child_pid > 0) {
+  LIST_FOREACH(&fsrv_list, afl_forkserver_t, {
 
-    child_timed_out = 1;
-    kill(child_pid, SIGKILL);
+    //TODO: We need a proper timer to handle multiple timeouts
+    if (el->child_pid > 0) {
 
-  } else if (child_pid == -1 && forksrv_pid > 0) {
+      el->child_timed_out = 1;
+      kill(el->child_pid, SIGKILL);
 
-    child_timed_out = 1;
-    kill(forksrv_pid, SIGKILL);
+    } else if (el->child_pid == -1 && el->fsrv_pid > 0) {
 
-  }
+      el->child_timed_out = 1;
+      kill(el->fsrv_pid, SIGKILL);
+
+    }
+
+  });
 
 }
 
-/* Spin up fork server (instrumented mode only). The idea is explained here:
+/* Initializes the struct */
+
+void afl_fsrv_init(afl_forkserver_t *fsrv) {
+
+  list_append(&fsrv_list, fsrv);
+
+}
+
+/* Spins up fork server (instrumented mode only). The idea is explained here:
 
    http://lcamtuf.blogspot.com/2014/10/fuzzing-binaries-without-execve.html
 
@@ -148,7 +147,7 @@ void handle_timeout(int sig) {
    cloning a stopped child. So, we just execute once, and then send commands
    through a pipe. The other part of this logic is in afl-as.h / llvm_mode */
 
-void init_forkserver(char **argv) {
+void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv) {
 
   static struct itimerval it;
   int                     st_pipe[2], ctl_pipe[2];
@@ -159,12 +158,12 @@ void init_forkserver(char **argv) {
 
   if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed");
 
-  child_timed_out = 0;
-  forksrv_pid = fork();
+  fsrv->child_timed_out = 0;
+  fsrv->fsrv_pid = fork();
 
-  if (forksrv_pid < 0) PFATAL("fork() failed");
+  if (fsrv->fsrv_pid < 0) PFATAL("fork() failed");
 
-  if (!forksrv_pid) {
+  if (!fsrv->fsrv_pid) {
 
     /* CHILD PROCESS */
 
@@ -180,9 +179,9 @@ void init_forkserver(char **argv) {
 
     }
 
-    if (mem_limit) {
+    if (fsrv->mem_limit) {
 
-      r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
+      r.rlim_max = r.rlim_cur = ((rlim_t)fsrv->mem_limit) << 20;
 
 #ifdef RLIMIT_AS
       setrlimit(RLIMIT_AS, &r);                            /* Ignore errors */
@@ -209,19 +208,19 @@ void init_forkserver(char **argv) {
 
     if (!get_afl_env("AFL_DEBUG_CHILD_OUTPUT")) {
 
-      dup2(dev_null_fd, 1);
-      dup2(dev_null_fd, 2);
+      dup2(fsrv->dev_null_fd, 1);
+      dup2(fsrv->dev_null_fd, 2);
 
     }
 
-    if (!use_stdin) {
+    if (!fsrv->use_stdin) {
 
-      dup2(dev_null_fd, 0);
+      dup2(fsrv->dev_null_fd, 0);
 
     } else {
 
-      dup2(out_fd, 0);
-      close(out_fd);
+      dup2(fsrv->out_fd, 0);
+      close(fsrv->out_fd);
 
     }
 
@@ -235,12 +234,12 @@ void init_forkserver(char **argv) {
     close(st_pipe[0]);
     close(st_pipe[1]);
 
-    close(out_dir_fd);
-    close(dev_null_fd);
+    close(fsrv->out_dir_fd);
+    close(fsrv->dev_null_fd);
 #ifndef HAVE_ARC4RANDOM
-    close(dev_urandom_fd);
+    close(fsrv->dev_urandom_fd);
 #endif
-    close(plot_file == NULL ? -1 : fileno(plot_file));
+    close(fsrv->plot_file == NULL ? -1 : fileno(fsrv->plot_file));
 
     /* This should improve performance a bit, since it stops the linker from
        doing extra work post-fork(). */
@@ -269,12 +268,12 @@ void init_forkserver(char **argv) {
            "msan_track_origins=0",
            0);
 
-    execv(target_path, argv);
+    execv(fsrv->target_path, argv);
 
     /* Use a distinctive bitmap signature to tell the parent about execv()
        falling through. */
 
-    *(u32 *)trace_bits = EXEC_FAIL_SIG;
+    *(u32 *)fsrv->trace_bits = EXEC_FAIL_SIG;
     exit(0);
 
   }
@@ -286,21 +285,21 @@ void init_forkserver(char **argv) {
   close(ctl_pipe[0]);
   close(st_pipe[1]);
 
-  fsrv_ctl_fd = ctl_pipe[1];
-  fsrv_st_fd = st_pipe[0];
+  fsrv->fsrv_ctl_fd = ctl_pipe[1];
+  fsrv->fsrv_st_fd = st_pipe[0];
 
   /* Wait for the fork server to come up, but don't wait too long. */
 
-  if (exec_tmout) {
+  if (fsrv->exec_tmout) {
 
-    it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000);
-    it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;
+    it.it_value.tv_sec = ((fsrv->exec_tmout * FORK_WAIT_MULT) / 1000);
+    it.it_value.tv_usec = ((fsrv->exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;
 
   }
 
   setitimer(ITIMER_REAL, &it, NULL);
 
-  rlen = read(fsrv_st_fd, &status, 4);
+  rlen = read(fsrv->fsrv_st_fd, &status, 4);
 
   it.it_value.tv_sec = 0;
   it.it_value.tv_usec = 0;
@@ -317,14 +316,14 @@ void init_forkserver(char **argv) {
 
   }
 
-  if (child_timed_out)
+  if (fsrv->child_timed_out)
     FATAL("Timeout while initializing fork server (adjusting -t may help)");
 
-  if (waitpid(forksrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
+  if (waitpid(fsrv->fsrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
 
   if (WIFSIGNALED(status)) {
 
-    if (mem_limit && mem_limit < 500 && uses_asan) {
+    if (fsrv->mem_limit && fsrv->mem_limit < 500 && fsrv->uses_asan) {
 
       SAYF("\n" cLRD "[-] " cRST
            "Whoops, the target binary crashed suddenly, "
@@ -336,7 +335,7 @@ void init_forkserver(char **argv) {
            "    %s/notes_for_asan.md for help.\n",
            doc_path);
 
-    } else if (!mem_limit) {
+    } else if (!fsrv->mem_limit) {
 
       SAYF("\n" cLRD "[-] " cRST
            "Whoops, the target binary crashed suddenly, "
@@ -389,7 +388,7 @@ void init_forkserver(char **argv) {
            "options\n"
            "      fail, poke <afl-users@googlegroups.com> for troubleshooting "
            "tips.\n",
-           forkserver_DMS(mem_limit << 20), mem_limit - 1);
+           forkserver_DMS(fsrv->mem_limit << 20), fsrv->mem_limit - 1);
 
     }
 
@@ -397,10 +396,10 @@ void init_forkserver(char **argv) {
 
   }
 
-  if (*(u32 *)trace_bits == EXEC_FAIL_SIG)
+  if (*(u32 *)fsrv->trace_bits == EXEC_FAIL_SIG)
     FATAL("Unable to execute target application ('%s')", argv[0]);
 
-  if (mem_limit && mem_limit < 500 && uses_asan) {
+  if (fsrv->mem_limit && fsrv->mem_limit < 500 && fsrv->uses_asan) {
 
     SAYF("\n" cLRD "[-] " cRST
          "Hmm, looks like the target binary terminated "
@@ -412,7 +411,7 @@ void init_forkserver(char **argv) {
          "    read %s/notes_for_asan.md for help.\n",
          doc_path);
 
-  } else if (!mem_limit) {
+  } else if (!fsrv->mem_limit) {
 
     SAYF("\n" cLRD "[-] " cRST
          "Hmm, looks like the target binary terminated "
@@ -455,7 +454,7 @@ void init_forkserver(char **argv) {
               "never\n"
               "      reached before the program terminates.\n\n"
             : "",
-        forkserver_DMS(mem_limit << 20), mem_limit - 1);
+        forkserver_DMS(fsrv->mem_limit << 20), fsrv->mem_limit - 1);
 
   }
 
@@ -463,3 +462,15 @@ void init_forkserver(char **argv) {
 
 }
 
+void afl_fsrv_killall(void) {
+
+  LIST_FOREACH(&fsrv_list, afl_forkserver_t, {
+    if (el->child_pid > 0) kill(el->child_pid, SIGKILL);
+  });
+}
+
+void afl_fsrv_deinit(afl_forkserver_t *fsrv) {
+
+  list_remove(&fsrv_list, fsrv);
+
+}
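A hedged sketch of the new forkserver lifecycle, using only functions and struct fields that appear in this diff; which fields a tool must set (and defaults such as MEM_LIMIT / EXEC_TIMEOUT from config.h) is up to the embedding tool, so the field list below is illustrative rather than complete:

    #include "forkserver.h"

    afl_forkserver_t fsrv = {0};

    afl_fsrv_init(&fsrv);               /* register in fsrv_list so handle_timeout()
                                           and afl_fsrv_killall() can reach it */

    fsrv.target_path = target_path;     /* binary to execute */
    fsrv.use_stdin   = 1;               /* feed the testcase via stdin (dup'd from out_fd) */
    fsrv.mem_limit   = MEM_LIMIT;       /* MB, applied via setrlimit() in the child */
    fsrv.exec_tmout  = EXEC_TIMEOUT;    /* ms, also scales the startup timeout */
    fsrv.out_fd      = out_fd;
    fsrv.dev_null_fd = open("/dev/null", O_RDWR);
    fsrv.trace_bits  = trace_bits;      /* bitmap obtained from afl_shm_init() */

    afl_fsrv_start(&fsrv, use_argv);    /* fork, execv, wait for the handshake */

    /* ... fuzz, talking over fsrv.fsrv_ctl_fd / fsrv.fsrv_st_fd ... */

    afl_fsrv_killall();                 /* SIGKILL any running children (e.g. from a signal handler) */
    afl_fsrv_deinit(&fsrv);             /* remove fsrv from fsrv_list again */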
diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c
index 3ffda284..4fba7810 100644
--- a/src/afl-fuzz-bitmap.c
+++ b/src/afl-fuzz-bitmap.c
@@ -29,20 +29,20 @@
    -B option, to focus a separate fuzzing session on a particular
    interesting input without rediscovering all the others. */
 
-void write_bitmap(void) {
+void write_bitmap(afl_state_t *afl) {
 
   u8* fname;
   s32 fd;
 
-  if (!bitmap_changed) return;
-  bitmap_changed = 0;
+  if (!afl->bitmap_changed) return;
+  afl->bitmap_changed = 0;
 
-  fname = alloc_printf("%s/fuzz_bitmap", out_dir);
+  fname = alloc_printf("%s/fuzz_bitmap", afl->out_dir);
   fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 
   if (fd < 0) PFATAL("Unable to open '%s'", fname);
 
-  ck_write(fd, virgin_bits, MAP_SIZE, fname);
+  ck_write(fd, afl->virgin_bits, MAP_SIZE, fname);
 
   close(fd);
   ck_free(fname);
@@ -51,13 +51,13 @@ void write_bitmap(void) {
 
 /* Read bitmap from file. This is for the -B option again. */
 
-void read_bitmap(u8* fname) {
+void read_bitmap(afl_state_t *afl, u8* fname) {
 
   s32 fd = open(fname, O_RDONLY);
 
   if (fd < 0) PFATAL("Unable to open '%s'", fname);
 
-  ck_read(fd, virgin_bits, MAP_SIZE, fname);
+  ck_read(fd, afl->virgin_bits, MAP_SIZE, fname);
 
   close(fd);
 
@@ -71,18 +71,18 @@ void read_bitmap(u8* fname) {
    This function is called after every exec() on a fairly large buffer, so
    it needs to be fast. We do this in 32-bit and 64-bit flavors. */
 
-u8 has_new_bits(u8* virgin_map) {
+u8 has_new_bits(afl_state_t *afl, u8* virgin_map) {
 
 #ifdef WORD_SIZE_64
 
-  u64* current = (u64*)trace_bits;
+  u64* current = (u64*)afl->fsrv.trace_bits;
   u64* virgin = (u64*)virgin_map;
 
   u32 i = (MAP_SIZE >> 3);
 
 #else
 
-  u32* current = (u32*)trace_bits;
+  u32* current = (u32*)afl->fsrv.trace_bits;
   u32* virgin = (u32*)virgin_map;
 
   u32 i = (MAP_SIZE >> 2);
@@ -138,7 +138,7 @@ u8 has_new_bits(u8* virgin_map) {
 
   }
 
-  if (ret && virgin_map == virgin_bits) bitmap_changed = 1;
+  if (ret && virgin_map == afl->virgin_bits) afl->bitmap_changed = 1;
 
   return ret;
 
@@ -415,35 +415,35 @@ void minimize_bits(u8* dst, u8* src) {
 /* Construct a file name for a new test case, capturing the operation
    that led to its discovery. Uses a static buffer. */
 
-u8* describe_op(u8 hnb) {
+u8* describe_op(afl_state_t *afl, u8 hnb) {
 
-  static u8 ret[256];
+  u8 *ret = afl->describe_op_buf_256;
 
-  if (syncing_party) {
+  if (afl->syncing_party) {
 
-    sprintf(ret, "sync:%s,src:%06u", syncing_party, syncing_case);
+    sprintf(ret, "sync:%s,src:%06u", afl->syncing_party, afl->syncing_case);
 
   } else {
 
-    sprintf(ret, "src:%06u", current_entry);
+    sprintf(ret, "src:%06u", afl->current_entry);
 
-    sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - start_time);
+    sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - afl->start_time);
 
-    if (splicing_with >= 0) sprintf(ret + strlen(ret), "+%06d", splicing_with);
+    if (afl->splicing_with >= 0) sprintf(ret + strlen(ret), "+%06d", afl->splicing_with);
 
-    sprintf(ret + strlen(ret), ",op:%s", stage_short);
+    sprintf(ret + strlen(ret), ",op:%s", afl->stage_short);
 
-    if (stage_cur_byte >= 0) {
+    if (afl->stage_cur_byte >= 0) {
 
-      sprintf(ret + strlen(ret), ",pos:%d", stage_cur_byte);
+      sprintf(ret + strlen(ret), ",pos:%d", afl->stage_cur_byte);
 
-      if (stage_val_type != STAGE_VAL_NONE)
+      if (afl->stage_val_type != STAGE_VAL_NONE)
         sprintf(ret + strlen(ret), ",val:%s%+d",
-                (stage_val_type == STAGE_VAL_BE) ? "be:" : "", stage_cur_val);
+                (afl->stage_val_type == STAGE_VAL_BE) ? "be:" : "", afl->stage_cur_val);
 
     } else
 
-      sprintf(ret + strlen(ret), ",rep:%d", stage_cur_val);
+      sprintf(ret + strlen(ret), ",rep:%d", afl->stage_cur_val);
 
   }
 
@@ -457,9 +457,9 @@ u8* describe_op(u8 hnb) {
 
 /* Write a message accompanying the crash directory :-) */
 
-static void write_crash_readme(void) {
+static void write_crash_readme(afl_state_t *afl) {
 
-  u8*   fn = alloc_printf("%s/crashes/README.txt", out_dir);
+  u8*   fn = alloc_printf("%s/crashes/README.txt", afl->out_dir);
   s32   fd;
   FILE* f;
 
@@ -499,7 +499,7 @@ static void write_crash_readme(void) {
 
       "  https://github.com/vanhauser-thc/AFLplusplus\n\n",
 
-      orig_cmdline, DMS(mem_limit << 20));                 /* ignore errors */
+      afl->orig_cmdline, DMS(afl->fsrv.mem_limit << 20));                 /* ignore errors */
 
   fclose(f);
 
@@ -509,7 +509,7 @@ static void write_crash_readme(void) {
    save or queue the input test case for further analysis if so. Returns 1 if
    entry is saved, 0 otherwise. */
 
-u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
+u8 save_if_interesting(afl_state_t *afl, void* mem, u32 len, u8 fault) {
 
   if (len == 0) return 0;
 
@@ -519,9 +519,9 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
   u8  keeping = 0, res;
 
   /* Update path frequency. */
-  u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+  u32 cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
 
-  struct queue_entry* q = queue;
+  struct queue_entry* q = afl->queue;
   while (q) {
 
     if (q->exec_cksum == cksum) {
@@ -535,44 +535,44 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
 
   }
 
-  if (fault == crash_mode) {
+  if (fault == afl->crash_mode) {
 
     /* Keep only if there are new bits in the map, add to queue for
        future fuzzing, etc. */
 
-    if (!(hnb = has_new_bits(virgin_bits))) {
+    if (!(hnb = has_new_bits(afl, afl->virgin_bits))) {
 
-      if (crash_mode) ++total_crashes;
+      if (afl->crash_mode) ++afl->total_crashes;
       return 0;
 
     }
 
 #ifndef SIMPLE_FILES
 
-    fn = alloc_printf("%s/queue/id:%06u,%s", out_dir, queued_paths,
-                      describe_op(hnb));
+    fn = alloc_printf("%s/queue/id:%06u,%s", afl->out_dir, afl->queued_paths,
+                      describe_op(afl, hnb));
 
 #else
 
-    fn = alloc_printf("%s/queue/id_%06u", out_dir, queued_paths);
+    fn = alloc_printf("%s/queue/id_%06u", afl->out_dir, afl->queued_paths);
 
 #endif                                                    /* ^!SIMPLE_FILES */
 
-    add_to_queue(fn, len, 0);
+    add_to_queue(afl, fn, len, 0);
 
     if (hnb == 2) {
 
-      queue_top->has_new_cov = 1;
-      ++queued_with_cov;
+      afl->queue_top->has_new_cov = 1;
+      ++afl->queued_with_cov;
 
     }
 
-    queue_top->exec_cksum = cksum;
+    afl->queue_top->exec_cksum = cksum;
 
     /* Try to calibrate inline; this also calls update_bitmap_score() when
        successful. */
 
-    res = calibrate_case(argv, queue_top, mem, queue_cycle - 1, 0);
+    res = calibrate_case(afl, afl->queue_top, mem, afl->queue_cycle - 1, 0);
 
     if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
@@ -594,58 +594,58 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
          hang-specific bitmap as a signal of uniqueness. In "dumb" mode, we
          just keep everything. */
 
-      ++total_tmouts;
+      ++afl->total_tmouts;
 
-      if (unique_hangs >= KEEP_UNIQUE_HANG) return keeping;
+      if (afl->unique_hangs >= KEEP_UNIQUE_HANG) return keeping;
 
-      if (!dumb_mode) {
+      if (!afl->dumb_mode) {
 
 #ifdef WORD_SIZE_64
-        simplify_trace((u64*)trace_bits);
+        simplify_trace((u64*)afl->fsrv.trace_bits);
 #else
-        simplify_trace((u32*)trace_bits);
+        simplify_trace((u32*)afl->fsrv.trace_bits);
 #endif                                                     /* ^WORD_SIZE_64 */
 
-        if (!has_new_bits(virgin_tmout)) return keeping;
+        if (!has_new_bits(afl, afl->virgin_tmout)) return keeping;
 
       }
 
-      ++unique_tmouts;
+      ++afl->unique_tmouts;
 
       /* Before saving, we make sure that it's a genuine hang by re-running
          the target with a more generous timeout (unless the default timeout
          is already generous). */
 
-      if (exec_tmout < hang_tmout) {
+      if (afl->fsrv.exec_tmout < afl->hang_tmout) {
 
         u8 new_fault;
-        write_to_testcase(mem, len);
-        new_fault = run_target(argv, hang_tmout);
+        write_to_testcase(afl, mem, len);
+        new_fault = run_target(afl, afl->hang_tmout);
 
         /* A corner case that one user reported bumping into: increasing the
            timeout actually uncovers a crash. Make sure we don't discard it if
            so. */
 
-        if (!stop_soon && new_fault == FAULT_CRASH) goto keep_as_crash;
+        if (!afl->stop_soon && new_fault == FAULT_CRASH) goto keep_as_crash;
 
-        if (stop_soon || new_fault != FAULT_TMOUT) return keeping;
+        if (afl->stop_soon || new_fault != FAULT_TMOUT) return keeping;
 
       }
 
 #ifndef SIMPLE_FILES
 
-      fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir, unique_hangs,
-                        describe_op(0));
+      fn = alloc_printf("%s/hangs/id:%06llu,%s", afl->out_dir, afl->unique_hangs,
+                        describe_op(afl, 0));
 
 #else
 
-      fn = alloc_printf("%s/hangs/id_%06llu", out_dir, unique_hangs);
+      fn = alloc_printf("%s/hangs/id_%06llu", afl->out_dir, afl->unique_hangs);
 
 #endif                                                    /* ^!SIMPLE_FILES */
 
-      ++unique_hangs;
+      ++afl->unique_hangs;
 
-      last_hang_time = get_cur_time();
+      afl->last_hang_time = get_cur_time();
 
       break;
 
@@ -657,41 +657,41 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
          except for slightly different limits and no need to re-run test
          cases. */
 
-      ++total_crashes;
+      ++afl->total_crashes;
 
-      if (unique_crashes >= KEEP_UNIQUE_CRASH) return keeping;
+      if (afl->unique_crashes >= KEEP_UNIQUE_CRASH) return keeping;
 
-      if (!dumb_mode) {
+      if (!afl->dumb_mode) {
 
 #ifdef WORD_SIZE_64
-        simplify_trace((u64*)trace_bits);
+        simplify_trace((u64*)afl->fsrv.trace_bits);
 #else
-        simplify_trace((u32*)trace_bits);
+        simplify_trace((u32*)afl->fsrv.trace_bits);
 #endif                                                     /* ^WORD_SIZE_64 */
 
-        if (!has_new_bits(virgin_crash)) return keeping;
+        if (!has_new_bits(afl, afl->virgin_crash)) return keeping;
 
       }
 
-      if (!unique_crashes) write_crash_readme();
+      if (!afl->unique_crashes) write_crash_readme(afl);
 
 #ifndef SIMPLE_FILES
 
-      fn = alloc_printf("%s/crashes/id:%06llu,sig:%02u,%s", out_dir,
-                        unique_crashes, kill_signal, describe_op(0));
+      fn = alloc_printf("%s/crashes/id:%06llu,sig:%02u,%s", afl->out_dir,
+                        afl->unique_crashes, afl->kill_signal, describe_op(afl, 0));
 
 #else
 
-      fn = alloc_printf("%s/crashes/id_%06llu_%02u", out_dir, unique_crashes,
-                        kill_signal);
+      fn = alloc_printf("%s/crashes/id_%06llu_%02u", afl->out_dir, afl->unique_crashes,
+                        afl->kill_signal);
 
 #endif                                                    /* ^!SIMPLE_FILES */
 
-      ++unique_crashes;
-      if (infoexec) {  // if the user wants to be informed on new crashes - do
+      ++afl->unique_crashes;
+      if (afl->infoexec) {  // if the user wants to be informed on new crashes - do
 #if !TARGET_OS_IPHONE
                        // that
-        if (system(infoexec) == -1)
+        if (system(afl->infoexec) == -1)
           hnb += 0;  // we dont care if system errors, but we dont want a
                      // compiler warning either
 #else
@@ -700,8 +700,8 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
 
       }
 
-      last_crash_time = get_cur_time();
-      last_crash_execs = total_execs;
+      afl->last_crash_time = get_cur_time();
+      afl->last_crash_execs = afl->total_execs;
 
       break;
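save_if_interesting() relies on the classic has_new_bits() return convention: 0 = nothing new, 1 = only hit counts of known tuples changed, 2 = a previously unseen tuple. Condensed, the queueing decision above is:

    u8 hnb = has_new_bits(afl, afl->virgin_bits);

    if (!hnb) return 0;                 /* nothing new: drop the input */

    add_to_queue(afl, fn, len, 0);

    if (hnb == 2) {                     /* brand-new tuple, not just a louder count */

      afl->queue_top->has_new_cov = 1;
      ++afl->queued_with_cov;

    }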
 
diff --git a/src/afl-fuzz-cmplog.c b/src/afl-fuzz-cmplog.c
index ec4d2ecc..08c48fc4 100644
--- a/src/afl-fuzz-cmplog.c
+++ b/src/afl-fuzz-cmplog.c
@@ -27,9 +27,7 @@
 #include "afl-fuzz.h"
 #include "cmplog.h"
 
-static s32 cmplog_fsrv_ctl_fd, cmplog_fsrv_st_fd;
-
-void init_cmplog_forkserver(char** argv) {
+void init_cmplog_forkserver(afl_state_t *afl) {
 
   static struct itimerval it;
   int                     st_pipe[2], ctl_pipe[2];
@@ -40,12 +38,12 @@ void init_cmplog_forkserver(char** argv) {
 
   if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed");
 
-  child_timed_out = 0;
-  cmplog_forksrv_pid = fork();
+  afl->fsrv.child_timed_out = 0;
+  afl->cmplog_fsrv_pid = fork();
 
-  if (cmplog_forksrv_pid < 0) PFATAL("fork() failed");
+  if (afl->cmplog_fsrv_pid < 0) PFATAL("fork() failed");
 
-  if (!cmplog_forksrv_pid) {
+  if (!afl->cmplog_fsrv_pid) {
 
     /* CHILD PROCESS */
 
@@ -61,9 +59,9 @@ void init_cmplog_forkserver(char** argv) {
 
     }
 
-    if (mem_limit) {
+    if (afl->fsrv.mem_limit) {
 
-      r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
+      r.rlim_max = r.rlim_cur = ((rlim_t)afl->fsrv.mem_limit) << 20;
 
 #ifdef RLIMIT_AS
       setrlimit(RLIMIT_AS, &r);                            /* Ignore errors */
@@ -83,26 +81,26 @@ void init_cmplog_forkserver(char** argv) {
     //    r.rlim_max = r.rlim_cur = 0;
     //    setrlimit(RLIMIT_CORE, &r);                      /* Ignore errors */
 
-    /* Isolate the process and configure standard descriptors. If out_file is
-       specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
+    /* Isolate the process and configure standard descriptors. If afl->fsrv.out_file is
+       specified, stdin is /dev/null; otherwise, afl->fsrv.out_fd is cloned instead. */
 
     setsid();
 
     if (!get_afl_env("AFL_DEBUG_CHILD_OUTPUT")) {
 
-      dup2(dev_null_fd, 1);
-      dup2(dev_null_fd, 2);
+      dup2(afl->fsrv.dev_null_fd, 1);
+      dup2(afl->fsrv.dev_null_fd, 2);
 
     }
 
-    if (!use_stdin) {
+    if (!afl->fsrv.use_stdin) {
 
-      dup2(dev_null_fd, 0);
+      dup2(afl->fsrv.dev_null_fd, 0);
 
     } else {
 
-      dup2(out_fd, 0);
-      close(out_fd);
+      dup2(afl->fsrv.out_fd, 0);
+      close(afl->fsrv.out_fd);
 
     }
 
@@ -116,12 +114,12 @@ void init_cmplog_forkserver(char** argv) {
     close(st_pipe[0]);
     close(st_pipe[1]);
 
-    close(out_dir_fd);
-    close(dev_null_fd);
+    close(afl->fsrv.out_dir_fd);
+    close(afl->fsrv.dev_null_fd);
 #ifndef HAVE_ARC4RANDOM
-    close(dev_urandom_fd);
+    close(afl->fsrv.dev_urandom_fd);
 #endif
-    close(plot_file == NULL ? -1 : fileno(plot_file));
+    close(afl->fsrv.plot_file == NULL ? -1 : fileno(afl->fsrv.plot_file));
 
     /* This should improve performance a bit, since it stops the linker from
        doing extra work post-fork(). */
@@ -152,13 +150,13 @@ void init_cmplog_forkserver(char** argv) {
 
     setenv("___AFL_EINS_ZWEI_POLIZEI___", "1", 1);
 
-    if (!qemu_mode) argv[0] = cmplog_binary;
-    execv(argv[0], argv);
+    if (!afl->qemu_mode) afl->argv[0] = afl->cmplog_binary;
+    execv(afl->argv[0], afl->argv);
 
     /* Use a distinctive bitmap signature to tell the parent about execv()
        falling through. */
 
-    *(u32*)trace_bits = EXEC_FAIL_SIG;
+    *(u32*)afl->fsrv.trace_bits = EXEC_FAIL_SIG;
     exit(0);
 
   }
@@ -170,21 +168,21 @@ void init_cmplog_forkserver(char** argv) {
   close(ctl_pipe[0]);
   close(st_pipe[1]);
 
-  cmplog_fsrv_ctl_fd = ctl_pipe[1];
-  cmplog_fsrv_st_fd = st_pipe[0];
+  afl->cmplog_fsrv_ctl_fd = ctl_pipe[1];
+  afl->cmplog_fsrv_st_fd = st_pipe[0];
 
   /* Wait for the fork server to come up, but don't wait too long. */
 
-  if (exec_tmout) {
+  if (afl->fsrv.exec_tmout) {
 
-    it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000);
-    it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;
+    it.it_value.tv_sec = ((afl->fsrv.exec_tmout * FORK_WAIT_MULT) / 1000);
+    it.it_value.tv_usec = ((afl->fsrv.exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;
 
   }
 
   setitimer(ITIMER_REAL, &it, NULL);
 
-  rlen = read(cmplog_fsrv_st_fd, &status, 4);
+  rlen = read(afl->cmplog_fsrv_st_fd, &status, 4);
 
   it.it_value.tv_sec = 0;
   it.it_value.tv_usec = 0;
@@ -201,16 +199,16 @@ void init_cmplog_forkserver(char** argv) {
 
   }
 
-  if (child_timed_out)
+  if (afl->fsrv.child_timed_out)
     FATAL(
         "Timeout while initializing cmplog fork server (adjusting -t may "
         "help)");
 
-  if (waitpid(cmplog_forksrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
+  if (waitpid(afl->cmplog_fsrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
 
   if (WIFSIGNALED(status)) {
 
-    if (mem_limit && mem_limit < 500 && uses_asan) {
+    if (afl->fsrv.mem_limit && afl->fsrv.mem_limit < 500 && afl->fsrv.uses_asan) {
 
       SAYF("\n" cLRD "[-] " cRST
            "Whoops, the target binary crashed suddenly, "
@@ -222,7 +220,7 @@ void init_cmplog_forkserver(char** argv) {
            "    %s/notes_for_asan.md for help.\n",
            doc_path);
 
-    } else if (!mem_limit) {
+    } else if (!afl->fsrv.mem_limit) {
 
       SAYF("\n" cLRD "[-] " cRST
            "Whoops, the target binary crashed suddenly, "
@@ -275,7 +273,7 @@ void init_cmplog_forkserver(char** argv) {
            "options\n"
            "      fail, poke <afl-users@googlegroups.com> for troubleshooting "
            "tips.\n",
-           DMS(mem_limit << 20), mem_limit - 1);
+           DMS(afl->fsrv.mem_limit << 20), afl->fsrv.mem_limit - 1);
 
     }
 
@@ -283,10 +281,10 @@ void init_cmplog_forkserver(char** argv) {
 
   }
 
-  if (*(u32*)trace_bits == EXEC_FAIL_SIG)
-    FATAL("Unable to execute target application ('%s')", argv[0]);
+  if (*(u32*)afl->fsrv.trace_bits == EXEC_FAIL_SIG)
+    FATAL("Unable to execute target application ('%s')", afl->argv[0]);
 
-  if (mem_limit && mem_limit < 500 && uses_asan) {
+  if (afl->fsrv.mem_limit && afl->fsrv.mem_limit < 500 && afl->fsrv.uses_asan) {
 
     SAYF("\n" cLRD "[-] " cRST
          "Hmm, looks like the target binary terminated "
@@ -298,7 +296,7 @@ void init_cmplog_forkserver(char** argv) {
          "    read %s/notes_for_asan.md for help.\n",
          doc_path);
 
-  } else if (!mem_limit) {
+  } else if (!afl->fsrv.mem_limit) {
 
     SAYF("\n" cLRD "[-] " cRST
          "Hmm, looks like the target binary terminated "
@@ -341,7 +339,7 @@ void init_cmplog_forkserver(char** argv) {
               "never\n"
               "      reached before the program terminates.\n\n"
             : "",
-        DMS(mem_limit << 20), mem_limit - 1);
+        DMS(afl->fsrv.mem_limit << 20), afl->fsrv.mem_limit - 1);
 
   }
 
@@ -349,7 +347,7 @@ void init_cmplog_forkserver(char** argv) {
 
 }
 
-u8 run_cmplog_target(char** argv, u32 timeout) {
+u8 run_cmplog_target(afl_state_t *afl, u32 timeout) {
 
   static struct itimerval it;
   static u32              prev_timed_out = 0;
@@ -358,13 +356,13 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
   int status = 0;
   u32 tb4;
 
-  child_timed_out = 0;
+  afl->fsrv.child_timed_out = 0;
 
-  /* After this memset, trace_bits[] are effectively volatile, so we
+  /* After this memset, afl->fsrv.trace_bits[] are effectively volatile, so we
      must prevent any earlier operations from venturing into that
      territory. */
 
-  memset(trace_bits, 0, MAP_SIZE);
+  memset(afl->fsrv.trace_bits, 0, MAP_SIZE);
   MEM_BARRIER();
 
   /* If we're running in "dumb" mode, we can't rely on the fork server
@@ -372,19 +370,19 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
      execve(). There is a bit of code duplication between here and
      init_forkserver(), but c'est la vie. */
 
-  if (dumb_mode == 1 || no_forkserver) {
+  if (afl->dumb_mode == 1 || afl->no_forkserver) {
 
-    cmplog_child_pid = fork();
+    afl->cmplog_child_pid = fork();
 
-    if (cmplog_child_pid < 0) PFATAL("fork() failed");
+    if (afl->cmplog_child_pid < 0) PFATAL("fork() failed");
 
-    if (!cmplog_child_pid) {
+    if (!afl->cmplog_child_pid) {
 
       struct rlimit r;
 
-      if (mem_limit) {
+      if (afl->fsrv.mem_limit) {
 
-        r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
+        r.rlim_max = r.rlim_cur = ((rlim_t)afl->fsrv.mem_limit) << 20;
 
 #ifdef RLIMIT_AS
 
@@ -402,33 +400,33 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
 
       setrlimit(RLIMIT_CORE, &r);                          /* Ignore errors */
 
-      /* Isolate the process and configure standard descriptors. If out_file is
-         specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
+      /* Isolate the process and configure standard descriptors. If afl->fsrv.out_file is
+         specified, stdin is /dev/null; otherwise, afl->fsrv.out_fd is cloned instead. */
 
       setsid();
 
-      dup2(dev_null_fd, 1);
-      dup2(dev_null_fd, 2);
+      dup2(afl->fsrv.dev_null_fd, 1);
+      dup2(afl->fsrv.dev_null_fd, 2);
 
-      if (out_file) {
+      if (afl->fsrv.out_file) {
 
-        dup2(dev_null_fd, 0);
+        dup2(afl->fsrv.dev_null_fd, 0);
 
       } else {
 
-        dup2(out_fd, 0);
-        close(out_fd);
+        dup2(afl->fsrv.out_fd, 0);
+        close(afl->fsrv.out_fd);
 
       }
 
       /* On Linux, would be faster to use O_CLOEXEC. Maybe TODO. */
 
-      close(dev_null_fd);
-      close(out_dir_fd);
+      close(afl->fsrv.dev_null_fd);
+      close(afl->fsrv.out_dir_fd);
 #ifndef HAVE_ARC4RANDOM
-      close(dev_urandom_fd);
+      close(afl->fsrv.dev_urandom_fd);
 #endif
-      close(fileno(plot_file));
+      close(fileno(afl->fsrv.plot_file));
 
       /* Set sane defaults for ASAN if nothing else specified. */
 
@@ -445,13 +443,13 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
 
       setenv("___AFL_EINS_ZWEI_POLIZEI___", "1", 1);
 
-      if (!qemu_mode) argv[0] = cmplog_binary;
-      execv(argv[0], argv);
+      if (!afl->qemu_mode) afl->argv[0] = afl->cmplog_binary;
+      execv(afl->argv[0], afl->argv);
 
       /* Use a distinctive bitmap value to tell the parent about execv()
          falling through. */
 
-      *(u32*)trace_bits = EXEC_FAIL_SIG;
+      *(u32*)afl->fsrv.trace_bits = EXEC_FAIL_SIG;
       exit(0);
 
     }
@@ -463,23 +461,23 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
     /* In non-dumb mode, we have the fork server up and running, so simply
        tell it to have at it, and then read back PID. */
 
-    if ((res = write(cmplog_fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
+    if ((res = write(afl->cmplog_fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
 
-      if (stop_soon) return 0;
+      if (afl->stop_soon) return 0;
       RPFATAL(res,
               "Unable to request new process from cmplog fork server (OOM?)");
 
     }
 
-    if ((res = read(cmplog_fsrv_st_fd, &cmplog_child_pid, 4)) != 4) {
+    if ((res = read(afl->cmplog_fsrv_st_fd, &afl->cmplog_child_pid, 4)) != 4) {
 
-      if (stop_soon) return 0;
+      if (afl->stop_soon) return 0;
       RPFATAL(res,
               "Unable to request new process from cmplog fork server (OOM?)");
 
     }
 
-    if (cmplog_child_pid <= 0)
+    if (afl->cmplog_child_pid <= 0)
       FATAL("Cmplog fork server is misbehaving (OOM?)");
 
   }
@@ -492,20 +490,20 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
 
   setitimer(ITIMER_REAL, &it, NULL);
 
-  /* The SIGALRM handler simply kills the cmplog_child_pid and sets
-   * child_timed_out. */
+  /* The SIGALRM handler simply kills the afl->cmplog_child_pid and sets
+   * afl->fsrv.child_timed_out. */
 
-  if (dumb_mode == 1 || no_forkserver) {
+  if (afl->dumb_mode == 1 || afl->no_forkserver) {
 
-    if (waitpid(cmplog_child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
+    if (waitpid(afl->cmplog_child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
 
   } else {
 
     s32 res;
 
-    if ((res = read(cmplog_fsrv_st_fd, &status, 4)) != 4) {
+    if ((res = read(afl->cmplog_fsrv_st_fd, &status, 4)) != 4) {
 
-      if (stop_soon) return 0;
+      if (afl->stop_soon) return 0;
       SAYF(
           "\n" cLRD "[-] " cRST
           "Unable to communicate with fork server. Some possible reasons:\n\n"
@@ -520,50 +518,50 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
           "culprit.\n\n"
           "If all else fails you can disable the fork server via "
           "AFL_NO_FORKSRV=1.\n",
-          mem_limit);
+          afl->fsrv.mem_limit);
       RPFATAL(res, "Unable to communicate with fork server");
 
     }
 
   }
 
-  if (!WIFSTOPPED(status)) cmplog_child_pid = 0;
+  if (!WIFSTOPPED(status)) afl->cmplog_child_pid = 0;
 
   getitimer(ITIMER_REAL, &it);
   exec_ms =
       (u64)timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000);
-  if (slowest_exec_ms < exec_ms) slowest_exec_ms = exec_ms;
+  if (afl->slowest_exec_ms < exec_ms) afl->slowest_exec_ms = exec_ms;
 
   it.it_value.tv_sec = 0;
   it.it_value.tv_usec = 0;
 
   setitimer(ITIMER_REAL, &it, NULL);
 
-  ++total_execs;
+  ++afl->total_execs;
 
-  /* Any subsequent operations on trace_bits must not be moved by the
-     compiler below this point. Past this location, trace_bits[] behave
+  /* Any subsequent operations on afl->fsrv.trace_bits must not be moved by the
+     compiler below this point. Past this location, afl->fsrv.trace_bits[] behave
      very normally and do not have to be treated as volatile. */
 
   MEM_BARRIER();
 
-  tb4 = *(u32*)trace_bits;
+  tb4 = *(u32*)afl->fsrv.trace_bits;
 
 #ifdef WORD_SIZE_64
-  classify_counts((u64*)trace_bits);
+  classify_counts((u64*)afl->fsrv.trace_bits);
 #else
-  classify_counts((u32*)trace_bits);
+  classify_counts((u32*)afl->fsrv.trace_bits);
 #endif                                                     /* ^WORD_SIZE_64 */
 
-  prev_timed_out = child_timed_out;
+  prev_timed_out = afl->fsrv.child_timed_out;
 
   /* Report outcome to caller. */
 
-  if (WIFSIGNALED(status) && !stop_soon) {
+  if (WIFSIGNALED(status) && !afl->stop_soon) {
 
-    kill_signal = WTERMSIG(status);
+    afl->kill_signal = WTERMSIG(status);
 
-    if (child_timed_out && kill_signal == SIGKILL) return FAULT_TMOUT;
+    if (afl->fsrv.child_timed_out && afl->kill_signal == SIGKILL) return FAULT_TMOUT;
 
     return FAULT_CRASH;
 
@@ -572,67 +570,67 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
   /* A somewhat nasty hack for MSAN, which doesn't support abort_on_error and
      must use a special exit code. */
 
-  if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
+  if (afl->fsrv.uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
 
-    kill_signal = 0;
+    afl->kill_signal = 0;
     return FAULT_CRASH;
 
   }
 
-  if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG)
+  if ((afl->dumb_mode == 1 || afl->no_forkserver) && tb4 == EXEC_FAIL_SIG)
     return FAULT_ERROR;
 
   return FAULT_NONE;
 
 }
 
-u8 common_fuzz_cmplog_stuff(char** argv, u8* out_buf, u32 len) {
+u8 common_fuzz_cmplog_stuff(afl_state_t *afl, u8* out_buf, u32 len) {
 
   u8 fault;
 
-  if (post_handler) {
+  if (afl->post_handler) {
 
-    out_buf = post_handler(out_buf, &len);
+    out_buf = afl->post_handler(out_buf, &len);
     if (!out_buf || !len) return 0;
 
   }
 
-  write_to_testcase(out_buf, len);
+  write_to_testcase(afl, out_buf, len);
 
-  fault = run_cmplog_target(argv, exec_tmout);
+  fault = run_cmplog_target(afl, afl->fsrv.exec_tmout);
 
-  if (stop_soon) return 1;
+  if (afl->stop_soon) return 1;
 
   if (fault == FAULT_TMOUT) {
 
-    if (subseq_tmouts++ > TMOUT_LIMIT) {
+    if (afl->subseq_tmouts++ > TMOUT_LIMIT) {
 
-      ++cur_skipped_paths;
+      ++afl->cur_skipped_paths;
       return 1;
 
     }
 
   } else
 
-    subseq_tmouts = 0;
+    afl->subseq_tmouts = 0;
 
   /* Users can hit us with SIGUSR1 to request the current input
      to be abandoned. */
 
-  if (skip_requested) {
+  if (afl->skip_requested) {
 
-    skip_requested = 0;
-    ++cur_skipped_paths;
+    afl->skip_requested = 0;
+    ++afl->cur_skipped_paths;
     return 1;
 
   }
 
   /* This handles FAULT_ERROR for us: */
 
-  /* queued_discovered += save_if_interesting(argv, out_buf, len, fault);
+  /* afl->queued_discovered += save_if_interesting(afl, out_buf, len, fault);
 
-  if (!(stage_cur % stats_update_freq) || stage_cur + 1 == stage_max)
-    show_stats(); */
+  if (!(afl->stage_cur % afl->stats_update_freq) || afl->stage_cur + 1 == afl->stage_max)
+    show_stats(afl); */
 
   return 0;
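The cmplog server speaks the same four-byte pipe protocol as the regular fork server. Stripped of error handling, one round trip looks like this (field names as in the diff; status is whatever waitpid() reported inside the server):

    u32 prev_timed_out = 0;             /* was the previous child killed by the timeout? */
    s32 child_pid;
    int status;

    /* ask the fork server for a fresh child */
    write(afl->cmplog_fsrv_ctl_fd, &prev_timed_out, 4);

    /* the server replies with the PID of the newly forked child */
    read(afl->cmplog_fsrv_st_fd, &child_pid, 4);

    /* ... the child executes the current test case ... */

    /* when it exits, the server forwards its waitpid() status */
    read(afl->cmplog_fsrv_st_fd, &status, 4);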
 
diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c
index 6c6dc28c..8c8e085e 100644
--- a/src/afl-fuzz-extras.c
+++ b/src/afl-fuzz-extras.c
@@ -45,7 +45,7 @@ static int compare_extras_use_d(const void* p1, const void* p2) {
 
 /* Read extras from a file, sort by size. */
 
-void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
+void load_extras_file(afl_state_t *afl, u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
 
   FILE* f;
   u8    buf[MAX_LINE];
@@ -120,10 +120,10 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
     /* Okay, let's allocate memory and copy data between "...", handling
        \xNN escaping, \\, and \". */
 
-    extras =
-        ck_realloc_block(extras, (extras_cnt + 1) * sizeof(struct extra_data));
+    afl->extras =
+        ck_realloc_block(afl->extras, (afl->extras_cnt + 1) * sizeof(struct extra_data));
 
-    wptr = extras[extras_cnt].data = ck_alloc(rptr - lptr);
+    wptr = afl->extras[afl->extras_cnt].data = ck_alloc(rptr - lptr);
 
     while (*lptr) {
 
@@ -164,16 +164,16 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
 
     }
 
-    extras[extras_cnt].len = klen;
+    afl->extras[afl->extras_cnt].len = klen;
 
-    if (extras[extras_cnt].len > MAX_DICT_FILE)
+    if (afl->extras[afl->extras_cnt].len > MAX_DICT_FILE)
       FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line, DMS(klen),
             DMS(MAX_DICT_FILE));
 
     if (*min_len > klen) *min_len = klen;
     if (*max_len < klen) *max_len = klen;
 
-    ++extras_cnt;
+    ++afl->extras_cnt;
 
   }
 
@@ -183,7 +183,7 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
 
 /* Read extras from the extras directory and sort them by size. */
 
-void load_extras(u8* dir) {
+void load_extras(afl_state_t *afl, u8* dir) {
 
   DIR*           d;
   struct dirent* de;
@@ -207,7 +207,7 @@ void load_extras(u8* dir) {
 
     if (errno == ENOTDIR) {
 
-      load_extras_file(dir, &min_len, &max_len, dict_level);
+      load_extras_file(afl, dir, &min_len, &max_len, dict_level);
       goto check_and_sort;
 
     }
@@ -241,22 +241,22 @@ void load_extras(u8* dir) {
     if (min_len > st.st_size) min_len = st.st_size;
     if (max_len < st.st_size) max_len = st.st_size;
 
-    extras =
-        ck_realloc_block(extras, (extras_cnt + 1) * sizeof(struct extra_data));
+    afl->extras =
+        ck_realloc_block(afl->extras, (afl->extras_cnt + 1) * sizeof(struct extra_data));
 
-    extras[extras_cnt].data = ck_alloc(st.st_size);
-    extras[extras_cnt].len = st.st_size;
+    afl->extras[afl->extras_cnt].data = ck_alloc(st.st_size);
+    afl->extras[afl->extras_cnt].len = st.st_size;
 
     fd = open(fn, O_RDONLY);
 
     if (fd < 0) PFATAL("Unable to open '%s'", fn);
 
-    ck_read(fd, extras[extras_cnt].data, st.st_size, fn);
+    ck_read(fd, afl->extras[afl->extras_cnt].data, st.st_size, fn);
 
     close(fd);
     ck_free(fn);
 
-    ++extras_cnt;
+    ++afl->extras_cnt;
 
   }
 
@@ -264,24 +264,24 @@ void load_extras(u8* dir) {
 
 check_and_sort:
 
-  if (!extras_cnt) FATAL("No usable files in '%s'", dir);
+  if (!afl->extras_cnt) FATAL("No usable files in '%s'", dir);
 
-  qsort(extras, extras_cnt, sizeof(struct extra_data), compare_extras_len);
+  qsort(afl->extras, afl->extras_cnt, sizeof(struct extra_data), compare_extras_len);
 
-  OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt, DMS(min_len),
+  OKF("Loaded %u extra tokens, size range %s to %s.", afl->extras_cnt, DMS(min_len),
       DMS(max_len));
 
   if (max_len > 32)
     WARNF("Some tokens are relatively large (%s) - consider trimming.",
           DMS(max_len));
 
-  if (extras_cnt > MAX_DET_EXTRAS)
+  if (afl->extras_cnt > MAX_DET_EXTRAS)
     WARNF("More than %d tokens - will use them probabilistically.",
           MAX_DET_EXTRAS);
 
 }
 
-/* Helper function for maybe_add_auto() */
+/* Helper function for maybe_add_auto() */
 
 static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) {
 
@@ -293,7 +293,7 @@ static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) {
 
 /* Maybe add automatic extra. */
 
-void maybe_add_auto(u8* mem, u32 len) {
+void maybe_add_auto(afl_state_t *afl, u8* mem, u32 len) {
 
   u32 i;
 
@@ -336,22 +336,22 @@ void maybe_add_auto(u8* mem, u32 len) {
      match. We optimize by exploiting the fact that extras[] are sorted
      by size. */
 
-  for (i = 0; i < extras_cnt; ++i)
-    if (extras[i].len >= len) break;
+  for (i = 0; i < afl->extras_cnt; ++i)
+    if (afl->extras[i].len >= len) break;
 
-  for (; i < extras_cnt && extras[i].len == len; ++i)
-    if (!memcmp_nocase(extras[i].data, mem, len)) return;
+  for (; i < afl->extras_cnt && afl->extras[i].len == len; ++i)
+    if (!memcmp_nocase(afl->extras[i].data, mem, len)) return;
 
-  /* Last but not least, check a_extras[] for matches. There are no
+  /* Last but not least, check afl->a_extras[] for matches. There are no
      guarantees of a particular sort order. */
 
-  auto_changed = 1;
+  afl->auto_changed = 1;
 
-  for (i = 0; i < a_extras_cnt; ++i) {
+  for (i = 0; i < afl->a_extras_cnt; ++i) {
 
-    if (a_extras[i].len == len && !memcmp_nocase(a_extras[i].data, mem, len)) {
+    if (afl->a_extras[i].len == len && !memcmp_nocase(afl->a_extras[i].data, mem, len)) {
 
-      a_extras[i].hit_cnt++;
+      afl->a_extras[i].hit_cnt++;
       goto sort_a_extras;
 
     }
@@ -362,24 +362,24 @@ void maybe_add_auto(u8* mem, u32 len) {
      append it if we have room. Otherwise, let's randomly evict some other
      entry from the bottom half of the list. */
 
-  if (a_extras_cnt < MAX_AUTO_EXTRAS) {
+  if (afl->a_extras_cnt < MAX_AUTO_EXTRAS) {
 
-    a_extras = ck_realloc_block(a_extras,
-                                (a_extras_cnt + 1) * sizeof(struct extra_data));
+    afl->a_extras = ck_realloc_block(afl->a_extras,
+                                (afl->a_extras_cnt + 1) * sizeof(struct extra_data));
 
-    a_extras[a_extras_cnt].data = ck_memdup(mem, len);
-    a_extras[a_extras_cnt].len = len;
-    ++a_extras_cnt;
+    afl->a_extras[afl->a_extras_cnt].data = ck_memdup(mem, len);
+    afl->a_extras[afl->a_extras_cnt].len = len;
+    ++afl->a_extras_cnt;
 
   } else {
 
-    i = MAX_AUTO_EXTRAS / 2 + UR((MAX_AUTO_EXTRAS + 1) / 2);
+    i = MAX_AUTO_EXTRAS / 2 + UR(afl, (MAX_AUTO_EXTRAS + 1) / 2);
 
-    ck_free(a_extras[i].data);
+    ck_free(afl->a_extras[i].data);
 
-    a_extras[i].data = ck_memdup(mem, len);
-    a_extras[i].len = len;
-    a_extras[i].hit_cnt = 0;
+    afl->a_extras[i].data = ck_memdup(mem, len);
+    afl->a_extras[i].len = len;
+    afl->a_extras[i].hit_cnt = 0;
 
   }
 
@@ -387,35 +387,35 @@ sort_a_extras:
 
   /* First, sort all auto extras by use count, descending order. */
 
-  qsort(a_extras, a_extras_cnt, sizeof(struct extra_data),
+  qsort(afl->a_extras, afl->a_extras_cnt, sizeof(struct extra_data),
         compare_extras_use_d);
 
   /* Then, sort the top USE_AUTO_EXTRAS entries by size. */
 
-  qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt), sizeof(struct extra_data),
+  qsort(afl->a_extras, MIN(USE_AUTO_EXTRAS, afl->a_extras_cnt), sizeof(struct extra_data),
         compare_extras_len);
 
 }
 
 /* Save automatically generated extras. */
 
-void save_auto(void) {
+void save_auto(afl_state_t *afl) {
 
   u32 i;
 
-  if (!auto_changed) return;
-  auto_changed = 0;
+  if (!afl->auto_changed) return;
+  afl->auto_changed = 0;
 
-  for (i = 0; i < MIN(USE_AUTO_EXTRAS, a_extras_cnt); ++i) {
+  for (i = 0; i < MIN(USE_AUTO_EXTRAS, afl->a_extras_cnt); ++i) {
 
-    u8* fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", out_dir, i);
+    u8* fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", afl->out_dir, i);
     s32 fd;
 
     fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 
     if (fd < 0) PFATAL("Unable to create '%s'", fn);
 
-    ck_write(fd, a_extras[i].data, a_extras[i].len, fn);
+    ck_write(fd, afl->a_extras[i].data, afl->a_extras[i].len, fn);
 
     close(fd);
     ck_free(fn);
@@ -426,14 +426,14 @@ void save_auto(void) {
 
 /* Load automatically generated extras. */
 
-void load_auto(void) {
+void load_auto(afl_state_t *afl) {
 
   u32 i;
 
   for (i = 0; i < USE_AUTO_EXTRAS; ++i) {
 
     u8  tmp[MAX_AUTO_EXTRA + 1];
-    u8* fn = alloc_printf("%s/.state/auto_extras/auto_%06u", in_dir, i);
+    u8* fn = alloc_printf("%s/.state/auto_extras/auto_%06u", afl->in_dir, i);
     s32 fd, len;
 
     fd = open(fn, O_RDONLY, 0600);
@@ -454,7 +454,7 @@ void load_auto(void) {
     if (len < 0) PFATAL("Unable to read from '%s'", fn);
 
     if (len >= MIN_AUTO_EXTRA && len <= MAX_AUTO_EXTRA)
-      maybe_add_auto(tmp, len);
+      maybe_add_auto(afl, tmp, len);
 
     close(fd);
     ck_free(fn);
@@ -470,19 +470,19 @@ void load_auto(void) {
 
 /* Destroy extras. */
 
-void destroy_extras(void) {
+void destroy_extras(afl_state_t *afl) {
 
   u32 i;
 
-  for (i = 0; i < extras_cnt; ++i)
-    ck_free(extras[i].data);
+  for (i = 0; i < afl->extras_cnt; ++i)
+    ck_free(afl->extras[i].data);
 
-  ck_free(extras);
+  ck_free(afl->extras);
 
-  for (i = 0; i < a_extras_cnt; ++i)
-    ck_free(a_extras[i].data);
+  for (i = 0; i < afl->a_extras_cnt; ++i)
+    ck_free(afl->a_extras[i].data);
 
-  ck_free(a_extras);
+  ck_free(afl->a_extras);
 
 }
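load_extras_file() parses the usual AFL dictionary format: one token per line, payload in double quotes, with \xNN, \\ and \" escapes. A sketch with illustrative file name and tokens:

    /* example.dict:

         header_png="\x89PNG\x0d\x0a\x1a\x0a"
         keyword_zip="PK\x03\x04"
    */

    load_extras(afl, (u8 *)"example.dict");  /* handles a single file or a directory of tokens */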
 
diff --git a/src/afl-fuzz-globals.c b/src/afl-fuzz-globals.c
index 9412463d..004af2b4 100644
--- a/src/afl-fuzz-globals.c
+++ b/src/afl-fuzz-globals.c
@@ -25,255 +25,103 @@
 
 #include "afl-fuzz.h"
 
-/* MOpt:
-   Lots of globals, but mostly for the status UI and other things where it
-   really makes no sense to haul them around as function parameters. */
-u64 limit_time_puppet, orig_hit_cnt_puppet, last_limit_time_start,
-    tmp_pilot_time, total_pacemaker_time, total_puppet_find, temp_puppet_find,
-    most_time_key, most_time, most_execs_key, most_execs, old_hit_count;
-
-s32 SPLICE_CYCLES_puppet, limit_time_sig, key_puppet, key_module;
-
-double w_init = 0.9, w_end = 0.3, w_now;
-
-s32 g_now;
-s32 g_max = 5000;
-
-u64 tmp_core_time;
-s32 swarm_now;
-
-double x_now[swarm_num][operator_num], L_best[swarm_num][operator_num],
-    eff_best[swarm_num][operator_num], G_best[operator_num],
-    v_now[swarm_num][operator_num], probability_now[swarm_num][operator_num],
-    swarm_fitness[swarm_num];
-
-u64 stage_finds_puppet[swarm_num]
-                      [operator_num],   /* Patterns found per fuzz stage    */
-    stage_finds_puppet_v2[swarm_num][operator_num],
-    stage_cycles_puppet_v2[swarm_num][operator_num],
-    stage_cycles_puppet_v3[swarm_num][operator_num],
-    stage_cycles_puppet[swarm_num][operator_num],
-    operator_finds_puppet[operator_num],
-    core_operator_finds_puppet[operator_num],
-    core_operator_finds_puppet_v2[operator_num],
-    core_operator_cycles_puppet[operator_num],
-    core_operator_cycles_puppet_v2[operator_num],
-    core_operator_cycles_puppet_v3[operator_num];   /* Execs per fuzz stage */
-
-double period_pilot_tmp = 5000.0;
-s32    key_lv;
-
-u8 *in_dir,                             /* Input directory with test cases  */
-    *out_dir,                           /* Working & output directory       */
-    *tmp_dir,                           /* Temporary directory for input    */
-    *sync_dir,                          /* Synchronization directory        */
-    *sync_id,                           /* Fuzzer ID                        */
-    *power_name,                        /* Power schedule name              */
-    *use_banner,                        /* Display banner                   */
-    *in_bitmap,                         /* Input bitmap                     */
-    *file_extension,                    /* File extension                   */
-    *orig_cmdline;                      /* Original command line            */
-u8 *doc_path,                           /* Path to documentation dir        */
-    *infoexec,                         /* Command to execute on a new crash */
-    *out_file;                          /* File to fuzz, if any             */
-
-u32 exec_tmout = EXEC_TIMEOUT;          /* Configurable exec timeout (ms)   */
-u32 hang_tmout = EXEC_TIMEOUT;          /* Timeout used for hang det (ms)   */
-
-u64 mem_limit = MEM_LIMIT;              /* Memory cap for child (MB)        */
-
-u8 cal_cycles = CAL_CYCLES,             /* Calibration cycles defaults      */
-    cal_cycles_long = CAL_CYCLES_LONG,  /* Calibration cycles defaults      */
-    debug,                              /* Debug mode                       */
-    no_unlink,                          /* do not unlink cur_input          */
-    use_stdin = 1,                      /* use stdin for sending data       */
-    be_quiet,                           /* is AFL_QUIET set?                */
-    custom_only;                        /* Custom mutator only mode         */
-
-u32 stats_update_freq = 1;              /* Stats update frequency (execs)   */
+s8  interesting_8[] = {INTERESTING_8};
+s16 interesting_16[] = {INTERESTING_8, INTERESTING_16};
+s32 interesting_32[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32};
 
 char *power_names[POWER_SCHEDULES_NUM] = {"explore", "fast", "coe",
                                           "lin",     "quad", "exploit"};
 
-u8 schedule = EXPLORE;                  /* Power schedule (default: EXPLORE)*/
-u8 havoc_max_mult = HAVOC_MAX_MULT;
-
-u8 use_radamsa;
-size_t (*radamsa_mutate_ptr)(u8 *, size_t, u8 *, size_t, u32);
-
-u8 skip_deterministic,                  /* Skip deterministic stages?       */
-    force_deterministic,                /* Force deterministic stages?      */
-    use_splicing,                       /* Recombine input files?           */
-    dumb_mode,                          /* Run in non-instrumented mode?    */
-    score_changed,                      /* Scoring for favorites changed?   */
-    kill_signal,                        /* Signal that killed the child     */
-    resuming_fuzz,                      /* Resuming an older fuzzing job?   */
-    timeout_given,                      /* Specific timeout given?          */
-    not_on_tty,                         /* stdout is not a tty              */
-    term_too_small,                     /* terminal dimensions too small    */
-    no_forkserver,                      /* Disable forkserver?              */
-    crash_mode,                         /* Crash mode! Yeah!                */
-    in_place_resume,                    /* Attempt in-place resume?         */
-    autoresume,                         /* Resume if out_dir exists?        */
-    auto_changed,                       /* Auto-generated tokens changed?   */
-    no_cpu_meter_red,                   /* Feng shui on the status screen   */
-    no_arith,                           /* Skip most arithmetic ops         */
-    shuffle_queue,                      /* Shuffle input queue?             */
-    bitmap_changed = 1,                 /* Time to update bitmap?           */
-    qemu_mode,                          /* Running in QEMU mode?            */
-    unicorn_mode,                       /* Running in Unicorn mode?         */
-    use_wine,                           /* Use WINE with QEMU mode          */
-    skip_requested,                     /* Skip request, via SIGUSR1        */
-    run_over10m,                        /* Run time over 10 minutes?        */
-    persistent_mode,                    /* Running in persistent mode?      */
-    deferred_mode,                      /* Deferred forkserver mode?        */
-    fixed_seed,                         /* do not reseed                    */
-    fast_cal,                           /* Try to calibrate faster?         */
-    uses_asan,                          /* Target uses ASAN?                */
-    disable_trim;                       /* Never trim in fuzz_one           */
-
-s32 out_fd,                             /* Persistent fd for out_file       */
-#ifndef HAVE_ARC4RANDOM
-    dev_urandom_fd = -1,                /* Persistent fd for /dev/urandom   */
-#endif
-    dev_null_fd = -1,                   /* Persistent fd for /dev/null      */
-    fsrv_ctl_fd,                        /* Fork server control pipe (write) */
-    fsrv_st_fd;                         /* Fork server status pipe (read)   */
-
-s32 forksrv_pid,                        /* PID of the fork server           */
-    child_pid = -1,                     /* PID of the fuzzed program        */
-    out_dir_fd = -1;                    /* FD of the lock file              */
-
-u8 *trace_bits;                         /* SHM with instrumentation bitmap  */
-
-u8 virgin_bits[MAP_SIZE],               /* Regions yet untouched by fuzzing */
-    virgin_tmout[MAP_SIZE],             /* Bits we haven't seen in tmouts   */
-    virgin_crash[MAP_SIZE];             /* Bits we haven't seen in crashes  */
-
-u8 var_bytes[MAP_SIZE];                 /* Bytes that appear to be variable */
-
-volatile u8 stop_soon,                  /* Ctrl-C pressed?                  */
-    clear_screen = 1,                   /* Window resized?                  */
-    child_timed_out;                    /* Traced process timed out?        */
-
-u32 queued_paths,                       /* Total number of queued testcases */
-    queued_variable,                    /* Testcases with variable behavior */
-    queued_at_start,                    /* Total number of initial inputs   */
-    queued_discovered,                  /* Items discovered during this run */
-    queued_imported,                    /* Items imported via -S            */
-    queued_favored,                     /* Paths deemed favorable           */
-    queued_with_cov,                    /* Paths with new coverage bytes    */
-    pending_not_fuzzed,                 /* Queued but not done yet          */
-    pending_favored,                    /* Pending favored paths            */
-    cur_skipped_paths,                  /* Abandoned inputs in cur cycle    */
-    cur_depth,                          /* Current path depth               */
-    max_depth,                          /* Max path depth                   */
-    useless_at_start,                   /* Number of useless starting paths */
-    var_byte_count,                     /* Bitmap bytes with var behavior   */
-    current_entry,                      /* Current queue entry ID           */
-    havoc_div = 1;                      /* Cycle count divisor for havoc    */
-
-u64 total_crashes,                      /* Total number of crashes          */
-    unique_crashes,                     /* Crashes with unique signatures   */
-    total_tmouts,                       /* Total number of timeouts         */
-    unique_tmouts,                      /* Timeouts with unique signatures  */
-    unique_hangs,                       /* Hangs with unique signatures     */
-    total_execs,                        /* Total execve() calls             */
-    slowest_exec_ms,                    /* Slowest testcase non hang in ms  */
-    start_time,                         /* Unix start time (ms)             */
-    last_path_time,                     /* Time for most recent path (ms)   */
-    last_crash_time,                    /* Time for most recent crash (ms)  */
-    last_hang_time,                     /* Time for most recent hang (ms)   */
-    last_crash_execs,                   /* Exec counter at last crash       */
-    queue_cycle,                        /* Queue round counter              */
-    cycles_wo_finds,                    /* Cycles without any new paths     */
-    trim_execs,                         /* Execs done to trim input files   */
-    bytes_trim_in,                      /* Bytes coming into the trimmer    */
-    bytes_trim_out,                     /* Bytes coming outa the trimmer    */
-    blocks_eff_total,                   /* Blocks subject to effector maps  */
-    blocks_eff_select;                  /* Blocks selected as fuzzable      */
-
-u32 subseq_tmouts;                      /* Number of timeouts in a row      */
-
-u8 *stage_name = "init",                /* Name of the current fuzz stage   */
-    *stage_short,                       /* Short stage name                 */
-    *syncing_party;                     /* Currently syncing with...        */
-
-s32 stage_cur, stage_max;               /* Stage progression                */
-s32 splicing_with = -1;                 /* Splicing with which test case?   */
-
-u32 master_id, master_max;              /* Master instance job splitting    */
-
-u32 syncing_case;                       /* Syncing with case #...           */
-
-s32 stage_cur_byte,                     /* Byte offset of current stage op  */
-    stage_cur_val;                      /* Value used for stage op          */
-
-u8 stage_val_type;                      /* Value type (STAGE_VAL_*)         */
-
-u64 stage_finds[32],                    /* Patterns found per fuzz stage    */
-    stage_cycles[32];                   /* Execs per fuzz stage             */
-
-#ifndef HAVE_ARC4RANDOM
-u32 rand_cnt;                           /* Random number counter            */
-#endif
-
-u32 rand_seed[2];
-s64 init_seed;
-
-u64 total_cal_us,                       /* Total calibration time (us)      */
-    total_cal_cycles;                   /* Total calibration cycles         */
-
-u64 total_bitmap_size,                  /* Total bit count for all bitmaps  */
-    total_bitmap_entries;               /* Number of bitmaps counted        */
-
-s32 cpu_core_count;                     /* CPU core count                   */
+u8 *doc_path = NULL;                  /* Path to documentation dir        */
+
+/* Initialize MOpt "globals" for this afl state */
+
+static void init_mopt_globals(afl_state_t *afl) {
+
+  MOpt_globals_t *core = &afl->mopt_globals_core;
+  core->finds = afl->core_operator_finds_puppet;
+  core->finds_v2 = afl->core_operator_finds_puppet_v2;
+  core->cycles = afl->core_operator_cycles_puppet;
+  core->cycles_v2 = afl->core_operator_cycles_puppet_v2;
+  core->cycles_v3 = afl->core_operator_cycles_puppet_v3;
+  core->is_pilot_mode = 0;
+  core->pTime = &afl->tmp_core_time;
+  core->period = period_core;
+  core->havoc_stagename = "MOpt-core-havoc";
+  core->splice_stageformat = "MOpt-core-splice %u";
+  core->havoc_stagenameshort = "MOpt_core_havoc";
+  core->splice_stagenameshort = "MOpt_core_splice";
+
+  MOpt_globals_t *pilot = &afl->mopt_globals_pilot;
+  pilot->finds = afl->stage_finds_puppet[0];
+  pilot->finds_v2 = afl->stage_finds_puppet_v2[0];
+  pilot->cycles = afl->stage_cycles_puppet[0];
+  pilot->cycles_v2 = afl->stage_cycles_puppet_v2[0];
+  pilot->cycles_v3 = afl->stage_cycles_puppet_v3[0];
+  pilot->is_pilot_mode = 1;
+  pilot->pTime = &afl->tmp_pilot_time;
+  pilot->period = period_pilot;
+  pilot->havoc_stagename = "MOpt-havoc";
+  pilot->splice_stageformat = "MOpt-splice %u";
+  pilot->havoc_stagenameshort = "MOpt_havoc";
+  pilot->splice_stagenameshort = "MOpt_splice";
+
+}
+
+/* A global list of all afl_state_t instances is needed (for now) so that
+   signal handlers can reach every running instance. */
+
+list_t afl_states = {0};
+
+/* Initializes an afl_state_t. */
+
+void afl_state_init(afl_state_t *afl) {
+
+    afl->w_init = 0.9;
+    afl->w_end = 0.3;
+    afl->g_max = 5000;
+    afl->period_pilot_tmp = 5000.0;
+    afl->schedule = EXPLORE;                 /* Power schedule (default: EXPLORE)*/
+    afl->havoc_max_mult = HAVOC_MAX_MULT;
+
+    afl->clear_screen = 1;                   /* Window resized?                  */
+    afl->havoc_div = 1;                      /* Cycle count divisor for havoc    */
+    afl->stage_name = "init";                /* Name of the current fuzz stage   */
+    afl->splicing_with = -1;                 /* Splicing with which test case?   */
 
 #ifdef HAVE_AFFINITY
+    afl->cpu_aff = -1;                       /* Selected CPU core                */
+#endif                                                      /* HAVE_AFFINITY */
 
-s32 cpu_aff = -1;                       /* Selected CPU core                */
+    afl->fsrv.use_stdin = 1;
 
-#endif                                                     /* HAVE_AFFINITY */
+    afl->cal_cycles = CAL_CYCLES;
+    afl->cal_cycles_long = CAL_CYCLES_LONG;
 
-FILE *plot_file;                        /* Gnuplot output file              */
+    afl->fsrv.exec_tmout = EXEC_TIMEOUT;
+    afl->hang_tmout = EXEC_TIMEOUT;
 
-struct queue_entry *queue,              /* Fuzzing queue (linked list)      */
-    *queue_cur,                         /* Current offset within the queue  */
-    *queue_top,                         /* Top of the list                  */
-    *q_prev100;                         /* Previous 100 marker              */
+    afl->fsrv.mem_limit = MEM_LIMIT;
 
-struct queue_entry *top_rated[MAP_SIZE]; /* Top entries for bitmap bytes     */
+    afl->stats_update_freq = 1;
 
-struct extra_data *extras;              /* Extra tokens to fuzz with        */
-u32                extras_cnt;          /* Total number of tokens read      */
+#ifndef HAVE_ARC4RANDOM
+    afl->fsrv.dev_urandom_fd = -1;
+#endif
+    afl->fsrv.dev_null_fd = -1;
 
-struct extra_data *a_extras;            /* Automatically selected extras    */
-u32                a_extras_cnt;        /* Total number of tokens available */
+    afl->fsrv.child_pid = -1;
+    afl->fsrv.out_dir_fd = -1;
 
-u8 *(*post_handler)(u8 *buf, u32 *len);
+    init_mopt_globals(afl);
 
-u8 *cmplog_binary;
-s32 cmplog_child_pid, cmplog_forksrv_pid;
+    list_append(&afl_states, afl);
 
-/* Custom mutator */
-struct custom_mutator *mutator;
+}
 
-/* Interesting values, as per config.h */
+/* Removes this afl_state instance from the global list of states. */
 
-s8  interesting_8[] = {INTERESTING_8};
-s16 interesting_16[] = {INTERESTING_8, INTERESTING_16};
-s32 interesting_32[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32};
+void afl_state_deinit(afl_state_t *afl) {
 
-/* Python stuff */
-#ifdef USE_PYTHON
-
-PyObject *py_module;
-PyObject *py_functions[PY_FUNC_COUNT];
-
-#endif
-
-#ifdef _AFL_DOCUMENT_MUTATIONS
-u8  do_document;
-u32 document_counter;
-#endif
+    list_remove(&afl_states, afl);
 
+}
\ No newline at end of file
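
With the globals folded into afl_state_t, an embedder drives a fuzzer instance
through afl_state_init() and afl_state_deinit(). A minimal sketch of that call
pattern follows; it is not part of this patch, and run_one_instance() plus the
exact field assignments are illustrative assumptions.

    /* Hedged sketch of the new per-instance API introduced by this patch.
       run_one_instance() is a hypothetical wrapper, not an AFL++ function. */

    #include "afl-fuzz.h"

    static void run_one_instance(char **argv) {

      afl_state_t afl;              /* one fuzzer instance, no globals */

      afl_state_init(&afl);         /* set defaults, register in afl_states */
      afl.argv = argv;              /* argv now travels inside the state */

      /* ... set afl.in_dir / afl.out_dir, then setup_dirs_fds(&afl),
         read_testcases(&afl), and so on, exactly as afl-fuzz.c does ... */

      afl_state_deinit(&afl);       /* unregister from afl_states */

    }
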
diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c
index 8cabd9eb..6cd0cefa 100644
--- a/src/afl-fuzz-init.c
+++ b/src/afl-fuzz-init.c
@@ -30,7 +30,7 @@
 /* Build a list of processes bound to specific cores. Returns -1 if nothing
    can be found. Assumes an upper bound of 4k CPUs. */
 
-void bind_to_free_cpu(void) {
+void bind_to_free_cpu(afl_state_t *afl) {
 
 #if defined(__linux__) || defined(__FreeBSD__) || defined(__DragonFly__)
   cpu_set_t c;
@@ -41,7 +41,7 @@ void bind_to_free_cpu(void) {
   u8  cpu_used[4096] = {0};
   u32 i;
 
-  if (cpu_core_count < 2) return;
+  if (afl->cpu_core_count < 2) return;
 
   if (getenv("AFL_NO_AFFINITY")) {
 
@@ -188,12 +188,12 @@ void bind_to_free_cpu(void) {
 
   try:
 #ifndef __ANDROID__
-    for (i = cpu_start; i < cpu_core_count; i++)
+    for (i = cpu_start; i < afl->cpu_core_count; i++)
       if (!cpu_used[i]) break;
-  if (i == cpu_core_count) {
+  if (i == afl->cpu_core_count) {
 
 #else
-    for (i = cpu_core_count - cpu_start - 1; i > -1; i--)
+    for (i = afl->cpu_core_count - cpu_start - 1; i > -1; i--)
       if (!cpu_used[i]) break;
   if (i == -1) {
 
@@ -206,14 +206,14 @@ void bind_to_free_cpu(void) {
          "    another fuzzer on this machine is probably a bad plan, but if "
          "you are\n"
          "    absolutely sure, you can set AFL_NO_AFFINITY and try again.\n",
-         cpu_core_count);
+         afl->cpu_core_count);
     FATAL("No more free CPU cores");
 
   }
 
   OKF("Found a free CPU core, try binding to #%u.", i);
 
-  cpu_aff = i;
+  afl->cpu_aff = i;
 
 #if defined(__linux__) || defined(__FreeBSD__) || defined(__DragonFly__)
   CPU_ZERO(&c);
@@ -227,7 +227,7 @@ void bind_to_free_cpu(void) {
 #if defined(__linux__)
   if (sched_setaffinity(0, sizeof(c), &c)) {
 
-    if (cpu_start == cpu_core_count)
+    if (cpu_start == afl->cpu_core_count)
       PFATAL("sched_setaffinity failed for CPU %d, exit", i);
     WARNF("sched_setaffinity failed to CPU %d, trying next CPU", i);
     cpu_start++;
@@ -239,7 +239,7 @@ void bind_to_free_cpu(void) {
 #elif defined(__FreeBSD__) || defined(__DragonFly__)
   if (pthread_setaffinity_np(pthread_self(), sizeof(c), &c)) {
 
-    if (cpu_start == cpu_core_count)
+    if (cpu_start == afl->cpu_core_count)
       PFATAL("pthread_setaffinity failed for cpu %d, exit", i);
     WARNF("pthread_setaffinity failed to CPU %d, trying next CPU", i);
     cpu_start++;
@@ -251,7 +251,7 @@ void bind_to_free_cpu(void) {
 #elif defined(__NetBSD__)
 if (pthread_setaffinity_np(pthread_self(), cpuset_size(c), c)) {
 
-  if (cpu_start == cpu_core_count)
+  if (cpu_start == afl->cpu_core_count)
     PFATAL("pthread_setaffinity failed for cpu %d, exit", i);
   WARNF("pthread_setaffinity failed to CPU %d, trying next CPU", i);
   cpu_start++;
@@ -272,7 +272,7 @@ cpuset_destroy(c);
 
 /* Load postprocessor, if available. */
 
-void setup_post(void) {
+void setup_post(afl_state_t *afl) {
 
   void* dh;
   u8*   fn = get_afl_env("AFL_POST_LIBRARY");
@@ -285,12 +285,12 @@ void setup_post(void) {
   dh = dlopen(fn, RTLD_NOW);
   if (!dh) FATAL("%s", dlerror());
 
-  post_handler = dlsym(dh, "afl_postprocess");
-  if (!post_handler) FATAL("Symbol 'afl_postprocess' not found.");
+  afl->post_handler = dlsym(dh, "afl_postprocess");
+  if (!afl->post_handler) FATAL("Symbol 'afl_postprocess' not found.");
 
   /* Do a quick test. It's better to segfault now than later =) */
 
-  post_handler("hello", &tlen);
+  afl->post_handler("hello", &tlen);
 
   OKF("Postprocessor installed successfully.");
 
@@ -298,13 +298,13 @@ void setup_post(void) {
 
 /* Shuffle an array of pointers. Might be slightly biased. */
 
-static void shuffle_ptrs(void** ptrs, u32 cnt) {
+static void shuffle_ptrs(afl_state_t *afl, void** ptrs, u32 cnt) {
 
   u32 i;
 
   for (i = 0; i < cnt - 2; ++i) {
 
-    u32   j = i + UR(cnt - i);
+    u32   j = i + UR(afl, cnt - i);
     void* s = ptrs[i];
     ptrs[i] = ptrs[j];
     ptrs[j] = s;
@@ -316,7 +316,7 @@ static void shuffle_ptrs(void** ptrs, u32 cnt) {
 /* Read all testcases from the input directory, then queue them for testing.
    Called at startup. */
 
-void read_testcases(void) {
+void read_testcases(afl_state_t *afl) {
 
   struct dirent** nl;
   s32             nl_cnt;
@@ -325,19 +325,19 @@ void read_testcases(void) {
 
   /* Auto-detect non-in-place resumption attempts. */
 
-  fn1 = alloc_printf("%s/queue", in_dir);
+  fn1 = alloc_printf("%s/queue", afl->in_dir);
   if (!access(fn1, F_OK))
-    in_dir = fn1;
+    afl->in_dir = fn1;
   else
     ck_free(fn1);
 
-  ACTF("Scanning '%s'...", in_dir);
+  ACTF("Scanning '%s'...", afl->in_dir);
 
   /* We use scandir() + alphasort() rather than readdir() because otherwise,
      the ordering  of test cases would vary somewhat randomly and would be
      difficult to control. */
 
-  nl_cnt = scandir(in_dir, &nl, NULL, alphasort);
+  nl_cnt = scandir(afl->in_dir, &nl, NULL, alphasort);
 
   if (nl_cnt < 0) {
 
@@ -352,14 +352,14 @@ void read_testcases(void) {
            "the input\n"
            "    directory.\n");
 
-    PFATAL("Unable to open '%s'", in_dir);
+    PFATAL("Unable to open '%s'", afl->in_dir);
 
   }
 
-  if (shuffle_queue && nl_cnt > 1) {
+  if (afl->shuffle_queue && nl_cnt > 1) {
 
     ACTF("Shuffling queue...");
-    shuffle_ptrs((void**)nl, nl_cnt);
+    shuffle_ptrs(afl, (void**)nl, nl_cnt);
 
   }
 
@@ -367,9 +367,9 @@ void read_testcases(void) {
 
     struct stat st;
 
-    u8* fn2 = alloc_printf("%s/%s", in_dir, nl[i]->d_name);
+    u8* fn2 = alloc_printf("%s/%s", afl->in_dir, nl[i]->d_name);
     u8* dfn =
-        alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name);
+        alloc_printf("%s/.state/deterministic_done/%s", afl->in_dir, nl[i]->d_name);
 
     u8 passed_det = 0;
 
@@ -400,13 +400,13 @@ void read_testcases(void) {
     if (!access(dfn, F_OK)) passed_det = 1;
     ck_free(dfn);
 
-    add_to_queue(fn2, st.st_size, passed_det);
+    add_to_queue(afl, fn2, st.st_size, passed_det);
 
   }
 
   free(nl);                                                  /* not tracked */
 
-  if (!queued_paths) {
+  if (!afl->queued_paths) {
 
     SAYF("\n" cLRD "[-] " cRST
          "Looks like there are no valid test cases in the input directory! The "
@@ -417,25 +417,25 @@ void read_testcases(void) {
          "in the\n"
          "    input directory.\n");
 
-    FATAL("No usable test cases in '%s'", in_dir);
+    FATAL("No usable test cases in '%s'", afl->in_dir);
 
   }
 
-  last_path_time = 0;
-  queued_at_start = queued_paths;
+  afl->last_path_time = 0;
+  afl->queued_at_start = afl->queued_paths;
 
 }
 
 /* Examine map coverage. Called once, for first test case. */
 
-static void check_map_coverage(void) {
+static void check_map_coverage(afl_state_t *afl) {
 
   u32 i;
 
-  if (count_bytes(trace_bits) < 100) return;
+  if (count_bytes(afl->fsrv.trace_bits) < 100) return;
 
   for (i = (1 << (MAP_SIZE_POW2 - 1)); i < MAP_SIZE; ++i)
-    if (trace_bits[i]) return;
+    if (afl->fsrv.trace_bits[i]) return;
 
   WARNF("Recompile binary with newer version of afl to improve coverage!");
 
@@ -444,9 +444,9 @@ static void check_map_coverage(void) {
 /* Perform dry run of all test cases to confirm that the app is working as
    expected. This is done only for the initial inputs, and only once. */
 
-void perform_dry_run(char** argv) {
+void perform_dry_run(afl_state_t *afl) {
 
-  struct queue_entry* q = queue;
+  struct queue_entry* q = afl->queue;
   u32                 cal_failures = 0;
   u8*                 skip_crashes = get_afl_env("AFL_SKIP_CRASHES");
 
@@ -470,12 +470,12 @@ void perform_dry_run(char** argv) {
 
     close(fd);
 
-    res = calibrate_case(argv, q, use_mem, 0, 1);
+    res = calibrate_case(afl, q, use_mem, 0, 1);
     ck_free(use_mem);
 
-    if (stop_soon) return;
+    if (afl->stop_soon) return;
 
-    if (res == crash_mode || res == FAULT_NOBITS)
+    if (res == afl->crash_mode || res == FAULT_NOBITS)
       SAYF(cGRA "    len = %u, map size = %u, exec speed = %llu us\n" cRST,
            q->len, q->bitmap_size, q->exec_us);
 
@@ -483,21 +483,21 @@ void perform_dry_run(char** argv) {
 
       case FAULT_NONE:
 
-        if (q == queue) check_map_coverage();
+        if (q == afl->queue) check_map_coverage(afl);
 
-        if (crash_mode) FATAL("Test case '%s' does *NOT* crash", fn);
+        if (afl->crash_mode) FATAL("Test case '%s' does *NOT* crash", fn);
 
         break;
 
       case FAULT_TMOUT:
 
-        if (timeout_given) {
+        if (afl->timeout_given) {
 
-          /* The -t nn+ syntax in the command line sets timeout_given to '2' and
+          /* The -t nn+ syntax in the command line sets afl->timeout_given to '2' and
              instructs afl-fuzz to tolerate but skip queue entries that time
              out. */
 
-          if (timeout_given > 1) {
+          if (afl->timeout_given > 1) {
 
             WARNF("Test case results in a timeout (skipping)");
             q->cal_failed = CAL_CHANCES;
@@ -516,7 +516,7 @@ void perform_dry_run(char** argv) {
                "    what you are doing and want to simply skip the unruly test "
                "cases, append\n"
                "    '+' at the end of the value passed to -t ('-t %u+').\n",
-               exec_tmout, exec_tmout);
+               afl->fsrv.exec_tmout, afl->fsrv.exec_tmout);
 
           FATAL("Test case '%s' results in a timeout", fn);
 
@@ -532,7 +532,7 @@ void perform_dry_run(char** argv) {
                "    If this test case is just a fluke, the other option is to "
                "just avoid it\n"
                "    altogether, and find one that is less of a CPU hog.\n",
-               exec_tmout);
+               afl->fsrv.exec_tmout);
 
           FATAL("Test case '%s' results in a timeout", fn);
 
@@ -540,7 +540,7 @@ void perform_dry_run(char** argv) {
 
       case FAULT_CRASH:
 
-        if (crash_mode) break;
+        if (afl->crash_mode) break;
 
         if (skip_crashes) {
 
@@ -551,7 +551,7 @@ void perform_dry_run(char** argv) {
 
         }
 
-        if (mem_limit) {
+        if (afl->fsrv.mem_limit) {
 
           SAYF("\n" cLRD "[-] " cRST
                "Oops, the program crashed with one of the test cases provided. "
@@ -593,7 +593,7 @@ void perform_dry_run(char** argv) {
                "other options\n"
                "      fail, poke <afl-users@googlegroups.com> for "
                "troubleshooting tips.\n",
-               DMS(mem_limit << 20), mem_limit - 1, doc_path);
+               DMS(afl->fsrv.mem_limit << 20), afl->fsrv.mem_limit - 1, doc_path);
 
         } else {
 
@@ -630,15 +630,15 @@ void perform_dry_run(char** argv) {
 
       case FAULT_ERROR:
 
-        FATAL("Unable to execute target application ('%s')", argv[0]);
+        FATAL("Unable to execute target application ('%s')", afl->argv[0]);
 
       case FAULT_NOINST: FATAL("No instrumentation detected");
 
       case FAULT_NOBITS:
 
-        ++useless_at_start;
+        ++afl->useless_at_start;
 
-        if (!in_bitmap && !shuffle_queue)
+        if (!afl->in_bitmap && !afl->shuffle_queue)
           WARNF("No new instrumentation output, test case may be useless.");
 
         break;
@@ -653,15 +653,15 @@ void perform_dry_run(char** argv) {
 
   if (cal_failures) {
 
-    if (cal_failures == queued_paths)
+    if (cal_failures == afl->queued_paths)
       FATAL("All test cases time out%s, giving up!",
             skip_crashes ? " or crash" : "");
 
     WARNF("Skipped %u test cases (%0.02f%%) due to timeouts%s.", cal_failures,
-          ((double)cal_failures) * 100 / queued_paths,
+          ((double)cal_failures) * 100 / afl->queued_paths,
           skip_crashes ? " or crashes" : "");
 
-    if (cal_failures * 5 > queued_paths)
+    if (cal_failures * 5 > afl->queued_paths)
       WARNF(cLRD "High percentage of rejected test cases, check settings!");
 
   }
@@ -702,9 +702,9 @@ static void link_or_copy(u8* old_path, u8* new_path) {
 /* Create hard links for input test cases in the output directory, choosing
    good names and pivoting accordingly. */
 
-void pivot_inputs(void) {
+void pivot_inputs(afl_state_t *afl) {
 
-  struct queue_entry* q = queue;
+  struct queue_entry* q = afl->queue;
   u32                 id = 0;
 
   ACTF("Creating hard links for all input files...");
@@ -729,8 +729,8 @@ void pivot_inputs(void) {
       u8* src_str;
       u32 src_id;
 
-      resuming_fuzz = 1;
-      nfn = alloc_printf("%s/queue/%s", out_dir, rsl);
+      afl->resuming_fuzz = 1;
+      nfn = alloc_printf("%s/queue/%s", afl->out_dir, rsl);
 
       /* Since we're at it, let's also try to find parent and figure out the
          appropriate depth for this entry. */
@@ -739,12 +739,12 @@ void pivot_inputs(void) {
 
       if (src_str && sscanf(src_str + 1, "%06u", &src_id) == 1) {
 
-        struct queue_entry* s = queue;
+        struct queue_entry* s = afl->queue;
         while (src_id-- && s)
           s = s->next;
         if (s) q->depth = s->depth + 1;
 
-        if (max_depth < q->depth) max_depth = q->depth;
+        if (afl->max_depth < q->depth) afl->max_depth = q->depth;
 
       }
 
@@ -761,12 +761,12 @@ void pivot_inputs(void) {
         use_name += 6;
       else
         use_name = rsl;
-      nfn = alloc_printf("%s/queue/id:%06u,time:0,orig:%s", out_dir, id,
+      nfn = alloc_printf("%s/queue/id:%06u,time:0,orig:%s", afl->out_dir, id,
                          use_name);
 
 #else
 
-      nfn = alloc_printf("%s/queue/id_%06u", out_dir, id);
+      nfn = alloc_printf("%s/queue/id_%06u", afl->out_dir, id);
 
 #endif                                                    /* ^!SIMPLE_FILES */
 
@@ -780,21 +780,21 @@ void pivot_inputs(void) {
 
     /* Make sure that the passed_det value carries over, too. */
 
-    if (q->passed_det) mark_as_det_done(q);
+    if (q->passed_det) mark_as_det_done(afl, q);
 
     q = q->next;
     ++id;
 
   }
 
-  if (in_place_resume) nuke_resume_dir();
+  if (afl->in_place_resume) nuke_resume_dir(afl);
 
 }
 
 /* When resuming, try to find the queue position to start from. This makes sense
    only when resuming, and when we can find the original fuzzer_stats. */
 
-u32 find_start_position(void) {
+u32 find_start_position(afl_state_t *afl) {
 
   static u8 tmp[4096];                   /* Ought to be enough for anybody. */
 
@@ -802,12 +802,12 @@ u32 find_start_position(void) {
   s32 fd, i;
   u32 ret;
 
-  if (!resuming_fuzz) return 0;
+  if (!afl->resuming_fuzz) return 0;
 
-  if (in_place_resume)
-    fn = alloc_printf("%s/fuzzer_stats", out_dir);
+  if (afl->in_place_resume)
+    fn = alloc_printf("%s/fuzzer_stats", afl->out_dir);
   else
-    fn = alloc_printf("%s/../fuzzer_stats", in_dir);
+    fn = alloc_printf("%s/../fuzzer_stats", afl->in_dir);
 
   fd = open(fn, O_RDONLY);
   ck_free(fn);
@@ -822,7 +822,7 @@ u32 find_start_position(void) {
   if (!off) return 0;
 
   ret = atoi(off + 20);
-  if (ret >= queued_paths) ret = 0;
+  if (ret >= afl->queued_paths) ret = 0;
   return ret;
 
 }
@@ -831,7 +831,7 @@ u32 find_start_position(void) {
    -t given, we don't want to keep auto-scaling the timeout over and over
    again to prevent it from growing due to random flukes. */
 
-void find_timeout(void) {
+void find_timeout(afl_state_t *afl) {
 
   static u8 tmp[4096];                   /* Ought to be enough for anybody. */
 
@@ -839,12 +839,12 @@ void find_timeout(void) {
   s32 fd, i;
   u32 ret;
 
-  if (!resuming_fuzz) return;
+  if (!afl->resuming_fuzz) return;
 
-  if (in_place_resume)
-    fn = alloc_printf("%s/fuzzer_stats", out_dir);
+  if (afl->in_place_resume)
+    fn = alloc_printf("%s/fuzzer_stats", afl->out_dir);
   else
-    fn = alloc_printf("%s/../fuzzer_stats", in_dir);
+    fn = alloc_printf("%s/../fuzzer_stats", afl->in_dir);
 
   fd = open(fn, O_RDONLY);
   ck_free(fn);
@@ -861,8 +861,8 @@ void find_timeout(void) {
   ret = atoi(off + 20);
   if (ret <= 4) return;
 
-  exec_tmout = ret;
-  timeout_given = 3;
+  afl->fsrv.exec_tmout = ret;
+  afl->timeout_given = 3;
 
 }
 
@@ -953,31 +953,31 @@ double get_runnable_processes(void) {
 
 /* Delete the temporary directory used for in-place session resume. */
 
-void nuke_resume_dir(void) {
+void nuke_resume_dir(afl_state_t *afl) {
 
   u8* fn;
 
-  fn = alloc_printf("%s/_resume/.state/deterministic_done", out_dir);
+  fn = alloc_printf("%s/_resume/.state/deterministic_done", afl->out_dir);
   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
   ck_free(fn);
 
-  fn = alloc_printf("%s/_resume/.state/auto_extras", out_dir);
+  fn = alloc_printf("%s/_resume/.state/auto_extras", afl->out_dir);
   if (delete_files(fn, "auto_")) goto dir_cleanup_failed;
   ck_free(fn);
 
-  fn = alloc_printf("%s/_resume/.state/redundant_edges", out_dir);
+  fn = alloc_printf("%s/_resume/.state/redundant_edges", afl->out_dir);
   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
   ck_free(fn);
 
-  fn = alloc_printf("%s/_resume/.state/variable_behavior", out_dir);
+  fn = alloc_printf("%s/_resume/.state/variable_behavior", afl->out_dir);
   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
   ck_free(fn);
 
-  fn = alloc_printf("%s/_resume/.state", out_dir);
+  fn = alloc_printf("%s/_resume/.state", afl->out_dir);
   if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed;
   ck_free(fn);
 
-  fn = alloc_printf("%s/_resume", out_dir);
+  fn = alloc_printf("%s/_resume", afl->out_dir);
   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
   ck_free(fn);
 
@@ -993,30 +993,30 @@ dir_cleanup_failed:
    is not currently running, and if the last run time isn't too great.
    Resume fuzzing if `-` is set as in_dir or if AFL_AUTORESUME is set */
 
-static void handle_existing_out_dir(void) {
+static void handle_existing_out_dir(afl_state_t *afl) {
 
   FILE* f;
-  u8*   fn = alloc_printf("%s/fuzzer_stats", out_dir);
+  u8*   fn = alloc_printf("%s/fuzzer_stats", afl->out_dir);
 
   /* See if the output directory is locked. If yes, bail out. If not,
      create a lock that will persist for the lifetime of the process
      (this requires leaving the descriptor open).*/
 
-  out_dir_fd = open(out_dir, O_RDONLY);
-  if (out_dir_fd < 0) PFATAL("Unable to open '%s'", out_dir);
+  afl->fsrv.out_dir_fd = open(afl->out_dir, O_RDONLY);
+  if (afl->fsrv.out_dir_fd < 0) PFATAL("Unable to open '%s'", afl->out_dir);
 
 #ifndef __sun
 
-  if (flock(out_dir_fd, LOCK_EX | LOCK_NB) && errno == EWOULDBLOCK) {
+  if (flock(afl->fsrv.out_dir_fd, LOCK_EX | LOCK_NB) && errno == EWOULDBLOCK) {
 
     SAYF("\n" cLRD "[-] " cRST
          "Looks like the job output directory is being actively used by "
          "another\n"
          "    instance of afl-fuzz. You will need to choose a different %s\n"
          "    or stop the other process first.\n",
-         sync_id ? "fuzzer ID" : "output location");
+         afl->sync_id ? "fuzzer ID" : "output location");
 
-    FATAL("Directory '%s' is in use", out_dir);
+    FATAL("Directory '%s' is in use", afl->out_dir);
 
   }
 
@@ -1039,16 +1039,16 @@ static void handle_existing_out_dir(void) {
     /* Autoresume treats a normal run as in_place_resume if a valid out dir
      * already exists */
 
-    if (!in_place_resume && autoresume) {
+    if (!afl->in_place_resume && afl->autoresume) {
 
       OKF("Detected prior run with AFL_AUTORESUME set. Resuming.");
-      in_place_resume = 1;
+      afl->in_place_resume = 1;
 
     }
 
     /* Let's see how much work is at stake. */
 
-    if (!in_place_resume && last_update - start_time2 > OUTPUT_GRACE * 60) {
+    if (!afl->in_place_resume && last_update - start_time2 > OUTPUT_GRACE * 60) {
 
       SAYF("\n" cLRD "[-] " cRST
            "The job output directory already exists and contains the results "
@@ -1066,7 +1066,7 @@ static void handle_existing_out_dir(void) {
            "    try again.\n",
            OUTPUT_GRACE);
 
-      FATAL("At-risk data found in '%s'", out_dir);
+      FATAL("At-risk data found in '%s'", afl->out_dir);
 
     }
 
@@ -1080,13 +1080,13 @@ static void handle_existing_out_dir(void) {
      incomplete due to an earlier abort, so we want to use the old _resume/
      dir instead, and we let rename() fail silently. */
 
-  if (in_place_resume) {
+  if (afl->in_place_resume) {
 
-    u8* orig_q = alloc_printf("%s/queue", out_dir);
+    u8* orig_q = alloc_printf("%s/queue", afl->out_dir);
 
-    in_dir = alloc_printf("%s/_resume", out_dir);
+    afl->in_dir = alloc_printf("%s/_resume", afl->out_dir);
 
-    rename(orig_q, in_dir);                                /* Ignore errors */
+    rename(orig_q, afl->in_dir);                                /* Ignore errors */
 
     OKF("Output directory exists, will attempt session resume.");
 
@@ -1101,61 +1101,61 @@ static void handle_existing_out_dir(void) {
   ACTF("Deleting old session data...");
 
   /* Okay, let's get the ball rolling! First, we need to get rid of the entries
-     in <out_dir>/.synced/.../id:*, if any are present. */
+     in <afl->out_dir>/.synced/.../id:*, if any are present. */
 
-  if (!in_place_resume) {
+  if (!afl->in_place_resume) {
 
-    fn = alloc_printf("%s/.synced", out_dir);
+    fn = alloc_printf("%s/.synced", afl->out_dir);
     if (delete_files(fn, NULL)) goto dir_cleanup_failed;
     ck_free(fn);
 
   }
 
-  /* Next, we need to clean up <out_dir>/queue/.state/ subdirectories: */
+  /* Next, we need to clean up <afl->out_dir>/queue/.state/ subdirectories: */
 
-  fn = alloc_printf("%s/queue/.state/deterministic_done", out_dir);
+  fn = alloc_printf("%s/queue/.state/deterministic_done", afl->out_dir);
   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
   ck_free(fn);
 
-  fn = alloc_printf("%s/queue/.state/auto_extras", out_dir);
+  fn = alloc_printf("%s/queue/.state/auto_extras", afl->out_dir);
   if (delete_files(fn, "auto_")) goto dir_cleanup_failed;
   ck_free(fn);
 
-  fn = alloc_printf("%s/queue/.state/redundant_edges", out_dir);
+  fn = alloc_printf("%s/queue/.state/redundant_edges", afl->out_dir);
   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
   ck_free(fn);
 
-  fn = alloc_printf("%s/queue/.state/variable_behavior", out_dir);
+  fn = alloc_printf("%s/queue/.state/variable_behavior", afl->out_dir);
   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
   ck_free(fn);
 
   /* Then, get rid of the .state subdirectory itself (should be empty by now)
-     and everything matching <out_dir>/queue/id:*. */
+     and everything matching <afl->out_dir>/queue/id:*. */
 
-  fn = alloc_printf("%s/queue/.state", out_dir);
+  fn = alloc_printf("%s/queue/.state", afl->out_dir);
   if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed;
   ck_free(fn);
 
-  fn = alloc_printf("%s/queue", out_dir);
+  fn = alloc_printf("%s/queue", afl->out_dir);
   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
   ck_free(fn);
 
-  /* All right, let's do <out_dir>/crashes/id:* and <out_dir>/hangs/id:*. */
+  /* All right, let's do <afl->out_dir>/crashes/id:* and <afl->out_dir>/hangs/id:*. */
 
-  if (!in_place_resume) {
+  if (!afl->in_place_resume) {
 
-    fn = alloc_printf("%s/crashes/README.txt", out_dir);
+    fn = alloc_printf("%s/crashes/README.txt", afl->out_dir);
     unlink(fn);                                            /* Ignore errors */
     ck_free(fn);
 
   }
 
-  fn = alloc_printf("%s/crashes", out_dir);
+  fn = alloc_printf("%s/crashes", afl->out_dir);
 
   /* Make backup of the crashes directory if it's not empty and if we're
      doing in-place resume. */
 
-  if (in_place_resume && rmdir(fn)) {
+  if (afl->in_place_resume && rmdir(fn)) {
 
     time_t     cur_t = time(0);
     struct tm* t = localtime(&cur_t);
@@ -1182,11 +1182,11 @@ static void handle_existing_out_dir(void) {
   if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed;
   ck_free(fn);
 
-  fn = alloc_printf("%s/hangs", out_dir);
+  fn = alloc_printf("%s/hangs", afl->out_dir);
 
   /* Backup hangs, too. */
 
-  if (in_place_resume && rmdir(fn)) {
+  if (afl->in_place_resume && rmdir(fn)) {
 
     time_t     cur_t = time(0);
     struct tm* t = localtime(&cur_t);
@@ -1215,36 +1215,36 @@ static void handle_existing_out_dir(void) {
 
   /* And now, for some finishing touches. */
 
-  if (file_extension) {
+  if (afl->file_extension) {
 
-    fn = alloc_printf("%s/.cur_input.%s", tmp_dir, file_extension);
+    fn = alloc_printf("%s/.cur_input.%s", afl->tmp_dir, afl->file_extension);
 
   } else {
 
-    fn = alloc_printf("%s/.cur_input", tmp_dir);
+    fn = alloc_printf("%s/.cur_input", afl->tmp_dir);
 
   }
 
   if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
   ck_free(fn);
 
-  fn = alloc_printf("%s/fuzz_bitmap", out_dir);
+  fn = alloc_printf("%s/fuzz_bitmap", afl->out_dir);
   if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
   ck_free(fn);
 
-  if (!in_place_resume) {
+  if (!afl->in_place_resume) {
 
-    fn = alloc_printf("%s/fuzzer_stats", out_dir);
+    fn = alloc_printf("%s/fuzzer_stats", afl->out_dir);
     if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
     ck_free(fn);
 
   }
 
-  fn = alloc_printf("%s/plot_data", out_dir);
+  fn = alloc_printf("%s/plot_data", afl->out_dir);
   if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
   ck_free(fn);
 
-  fn = alloc_printf("%s/cmdline", out_dir);
+  fn = alloc_printf("%s/cmdline", afl->out_dir);
   if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed;
   ck_free(fn);
 
@@ -1275,32 +1275,32 @@ dir_cleanup_failed:
 
 /* Prepare output directories and fds. */
 
-void setup_dirs_fds(void) {
+void setup_dirs_fds(afl_state_t *afl) {
 
   u8* tmp;
   s32 fd;
 
   ACTF("Setting up output directories...");
 
-  if (sync_id && mkdir(sync_dir, 0700) && errno != EEXIST)
-    PFATAL("Unable to create '%s'", sync_dir);
+  if (afl->sync_id && mkdir(afl->sync_dir, 0700) && errno != EEXIST)
+    PFATAL("Unable to create '%s'", afl->sync_dir);
 
-  if (mkdir(out_dir, 0700)) {
+  if (mkdir(afl->out_dir, 0700)) {
 
-    if (errno != EEXIST) PFATAL("Unable to create '%s'", out_dir);
+    if (errno != EEXIST) PFATAL("Unable to create '%s'", afl->out_dir);
 
-    handle_existing_out_dir();
+    handle_existing_out_dir(afl);
 
   } else {
 
-    if (in_place_resume)
+    if (afl->in_place_resume)
       FATAL("Resume attempted but old output directory not found");
 
-    out_dir_fd = open(out_dir, O_RDONLY);
+    afl->fsrv.out_dir_fd = open(afl->out_dir, O_RDONLY);
 
 #ifndef __sun
 
-    if (out_dir_fd < 0 || flock(out_dir_fd, LOCK_EX | LOCK_NB))
+    if (afl->fsrv.out_dir_fd < 0 || flock(afl->fsrv.out_dir_fd, LOCK_EX | LOCK_NB))
       PFATAL("Unable to flock() output directory.");
 
 #endif                                                            /* !__sun */
@@ -1309,49 +1309,49 @@ void setup_dirs_fds(void) {
 
   /* Queue directory for any starting & discovered paths. */
 
-  tmp = alloc_printf("%s/queue", out_dir);
+  tmp = alloc_printf("%s/queue", afl->out_dir);
   if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
   ck_free(tmp);
 
   /* Top-level directory for queue metadata used for session
      resume and related tasks. */
 
-  tmp = alloc_printf("%s/queue/.state/", out_dir);
+  tmp = alloc_printf("%s/queue/.state/", afl->out_dir);
   if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
   ck_free(tmp);
 
   /* Directory for flagging queue entries that went through
      deterministic fuzzing in the past. */
 
-  tmp = alloc_printf("%s/queue/.state/deterministic_done/", out_dir);
+  tmp = alloc_printf("%s/queue/.state/deterministic_done/", afl->out_dir);
   if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
   ck_free(tmp);
 
   /* Directory with the auto-selected dictionary entries. */
 
-  tmp = alloc_printf("%s/queue/.state/auto_extras/", out_dir);
+  tmp = alloc_printf("%s/queue/.state/auto_extras/", afl->out_dir);
   if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
   ck_free(tmp);
 
   /* The set of paths currently deemed redundant. */
 
-  tmp = alloc_printf("%s/queue/.state/redundant_edges/", out_dir);
+  tmp = alloc_printf("%s/queue/.state/redundant_edges/", afl->out_dir);
   if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
   ck_free(tmp);
 
   /* The set of paths showing variable behavior. */
 
-  tmp = alloc_printf("%s/queue/.state/variable_behavior/", out_dir);
+  tmp = alloc_printf("%s/queue/.state/variable_behavior/", afl->out_dir);
   if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
   ck_free(tmp);
 
   /* Sync directory for keeping track of cooperating fuzzers. */
 
-  if (sync_id) {
+  if (afl->sync_id) {
 
-    tmp = alloc_printf("%s/.synced/", out_dir);
+    tmp = alloc_printf("%s/.synced/", afl->out_dir);
 
-    if (mkdir(tmp, 0700) && (!in_place_resume || errno != EEXIST))
+    if (mkdir(tmp, 0700) && (!afl->in_place_resume || errno != EEXIST))
       PFATAL("Unable to create '%s'", tmp);
 
     ck_free(tmp);
@@ -1360,37 +1360,37 @@ void setup_dirs_fds(void) {
 
   /* All recorded crashes. */
 
-  tmp = alloc_printf("%s/crashes", out_dir);
+  tmp = alloc_printf("%s/crashes", afl->out_dir);
   if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
   ck_free(tmp);
 
   /* All recorded hangs. */
 
-  tmp = alloc_printf("%s/hangs", out_dir);
+  tmp = alloc_printf("%s/hangs", afl->out_dir);
   if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp);
   ck_free(tmp);
 
   /* Generally useful file descriptors. */
 
-  dev_null_fd = open("/dev/null", O_RDWR);
-  if (dev_null_fd < 0) PFATAL("Unable to open /dev/null");
+  afl->fsrv.dev_null_fd = open("/dev/null", O_RDWR);
+  if (afl->fsrv.dev_null_fd < 0) PFATAL("Unable to open /dev/null");
 
 #ifndef HAVE_ARC4RANDOM
-  dev_urandom_fd = open("/dev/urandom", O_RDONLY);
-  if (dev_urandom_fd < 0) PFATAL("Unable to open /dev/urandom");
+  afl->fsrv.dev_urandom_fd = open("/dev/urandom", O_RDONLY);
+  if (afl->fsrv.dev_urandom_fd < 0) PFATAL("Unable to open /dev/urandom");
 #endif
 
   /* Gnuplot output file. */
 
-  tmp = alloc_printf("%s/plot_data", out_dir);
+  tmp = alloc_printf("%s/plot_data", afl->out_dir);
   fd = open(tmp, O_WRONLY | O_CREAT | O_EXCL, 0600);
   if (fd < 0) PFATAL("Unable to create '%s'", tmp);
   ck_free(tmp);
 
-  plot_file = fdopen(fd, "w");
-  if (!plot_file) PFATAL("fdopen() failed");
+  afl->fsrv.plot_file = fdopen(fd, "w");
+  if (!afl->fsrv.plot_file) PFATAL("fdopen() failed");
 
-  fprintf(plot_file,
+  fprintf(afl->fsrv.plot_file,
           "# unix_time, cycles_done, cur_path, paths_total, "
           "pending_total, pending_favs, map_size, unique_crashes, "
           "unique_hangs, max_depth, execs_per_sec\n");
@@ -1398,7 +1398,7 @@ void setup_dirs_fds(void) {
 
 }
 
-void setup_cmdline_file(char** argv) {
+void setup_cmdline_file(afl_state_t *afl, char **argv) {
 
   u8* tmp;
   s32 fd;
@@ -1407,7 +1407,7 @@ void setup_cmdline_file(char** argv) {
   FILE* cmdline_file = NULL;
 
   /* Store the command line to reproduce our findings */
-  tmp = alloc_printf("%s/cmdline", out_dir);
+  tmp = alloc_printf("%s/cmdline", afl->out_dir);
   fd = open(tmp, O_WRONLY | O_CREAT | O_EXCL, 0600);
   if (fd < 0) PFATAL("Unable to create '%s'", tmp);
   ck_free(tmp);
@@ -1428,24 +1428,24 @@ void setup_cmdline_file(char** argv) {
 
 /* Setup the output file for fuzzed data, if not using -f. */
 
-void setup_stdio_file(void) {
+void setup_stdio_file(afl_state_t *afl) {
 
   u8* fn;
-  if (file_extension) {
+  if (afl->file_extension) {
 
-    fn = alloc_printf("%s/.cur_input.%s", tmp_dir, file_extension);
+    fn = alloc_printf("%s/.cur_input.%s", afl->tmp_dir, afl->file_extension);
 
   } else {
 
-    fn = alloc_printf("%s/.cur_input", tmp_dir);
+    fn = alloc_printf("%s/.cur_input", afl->tmp_dir);
 
   }
 
   unlink(fn);                                              /* Ignore errors */
 
-  out_fd = open(fn, O_RDWR | O_CREAT | O_EXCL, 0600);
+  afl->fsrv.out_fd = open(fn, O_RDWR | O_CREAT | O_EXCL, 0600);
 
-  if (out_fd < 0) PFATAL("Unable to create '%s'", fn);
+  if (afl->fsrv.out_fd < 0) PFATAL("Unable to create '%s'", fn);
 
   ck_free(fn);
 
@@ -1527,7 +1527,7 @@ void check_crash_handling(void) {
 
 /* Check CPU governor. */
 
-void check_cpu_governor(void) {
+void check_cpu_governor(afl_state_t *afl) {
 
 #ifdef __linux__
   FILE* f;
@@ -1536,8 +1536,8 @@ void check_cpu_governor(void) {
 
   if (get_afl_env("AFL_SKIP_CPUFREQ")) return;
 
-  if (cpu_aff > 0)
-    snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpu", cpu_aff,
+  if (afl->cpu_aff > 0)
+    snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpu", afl->cpu_aff,
              "/cpufreq/scaling_governor");
   else
     snprintf(tmp, sizeof(tmp), "%s",
@@ -1545,9 +1545,9 @@ void check_cpu_governor(void) {
   f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "r");
   if (!f) {
 
-    if (cpu_aff > 0)
+    if (afl->cpu_aff > 0)
       snprintf(tmp, sizeof(tmp), "%s%d%s",
-               "/sys/devices/system/cpu/cpufreq/policy", cpu_aff,
+               "/sys/devices/system/cpu/cpufreq/policy", afl->cpu_aff,
                "/scaling_governor");
     else
       snprintf(tmp, sizeof(tmp), "%s",
@@ -1650,24 +1650,24 @@ void check_cpu_governor(void) {
 
 /* Count the number of logical CPU cores. */
 
-void get_core_count(void) {
+void get_core_count(afl_state_t *afl) {
 
 #if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) || \
     defined(__DragonFly__)
 
-  size_t s = sizeof(cpu_core_count);
+  size_t s = sizeof(afl->cpu_core_count);
 
   /* On *BSD systems, we can just use a sysctl to get the number of CPUs. */
 
 #ifdef __APPLE__
 
-  if (sysctlbyname("hw.logicalcpu", &cpu_core_count, &s, NULL, 0) < 0) return;
+  if (sysctlbyname("hw.logicalcpu", &afl->cpu_core_count, &s, NULL, 0) < 0) return;
 
 #else
 
   int s_name[2] = {CTL_HW, HW_NCPU};
 
-  if (sysctl(s_name, 2, &cpu_core_count, &s, NULL, 0) < 0) return;
+  if (sysctl(s_name, 2, &afl->cpu_core_count, &s, NULL, 0) < 0) return;
 
 #endif                                                        /* ^__APPLE__ */
 
@@ -1675,7 +1675,7 @@ void get_core_count(void) {
 
 #ifdef HAVE_AFFINITY
 
-  cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN);
+  afl->cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN);
 
 #else
 
@@ -1685,7 +1685,7 @@ void get_core_count(void) {
   if (!f) return;
 
   while (fgets(tmp, sizeof(tmp), f))
-    if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) ++cpu_core_count;
+    if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) ++afl->cpu_core_count;
 
   fclose(f);
 
@@ -1693,7 +1693,7 @@ void get_core_count(void) {
 
 #endif                        /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */
 
-  if (cpu_core_count > 0) {
+  if (afl->cpu_core_count > 0) {
 
     u32 cur_runnable = 0;
 
@@ -1709,16 +1709,16 @@ void get_core_count(void) {
 #endif                           /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */
 
     OKF("You have %d CPU core%s and %u runnable tasks (utilization: %0.0f%%).",
-        cpu_core_count, cpu_core_count > 1 ? "s" : "", cur_runnable,
-        cur_runnable * 100.0 / cpu_core_count);
+        afl->cpu_core_count, afl->cpu_core_count > 1 ? "s" : "", cur_runnable,
+        cur_runnable * 100.0 / afl->cpu_core_count);
 
-    if (cpu_core_count > 1) {
+    if (afl->cpu_core_count > 1) {
 
-      if (cur_runnable > cpu_core_count * 1.5) {
+      if (cur_runnable > afl->cpu_core_count * 1.5) {
 
         WARNF("System under apparent load, performance may be spotty.");
 
-      } else if (cur_runnable + 1 <= cpu_core_count) {
+      } else if (cur_runnable + 1 <= afl->cpu_core_count) {
 
         OKF("Try parallel jobs - see %s/parallel_fuzzing.md.", doc_path);
 
@@ -1728,24 +1728,24 @@ void get_core_count(void) {
 
   } else {
 
-    cpu_core_count = 0;
+    afl->cpu_core_count = 0;
     WARNF("Unable to figure out the number of CPU cores.");
 
   }
 
 }
 
-/* Validate and fix up out_dir and sync_dir when using -S. */
+/* Validate and fix up afl->out_dir and sync_dir when using -S. */
 
-void fix_up_sync(void) {
+void fix_up_sync(afl_state_t *afl) {
 
-  u8* x = sync_id;
+  u8* x = afl->sync_id;
 
-  if (dumb_mode) FATAL("-S / -M and -n are mutually exclusive");
+  if (afl->dumb_mode) FATAL("-S / -M and -n are mutually exclusive");
 
-  if (skip_deterministic) {
+  if (afl->skip_deterministic) {
 
-    if (force_deterministic) FATAL("use -S instead of -M -d");
+    if (afl->force_deterministic) FATAL("use -S instead of -M -d");
     // else
     //  FATAL("-S already implies -d");
 
@@ -1760,17 +1760,17 @@ void fix_up_sync(void) {
 
   }
 
-  if (strlen(sync_id) > 32) FATAL("Fuzzer ID too long");
+  if (strlen(afl->sync_id) > 32) FATAL("Fuzzer ID too long");
 
-  x = alloc_printf("%s/%s", out_dir, sync_id);
+  x = alloc_printf("%s/%s", afl->out_dir, afl->sync_id);
 
-  sync_dir = out_dir;
-  out_dir = x;
+  afl->sync_dir = afl->out_dir;
+  afl->out_dir = x;
 
-  if (!force_deterministic) {
+  if (!afl->force_deterministic) {
 
-    skip_deterministic = 1;
-    use_splicing = 1;
+    afl->skip_deterministic = 1;
+    afl->use_splicing = 1;
 
   }
 
@@ -1780,7 +1780,7 @@ void fix_up_sync(void) {
 
 static void handle_resize(int sig) {
 
-  clear_screen = 1;
+  LIST_FOREACH(&afl_states, afl_state_t, { el->clear_screen = 1; });
 
 }
 
@@ -1819,12 +1819,16 @@ void check_asan_opts(void) {
 
 static void handle_stop_sig(int sig) {
 
-  stop_soon = 1;
+  LIST_FOREACH(&afl_states, afl_state_t, {
 
-  if (child_pid > 0) kill(child_pid, SIGKILL);
-  if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL);
-  if (cmplog_child_pid > 0) kill(cmplog_child_pid, SIGKILL);
-  if (cmplog_forksrv_pid > 0) kill(cmplog_forksrv_pid, SIGKILL);
+    el->stop_soon = 1;
+
+    if (el->fsrv.child_pid > 0) kill(el->fsrv.child_pid, SIGKILL);
+    if (el->fsrv.fsrv_pid > 0) kill(el->fsrv.fsrv_pid, SIGKILL);
+    if (el->cmplog_child_pid > 0) kill(el->cmplog_child_pid, SIGKILL);
+    if (el->cmplog_fsrv_pid > 0) kill(el->cmplog_fsrv_pid, SIGKILL);
+
+  });
 
 }
 
@@ -1832,7 +1836,7 @@ static void handle_stop_sig(int sig) {
 
 static void handle_skipreq(int sig) {
 
-  skip_requested = 1;
+  LIST_FOREACH(&afl_states, afl_state_t, { el->skip_requested = 1; });
 
 }
 
@@ -1840,7 +1844,7 @@ static void handle_skipreq(int sig) {
    isn't a shell script - a common and painful mistake. We also check for
    a valid ELF header and for evidence of AFL instrumentation. */
 
-void check_binary(u8* fname) {
+void check_binary(afl_state_t *afl, u8* fname) {
 
   u8*         env_path = 0;
   struct stat st;
@@ -1853,8 +1857,8 @@ void check_binary(u8* fname) {
 
   if (strchr(fname, '/') || !(env_path = getenv("PATH"))) {
 
-    target_path = ck_strdup(fname);
-    if (stat(target_path, &st) || !S_ISREG(st.st_mode) ||
+    afl->fsrv.target_path = ck_strdup(fname);
+    if (stat(afl->fsrv.target_path, &st) || !S_ISREG(st.st_mode) ||
         !(st.st_mode & 0111) || (f_len = st.st_size) < 4)
       FATAL("Program '%s' not found or not executable", fname);
 
@@ -1877,40 +1881,40 @@ void check_binary(u8* fname) {
       env_path = delim;
 
       if (cur_elem[0])
-        target_path = alloc_printf("%s/%s", cur_elem, fname);
+        afl->fsrv.target_path = alloc_printf("%s/%s", cur_elem, fname);
       else
-        target_path = ck_strdup(fname);
+        afl->fsrv.target_path = ck_strdup(fname);
 
       ck_free(cur_elem);
 
-      if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
+      if (!stat(afl->fsrv.target_path, &st) && S_ISREG(st.st_mode) &&
           (st.st_mode & 0111) && (f_len = st.st_size) >= 4)
         break;
 
-      ck_free(target_path);
-      target_path = 0;
+      ck_free(afl->fsrv.target_path);
+      afl->fsrv.target_path = 0;
 
     }
 
-    if (!target_path) FATAL("Program '%s' not found or not executable", fname);
+    if (!afl->fsrv.target_path) FATAL("Program '%s' not found or not executable", fname);
 
   }
 
-  if (get_afl_env("AFL_SKIP_BIN_CHECK") || use_wine) return;
+  if (get_afl_env("AFL_SKIP_BIN_CHECK") || afl->use_wine) return;
 
   /* Check for blatant user errors. */
 
-  if ((!strncmp(target_path, "/tmp/", 5) && !strchr(target_path + 5, '/')) ||
-      (!strncmp(target_path, "/var/tmp/", 9) && !strchr(target_path + 9, '/')))
+  if ((!strncmp(afl->fsrv.target_path, "/tmp/", 5) && !strchr(afl->fsrv.target_path + 5, '/')) ||
+      (!strncmp(afl->fsrv.target_path, "/var/tmp/", 9) && !strchr(afl->fsrv.target_path + 9, '/')))
     FATAL("Please don't keep binaries in /tmp or /var/tmp");
 
-  fd = open(target_path, O_RDONLY);
+  fd = open(afl->fsrv.target_path, O_RDONLY);
 
-  if (fd < 0) PFATAL("Unable to open '%s'", target_path);
+  if (fd < 0) PFATAL("Unable to open '%s'", afl->fsrv.target_path);
 
   f_data = mmap(0, f_len, PROT_READ, MAP_PRIVATE, fd, 0);
 
-  if (f_data == MAP_FAILED) PFATAL("Unable to mmap file '%s'", target_path);
+  if (f_data == MAP_FAILED) PFATAL("Unable to mmap file '%s'", afl->fsrv.target_path);
 
   close(fd);
 
@@ -1932,14 +1936,14 @@ void check_binary(u8* fname) {
          "the wrapper\n"
          "    in a compiled language instead.\n");
 
-    FATAL("Program '%s' is a shell script", target_path);
+    FATAL("Program '%s' is a shell script", afl->fsrv.target_path);
 
   }
 
 #ifndef __APPLE__
 
   if (f_data[0] != 0x7f || memcmp(f_data + 1, "ELF", 3))
-    FATAL("Program '%s' is not an ELF binary", target_path);
+    FATAL("Program '%s' is not an ELF binary", afl->fsrv.target_path);
 
 #else
 
@@ -1947,12 +1951,12 @@ void check_binary(u8* fname) {
   if ((f_data[0] != 0xCF || f_data[1] != 0xFA || f_data[2] != 0xED) &&
       (f_data[0] != 0xCA || f_data[1] != 0xFE || f_data[2] != 0xBA))
     FATAL("Program '%s' is not a 64-bit or universal Mach-O binary",
-          target_path);
+          afl->fsrv.target_path);
 #endif
 
 #endif                                                       /* ^!__APPLE__ */
 
-  if (!qemu_mode && !unicorn_mode && !dumb_mode &&
+  if (!afl->qemu_mode && !afl->unicorn_mode && !afl->dumb_mode &&
       !memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) {
 
     SAYF("\n" cLRD "[-] " cRST
@@ -1979,7 +1983,7 @@ void check_binary(u8* fname) {
 
   }
 
-  if ((qemu_mode) &&
+  if ((afl->qemu_mode) &&
       memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) {
 
     SAYF("\n" cLRD "[-] " cRST
@@ -1995,7 +1999,7 @@ void check_binary(u8* fname) {
 
   if (memmem(f_data, f_len, "libasan.so", 10) ||
       memmem(f_data, f_len, "__msan_init", 11))
-    uses_asan = 1;
+    afl->fsrv.uses_asan = 1;
 
   /* Detect persistent & deferred init signatures in the binary. */
 
@@ -2003,7 +2007,7 @@ void check_binary(u8* fname) {
 
     OKF(cPIN "Persistent mode binary detected.");
     setenv(PERSIST_ENV_VAR, "1", 1);
-    persistent_mode = 1;
+    afl->persistent_mode = 1;
 
   } else if (getenv("AFL_PERSISTENT")) {
 
@@ -2015,7 +2019,7 @@ void check_binary(u8* fname) {
 
     OKF(cPIN "Deferred forkserver binary detected.");
     setenv(DEFER_ENV_VAR, "1", 1);
-    deferred_mode = 1;
+    afl->deferred_mode = 1;
 
   } else if (getenv("AFL_DEFER_FORKSRV")) {
 
@@ -2029,31 +2033,31 @@ void check_binary(u8* fname) {
 
 /* Trim and possibly create a banner for the run. */
 
-void fix_up_banner(u8* name) {
+void fix_up_banner(afl_state_t *afl, u8* name) {
 
-  if (!use_banner) {
+  if (!afl->use_banner) {
 
-    if (sync_id) {
+    if (afl->sync_id) {
 
-      use_banner = sync_id;
+      afl->use_banner = afl->sync_id;
 
     } else {
 
       u8* trim = strrchr(name, '/');
       if (!trim)
-        use_banner = name;
+        afl->use_banner = name;
       else
-        use_banner = trim + 1;
+        afl->use_banner = trim + 1;
 
     }
 
   }
 
-  if (strlen(use_banner) > 32) {
+  if (strlen(afl->use_banner) > 32) {
 
     u8* tmp = ck_alloc(36);
-    sprintf(tmp, "%.32s...", use_banner);
-    use_banner = tmp;
+    sprintf(tmp, "%.32s...", afl->use_banner);
+    afl->use_banner = tmp;
 
   }
 
@@ -2061,14 +2065,14 @@ void fix_up_banner(u8* name) {
 
 /* Check if we're on TTY. */
 
-void check_if_tty(void) {
+void check_if_tty(afl_state_t *afl) {
 
   struct winsize ws;
 
   if (get_afl_env("AFL_NO_UI")) {
 
     OKF("Disabling the UI because AFL_NO_UI is set.");
-    not_on_tty = 1;
+    afl->not_on_tty = 1;
     return;
 
   }
@@ -2079,7 +2083,7 @@ void check_if_tty(void) {
 
       OKF("Looks like we're not running on a tty, so I'll be a bit less "
           "verbose.");
-      not_on_tty = 1;
+      afl->not_on_tty = 1;
 
     }
 
@@ -2135,7 +2139,7 @@ void setup_signal_handlers(void) {
 
 /* Make a copy of the current command line. */
 
-void save_cmdline(u32 argc, char** argv) {
+void save_cmdline(afl_state_t *afl, u32 argc, char **argv) {
 
   u32 len = 1, i;
   u8* buf;
@@ -2143,7 +2147,7 @@ void save_cmdline(u32 argc, char** argv) {
   for (i = 0; i < argc; ++i)
     len += strlen(argv[i]) + 1;
 
-  buf = orig_cmdline = ck_alloc(len);
+  buf = afl->orig_cmdline = ck_alloc(len);
 
   for (i = 0; i < argc; ++i) {
 
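
The signal handlers above no longer flip a single global; they walk the
afl_states list and update every registered instance. A hedged sketch of that
pattern using the LIST_FOREACH macro from list.h (handle_custom_sig() is a
made-up example handler, not part of this patch):

    #include "afl-fuzz.h"

    /* Fan a signal out to every registered afl_state_t. LIST_FOREACH binds
       the identifier 'el' to the current list element inside the block. */

    static void handle_custom_sig(int sig) {

      (void)sig;

      LIST_FOREACH(&afl_states, afl_state_t, {

        el->skip_requested = 1;     /* same effect as handle_skipreq() above */

      });

    }
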
diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c
index b31e678b..b41d4d2b 100644
--- a/src/afl-fuzz-mutators.c
+++ b/src/afl-fuzz-mutators.c
@@ -25,25 +25,24 @@
 
 #include "afl-fuzz.h"
 
-void load_custom_mutator(const char*);
+void load_custom_mutator(afl_state_t*, const char*);
 #ifdef USE_PYTHON
-void load_custom_mutator_py(const char*);
+void load_custom_mutator_py(afl_state_t*, const char*);
 #endif
 
-void setup_custom_mutator(void) {
+void setup_custom_mutator(afl_state_t *afl) {
 
   /* Try mutator library first */
   u8* fn = getenv("AFL_CUSTOM_MUTATOR_LIBRARY");
 
   if (fn) {
-
-    if (limit_time_sig)
+    if (afl->limit_time_sig)
       FATAL(
           "MOpt and custom mutator are mutually exclusive. We accept pull "
           "requests that integrates MOpt with the optional mutators "
           "(custom/radamsa/redquenn/...).");
 
-    load_custom_mutator(fn);
+    load_custom_mutator(afl, fn);
 
     return;
 
@@ -55,16 +54,16 @@ void setup_custom_mutator(void) {
 
   if (module_name) {
 
-    if (limit_time_sig)
+    if (afl->limit_time_sig)
       FATAL(
           "MOpt and Python mutator are mutually exclusive. We accept pull "
           "requests that integrates MOpt with the optional mutators "
           "(custom/radamsa/redquenn/...).");
 
-    if (init_py_module(module_name))
+    if (init_py_module(afl, module_name))
       FATAL("Failed to initialize Python module");
 
-    load_custom_mutator_py(module_name);
+    load_custom_mutator_py(afl, module_name);
 
   }
 
@@ -75,82 +74,81 @@ void setup_custom_mutator(void) {
 
 }
 
-void destroy_custom_mutator(void) {
+void destroy_custom_mutator(afl_state_t *afl) {
 
-  if (mutator) {
+  if (afl->mutator) {
 
-    if (mutator->dh)
-      dlclose(mutator->dh);
+    if (afl->mutator->dh)
+      dlclose(afl->mutator->dh);
     else {
 
       /* Python mutator */
 #ifdef USE_PYTHON
-      finalize_py_module();
+      finalize_py_module(afl);
 #endif
 
     }
 
-    ck_free(mutator);
-
+    ck_free(afl->mutator);
   }
 
 }
 
-void load_custom_mutator(const char* fn) {
+void load_custom_mutator(afl_state_t *afl, const char *fn) {
 
   void* dh;
-  mutator = ck_alloc(sizeof(struct custom_mutator));
+  afl->mutator = ck_alloc(sizeof(struct custom_mutator));
 
-  mutator->name = fn;
+  afl->mutator->name = fn;
   ACTF("Loading custom mutator library from '%s'...", fn);
 
   dh = dlopen(fn, RTLD_NOW);
   if (!dh) FATAL("%s", dlerror());
-  mutator->dh = dh;
+  afl->mutator->dh = dh;
 
   /* Mutator */
   /* "afl_custom_init", optional for backward compatibility */
-  mutator->afl_custom_init = dlsym(dh, "afl_custom_init");
-  if (!mutator->afl_custom_init) WARNF("Symbol 'afl_custom_init' not found.");
+  afl->mutator->afl_custom_init = dlsym(dh, "afl_custom_init");
+  if (!afl->mutator->afl_custom_init) WARNF("Symbol 'afl_custom_init' not found.");
 
   /* "afl_custom_fuzz" or "afl_custom_mutator", required */
-  mutator->afl_custom_fuzz = dlsym(dh, "afl_custom_fuzz");
-  if (!mutator->afl_custom_fuzz) {
+  afl->mutator->afl_custom_fuzz = dlsym(dh, "afl_custom_fuzz");
+  if (!afl->mutator->afl_custom_fuzz) {
 
     /* Try "afl_custom_mutator" for backward compatibility */
     WARNF("Symbol 'afl_custom_fuzz' not found. Try 'afl_custom_mutator'.");
 
-    mutator->afl_custom_fuzz = dlsym(dh, "afl_custom_mutator");
-    if (!mutator->afl_custom_fuzz)
+    afl->mutator->afl_custom_fuzz = dlsym(dh, "afl_custom_mutator");
+    if (!afl->mutator->afl_custom_fuzz)
       FATAL("Symbol 'afl_custom_mutator' not found.");
 
   }
 
   /* "afl_custom_pre_save", optional */
-  mutator->afl_custom_pre_save = dlsym(dh, "afl_custom_pre_save");
-  if (!mutator->afl_custom_pre_save)
+  afl->mutator->afl_custom_pre_save = dlsym(dh, "afl_custom_pre_save");
+  if (!afl->mutator->afl_custom_pre_save)
     WARNF("Symbol 'afl_custom_pre_save' not found.");
 
   u8 notrim = 0;
   /* "afl_custom_init_trim", optional */
-  mutator->afl_custom_init_trim = dlsym(dh, "afl_custom_init_trim");
-  if (!mutator->afl_custom_init_trim)
+  afl->mutator->afl_custom_init_trim = dlsym(dh, "afl_custom_init_trim");
+  if (!afl->mutator->afl_custom_init_trim)
     WARNF("Symbol 'afl_custom_init_trim' not found.");
 
   /* "afl_custom_trim", optional */
-  mutator->afl_custom_trim = dlsym(dh, "afl_custom_trim");
-  if (!mutator->afl_custom_trim) WARNF("Symbol 'afl_custom_trim' not found.");
+  afl->mutator->afl_custom_trim = dlsym(dh, "afl_custom_trim");
+  if (!afl->mutator->afl_custom_trim) WARNF("Symbol 'afl_custom_trim' not found.");
 
   /* "afl_custom_post_trim", optional */
-  mutator->afl_custom_post_trim = dlsym(dh, "afl_custom_post_trim");
-  if (!mutator->afl_custom_post_trim)
+  afl->mutator->afl_custom_post_trim = dlsym(dh, "afl_custom_post_trim");
+  if (!afl->mutator->afl_custom_post_trim)
     WARNF("Symbol 'afl_custom_post_trim' not found.");
 
   if (notrim) {
 
-    mutator->afl_custom_init_trim = NULL;
-    mutator->afl_custom_trim = NULL;
-    mutator->afl_custom_post_trim = NULL;
+    afl->mutator->afl_custom_init_trim = NULL;
+    afl->mutator->afl_custom_trim = NULL;
+    afl->mutator->afl_custom_post_trim = NULL;
     WARNF(
         "Custom mutator does not implement all three trim APIs, standard "
         "trimming will be used.");
@@ -158,34 +156,35 @@ void load_custom_mutator(const char* fn) {
   }
 
   /* "afl_custom_havoc_mutation", optional */
-  mutator->afl_custom_havoc_mutation = dlsym(dh, "afl_custom_havoc_mutation");
-  if (!mutator->afl_custom_havoc_mutation)
+  afl->mutator->afl_custom_havoc_mutation = dlsym(dh, "afl_custom_havoc_mutation");
+  if (!afl->mutator->afl_custom_havoc_mutation)
     WARNF("Symbol 'afl_custom_havoc_mutation' not found.");
 
   /* "afl_custom_havoc_mutation", optional */
-  mutator->afl_custom_havoc_mutation_probability =
+  afl->mutator->afl_custom_havoc_mutation_probability = 
       dlsym(dh, "afl_custom_havoc_mutation_probability");
-  if (!mutator->afl_custom_havoc_mutation_probability)
+  if (!afl->mutator->afl_custom_havoc_mutation_probability)
     WARNF("Symbol 'afl_custom_havoc_mutation_probability' not found.");
 
   /* "afl_custom_queue_get", optional */
-  mutator->afl_custom_queue_get = dlsym(dh, "afl_custom_queue_get");
-  if (!mutator->afl_custom_queue_get)
+  afl->mutator->afl_custom_queue_get = dlsym(dh, "afl_custom_queue_get");
+  if (!afl->mutator->afl_custom_queue_get)
     WARNF("Symbol 'afl_custom_queue_get' not found.");
 
   /* "afl_custom_queue_new_entry", optional */
-  mutator->afl_custom_queue_new_entry = dlsym(dh, "afl_custom_queue_new_entry");
-  if (!mutator->afl_custom_queue_new_entry)
+  afl->mutator->afl_custom_queue_new_entry = dlsym(dh, "afl_custom_queue_new_entry");
+  if (!afl->mutator->afl_custom_queue_new_entry)
     WARNF("Symbol 'afl_custom_queue_new_entry' not found");
 
   OKF("Custom mutator '%s' installed successfully.", fn);
 
   /* Initialize the custom mutator */
-  if (mutator->afl_custom_init) mutator->afl_custom_init(UR(0xFFFFFFFF));
+  if (afl->mutator->afl_custom_init)
+    afl->mutator->afl_custom_init(afl, UR(afl, 0xFFFFFFFF));
 
 }
 
-u8 trim_case_custom(char** argv, struct queue_entry* q, u8* in_buf) {
+u8 trim_case_custom(afl_state_t *afl, struct queue_entry* q, u8* in_buf) {
 
   static u8 tmp[64];
   static u8 clean_trace[MAP_SIZE];
@@ -194,18 +193,18 @@ u8 trim_case_custom(char** argv, struct queue_entry* q, u8* in_buf) {
   u32 trim_exec = 0;
   u32 orig_len = q->len;
 
-  stage_name = tmp;
-  bytes_trim_in += q->len;
+  afl->stage_name = tmp;
+  afl->bytes_trim_in += q->len;
 
   /* Initialize trimming in the custom mutator */
-  stage_cur = 0;
-  stage_max = mutator->afl_custom_init_trim(in_buf, q->len);
+  afl->stage_cur = 0;
+  afl->stage_max = afl->mutator->afl_custom_init_trim(afl, in_buf, q->len);
 
-  if (not_on_tty && debug)
-    SAYF("[Custom Trimming] START: Max %d iterations, %u bytes", stage_max,
+  if (afl->not_on_tty && afl->debug)
+    SAYF("[Custom Trimming] START: Max %d iterations, %u bytes", afl->stage_max,
          q->len);
 
-  while (stage_cur < stage_max) {
+  while (afl->stage_cur < afl->stage_max) {
 
     sprintf(tmp, "ptrim %s", DI(trim_exec));
 
@@ -214,26 +213,26 @@ u8 trim_case_custom(char** argv, struct queue_entry* q, u8* in_buf) {
     u8*    retbuf = NULL;
     size_t retlen = 0;
 
-    mutator->afl_custom_trim(&retbuf, &retlen);
+    afl->mutator->afl_custom_trim(afl, &retbuf, &retlen);
 
     if (retlen > orig_len)
       FATAL(
           "Trimmed data returned by custom mutator is larger than original "
           "data");
 
-    write_to_testcase(retbuf, retlen);
+    write_to_testcase(afl, retbuf, retlen);
 
-    fault = run_target(argv, exec_tmout);
-    ++trim_execs;
+    fault = run_target(afl, afl->fsrv.exec_tmout);
+    ++afl->trim_execs;
 
-    if (stop_soon || fault == FAULT_ERROR) {
+    if (afl->stop_soon || fault == FAULT_ERROR) {
 
       free(retbuf);
       goto abort_trimming;
 
     }
 
-    cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+    cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
 
     if (cksum == q->exec_cksum) {
 
@@ -246,24 +245,24 @@ u8 trim_case_custom(char** argv, struct queue_entry* q, u8* in_buf) {
       if (!needs_write) {
 
         needs_write = 1;
-        memcpy(clean_trace, trace_bits, MAP_SIZE);
+        memcpy(clean_trace, afl->fsrv.trace_bits, MAP_SIZE);
 
       }
 
       /* Tell the custom mutator that the trimming was successful */
-      stage_cur = mutator->afl_custom_post_trim(1);
+      afl->stage_cur = afl->mutator->afl_custom_post_trim(afl, 1);
 
-      if (not_on_tty && debug)
+      if (afl->not_on_tty && afl->debug)
         SAYF("[Custom Trimming] SUCCESS: %d/%d iterations (now at %u bytes)",
-             stage_cur, stage_max, q->len);
+             afl->stage_cur, afl->stage_max, q->len);
 
     } else {
 
       /* Tell the custom mutator that the trimming was unsuccessful */
-      stage_cur = mutator->afl_custom_post_trim(0);
-      if (not_on_tty && debug)
-        SAYF("[Custom Trimming] FAILURE: %d/%d iterations", stage_cur,
-             stage_max);
+      afl->stage_cur = afl->mutator->afl_custom_post_trim(afl, 0);
+      if (afl->not_on_tty && afl->debug)
+        SAYF("[Custom Trimming] FAILURE: %d/%d iterations", afl->stage_cur,
+             afl->stage_max);
 
     }
 
@@ -271,11 +270,11 @@ u8 trim_case_custom(char** argv, struct queue_entry* q, u8* in_buf) {
 
     /* Since this can be slow, update the screen every now and then. */
 
-    if (!(trim_exec++ % stats_update_freq)) show_stats();
+    if (!(trim_exec++ % afl->stats_update_freq)) show_stats(afl);
 
   }
 
-  if (not_on_tty && debug)
+  if (afl->not_on_tty && afl->debug)
     SAYF("[Custom Trimming] DONE: %u bytes -> %u bytes", orig_len, q->len);
 
   /* If we have made changes to in_buf, we also need to update the on-disk
@@ -294,60 +293,65 @@ u8 trim_case_custom(char** argv, struct queue_entry* q, u8* in_buf) {
     ck_write(fd, in_buf, q->len, q->fname);
     close(fd);
 
-    memcpy(trace_bits, clean_trace, MAP_SIZE);
-    update_bitmap_score(q);
+    memcpy(afl->fsrv.trace_bits, clean_trace, MAP_SIZE);
+    update_bitmap_score(afl, q);
 
   }
 
 abort_trimming:
 
-  bytes_trim_out += q->len;
+  afl->bytes_trim_out += q->len;
   return fault;
 
 }
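
trim_case_custom() above drives three callbacks in the loaded library:
afl_custom_init_trim(afl, buf, len) returns the number of trim attempts (fed
into afl->stage_max), afl_custom_trim(afl, &retbuf, &retlen) produces the next
candidate, and afl_custom_post_trim(afl, success) learns whether the candidate
kept the execution checksum and returns the new value for afl->stage_cur. A
minimal mutator-side sketch under those assumed prototypes; the names cur_buf,
cur_len and step and the shrink-by-one strategy are illustrative only, and the
buffer-ownership convention (the fuzzer may free() the candidate on abort) is
an assumption drawn from the loop above:

  #include <stdlib.h>
  #include <string.h>
  #include "afl-fuzz.h"

  static u8    *cur_buf;                   /* current best candidate         */
  static size_t cur_len;
  static u32    step;

  int afl_custom_init_trim(afl_state_t *afl, u8 *buf, size_t len) {

    (void)afl;
    cur_buf = buf;
    cur_len = len;
    step = 0;
    return 16;                             /* number of trim attempts        */

  }

  void afl_custom_trim(afl_state_t *afl, u8 **out_buf, size_t *out_len) {

    (void)afl;

    /* Propose the current candidate minus its last byte. A heap copy is
       handed out because the caller may free() it; for brevity, successful
       candidates are not reclaimed in this sketch. */
    size_t l = cur_len > 1 ? cur_len - 1 : cur_len;
    u8 *copy = malloc(l);

    memcpy(copy, cur_buf, l);
    *out_buf = copy;
    *out_len = l;

  }

  int afl_custom_post_trim(afl_state_t *afl, int success) {

    (void)afl;
    if (success && cur_len > 1) --cur_len; /* keep the shorter candidate     */
    return (int)++step;                    /* becomes afl->stage_cur         */

  }
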
 
 #ifdef USE_PYTHON
-void load_custom_mutator_py(const char* module_name) {
+void load_custom_mutator_py(afl_state_t *afl, const char* module_name) {
+
+  PyObject **py_functions = afl->py_functions;
 
-  mutator = ck_alloc(sizeof(struct custom_mutator));
+  afl->mutator = ck_alloc(sizeof(struct custom_mutator));
 
-  mutator->name = module_name;
+  afl->mutator->name = module_name;
   ACTF("Loading Python mutator library from '%s'...", module_name);
 
-  if (py_functions[PY_FUNC_INIT]) mutator->afl_custom_init = init_py;
+  if (py_functions[PY_FUNC_INIT])
+    afl->mutator->afl_custom_init = init_py;
 
   /* "afl_custom_fuzz" should not be NULL, but the interface of Python mutator
      is quite different from the custom mutator. */
-  mutator->afl_custom_fuzz = fuzz_py;
+  afl->mutator->afl_custom_fuzz = fuzz_py;
 
   if (py_functions[PY_FUNC_PRE_SAVE])
-    mutator->afl_custom_pre_save = pre_save_py;
+    afl->mutator->afl_custom_pre_save = pre_save_py;
 
   if (py_functions[PY_FUNC_INIT_TRIM])
-    mutator->afl_custom_init_trim = init_trim_py;
+    afl->mutator->afl_custom_init_trim = init_trim_py;
 
   if (py_functions[PY_FUNC_POST_TRIM])
-    mutator->afl_custom_post_trim = post_trim_py;
-
-  if (py_functions[PY_FUNC_TRIM]) mutator->afl_custom_trim = trim_py;
+    afl->mutator->afl_custom_post_trim = post_trim_py;
 
+  if (py_functions[PY_FUNC_TRIM])
+    afl->mutator->afl_custom_trim = trim_py;
+  
   if (py_functions[PY_FUNC_HAVOC_MUTATION])
-    mutator->afl_custom_havoc_mutation = havoc_mutation_py;
-
+    afl->mutator->afl_custom_havoc_mutation = havoc_mutation_py;
+  
   if (py_functions[PY_FUNC_HAVOC_MUTATION_PROBABILITY])
-    mutator->afl_custom_havoc_mutation_probability =
-        havoc_mutation_probability_py;
+    afl->mutator->afl_custom_havoc_mutation_probability = 
+      havoc_mutation_probability_py;
 
   if (py_functions[PY_FUNC_QUEUE_GET])
-    mutator->afl_custom_queue_get = queue_get_py;
+    afl->mutator->afl_custom_queue_get = queue_get_py;
 
   if (py_functions[PY_FUNC_QUEUE_NEW_ENTRY])
-    mutator->afl_custom_queue_new_entry = queue_new_entry_py;
+    afl->mutator->afl_custom_queue_new_entry = queue_new_entry_py;
 
   OKF("Python mutator '%s' installed successfully.", module_name);
 
   /* Initialize the custom mutator */
-  if (mutator->afl_custom_init) mutator->afl_custom_init(UR(0xFFFFFFFF));
+  if (afl->mutator->afl_custom_init)
+    afl->mutator->afl_custom_init(afl, UR(afl, 0xFFFFFFFF));
 
 }
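
Together with the dlsym() lookups in load_custom_mutator() above, the call
sites in this patch imply the post-refactor C interface of an external
mutator: afl_custom_init(afl, seed) and afl_custom_fuzz(afl, &buf, buf_size,
add_buf, add_buf_size, max_size), plus the optional pre_save/trim/queue hooks.
A minimal library sketch under those assumed prototypes (including afl-fuzz.h
from an out-of-tree build, and the exact argument types, are assumptions):

  #include "afl-fuzz.h"

  static u32 rng_state;

  /* Seeded by load_custom_mutator() via UR(afl, 0xFFFFFFFF). */
  void afl_custom_init(afl_state_t *afl, unsigned int seed) {

    (void)afl;
    rng_state = seed;

  }

  /* Mutate *buf (buf_size bytes) in place; add_buf is another queue entry
     offered for splicing; the returned length must not exceed max_size. */
  size_t afl_custom_fuzz(afl_state_t *afl, u8 **buf, size_t buf_size,
                         u8 *add_buf, size_t add_buf_size, size_t max_size) {

    (void)afl; (void)add_buf; (void)add_buf_size;

    if (!buf_size) return 0;

    rng_state = rng_state * 1103515245 + 12345;       /* toy LCG            */
    (*buf)[rng_state % buf_size] ^= 0xFF;             /* flip a random byte */

    return buf_size <= max_size ? buf_size : max_size;

  }

Built as a shared object and passed via AFL_CUSTOM_MUTATOR_LIBRARY, the loader
above resolves these symbols with dlsym() and only warns about optional hooks
(pre_save, the trim trio, queue_get, queue_new_entry) that are missing.
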
 
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index c6dbb858..19cdddb9 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -27,21 +27,21 @@
 
 /* MOpt */
 
-int select_algorithm(void) {
+int select_algorithm(afl_state_t *afl) {
 
   int i_puppet, j_puppet;
 
-  double sele = ((double)(UR(10000)) * 0.0001);
+  double sele = ((double)(UR(afl, 10000)) * 0.0001);
   j_puppet = 0;
   for (i_puppet = 0; i_puppet < operator_num; ++i_puppet) {
 
     if (unlikely(i_puppet == 0)) {
 
-      if (sele < probability_now[swarm_now][i_puppet]) break;
+      if (sele < afl->probability_now[afl->swarm_now][i_puppet]) break;
 
     } else {
 
-      if (sele < probability_now[swarm_now][i_puppet]) {
+      if (sele < afl->probability_now[afl->swarm_now][i_puppet]) {
 
         j_puppet = 1;
         break;
@@ -52,7 +52,7 @@ int select_algorithm(void) {
 
   }
 
-  if (j_puppet == 1 && sele < probability_now[swarm_now][i_puppet - 1])
+  if (j_puppet == 1 && sele < afl->probability_now[afl->swarm_now][i_puppet - 1])
     FATAL("error select_algorithm");
   return i_puppet;
 
@@ -61,14 +61,14 @@ int select_algorithm(void) {
 /* Helper to choose random block len for block operations in fuzz_one().
    Doesn't return zero, provided that max_len is > 0. */
 
-static u32 choose_block_len(u32 limit) {
+static u32 choose_block_len(afl_state_t *afl, u32 limit) {
 
   u32 min_value, max_value;
-  u32 rlim = MIN(queue_cycle, 3);
+  u32 rlim = MIN(afl->queue_cycle, 3);
 
-  if (!run_over10m) rlim = 1;
+  if (!afl->run_over10m) rlim = 1;
 
-  switch (UR(rlim)) {
+  switch (UR(afl, rlim)) {
 
     case 0:
       min_value = 1;
@@ -82,7 +82,7 @@ static u32 choose_block_len(u32 limit) {
 
     default:
 
-      if (UR(10)) {
+      if (UR(afl, 10)) {
 
         min_value = HAVOC_BLK_MEDIUM;
         max_value = HAVOC_BLK_LARGE;
@@ -98,7 +98,7 @@ static u32 choose_block_len(u32 limit) {
 
   if (min_value >= limit) min_value = 1;
 
-  return min_value + UR(MIN(max_value, limit) - min_value + 1);
+  return min_value + UR(afl, MIN(max_value, limit) - min_value + 1);
 
 }
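
select_algorithm() above treats afl->probability_now[afl->swarm_now][] as a
cumulative distribution over the operator_num mutation operators; stripped of
the j_puppet consistency check, the selection reduces to the sketch below
(pick_operator is a made-up name, and the non-decreasing-thresholds property
is an assumption about MOpt's swarm bookkeeping):

  /* Sketch: draw sele uniformly in [0, 1) and return the first operator
     whose cumulative probability exceeds it. */
  static int pick_operator(afl_state_t *afl) {

    double sele = (double)UR(afl, 10000) * 0.0001;
    int    op;

    for (op = 0; op < operator_num; ++op)
      if (sele < afl->probability_now[afl->swarm_now][op]) break;

    /* The original FATALs on inconsistent thresholds; clamp instead. */
    return op < operator_num ? op : operator_num - 1;

  }
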
 
@@ -334,7 +334,7 @@ static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {
    function is a tad too long... returns 0 if fuzzed successfully, 1 if
    skipped or bailed out. */
 
-u8 fuzz_one_original(char** argv) {
+u8 fuzz_one_original(afl_state_t *afl) {
 
   s32 len, fd, temp_len, i, j;
   u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
@@ -351,43 +351,44 @@ u8 fuzz_one_original(char** argv) {
   /* In IGNORE_FINDS mode, skip any entries that weren't in the
      initial data set. */
 
-  if (queue_cur->depth > 1) return 1;
+  if (afl->queue_cur->depth > 1) return 1;
 
 #else
 
-  if (mutator && mutator->afl_custom_queue_get) {
+  if (afl->mutator && afl->mutator->afl_custom_queue_get) {
 
     /* The custom mutator will decide to skip this test case or not. */
 
-    if (!mutator->afl_custom_queue_get(queue_cur->fname)) return 1;
+    if (!afl->mutator->afl_custom_queue_get(afl, afl->queue_cur->fname))
+      return 1;
 
   }
 
-  if (pending_favored) {
+  if (afl->pending_favored) {
 
     /* If we have any favored, non-fuzzed new arrivals in the queue,
        possibly skip to them at the expense of already-fuzzed or non-favored
        cases. */
 
-    if (((queue_cur->was_fuzzed > 0 || queue_cur->fuzz_level > 0) ||
-         !queue_cur->favored) &&
-        UR(100) < SKIP_TO_NEW_PROB)
+    if (((afl->queue_cur->was_fuzzed > 0 || afl->queue_cur->fuzz_level > 0) ||
+         !afl->queue_cur->favored) &&
+        UR(afl, 100) < SKIP_TO_NEW_PROB)
       return 1;
 
-  } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
+  } else if (!afl->dumb_mode && !afl->queue_cur->favored && afl->queued_paths > 10) {
 
     /* Otherwise, still possibly skip non-favored cases, albeit less often.
        The odds of skipping stuff are higher for already-fuzzed inputs and
        lower for never-fuzzed entries. */
 
-    if (queue_cycle > 1 &&
-        (queue_cur->fuzz_level == 0 || queue_cur->was_fuzzed)) {
+    if (afl->queue_cycle > 1 &&
+        (afl->queue_cur->fuzz_level == 0 || afl->queue_cur->was_fuzzed)) {
 
-      if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
+      if (UR(afl, 100) < SKIP_NFAV_NEW_PROB) return 1;
 
     } else {
 
-      if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
+      if (UR(afl, 100) < SKIP_NFAV_OLD_PROB) return 1;
 
     }
 
@@ -395,26 +396,26 @@ u8 fuzz_one_original(char** argv) {
 
 #endif                                                     /* ^IGNORE_FINDS */
 
-  if (not_on_tty) {
+  if (afl->not_on_tty) {
 
     ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
-         current_entry, queued_paths, unique_crashes);
+         afl->current_entry, afl->queued_paths, afl->unique_crashes);
     fflush(stdout);
 
   }
 
   /* Map the test case into memory. */
 
-  fd = open(queue_cur->fname, O_RDONLY);
+  fd = open(afl->queue_cur->fname, O_RDONLY);
 
-  if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
+  if (fd < 0) PFATAL("Unable to open '%s'", afl->queue_cur->fname);
 
-  len = queue_cur->len;
+  len = afl->queue_cur->len;
 
   orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 
   if (orig_in == MAP_FAILED)
-    PFATAL("Unable to mmap '%s' with len %d", queue_cur->fname, len);
+    PFATAL("Unable to mmap '%s' with len %d", afl->queue_cur->fname, len);
 
   close(fd);
 
@@ -424,29 +425,29 @@ u8 fuzz_one_original(char** argv) {
 
   out_buf = ck_alloc_nozero(len);
 
-  subseq_tmouts = 0;
+  afl->subseq_tmouts = 0;
 
-  cur_depth = queue_cur->depth;
+  afl->cur_depth = afl->queue_cur->depth;
 
   /*******************************************
    * CALIBRATION (only if failed earlier on) *
    *******************************************/
 
-  if (queue_cur->cal_failed) {
+  if (afl->queue_cur->cal_failed) {
 
     u8 res = FAULT_TMOUT;
 
-    if (queue_cur->cal_failed < CAL_CHANCES) {
+    if (afl->queue_cur->cal_failed < CAL_CHANCES) {
 
-      res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
+      res = calibrate_case(afl, afl->queue_cur, in_buf, afl->queue_cycle - 1, 0);
 
       if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
     }
 
-    if (stop_soon || res != crash_mode) {
+    if (afl->stop_soon || res != afl->crash_mode) {
 
-      ++cur_skipped_paths;
+      ++afl->cur_skipped_paths;
       goto abandon_entry;
 
     }
@@ -457,24 +458,24 @@ u8 fuzz_one_original(char** argv) {
    * TRIMMING *
    ************/
 
-  if (!dumb_mode && !queue_cur->trim_done && !disable_trim) {
+  if (!afl->dumb_mode && !afl->queue_cur->trim_done && !afl->disable_trim) {
 
-    u8 res = trim_case(argv, queue_cur, in_buf);
+    u8 res = trim_case(afl, afl->queue_cur, in_buf);
 
     if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
-    if (stop_soon) {
+    if (afl->stop_soon) {
 
-      ++cur_skipped_paths;
+      ++afl->cur_skipped_paths;
       goto abandon_entry;
 
     }
 
     /* Don't retry trimming, even if it failed. */
 
-    queue_cur->trim_done = 1;
+    afl->queue_cur->trim_done = 1;
 
-    len = queue_cur->len;
+    len = afl->queue_cur->len;
 
   }
 
@@ -484,15 +485,15 @@ u8 fuzz_one_original(char** argv) {
    * PERFORMANCE SCORE *
    *********************/
 
-  orig_perf = perf_score = calculate_score(queue_cur);
+  orig_perf = perf_score = calculate_score(afl, afl->queue_cur);
 
   if (perf_score == 0) goto abandon_entry;
 
-  if (use_radamsa > 1) goto radamsa_stage;
+  if (afl->use_radamsa > 1) goto radamsa_stage;
 
-  if (cmplog_mode) {
+  if (afl->shm.cmplog_mode) {
 
-    if (input_to_state_stage(argv, in_buf, out_buf, len, queue_cur->exec_cksum))
+    if (input_to_state_stage(afl, in_buf, out_buf, len, afl->queue_cur->exec_cksum))
       goto abandon_entry;
 
   }
@@ -502,12 +503,12 @@ u8 fuzz_one_original(char** argv) {
      if it has gone through deterministic testing in earlier, resumed runs
      (passed_det). */
 
-  if (skip_deterministic ||
-      ((!queue_cur->passed_det) &&
-       perf_score < (queue_cur->depth * 30 <= havoc_max_mult * 100
-                         ? queue_cur->depth * 30
-                         : havoc_max_mult * 100)) ||
-      queue_cur->passed_det) {
+  if (afl->skip_deterministic ||
+      ((!afl->queue_cur->passed_det) &&
+       perf_score < (afl->queue_cur->depth * 30 <= afl->havoc_max_mult * 100
+                         ? afl->queue_cur->depth * 30
+                         : afl->havoc_max_mult * 100)) ||
+      afl->queue_cur->passed_det) {
 
     goto custom_mutator_stage;
 
@@ -516,7 +517,7 @@ u8 fuzz_one_original(char** argv) {
   /* Skip deterministic fuzzing if exec path checksum puts this out of scope
      for this master instance. */
 
-  if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) {
+  if (afl->master_max && (afl->queue_cur->exec_cksum % afl->master_max) != afl->master_id - 1) {
 
     goto custom_mutator_stage;
 
@@ -539,25 +540,25 @@ u8 fuzz_one_original(char** argv) {
 
   /* Single walking bit. */
 
-  stage_short = "flip1";
-  stage_max = len << 3;
-  stage_name = "bitflip 1/1";
+  afl->stage_short = "flip1";
+  afl->stage_max = len << 3;
+  afl->stage_name = "bitflip 1/1";
 
-  stage_val_type = STAGE_VAL_NONE;
+  afl->stage_val_type = STAGE_VAL_NONE;
 
-  orig_hit_cnt = queued_paths + unique_crashes;
+  orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  prev_cksum = queue_cur->exec_cksum;
+  prev_cksum = afl->queue_cur->exec_cksum;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    stage_cur_byte = stage_cur >> 3;
+    afl->stage_cur_byte = afl->stage_cur >> 3;
 
-    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur);
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur);
 
     /* While flipping the least significant bit in every byte, pull off an extra
        trick to detect possible syntax tokens. In essence, the idea is that if
@@ -586,20 +587,20 @@ u8 fuzz_one_original(char** argv) {
 
       */
 
-    if (!dumb_mode && (stage_cur & 7) == 7) {
+    if (!afl->dumb_mode && (afl->stage_cur & 7) == 7) {
 
-      u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      u32 cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
 
-      if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
+      if (afl->stage_cur == afl->stage_max - 1 && cksum == prev_cksum) {
 
         /* If at end of file and we are still collecting a string, grab the
            final character and force output. */
 
-        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[afl->stage_cur >> 3];
         ++a_len;
 
         if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-          maybe_add_auto(a_collect, a_len);
+          maybe_add_auto(afl, a_collect, a_len);
 
       } else if (cksum != prev_cksum) {
 
@@ -607,7 +608,7 @@ u8 fuzz_one_original(char** argv) {
            worthwhile queued up, and collect that if the answer is yes. */
 
         if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-          maybe_add_auto(a_collect, a_len);
+          maybe_add_auto(afl, a_collect, a_len);
 
         a_len = 0;
         prev_cksum = cksum;
@@ -617,9 +618,9 @@ u8 fuzz_one_original(char** argv) {
       /* Continue collecting string, but only if the bit flip actually made
          any difference - we don't want no-op tokens. */
 
-      if (cksum != queue_cur->exec_cksum) {
+      if (cksum != afl->queue_cur->exec_cksum) {
 
-        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[afl->stage_cur >> 3];
         ++a_len;
 
       }
@@ -628,68 +629,68 @@ u8 fuzz_one_original(char** argv) {
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP1] += stage_max;
+  afl->stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP1] += afl->stage_max;
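
The FLIP_BIT() helper that the walking-bit stages above and below rely on
toggles a single bit, counting bits from the most significant bit of byte 0;
a sketch of the upstream macro:

  /* Toggle bit _b of buffer _ar. */
  #define FLIP_BIT(_ar, _b)                    \
    do {                                       \
                                               \
      u8* _arf = (u8*)(_ar);                   \
      u32 _bf = (_b);                          \
      _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7));  \
                                               \
    } while (0)
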
 
   /* Two walking bits. */
 
-  stage_name = "bitflip 2/1";
-  stage_short = "flip2";
-  stage_max = (len << 3) - 1;
+  afl->stage_name = "bitflip 2/1";
+  afl->stage_short = "flip2";
+  afl->stage_max = (len << 3) - 1;
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    stage_cur_byte = stage_cur >> 3;
+    afl->stage_cur_byte = afl->stage_cur >> 3;
 
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, afl->stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur + 1);
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, afl->stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur + 1);
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP2] += stage_max;
+  afl->stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP2] += afl->stage_max;
 
   /* Four walking bits. */
 
-  stage_name = "bitflip 4/1";
-  stage_short = "flip4";
-  stage_max = (len << 3) - 3;
+  afl->stage_name = "bitflip 4/1";
+  afl->stage_short = "flip4";
+  afl->stage_max = (len << 3) - 3;
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    stage_cur_byte = stage_cur >> 3;
+    afl->stage_cur_byte = afl->stage_cur >> 3;
 
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
-    FLIP_BIT(out_buf, stage_cur + 2);
-    FLIP_BIT(out_buf, stage_cur + 3);
+    FLIP_BIT(out_buf, afl->stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur + 1);
+    FLIP_BIT(out_buf, afl->stage_cur + 2);
+    FLIP_BIT(out_buf, afl->stage_cur + 3);
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
-    FLIP_BIT(out_buf, stage_cur + 2);
-    FLIP_BIT(out_buf, stage_cur + 3);
+    FLIP_BIT(out_buf, afl->stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur + 1);
+    FLIP_BIT(out_buf, afl->stage_cur + 2);
+    FLIP_BIT(out_buf, afl->stage_cur + 3);
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP4] += stage_max;
+  afl->stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP4] += afl->stage_max;
 
   /* Effector map setup. These macros calculate:
 
@@ -719,47 +720,47 @@ u8 fuzz_one_original(char** argv) {
 
   /* Walking byte. */
 
-  stage_name = "bitflip 8/8";
-  stage_short = "flip8";
-  stage_max = len;
+  afl->stage_name = "bitflip 8/8";
+  afl->stage_short = "flip8";
+  afl->stage_max = len;
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    stage_cur_byte = stage_cur;
+    afl->stage_cur_byte = afl->stage_cur;
 
-    out_buf[stage_cur] ^= 0xFF;
+    out_buf[afl->stage_cur] ^= 0xFF;
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
     /* We also use this stage to pull off a simple trick: we identify
        bytes that seem to have no effect on the current execution path
        even when fully flipped - and we skip them during more expensive
        deterministic stages, such as arithmetics or known ints. */
 
-    if (!eff_map[EFF_APOS(stage_cur)]) {
+    if (!eff_map[EFF_APOS(afl->stage_cur)]) {
 
       u32 cksum;
 
       /* If in dumb mode or if the file is very short, just flag everything
          without wasting time on checksums. */
 
-      if (!dumb_mode && len >= EFF_MIN_LEN)
-        cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      if (!afl->dumb_mode && len >= EFF_MIN_LEN)
+        cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
       else
-        cksum = ~queue_cur->exec_cksum;
+        cksum = ~afl->queue_cur->exec_cksum;
 
-      if (cksum != queue_cur->exec_cksum) {
+      if (cksum != afl->queue_cur->exec_cksum) {
 
-        eff_map[EFF_APOS(stage_cur)] = 1;
+        eff_map[EFF_APOS(afl->stage_cur)] = 1;
         ++eff_cnt;
 
       }
 
     }
 
-    out_buf[stage_cur] ^= 0xFF;
+    out_buf[afl->stage_cur] ^= 0xFF;
 
   }
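
The EFF_APOS()/EFF_ALEN()/EFF_SPAN_ALEN() macros used here and in the later
deterministic stages compress byte offsets into coarse blocks (their
definitions sit just above this hunk, under the "Effector map setup" comment
that the diff cuts off); roughly, assuming the upstream block size of
2^EFF_MAP_SCALE2 = 8 bytes:

  /* Sketch of the effector-map helpers: one eff_map entry covers an
     8-byte block of the input. */
  #define EFF_MAP_SCALE2 3
  #define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)              /* byte -> block */
  #define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))   /* remainder     */
  #define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))        /* len in blocks */
  #define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1)
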
 
@@ -772,29 +773,29 @@ u8 fuzz_one_original(char** argv) {
 
     memset(eff_map, 1, EFF_ALEN(len));
 
-    blocks_eff_select += EFF_ALEN(len);
+    afl->blocks_eff_select += EFF_ALEN(len);
 
   } else {
 
-    blocks_eff_select += eff_cnt;
+    afl->blocks_eff_select += eff_cnt;
 
   }
 
-  blocks_eff_total += EFF_ALEN(len);
+  afl->blocks_eff_total += EFF_ALEN(len);
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP8] += stage_max;
+  afl->stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP8] += afl->stage_max;
 
   /* Two walking bytes. */
 
   if (len < 2) goto skip_bitflip;
 
-  stage_name = "bitflip 16/8";
-  stage_short = "flip16";
-  stage_cur = 0;
-  stage_max = len - 1;
+  afl->stage_name = "bitflip 16/8";
+  afl->stage_short = "flip16";
+  afl->stage_cur = 0;
+  afl->stage_max = len - 1;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -804,35 +805,35 @@ u8 fuzz_one_original(char** argv) {
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-      --stage_max;
+      --afl->stage_max;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     *(u16*)(out_buf + i) ^= 0xFFFF;
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-    ++stage_cur;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+    ++afl->stage_cur;
 
     *(u16*)(out_buf + i) ^= 0xFFFF;
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP16] += stage_max;
+  afl->stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP16] += afl->stage_max;
 
   if (len < 4) goto skip_bitflip;
 
   /* Four walking bytes. */
 
-  stage_name = "bitflip 32/8";
-  stage_short = "flip32";
-  stage_cur = 0;
-  stage_max = len - 3;
+  afl->stage_name = "bitflip 32/8";
+  afl->stage_short = "flip32";
+  afl->stage_cur = 0;
+  afl->stage_max = len - 3;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -842,30 +843,30 @@ u8 fuzz_one_original(char** argv) {
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-      --stage_max;
+      --afl->stage_max;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-    ++stage_cur;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+    ++afl->stage_cur;
 
     *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP32] += stage_max;
+  afl->stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP32] += afl->stage_max;
 
 skip_bitflip:
 
-  if (no_arith) goto skip_arith;
+  if (afl->no_arith) goto skip_arith;
 
   /**********************
    * ARITHMETIC INC/DEC *
@@ -873,12 +874,12 @@ skip_bitflip:
 
   /* 8-bit arithmetics. */
 
-  stage_name = "arith 8/8";
-  stage_short = "arith8";
-  stage_cur = 0;
-  stage_max = 2 * len * ARITH_MAX;
+  afl->stage_name = "arith 8/8";
+  afl->stage_short = "arith8";
+  afl->stage_cur = 0;
+  afl->stage_max = 2 * len * ARITH_MAX;
 
-  stage_val_type = STAGE_VAL_LE;
+  afl->stage_val_type = STAGE_VAL_LE;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -890,12 +891,12 @@ skip_bitflip:
 
     if (!eff_map[EFF_APOS(i)]) {
 
-      stage_max -= 2 * ARITH_MAX;
+      afl->stage_max -= 2 * ARITH_MAX;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 1; j <= ARITH_MAX; ++j) {
 
@@ -906,29 +907,29 @@ skip_bitflip:
 
       if (!could_be_bitflip(r)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         out_buf[i] = orig + j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       r = orig ^ (orig - j);
 
       if (!could_be_bitflip(r)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         out_buf[i] = orig - j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       out_buf[i] = orig;
 
@@ -936,19 +937,19 @@ skip_bitflip:
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_ARITH8] += stage_max;
+  afl->stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_ARITH8] += afl->stage_max;
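
The could_be_bitflip() guard used throughout the arithmetic stages skips any
value change that one of the earlier walking-bit stages would already have
produced; a sketch of the upstream helper (xor_val is old ^ new):

  static u8 could_be_bitflip(u32 xor_val) {

    u32 sh = 0;

    if (!xor_val) return 1;

    /* Normalize: shift right until the lowest set bit reaches position 0. */
    while (!(xor_val & 1)) { ++sh; xor_val >>= 1; }

    /* 1-, 2-, and 4-bit flip patterns can start at any offset. */
    if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;

    /* 8-, 16-, and 32-bit patterns only occur on byte boundaries. */
    if (sh & 7) return 0;

    if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff)
      return 1;

    return 0;

  }
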
 
   /* 16-bit arithmetics, both endians. */
 
   if (len < 2) goto skip_arith;
 
-  stage_name = "arith 16/8";
-  stage_short = "arith16";
-  stage_cur = 0;
-  stage_max = 4 * (len - 1) * ARITH_MAX;
+  afl->stage_name = "arith 16/8";
+  afl->stage_short = "arith16";
+  afl->stage_cur = 0;
+  afl->stage_max = 4 * (len - 1) * ARITH_MAX;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -960,12 +961,12 @@ skip_bitflip:
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-      stage_max -= 4 * ARITH_MAX;
+      afl->stage_max -= 4 * ARITH_MAX;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 1; j <= ARITH_MAX; ++j) {
 
@@ -978,59 +979,59 @@ skip_bitflip:
          & 0xff overflow checks) and if it couldn't be a product of
          a bitflip. */
 
-      stage_val_type = STAGE_VAL_LE;
+      afl->stage_val_type = STAGE_VAL_LE;
 
       if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         *(u16*)(out_buf + i) = orig + j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         *(u16*)(out_buf + i) = orig - j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       /* Big endian comes next. Same deal. */
 
-      stage_val_type = STAGE_VAL_BE;
+      afl->stage_val_type = STAGE_VAL_BE;
 
       if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((orig >> 8) < j && !could_be_bitflip(r4)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       *(u16*)(out_buf + i) = orig;
 
@@ -1038,19 +1039,19 @@ skip_bitflip:
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_ARITH16] += stage_max;
+  afl->stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_ARITH16] += afl->stage_max;
 
   /* 32-bit arithmetics, both endians. */
 
   if (len < 4) goto skip_arith;
 
-  stage_name = "arith 32/8";
-  stage_short = "arith32";
-  stage_cur = 0;
-  stage_max = 4 * (len - 3) * ARITH_MAX;
+  afl->stage_name = "arith 32/8";
+  afl->stage_short = "arith32";
+  afl->stage_cur = 0;
+  afl->stage_max = 4 * (len - 3) * ARITH_MAX;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1063,12 +1064,12 @@ skip_bitflip:
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-      stage_max -= 4 * ARITH_MAX;
+      afl->stage_max -= 4 * ARITH_MAX;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 1; j <= ARITH_MAX; ++j) {
 
@@ -1079,59 +1080,59 @@ skip_bitflip:
       /* Little endian first. Same deal as with 16-bit: we only want to
          try if the operation would have an effect on more than two bytes. */
 
-      stage_val_type = STAGE_VAL_LE;
+      afl->stage_val_type = STAGE_VAL_LE;
 
       if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         *(u32*)(out_buf + i) = orig + j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         *(u32*)(out_buf + i) = orig - j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       /* Big endian next. */
 
-      stage_val_type = STAGE_VAL_BE;
+      afl->stage_val_type = STAGE_VAL_BE;
 
       if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       *(u32*)(out_buf + i) = orig;
 
@@ -1139,10 +1140,10 @@ skip_bitflip:
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_ARITH32] += stage_max;
+  afl->stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_ARITH32] += afl->stage_max;
 
 skip_arith:
 
@@ -1150,12 +1151,12 @@ skip_arith:
    * INTERESTING VALUES *
    **********************/
 
-  stage_name = "interest 8/8";
-  stage_short = "int8";
-  stage_cur = 0;
-  stage_max = len * sizeof(interesting_8);
+  afl->stage_name = "interest 8/8";
+  afl->stage_short = "int8";
+  afl->stage_cur = 0;
+  afl->stage_max = len * sizeof(interesting_8);
 
-  stage_val_type = STAGE_VAL_LE;
+  afl->stage_val_type = STAGE_VAL_LE;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1169,12 +1170,12 @@ skip_arith:
 
     if (!eff_map[EFF_APOS(i)]) {
 
-      stage_max -= sizeof(interesting_8);
+      afl->stage_max -= sizeof(interesting_8);
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 0; j < sizeof(interesting_8); ++j) {
 
@@ -1183,36 +1184,36 @@ skip_arith:
       if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
           could_be_arith(orig, (u8)interesting_8[j], 1)) {
 
-        --stage_max;
+        --afl->stage_max;
         continue;
 
       }
 
-      stage_cur_val = interesting_8[j];
+      afl->stage_cur_val = interesting_8[j];
       out_buf[i] = interesting_8[j];
 
-      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
       out_buf[i] = orig;
-      ++stage_cur;
+      ++afl->stage_cur;
 
     }
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_INTEREST8] += stage_max;
+  afl->stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_INTEREST8] += afl->stage_max;
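
The interesting_8/16/32 tables these stages walk hold hand-picked boundary
constants (declared extern in afl-fuzz.h by this patch); for reference, the
8-bit set is roughly the following sketch of upstream config.h (exact values
may differ between versions):

  static const s8 interesting_8_sketch[] = {

    -128,            /* Overflow signed 8-bit when decremented    */
    -1,              /* Overflow signed/unsigned when incremented */
    0,
    1,
    16, 32, 64, 100, /* One-off with common buffer sizes          */
    127              /* Overflow signed 8-bit when incremented    */

  };
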
 
   /* Setting 16-bit integers, both endians. */
 
-  if (no_arith || len < 2) goto skip_interest;
+  if (afl->no_arith || len < 2) goto skip_interest;
 
-  stage_name = "interest 16/8";
-  stage_short = "int16";
-  stage_cur = 0;
-  stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
+  afl->stage_name = "interest 16/8";
+  afl->stage_short = "int16";
+  afl->stage_cur = 0;
+  afl->stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1224,16 +1225,16 @@ skip_arith:
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-      stage_max -= sizeof(interesting_16);
+      afl->stage_max -= sizeof(interesting_16);
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
 
-      stage_cur_val = interesting_16[j];
+      afl->stage_cur_val = interesting_16[j];
 
       /* Skip if this could be a product of a bitflip, arithmetics,
          or single-byte interesting value insertion. */
@@ -1242,31 +1243,31 @@ skip_arith:
           !could_be_arith(orig, (u16)interesting_16[j], 2) &&
           !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
 
-        stage_val_type = STAGE_VAL_LE;
+        afl->stage_val_type = STAGE_VAL_LE;
 
         *(u16*)(out_buf + i) = interesting_16[j];
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
           !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
           !could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
           !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
 
-        stage_val_type = STAGE_VAL_BE;
+        afl->stage_val_type = STAGE_VAL_BE;
 
         *(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
     }
 
@@ -1274,19 +1275,19 @@ skip_arith:
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_INTEREST16] += stage_max;
+  afl->stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_INTEREST16] += afl->stage_max;
 
   if (len < 4) goto skip_interest;
 
   /* Setting 32-bit integers, both endians. */
 
-  stage_name = "interest 32/8";
-  stage_short = "int32";
-  stage_cur = 0;
-  stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
+  afl->stage_name = "interest 32/8";
+  afl->stage_short = "int32";
+  afl->stage_cur = 0;
+  afl->stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1299,16 +1300,16 @@ skip_arith:
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-      stage_max -= sizeof(interesting_32) >> 1;
+      afl->stage_max -= sizeof(interesting_32) >> 1;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
 
-      stage_cur_val = interesting_32[j];
+      afl->stage_cur_val = interesting_32[j];
 
       /* Skip if this could be a product of a bitflip, arithmetics,
          or word interesting value insertion. */
@@ -1317,31 +1318,31 @@ skip_arith:
           !could_be_arith(orig, interesting_32[j], 4) &&
           !could_be_interest(orig, interesting_32[j], 4, 0)) {
 
-        stage_val_type = STAGE_VAL_LE;
+        afl->stage_val_type = STAGE_VAL_LE;
 
         *(u32*)(out_buf + i) = interesting_32[j];
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
           !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
           !could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
           !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
 
-        stage_val_type = STAGE_VAL_BE;
+        afl->stage_val_type = STAGE_VAL_BE;
 
         *(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
     }
 
@@ -1349,10 +1350,10 @@ skip_arith:
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_INTEREST32] += stage_max;
+  afl->stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_INTEREST32] += afl->stage_max;
 
 skip_interest:
 
@@ -1360,16 +1361,16 @@ skip_interest:
    * DICTIONARY STUFF *
    ********************/
 
-  if (!extras_cnt) goto skip_user_extras;
+  if (!afl->extras_cnt) goto skip_user_extras;
 
   /* Overwrite with user-supplied extras. */
 
-  stage_name = "user extras (over)";
-  stage_short = "ext_UO";
-  stage_cur = 0;
-  stage_max = extras_cnt * len;
+  afl->stage_name = "user extras (over)";
+  afl->stage_short = "ext_UO";
+  afl->stage_cur = 0;
+  afl->stage_max = afl->extras_cnt * len;
 
-  stage_val_type = STAGE_VAL_NONE;
+  afl->stage_val_type = STAGE_VAL_NONE;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1377,36 +1378,36 @@ skip_interest:
 
     u32 last_len = 0;
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     /* Extras are sorted by size, from smallest to largest. This means
        that we don't have to worry about restoring the buffer in
        between writes at a particular offset determined by the outer
        loop. */
 
-    for (j = 0; j < extras_cnt; ++j) {
+    for (j = 0; j < afl->extras_cnt; ++j) {
 
-      /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
+      /* Skip extras probabilistically if afl->extras_cnt > MAX_DET_EXTRAS. Also
          skip them if there's no room to insert the payload, if the token
          is redundant, or if its entire span has no bytes set in the effector
          map. */
 
-      if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
-          extras[j].len > len - i ||
-          !memcmp(extras[j].data, out_buf + i, extras[j].len) ||
-          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
+      if ((afl->extras_cnt > MAX_DET_EXTRAS && UR(afl, afl->extras_cnt) >= MAX_DET_EXTRAS) ||
+          afl->extras[j].len > len - i ||
+          !memcmp(afl->extras[j].data, out_buf + i, afl->extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, afl->extras[j].len))) {
 
-        --stage_max;
+        --afl->stage_max;
         continue;
 
       }
 
-      last_len = extras[j].len;
-      memcpy(out_buf + i, extras[j].data, last_len);
+      last_len = afl->extras[j].len;
+      memcpy(out_buf + i, afl->extras[j].data, last_len);
 
-      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-      ++stage_cur;
+      ++afl->stage_cur;
 
     }
 
@@ -1415,17 +1416,17 @@ skip_interest:
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_EXTRAS_UO] += stage_max;
+  afl->stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_EXTRAS_UO] += afl->stage_max;
 
   /* Insertion of user-supplied extras. */
 
-  stage_name = "user extras (insert)";
-  stage_short = "ext_UI";
-  stage_cur = 0;
-  stage_max = extras_cnt * (len + 1);
+  afl->stage_name = "user extras (insert)";
+  afl->stage_short = "ext_UI";
+  afl->stage_cur = 0;
+  afl->stage_max = afl->extras_cnt * (len + 1);
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1433,31 +1434,31 @@ skip_interest:
 
   for (i = 0; i <= len; ++i) {
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
-    for (j = 0; j < extras_cnt; ++j) {
+    for (j = 0; j < afl->extras_cnt; ++j) {
 
-      if (len + extras[j].len > MAX_FILE) {
+      if (len + afl->extras[j].len > MAX_FILE) {
 
-        --stage_max;
+        --afl->stage_max;
         continue;
 
       }
 
       /* Insert token */
-      memcpy(ex_tmp + i, extras[j].data, extras[j].len);
+      memcpy(ex_tmp + i, afl->extras[j].data, afl->extras[j].len);
 
       /* Copy tail */
-      memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
+      memcpy(ex_tmp + i + afl->extras[j].len, out_buf + i, len - i);
 
-      if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
+      if (common_fuzz_stuff(afl, ex_tmp, len + afl->extras[j].len)) {
 
         ck_free(ex_tmp);
         goto abandon_entry;
 
       }
 
-      ++stage_cur;
+      ++afl->stage_cur;
 
     }
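
The outer-loop step that falls in the gap after this hunk extends the head of
ex_tmp by one byte, so the next insertion offset sees a valid prefix of
out_buf; a one-line sketch of the elided step:

    /* Copy head */
    ex_tmp[i] = out_buf[i];
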
 
@@ -1468,21 +1469,21 @@ skip_interest:
 
   ck_free(ex_tmp);
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_EXTRAS_UI] += stage_max;
+  afl->stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_EXTRAS_UI] += afl->stage_max;
 
 skip_user_extras:
 
-  if (!a_extras_cnt) goto skip_extras;
+  if (!afl->a_extras_cnt) goto skip_extras;
 
-  stage_name = "auto extras (over)";
-  stage_short = "ext_AO";
-  stage_cur = 0;
-  stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
+  afl->stage_name = "auto extras (over)";
+  afl->stage_short = "ext_AO";
+  afl->stage_cur = 0;
+  afl->stage_max = MIN(afl->a_extras_cnt, USE_AUTO_EXTRAS) * len;
 
-  stage_val_type = STAGE_VAL_NONE;
+  afl->stage_val_type = STAGE_VAL_NONE;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -1490,28 +1491,28 @@ skip_user_extras:
 
     u32 last_len = 0;
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
-    for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
+    for (j = 0; j < MIN(afl->a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
 
       /* See the comment in the earlier code; extras are sorted by size. */
 
-      if (a_extras[j].len > len - i ||
-          !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
+      if (afl->a_extras[j].len > len - i ||
+          !memcmp(afl->a_extras[j].data, out_buf + i, afl->a_extras[j].len) ||
           !memchr(eff_map + EFF_APOS(i), 1,
-                  EFF_SPAN_ALEN(i, a_extras[j].len))) {
+                  EFF_SPAN_ALEN(i, afl->a_extras[j].len))) {
 
-        --stage_max;
+        --afl->stage_max;
         continue;
 
       }
 
-      last_len = a_extras[j].len;
-      memcpy(out_buf + i, a_extras[j].data, last_len);
+      last_len = afl->a_extras[j].len;
+      memcpy(out_buf + i, afl->a_extras[j].data, last_len);
 
-      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-      ++stage_cur;
+      ++afl->stage_cur;
 
     }
 
@@ -1520,10 +1521,10 @@ skip_user_extras:
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_EXTRAS_AO] += stage_max;
+  afl->stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_EXTRAS_AO] += afl->stage_max;
 
 skip_extras:
 
@@ -1531,28 +1532,28 @@ skip_extras:
      we're properly done with deterministic steps and can mark it as such
      in the .state/ directory. */
 
-  if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
+  if (!afl->queue_cur->passed_det) mark_as_det_done(afl, afl->queue_cur);
 
 custom_mutator_stage:
   /*******************
    * CUSTOM MUTATORS *
    *******************/
 
-  if (!mutator) goto havoc_stage;
-  if (!mutator->afl_custom_fuzz) goto havoc_stage;
+  if (!afl->mutator) goto havoc_stage;
+  if (!afl->mutator->afl_custom_fuzz) goto havoc_stage;
 
-  stage_name = "custom mutator";
-  stage_short = "custom";
-  stage_max = HAVOC_CYCLES * perf_score / havoc_div / 100;
-  stage_val_type = STAGE_VAL_NONE;
+  afl->stage_name = "custom mutator";
+  afl->stage_short = "custom";
+  afl->stage_max = HAVOC_CYCLES * perf_score / afl->havoc_div / 100;
+  afl->stage_val_type = STAGE_VAL_NONE;
 
-  if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
+  if (afl->stage_max < HAVOC_MIN) afl->stage_max = HAVOC_MIN;
 
   const u32 max_seed_size = MAX_FILE;
 
-  orig_hit_cnt = queued_paths + unique_crashes;
-
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
+  
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
     struct queue_entry* target;
     u32                 tid;
@@ -1562,11 +1563,11 @@ custom_mutator_stage:
     /* Pick a random other queue entry for passing to external API */
     do {
 
-      tid = UR(queued_paths);
+      tid = UR(afl, afl->queued_paths);
 
-    } while (tid == current_entry && queued_paths > 1);
+    } while (tid == afl->current_entry && afl->queued_paths > 1);
 
-    target = queue;
+    target = afl->queue;
 
     while (tid >= 100) {
 
@@ -1580,11 +1581,11 @@ custom_mutator_stage:
 
     /* Make sure that the target has a reasonable length. */
 
-    while (target && (target->len < 2 || target == queue_cur) &&
-           queued_paths > 1) {
+    while (target && (target->len < 2 || target == afl->queue_cur) &&
+           afl->queued_paths > 1) {
 
       target = target->next;
-      ++splicing_with;
+      ++afl->splicing_with;
 
     }
 
@@ -1596,15 +1597,16 @@ custom_mutator_stage:
     new_buf = ck_alloc_nozero(target->len);
     ck_read(fd, new_buf, target->len, target->fname);
     close(fd);
-
-    size_t mutated_size = mutator->afl_custom_fuzz(&out_buf, len, new_buf,
-                                                   target->len, max_seed_size);
+
+    size_t mutated_size = afl->mutator->afl_custom_fuzz(afl, &out_buf, len,
+                                                   new_buf, target->len,
+                                                   max_seed_size);
 
     ck_free(new_buf);
 
     if (mutated_size > 0) {
 
-      if (common_fuzz_stuff(argv, out_buf, (u32)mutated_size)) {
+      if (common_fuzz_stuff(afl, out_buf, (u32)mutated_size)) {
 
         goto abandon_entry;
 
@@ -1613,16 +1615,16 @@ custom_mutator_stage:
       /* If we're finding new stuff, let's run for a bit longer, limits
          permitting. */
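       /* (Both afl->stage_max and perf_score are doubled here, but only while
          perf_score stays at or below afl->havoc_max_mult * 100, so the extra
          budget cannot grow without bound.) */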
 
-      if (queued_paths != havoc_queued) {
+      if (afl->queued_paths != havoc_queued) {
 
-        if (perf_score <= havoc_max_mult * 100) {
+        if (perf_score <= afl->havoc_max_mult * 100) {
 
-          stage_max *= 2;
+          afl->stage_max *= 2;
           perf_score *= 2;
 
         }
 
-        havoc_queued = queued_paths;
+        havoc_queued = afl->queued_paths;
 
       }
 
@@ -1633,12 +1635,12 @@ custom_mutator_stage:
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_CUSTOM_MUTATOR] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_CUSTOM_MUTATOR] += stage_max;
+  afl->stage_finds[STAGE_CUSTOM_MUTATOR] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_CUSTOM_MUTATOR] += afl->stage_max;
 
-  if (custom_only) {
+  if (afl->custom_only) {
 
     /* Skip other stages */
     ret_val = 0;
@@ -1652,45 +1654,43 @@ custom_mutator_stage:
 
 havoc_stage:
 
-  stage_cur_byte = -1;
+  afl->stage_cur_byte = -1;
 
   /* The havoc stage mutation code is also invoked when splicing files; if the
      splice_cycle variable is set, generate different descriptions and such. */
 
   if (!splice_cycle) {
 
-    stage_name = "havoc";
-    stage_short = "havoc";
-    stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
-                havoc_div / 100;
+    afl->stage_name = "havoc";
+    afl->stage_short = "havoc";
+    afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
+                afl->havoc_div / 100;
 
   } else {
 
-    static u8 tmp[32];
-
     perf_score = orig_perf;
 
-    sprintf(tmp, "splice %u", splice_cycle);
-    stage_name = tmp;
-    stage_short = "splice";
-    stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+    snprintf(afl->stage_name_buf64, 64, "splice %u", splice_cycle);
+    afl->stage_name = afl->stage_name_buf64;
+    afl->stage_short = "splice";
+    afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100;
 
   }
 
-  if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
+  if (afl->stage_max < HAVOC_MIN) afl->stage_max = HAVOC_MIN;
 
   temp_len = len;
 
-  orig_hit_cnt = queued_paths + unique_crashes;
+  orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  havoc_queued = queued_paths;
+  havoc_queued = afl->queued_paths;
 
-  u8 stacked_custom = (mutator && mutator->afl_custom_havoc_mutation);
-  u8 stacked_custom_prob = 6;  // like one of the default mutations in havoc
+  u8 stacked_custom = (afl->mutator && afl->mutator->afl_custom_havoc_mutation);
+  u8 stacked_custom_prob = 6; // like one of the default mutations in havoc
 
-  if (stacked_custom && mutator->afl_custom_havoc_mutation_probability) {
+  if (stacked_custom && afl->mutator->afl_custom_havoc_mutation_probability) {
 
-    stacked_custom_prob = mutator->afl_custom_havoc_mutation_probability();
+    stacked_custom_prob = afl->mutator->afl_custom_havoc_mutation_probability(afl);
     if (stacked_custom_prob > 100)
       FATAL(
           "The probability returned by afl_custom_havoc_mutation_propability "
@@ -1701,35 +1701,35 @@ havoc_stage:
   /* We essentially just do several thousand runs (depending on perf_score)
      where we take the input file and make random stacked tweaks. */
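   /* Illustrative note: UR(afl, HAVOC_STACK_POW2) below picks the stacking
      depth as a power of two - e.g. with HAVOC_STACK_POW2 set to 7, every
      iteration applies between 2 and 128 randomly chosen mutations before the
      result is handed to common_fuzz_stuff(). */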
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
+    u32 use_stacking = 1 << (1 + UR(afl, HAVOC_STACK_POW2));
 
-    stage_cur_val = use_stacking;
+    afl->stage_cur_val = use_stacking;
 
     for (i = 0; i < use_stacking; ++i) {
-
-      if (stacked_custom && UR(100) < stacked_custom_prob) {
-
-        temp_len =
-            mutator->afl_custom_havoc_mutation(&out_buf, temp_len, MAX_FILE);
-
+
+      if (stacked_custom && UR(afl, 100) < stacked_custom_prob) {
+
+        temp_len = afl->mutator->afl_custom_havoc_mutation(afl, &out_buf, temp_len,
+                                                      MAX_FILE);
+
       }
 
-      switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) {
+      switch (UR(afl, 15 + ((afl->extras_cnt + afl->a_extras_cnt) ? 2 : 0))) {
 
         case 0:
 
           /* Flip a single bit somewhere. Spooky! */
 
-          FLIP_BIT(out_buf, UR(temp_len << 3));
+          FLIP_BIT(out_buf, UR(afl, temp_len << 3));
           break;
 
         case 1:
 
           /* Set byte to interesting value. */
 
-          out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
+          out_buf[UR(afl, temp_len)] = interesting_8[UR(afl, sizeof(interesting_8))];
           break;
 
         case 2:
@@ -1738,15 +1738,15 @@ havoc_stage:
 
           if (temp_len < 2) break;
 
-          if (UR(2)) {
+          if (UR(afl, 2)) {
 
-            *(u16*)(out_buf + UR(temp_len - 1)) =
-                interesting_16[UR(sizeof(interesting_16) >> 1)];
+            *(u16*)(out_buf + UR(afl, temp_len - 1)) =
+                interesting_16[UR(afl, sizeof(interesting_16) >> 1)];
 
           } else {
 
-            *(u16*)(out_buf + UR(temp_len - 1)) =
-                SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]);
+            *(u16*)(out_buf + UR(afl, temp_len - 1)) =
+                SWAP16(interesting_16[UR(afl, sizeof(interesting_16) >> 1)]);
 
           }
 
@@ -1758,15 +1758,15 @@ havoc_stage:
 
           if (temp_len < 4) break;
 
-          if (UR(2)) {
+          if (UR(afl, 2)) {
 
-            *(u32*)(out_buf + UR(temp_len - 3)) =
-                interesting_32[UR(sizeof(interesting_32) >> 2)];
+            *(u32*)(out_buf + UR(afl, temp_len - 3)) =
+                interesting_32[UR(afl, sizeof(interesting_32) >> 2)];
 
           } else {
 
-            *(u32*)(out_buf + UR(temp_len - 3)) =
-                SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]);
+            *(u32*)(out_buf + UR(afl, temp_len - 3)) =
+                SWAP32(interesting_32[UR(afl, sizeof(interesting_32) >> 2)]);
 
           }
 
@@ -1776,14 +1776,14 @@ havoc_stage:
 
           /* Randomly subtract from byte. */
 
-          out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
+          out_buf[UR(afl, temp_len)] -= 1 + UR(afl, ARITH_MAX);
           break;
 
         case 5:
 
           /* Randomly add to byte. */
 
-          out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
+          out_buf[UR(afl, temp_len)] += 1 + UR(afl, ARITH_MAX);
           break;
 
         case 6:
@@ -1792,16 +1792,16 @@ havoc_stage:
 
           if (temp_len < 2) break;
 
-          if (UR(2)) {
+          if (UR(afl, 2)) {
 
-            u32 pos = UR(temp_len - 1);
+            u32 pos = UR(afl, temp_len - 1);
 
-            *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+            *(u16*)(out_buf + pos) -= 1 + UR(afl, ARITH_MAX);
 
           } else {
 
-            u32 pos = UR(temp_len - 1);
-            u16 num = 1 + UR(ARITH_MAX);
+            u32 pos = UR(afl, temp_len - 1);
+            u16 num = 1 + UR(afl, ARITH_MAX);
 
             *(u16*)(out_buf + pos) =
                 SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
@@ -1816,16 +1816,16 @@ havoc_stage:
 
           if (temp_len < 2) break;
 
-          if (UR(2)) {
+          if (UR(afl, 2)) {
 
-            u32 pos = UR(temp_len - 1);
+            u32 pos = UR(afl, temp_len - 1);
 
-            *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
+            *(u16*)(out_buf + pos) += 1 + UR(afl, ARITH_MAX);
 
           } else {
 
-            u32 pos = UR(temp_len - 1);
-            u16 num = 1 + UR(ARITH_MAX);
+            u32 pos = UR(afl, temp_len - 1);
+            u16 num = 1 + UR(afl, ARITH_MAX);
 
             *(u16*)(out_buf + pos) =
                 SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
@@ -1840,16 +1840,16 @@ havoc_stage:
 
           if (temp_len < 4) break;
 
-          if (UR(2)) {
+          if (UR(afl, 2)) {
 
-            u32 pos = UR(temp_len - 3);
+            u32 pos = UR(afl, temp_len - 3);
 
-            *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+            *(u32*)(out_buf + pos) -= 1 + UR(afl, ARITH_MAX);
 
           } else {
 
-            u32 pos = UR(temp_len - 3);
-            u32 num = 1 + UR(ARITH_MAX);
+            u32 pos = UR(afl, temp_len - 3);
+            u32 num = 1 + UR(afl, ARITH_MAX);
 
             *(u32*)(out_buf + pos) =
                 SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
@@ -1864,16 +1864,16 @@ havoc_stage:
 
           if (temp_len < 4) break;
 
-          if (UR(2)) {
+          if (UR(afl, 2)) {
 
-            u32 pos = UR(temp_len - 3);
+            u32 pos = UR(afl, temp_len - 3);
 
-            *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
+            *(u32*)(out_buf + pos) += 1 + UR(afl, ARITH_MAX);
 
           } else {
 
-            u32 pos = UR(temp_len - 3);
-            u32 num = 1 + UR(ARITH_MAX);
+            u32 pos = UR(afl, temp_len - 3);
+            u32 num = 1 + UR(afl, ARITH_MAX);
 
             *(u32*)(out_buf + pos) =
                 SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
@@ -1888,7 +1888,7 @@ havoc_stage:
              why not. We use XOR with 1-255 to eliminate the
              possibility of a no-op. */
 
-          out_buf[UR(temp_len)] ^= 1 + UR(255);
+          out_buf[UR(afl, temp_len)] ^= 1 + UR(afl, 255);
           break;
 
         case 11 ... 12: {
@@ -1903,9 +1903,9 @@ havoc_stage:
 
           /* Don't delete too much. */
 
-          del_len = choose_block_len(temp_len - 1);
+          del_len = choose_block_len(afl, temp_len - 1);
 
-          del_from = UR(temp_len - del_len + 1);
+          del_from = UR(afl, temp_len - del_len + 1);
 
           memmove(out_buf + del_from, out_buf + del_from + del_len,
                   temp_len - del_from - del_len);
@@ -1922,23 +1922,23 @@ havoc_stage:
 
             /* Clone bytes (75%) or insert a block of constant bytes (25%). */
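             /* (UR(afl, 4) is non-zero three times out of four, which is where
                the 75/25 split comes from.) */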
 
-            u8  actually_clone = UR(4);
+            u8  actually_clone = UR(afl, 4);
             u32 clone_from, clone_to, clone_len;
             u8* new_buf;
 
             if (actually_clone) {
 
-              clone_len = choose_block_len(temp_len);
-              clone_from = UR(temp_len - clone_len + 1);
+              clone_len = choose_block_len(afl, temp_len);
+              clone_from = UR(afl, temp_len - clone_len + 1);
 
             } else {
 
-              clone_len = choose_block_len(HAVOC_BLK_XL);
+              clone_len = choose_block_len(afl, HAVOC_BLK_XL);
               clone_from = 0;
 
             }
 
-            clone_to = UR(temp_len);
+            clone_to = UR(afl, temp_len);
 
             new_buf = ck_alloc_nozero(temp_len + clone_len);
 
@@ -1952,7 +1952,7 @@ havoc_stage:
               memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
             else
               memset(new_buf + clone_to,
-                     UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
+                     UR(afl, 2) ? UR(afl, 256) : out_buf[UR(afl, temp_len)], clone_len);
 
             /* Tail */
             memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
@@ -1975,19 +1975,19 @@ havoc_stage:
 
           if (temp_len < 2) break;
 
-          copy_len = choose_block_len(temp_len - 1);
+          copy_len = choose_block_len(afl, temp_len - 1);
 
-          copy_from = UR(temp_len - copy_len + 1);
-          copy_to = UR(temp_len - copy_len + 1);
+          copy_from = UR(afl, temp_len - copy_len + 1);
+          copy_to = UR(afl, temp_len - copy_len + 1);
 
-          if (UR(4)) {
+          if (UR(afl, 4)) {
 
             if (copy_from != copy_to)
               memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
 
           } else
 
-            memset(out_buf + copy_to, UR(2) ? UR(256) : out_buf[UR(temp_len)],
+            memset(out_buf + copy_to, UR(afl, 2) ? UR(afl, 256) : out_buf[UR(afl, temp_len)],
                    copy_len);
 
           break;
@@ -2001,32 +2001,32 @@ havoc_stage:
 
           /* Overwrite bytes with an extra. */
 
-          if (!extras_cnt || (a_extras_cnt && UR(2))) {
+          if (!afl->extras_cnt || (afl->a_extras_cnt && UR(afl, 2))) {
 
             /* No user-specified extras or odds in our favor. Let's use an
                auto-detected one. */
 
-            u32 use_extra = UR(a_extras_cnt);
-            u32 extra_len = a_extras[use_extra].len;
+            u32 use_extra = UR(afl, afl->a_extras_cnt);
+            u32 extra_len = afl->a_extras[use_extra].len;
             u32 insert_at;
 
             if (extra_len > temp_len) break;
 
-            insert_at = UR(temp_len - extra_len + 1);
-            memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len);
+            insert_at = UR(afl, temp_len - extra_len + 1);
+            memcpy(out_buf + insert_at, afl->a_extras[use_extra].data, extra_len);
 
           } else {
 
             /* No auto extras or odds in our favor. Use the dictionary. */
 
-            u32 use_extra = UR(extras_cnt);
-            u32 extra_len = extras[use_extra].len;
+            u32 use_extra = UR(afl, afl->extras_cnt);
+            u32 extra_len = afl->extras[use_extra].len;
             u32 insert_at;
 
             if (extra_len > temp_len) break;
 
-            insert_at = UR(temp_len - extra_len + 1);
-            memcpy(out_buf + insert_at, extras[use_extra].data, extra_len);
+            insert_at = UR(afl, temp_len - extra_len + 1);
+            memcpy(out_buf + insert_at, afl->extras[use_extra].data, extra_len);
 
           }
 
@@ -2036,16 +2036,16 @@ havoc_stage:
 
         case 16: {
 
-          u32 use_extra, extra_len, insert_at = UR(temp_len + 1);
+          u32 use_extra, extra_len, insert_at = UR(afl, temp_len + 1);
           u8* new_buf;
 
           /* Insert an extra. Do the same dice-rolling stuff as for the
              previous case. */
 
-          if (!extras_cnt || (a_extras_cnt && UR(2))) {
+          if (!afl->extras_cnt || (afl->a_extras_cnt && UR(afl, 2))) {
 
-            use_extra = UR(a_extras_cnt);
-            extra_len = a_extras[use_extra].len;
+            use_extra = UR(afl, afl->a_extras_cnt);
+            extra_len = afl->a_extras[use_extra].len;
 
             if (temp_len + extra_len >= MAX_FILE) break;
 
@@ -2055,12 +2055,12 @@ havoc_stage:
             memcpy(new_buf, out_buf, insert_at);
 
             /* Inserted part */
-            memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len);
+            memcpy(new_buf + insert_at, afl->a_extras[use_extra].data, extra_len);
 
           } else {
 
-            use_extra = UR(extras_cnt);
-            extra_len = extras[use_extra].len;
+            use_extra = UR(afl, afl->extras_cnt);
+            extra_len = afl->extras[use_extra].len;
 
             if (temp_len + extra_len >= MAX_FILE) break;
 
@@ -2070,7 +2070,7 @@ havoc_stage:
             memcpy(new_buf, out_buf, insert_at);
 
             /* Inserted part */
-            memcpy(new_buf + insert_at, extras[use_extra].data, extra_len);
+            memcpy(new_buf + insert_at, afl->extras[use_extra].data, extra_len);
 
           }
 
@@ -2090,7 +2090,7 @@ havoc_stage:
 
     }
 
-    if (common_fuzz_stuff(argv, out_buf, temp_len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, temp_len)) goto abandon_entry;
 
     /* out_buf might have been mangled a bit, so let's restore it to its
        original size and shape. */
@@ -2102,32 +2102,32 @@ havoc_stage:
     /* If we're finding new stuff, let's run for a bit longer, limits
        permitting. */
 
-    if (queued_paths != havoc_queued) {
+    if (afl->queued_paths != havoc_queued) {
 
-      if (perf_score <= havoc_max_mult * 100) {
+      if (perf_score <= afl->havoc_max_mult * 100) {
 
-        stage_max *= 2;
+        afl->stage_max *= 2;
         perf_score *= 2;
 
       }
 
-      havoc_queued = queued_paths;
+      havoc_queued = afl->queued_paths;
 
     }
 
   }
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
   if (!splice_cycle) {
 
-    stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt;
-    stage_cycles[STAGE_HAVOC] += stage_max;
+    afl->stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt;
+    afl->stage_cycles[STAGE_HAVOC] += afl->stage_max;
 
   } else {
 
-    stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt;
-    stage_cycles[STAGE_SPLICE] += stage_max;
+    afl->stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt;
+    afl->stage_cycles[STAGE_SPLICE] += afl->stage_max;
 
   }
 
@@ -2144,8 +2144,8 @@ havoc_stage:
 
 retry_splicing:
 
-  if (use_splicing && splice_cycle++ < SPLICE_CYCLES && queued_paths > 1 &&
-      queue_cur->len > 1) {
+  if (afl->use_splicing && splice_cycle++ < SPLICE_CYCLES && afl->queued_paths > 1 &&
+      afl->queue_cur->len > 1) {
 
     struct queue_entry* target;
     u32                 tid, split_at;
@@ -2159,7 +2159,7 @@ retry_splicing:
 
       ck_free(in_buf);
       in_buf = orig_in;
-      len = queue_cur->len;
+      len = afl->queue_cur->len;
 
     }
 
@@ -2167,12 +2167,12 @@ retry_splicing:
 
     do {
 
-      tid = UR(queued_paths);
+      tid = UR(afl, afl->queued_paths);
 
-    } while (tid == current_entry);
+    } while (tid == afl->current_entry);
 
-    splicing_with = tid;
-    target = queue;
+    afl->splicing_with = tid;
+    target = afl->queue;
 
     while (tid >= 100) {
 
@@ -2186,10 +2186,10 @@ retry_splicing:
 
     /* Make sure that the target has a reasonable length. */
 
-    while (target && (target->len < 2 || target == queue_cur)) {
+    while (target && (target->len < 2 || target == afl->queue_cur)) {
 
       target = target->next;
-      ++splicing_with;
+      ++afl->splicing_with;
 
     }
 
@@ -2222,7 +2222,7 @@ retry_splicing:
 
     /* Split somewhere between the first and last differing byte. */
 
-    split_at = f_diff + UR(l_diff - f_diff);
+    split_at = f_diff + UR(afl, l_diff - f_diff);
 
     /* Do the thing. */
 
@@ -2248,28 +2248,28 @@ retry_splicing:
 
 radamsa_stage:
 
-  if (!use_radamsa || !radamsa_mutate_ptr) goto abandon_entry;
+  if (!afl->use_radamsa || !afl->radamsa_mutate_ptr) goto abandon_entry;
 
-  stage_name = "radamsa";
-  stage_short = "radamsa";
-  stage_max = (HAVOC_CYCLES * perf_score / havoc_div / 100) << use_radamsa;
+  afl->stage_name = "radamsa";
+  afl->stage_short = "radamsa";
+  afl->stage_max = (HAVOC_CYCLES * perf_score / afl->havoc_div / 100) << afl->use_radamsa;
 
-  if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
+  if (afl->stage_max < HAVOC_MIN) afl->stage_max = HAVOC_MIN;
 
-  orig_hit_cnt = queued_paths + unique_crashes;
+  orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
   /* Read the additional testcase into a new buffer. */
   u8* save_buf = ck_alloc_nozero(len);
   memcpy(save_buf, out_buf, len);
 
-  u32 max_len = len + choose_block_len(HAVOC_BLK_XL);
+  u32 max_len = len + choose_block_len(afl, HAVOC_BLK_XL);
   u8* new_buf = ck_alloc_nozero(max_len);
   u8* tmp_buf;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
     u32 new_len =
-        radamsa_mutate_ptr(save_buf, len, new_buf, max_len, get_rand_seed());
+        afl->radamsa_mutate_ptr(save_buf, len, new_buf, max_len, get_rand_seed(afl));
 
     if (new_len) {
 
@@ -2283,7 +2283,7 @@ radamsa_stage:
 
     }
 
-    if (common_fuzz_stuff(argv, tmp_buf, temp_len)) {
+    if (common_fuzz_stuff(afl, tmp_buf, temp_len)) {
 
       ck_free(save_buf);
       ck_free(new_buf);
@@ -2296,10 +2296,10 @@ radamsa_stage:
   ck_free(save_buf);
   ck_free(new_buf);
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_RADAMSA] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_RADAMSA] += stage_max;
+  afl->stage_finds[STAGE_RADAMSA] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_RADAMSA] += afl->stage_max;
 
   ret_val = 0;
   goto abandon_entry;
@@ -2307,23 +2307,23 @@ radamsa_stage:
 /* we are through with this queue entry - for this iteration */
 abandon_entry:
 
-  splicing_with = -1;
+  afl->splicing_with = -1;
 
-  /* Update pending_not_fuzzed count if we made it through the calibration
+  /* Update afl->pending_not_fuzzed count if we made it through the calibration
      cycle and have not seen this entry before. */
 
-  if (!stop_soon && !queue_cur->cal_failed &&
-      (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) {
+  if (!afl->stop_soon && !afl->queue_cur->cal_failed &&
+      (afl->queue_cur->was_fuzzed == 0 || afl->queue_cur->fuzz_level == 0)) {
 
-    --pending_not_fuzzed;
-    queue_cur->was_fuzzed = 1;
-    if (queue_cur->favored) --pending_favored;
+    --afl->pending_not_fuzzed;
+    afl->queue_cur->was_fuzzed = 1;
+    if (afl->queue_cur->favored) --afl->pending_favored;
 
   }
 
-  ++queue_cur->fuzz_level;
+  ++afl->queue_cur->fuzz_level;
 
-  munmap(orig_in, queue_cur->len);
+  munmap(orig_in, afl->queue_cur->len);
 
   if (in_buf != orig_in) ck_free(in_buf);
   ck_free(out_buf);
@@ -2335,55 +2335,14 @@ abandon_entry:
 
 }
 
-struct MOpt_globals_t {
-
-  u64*      finds;
-  u64*      finds_v2;
-  u64*      cycles;
-  u64*      cycles_v2;
-  u64*      cycles_v3;
-  u32       is_pilot_mode;
-  u64*      pTime;
-  const u64 period;
-  char*     havoc_stagename;
-  char*     splice_stageformat;
-  char*     havoc_stagenameshort;
-  char*     splice_stagenameshort;
-
-} MOpt_globals_pilot = {stage_finds_puppet[0],
-
-                        stage_finds_puppet_v2[0],
-                        stage_cycles_puppet[0],
-                        stage_cycles_puppet_v2[0],
-                        stage_cycles_puppet_v3[0],
-                        1,
-                        &tmp_pilot_time,
-                        period_pilot,
-                        "MOpt-havoc",
-                        "MOpt-splice %u",
-                        "MOpt_havoc",
-                        "MOpt_splice"},
-  MOpt_globals_core = {core_operator_finds_puppet,
-                       core_operator_finds_puppet_v2,
-                       core_operator_cycles_puppet,
-                       core_operator_cycles_puppet_v2,
-                       core_operator_cycles_puppet_v3,
-                       0,
-                       &tmp_core_time,
-                       period_core,
-                       "MOpt-core-havoc",
-                       "MOpt-core-splice %u",
-                       "MOpt_core_havoc",
-                       "MOpt_core_splice"};
-
 /* MOpt mode */
-u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
+u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
 
   if (!MOpt_globals.is_pilot_mode) {
 
     if (swarm_num == 1) {
 
-      key_module = 2;
+      afl->key_module = 2;
       return 0;
 
     }
@@ -2405,33 +2364,33 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
   /* In IGNORE_FINDS mode, skip any entries that weren't in the
      initial data set. */
 
-  if (queue_cur->depth > 1) return 1;
+  if (afl->queue_cur->depth > 1) return 1;
 
 #else
 
-  if (pending_favored) {
+  if (afl->pending_favored) {
 
     /* If we have any favored, non-fuzzed new arrivals in the queue,
        possibly skip to them at the expense of already-fuzzed or non-favored
        cases. */
 
-    if ((queue_cur->was_fuzzed || !queue_cur->favored) &&
-        UR(100) < SKIP_TO_NEW_PROB)
+    if ((afl->queue_cur->was_fuzzed || !afl->queue_cur->favored) &&
+        UR(afl, 100) < SKIP_TO_NEW_PROB)
       return 1;
 
-  } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) {
+  } else if (!afl->dumb_mode && !afl->queue_cur->favored && afl->queued_paths > 10) {
 
     /* Otherwise, still possibly skip non-favored cases, albeit less often.
        The odds of skipping stuff are higher for already-fuzzed inputs and
        lower for never-fuzzed entries. */
 
-    if (queue_cycle > 1 && !queue_cur->was_fuzzed) {
+    if (afl->queue_cycle > 1 && !afl->queue_cur->was_fuzzed) {
 
-      if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;
+      if (UR(afl, 100) < SKIP_NFAV_NEW_PROB) return 1;
 
     } else {
 
-      if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;
+      if (UR(afl, 100) < SKIP_NFAV_OLD_PROB) return 1;
 
     }
 
@@ -2439,25 +2398,25 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
 
 #endif                                                     /* ^IGNORE_FINDS */
 
-  if (not_on_tty) {
+  if (afl->not_on_tty) {
 
     ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...",
-         current_entry, queued_paths, unique_crashes);
+         afl->current_entry, afl->queued_paths, afl->unique_crashes);
     fflush(stdout);
 
   }
 
   /* Map the test case into memory. */
 
-  fd = open(queue_cur->fname, O_RDONLY);
+  fd = open(afl->queue_cur->fname, O_RDONLY);
 
-  if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname);
+  if (fd < 0) PFATAL("Unable to open '%s'", afl->queue_cur->fname);
 
-  len = queue_cur->len;
+  len = afl->queue_cur->len;
 
   orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 
-  if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname);
+  if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", afl->queue_cur->fname);
 
   close(fd);
 
@@ -2467,29 +2426,29 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
 
   out_buf = ck_alloc_nozero(len);
 
-  subseq_tmouts = 0;
+  afl->subseq_tmouts = 0;
 
-  cur_depth = queue_cur->depth;
+  afl->cur_depth = afl->queue_cur->depth;
 
   /*******************************************
    * CALIBRATION (only if failed earlier on) *
    *******************************************/
 
-  if (queue_cur->cal_failed) {
+  if (afl->queue_cur->cal_failed) {
 
     u8 res = FAULT_TMOUT;
 
-    if (queue_cur->cal_failed < CAL_CHANCES) {
+    if (afl->queue_cur->cal_failed < CAL_CHANCES) {
 
-      res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0);
+      res = calibrate_case(afl, afl->queue_cur, in_buf, afl->queue_cycle - 1, 0);
 
       if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
     }
 
-    if (stop_soon || res != crash_mode) {
+    if (afl->stop_soon || res != afl->crash_mode) {
 
-      ++cur_skipped_paths;
+      ++afl->cur_skipped_paths;
       goto abandon_entry;
 
     }
@@ -2500,24 +2459,24 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
    * TRIMMING *
    ************/
 
-  if (!dumb_mode && !queue_cur->trim_done) {
+  if (!afl->dumb_mode && !afl->queue_cur->trim_done) {
 
-    u8 res = trim_case(argv, queue_cur, in_buf);
+    u8 res = trim_case(afl, afl->queue_cur, in_buf);
 
     if (res == FAULT_ERROR) FATAL("Unable to execute target application");
 
-    if (stop_soon) {
+    if (afl->stop_soon) {
 
-      ++cur_skipped_paths;
+      ++afl->cur_skipped_paths;
       goto abandon_entry;
 
     }
 
     /* Don't retry trimming, even if it failed. */
 
-    queue_cur->trim_done = 1;
+    afl->queue_cur->trim_done = 1;
 
-    len = queue_cur->len;
+    len = afl->queue_cur->len;
 
   }
 
@@ -2527,28 +2486,28 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
    * PERFORMANCE SCORE *
    *********************/
 
-  orig_perf = perf_score = calculate_score(queue_cur);
+  orig_perf = perf_score = calculate_score(afl, afl->queue_cur);
 
   /* Skip right away if -d is given, if we have done deterministic fuzzing on
      this entry ourselves (was_fuzzed), or if it has gone through deterministic
      testing in earlier, resumed runs (passed_det). */
 
-  if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
+  if (afl->skip_deterministic || afl->queue_cur->was_fuzzed || afl->queue_cur->passed_det)
     goto havoc_stage;
 
   /* Skip deterministic fuzzing if exec path checksum puts this out of scope
      for this master instance. */
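   /* For example, with three master instances (afl->master_max == 3), the
      instance with afl->master_id == 2 only keeps the deterministic stages for
      entries whose exec_cksum % 3 == 1; everything else falls through to havoc
      right away. */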
 
-  if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
+  if (afl->master_max && (afl->queue_cur->exec_cksum % afl->master_max) != afl->master_id - 1)
     goto havoc_stage;
 
   cur_ms_lv = get_cur_time();
-  if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) ||
-                            (last_crash_time != 0 &&
-                             cur_ms_lv - last_crash_time < limit_time_puppet) ||
-                            last_path_time == 0))) {
+  if (!(afl->key_puppet == 0 && ((cur_ms_lv - afl->last_path_time < afl->limit_time_puppet) ||
+                            (afl->last_crash_time != 0 &&
+                             cur_ms_lv - afl->last_crash_time < afl->limit_time_puppet) ||
+                            afl->last_path_time == 0))) {
 
-    key_puppet = 1;
+    afl->key_puppet = 1;
     goto pacemaker_fuzzing;
 
   }
@@ -2570,25 +2529,25 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
 
   /* Single walking bit. */
 
-  stage_short = "flip1";
-  stage_max = len << 3;
-  stage_name = "bitflip 1/1";
+  afl->stage_short = "flip1";
+  afl->stage_max = len << 3;
+  afl->stage_name = "bitflip 1/1";
 
-  stage_val_type = STAGE_VAL_NONE;
+  afl->stage_val_type = STAGE_VAL_NONE;
 
-  orig_hit_cnt = queued_paths + unique_crashes;
+  orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  prev_cksum = queue_cur->exec_cksum;
+  prev_cksum = afl->queue_cur->exec_cksum;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    stage_cur_byte = stage_cur >> 3;
+    afl->stage_cur_byte = afl->stage_cur >> 3;
 
-    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur);
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-    FLIP_BIT(out_buf, stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur);
 
     /* While flipping the least significant bit in every byte, pull off an extra
        trick to detect possible syntax tokens. In essence, the idea is that if
@@ -2617,20 +2576,20 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
 
       */
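     /* Roughly: each time the least significant bit of a byte has just been
        flipped ((afl->stage_cur & 7) == 7), the trace map is hashed and
        compared against prev_cksum; as long as consecutive bytes keep yielding
        the same changed checksum they are accumulated in a_collect[], and when
        the run (or the file) ends, a token of MIN_AUTO_EXTRA..MAX_AUTO_EXTRA
        bytes is offered to maybe_add_auto(). */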
 
-    if (!dumb_mode && (stage_cur & 7) == 7) {
+    if (!afl->dumb_mode && (afl->stage_cur & 7) == 7) {
 
-      u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      u32 cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
 
-      if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
+      if (afl->stage_cur == afl->stage_max - 1 && cksum == prev_cksum) {
 
         /* If at end of file and we are still collecting a string, grab the
            final character and force output. */
 
-        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[afl->stage_cur >> 3];
         ++a_len;
 
         if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-          maybe_add_auto(a_collect, a_len);
+          maybe_add_auto(afl, a_collect, a_len);
 
       } else if (cksum != prev_cksum) {
 
@@ -2638,7 +2597,7 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
            worthwhile queued up, and collect that if the answer is yes. */
 
         if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
-          maybe_add_auto(a_collect, a_len);
+          maybe_add_auto(afl, a_collect, a_len);
 
         a_len = 0;
         prev_cksum = cksum;
@@ -2648,79 +2607,79 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
       /* Continue collecting string, but only if the bit flip actually made
          any difference - we don't want no-op tokens. */
 
-      if (cksum != queue_cur->exec_cksum) {
+      if (cksum != afl->queue_cur->exec_cksum) {
 
-        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+        if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[afl->stage_cur >> 3];
         ++a_len;
 
       }
 
-    }                                            /* if (stage_cur & 7) == 7 */
+    }                                            /* if (afl->stage_cur & 7) == 7 */
 
-  }                                                        /* for stage_cur */
+  }                                                        /* for afl->stage_cur */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP1] += stage_max;
+  afl->stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP1] += afl->stage_max;
 
   /* Two walking bits. */
 
-  stage_name = "bitflip 2/1";
-  stage_short = "flip2";
-  stage_max = (len << 3) - 1;
+  afl->stage_name = "bitflip 2/1";
+  afl->stage_short = "flip2";
+  afl->stage_max = (len << 3) - 1;
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    stage_cur_byte = stage_cur >> 3;
+    afl->stage_cur_byte = afl->stage_cur >> 3;
 
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, afl->stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur + 1);
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
+    FLIP_BIT(out_buf, afl->stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur + 1);
 
-  }                                                        /* for stage_cur */
+  }                                                        /* for afl->stage_cur */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP2] += stage_max;
+  afl->stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP2] += afl->stage_max;
 
   /* Four walking bits. */
 
-  stage_name = "bitflip 4/1";
-  stage_short = "flip4";
-  stage_max = (len << 3) - 3;
+  afl->stage_name = "bitflip 4/1";
+  afl->stage_short = "flip4";
+  afl->stage_max = (len << 3) - 3;
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    stage_cur_byte = stage_cur >> 3;
+    afl->stage_cur_byte = afl->stage_cur >> 3;
 
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
-    FLIP_BIT(out_buf, stage_cur + 2);
-    FLIP_BIT(out_buf, stage_cur + 3);
+    FLIP_BIT(out_buf, afl->stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur + 1);
+    FLIP_BIT(out_buf, afl->stage_cur + 2);
+    FLIP_BIT(out_buf, afl->stage_cur + 3);
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-    FLIP_BIT(out_buf, stage_cur);
-    FLIP_BIT(out_buf, stage_cur + 1);
-    FLIP_BIT(out_buf, stage_cur + 2);
-    FLIP_BIT(out_buf, stage_cur + 3);
+    FLIP_BIT(out_buf, afl->stage_cur);
+    FLIP_BIT(out_buf, afl->stage_cur + 1);
+    FLIP_BIT(out_buf, afl->stage_cur + 2);
+    FLIP_BIT(out_buf, afl->stage_cur + 3);
 
-  }                                                        /* for stage_cur */
+  }                                                        /* for afl->stage_cur */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP4] += stage_max;
+  afl->stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP4] += afl->stage_max;
 
   /* Effector map setup. These macros calculate:
 
@@ -2750,49 +2709,49 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
 
   /* Walking byte. */
 
-  stage_name = "bitflip 8/8";
-  stage_short = "flip8";
-  stage_max = len;
+  afl->stage_name = "bitflip 8/8";
+  afl->stage_short = "flip8";
+  afl->stage_max = len;
 
   orig_hit_cnt = new_hit_cnt;
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    stage_cur_byte = stage_cur;
+    afl->stage_cur_byte = afl->stage_cur;
 
-    out_buf[stage_cur] ^= 0xFF;
+    out_buf[afl->stage_cur] ^= 0xFF;
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
     /* We also use this stage to pull off a simple trick: we identify
        bytes that seem to have no effect on the current execution path
        even when fully flipped - and we skip them during more expensive
        deterministic stages, such as arithmetics or known ints. */
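     /* A rough sketch of the bookkeeping (constants are the usual config.h
        values, assumed here for illustration): each eff_map slot covers a
        2^EFF_MAP_SCALE2-byte block, EFF_APOS(p) maps offset p to its slot, and
        a changed trace checksum below sets eff_map[EFF_APOS(afl->stage_cur)].
        Later stages test that slot (or a span of slots via EFF_SPAN_ALEN)
        before spending executions on the corresponding bytes. */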
 
-    if (!eff_map[EFF_APOS(stage_cur)]) {
+    if (!eff_map[EFF_APOS(afl->stage_cur)]) {
 
       u32 cksum;
 
       /* If in dumb mode or if the file is very short, just flag everything
          without wasting time on checksums. */
 
-      if (!dumb_mode && len >= EFF_MIN_LEN)
-        cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      if (!afl->dumb_mode && len >= EFF_MIN_LEN)
+        cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
       else
-        cksum = ~queue_cur->exec_cksum;
+        cksum = ~afl->queue_cur->exec_cksum;
 
-      if (cksum != queue_cur->exec_cksum) {
+      if (cksum != afl->queue_cur->exec_cksum) {
 
-        eff_map[EFF_APOS(stage_cur)] = 1;
+        eff_map[EFF_APOS(afl->stage_cur)] = 1;
         ++eff_cnt;
 
       }
 
     }
 
-    out_buf[stage_cur] ^= 0xFF;
+    out_buf[afl->stage_cur] ^= 0xFF;
 
-  }                                                        /* for stage_cur */
+  }                                                        /* for afl->stage_cur */
 
   /* If the effector map is more than EFF_MAX_PERC dense, just flag the
      whole thing as worth fuzzing, since we wouldn't be saving much time
@@ -2803,29 +2762,29 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
 
     memset(eff_map, 1, EFF_ALEN(len));
 
-    blocks_eff_select += EFF_ALEN(len);
+    afl->blocks_eff_select += EFF_ALEN(len);
 
   } else {
 
-    blocks_eff_select += eff_cnt;
+    afl->blocks_eff_select += eff_cnt;
 
   }
 
-  blocks_eff_total += EFF_ALEN(len);
+  afl->blocks_eff_total += EFF_ALEN(len);
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP8] += stage_max;
+  afl->stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP8] += afl->stage_max;
 
   /* Two walking bytes. */
 
   if (len < 2) goto skip_bitflip;
 
-  stage_name = "bitflip 16/8";
-  stage_short = "flip16";
-  stage_cur = 0;
-  stage_max = len - 1;
+  afl->stage_name = "bitflip 16/8";
+  afl->stage_short = "flip16";
+  afl->stage_cur = 0;
+  afl->stage_max = len - 1;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -2835,35 +2794,35 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-      --stage_max;
+      --afl->stage_max;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     *(u16*)(out_buf + i) ^= 0xFFFF;
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-    ++stage_cur;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+    ++afl->stage_cur;
 
     *(u16*)(out_buf + i) ^= 0xFFFF;
 
   }                                                   /* for i = 0; i < len */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP16] += stage_max;
+  afl->stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP16] += afl->stage_max;
 
   if (len < 4) goto skip_bitflip;
 
   /* Four walking bytes. */
 
-  stage_name = "bitflip 32/8";
-  stage_short = "flip32";
-  stage_cur = 0;
-  stage_max = len - 3;
+  afl->stage_name = "bitflip 32/8";
+  afl->stage_short = "flip32";
+  afl->stage_cur = 0;
+  afl->stage_max = len - 3;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -2873,30 +2832,30 @@ u8 common_fuzzing(char** argv, struct MOpt_globals_t MOpt_globals) {
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-      --stage_max;
+      --afl->stage_max;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-    ++stage_cur;
+    if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+    ++afl->stage_cur;
 
     *(u32*)(out_buf + i) ^= 0xFFFFFFFF;
 
   }                                               /* for i = 0; i < len - 3 */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_FLIP32] += stage_max;
+  afl->stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_FLIP32] += afl->stage_max;
 
 skip_bitflip:
 
-  if (no_arith) goto skip_arith;
+  if (afl->no_arith) goto skip_arith;
 
   /**********************
    * ARITHMETIC INC/DEC *
@@ -2904,12 +2863,12 @@ skip_bitflip:
 
   /* 8-bit arithmetics. */
 
-  stage_name = "arith 8/8";
-  stage_short = "arith8";
-  stage_cur = 0;
-  stage_max = 2 * len * ARITH_MAX;
+  afl->stage_name = "arith 8/8";
+  afl->stage_short = "arith8";
+  afl->stage_cur = 0;
+  afl->stage_max = 2 * len * ARITH_MAX;
 
-  stage_val_type = STAGE_VAL_LE;
+  afl->stage_val_type = STAGE_VAL_LE;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -2921,12 +2880,12 @@ skip_bitflip:
 
     if (!eff_map[EFF_APOS(i)]) {
 
-      stage_max -= 2 * ARITH_MAX;
+      afl->stage_max -= 2 * ARITH_MAX;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 1; j <= ARITH_MAX; ++j) {
 
@@ -2937,29 +2896,29 @@ skip_bitflip:
 
       if (!could_be_bitflip(r)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         out_buf[i] = orig + j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       r = orig ^ (orig - j);
 
       if (!could_be_bitflip(r)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         out_buf[i] = orig - j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       out_buf[i] = orig;
 
@@ -2967,19 +2926,19 @@ skip_bitflip:
 
   }                                                   /* for i = 0; i < len */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_ARITH8] += stage_max;
+  afl->stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_ARITH8] += afl->stage_max;
 
   /* 16-bit arithmetics, both endians. */
 
   if (len < 2) goto skip_arith;
 
-  stage_name = "arith 16/8";
-  stage_short = "arith16";
-  stage_cur = 0;
-  stage_max = 4 * (len - 1) * ARITH_MAX;
+  afl->stage_name = "arith 16/8";
+  afl->stage_short = "arith16";
+  afl->stage_cur = 0;
+  afl->stage_max = 4 * (len - 1) * ARITH_MAX;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -2991,12 +2950,12 @@ skip_bitflip:
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-      stage_max -= 4 * ARITH_MAX;
+      afl->stage_max -= 4 * ARITH_MAX;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 1; j <= ARITH_MAX; ++j) {
 
@@ -3009,59 +2968,59 @@ skip_bitflip:
          & 0xff overflow checks) and if it couldn't be a product of
          a bitflip. */
 
-      stage_val_type = STAGE_VAL_LE;
+      afl->stage_val_type = STAGE_VAL_LE;
 
       if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         *(u16*)(out_buf + i) = orig + j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((orig & 0xff) < j && !could_be_bitflip(r2)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         *(u16*)(out_buf + i) = orig - j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       /* Big endian comes next. Same deal. */
 
-      stage_val_type = STAGE_VAL_BE;
+      afl->stage_val_type = STAGE_VAL_BE;
 
       if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j);
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((orig >> 8) < j && !could_be_bitflip(r4)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j);
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       *(u16*)(out_buf + i) = orig;
 
@@ -3069,19 +3028,19 @@ skip_bitflip:
 
   }                                               /* for i = 0; i < len - 1 */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_ARITH16] += stage_max;
+  afl->stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_ARITH16] += afl->stage_max;
 
   /* 32-bit arithmetics, both endians. */
 
   if (len < 4) goto skip_arith;
 
-  stage_name = "arith 32/8";
-  stage_short = "arith32";
-  stage_cur = 0;
-  stage_max = 4 * (len - 3) * ARITH_MAX;
+  afl->stage_name = "arith 32/8";
+  afl->stage_short = "arith32";
+  afl->stage_cur = 0;
+  afl->stage_max = 4 * (len - 3) * ARITH_MAX;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -3094,12 +3053,12 @@ skip_bitflip:
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-      stage_max -= 4 * ARITH_MAX;
+      afl->stage_max -= 4 * ARITH_MAX;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 1; j <= ARITH_MAX; ++j) {
 
@@ -3110,59 +3069,59 @@ skip_bitflip:
       /* Little endian first. Same deal as with 16-bit: we only want to
          try if the operation would have effect on more than two bytes. */
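       /* The (orig & 0xffff) + j > 0xffff guard below only admits additions
          that carry past the low 16 bits; smaller changes were already
          exercised by the 8- and 16-bit arithmetic stages, and
          could_be_bitflip() further rejects values reachable by the earlier
          bitflip passes. */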
 
-      stage_val_type = STAGE_VAL_LE;
+      afl->stage_val_type = STAGE_VAL_LE;
 
       if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         *(u32*)(out_buf + i) = orig + j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((orig & 0xffff) < j && !could_be_bitflip(r2)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         *(u32*)(out_buf + i) = orig - j;
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       /* Big endian next. */
 
-      stage_val_type = STAGE_VAL_BE;
+      afl->stage_val_type = STAGE_VAL_BE;
 
       if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) {
 
-        stage_cur_val = j;
+        afl->stage_cur_val = j;
         *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j);
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) {
 
-        stage_cur_val = -j;
+        afl->stage_cur_val = -j;
         *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j);
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       *(u32*)(out_buf + i) = orig;
 
@@ -3170,10 +3129,10 @@ skip_bitflip:
 
   }                                               /* for i = 0; i < len - 3 */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_ARITH32] += stage_max;
+  afl->stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_ARITH32] += afl->stage_max;
 
 skip_arith:
 
@@ -3181,12 +3140,12 @@ skip_arith:
    * INTERESTING VALUES *
    **********************/
 
-  stage_name = "interest 8/8";
-  stage_short = "int8";
-  stage_cur = 0;
-  stage_max = len * sizeof(interesting_8);
+  afl->stage_name = "interest 8/8";
+  afl->stage_short = "int8";
+  afl->stage_cur = 0;
+  afl->stage_max = len * sizeof(interesting_8);
 
-  stage_val_type = STAGE_VAL_LE;
+  afl->stage_val_type = STAGE_VAL_LE;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -3200,12 +3159,12 @@ skip_arith:
 
     if (!eff_map[EFF_APOS(i)]) {
 
-      stage_max -= sizeof(interesting_8);
+      afl->stage_max -= sizeof(interesting_8);
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 0; j < sizeof(interesting_8); ++j) {
 
@@ -3214,36 +3173,36 @@ skip_arith:
       if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
           could_be_arith(orig, (u8)interesting_8[j], 1)) {
 
-        --stage_max;
+        --afl->stage_max;
         continue;
 
       }
 
-      stage_cur_val = interesting_8[j];
+      afl->stage_cur_val = interesting_8[j];
       out_buf[i] = interesting_8[j];
 
-      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
       out_buf[i] = orig;
-      ++stage_cur;
+      ++afl->stage_cur;
 
     }
 
   }                                                   /* for i = 0; i < len */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_INTEREST8] += stage_max;
+  afl->stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_INTEREST8] += afl->stage_max;
 
   /* Setting 16-bit integers, both endians. */
 
-  if (no_arith || len < 2) goto skip_interest;
+  if (afl->no_arith || len < 2) goto skip_interest;
 
-  stage_name = "interest 16/8";
-  stage_short = "int16";
-  stage_cur = 0;
-  stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
+  afl->stage_name = "interest 16/8";
+  afl->stage_short = "int16";
+  afl->stage_cur = 0;
+  afl->stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1);
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -3255,16 +3214,16 @@ skip_arith:
 
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
 
-      stage_max -= sizeof(interesting_16);
+      afl->stage_max -= sizeof(interesting_16);
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
 
-      stage_cur_val = interesting_16[j];
+      afl->stage_cur_val = interesting_16[j];
 
       /* Skip if this could be a product of a bitflip, arithmetics,
          or single-byte interesting value insertion. */
@@ -3273,31 +3232,31 @@ skip_arith:
           !could_be_arith(orig, (u16)interesting_16[j], 2) &&
           !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) {
 
-        stage_val_type = STAGE_VAL_LE;
+        afl->stage_val_type = STAGE_VAL_LE;
 
         *(u16*)(out_buf + i) = interesting_16[j];
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) &&
           !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) &&
           !could_be_arith(orig, SWAP16(interesting_16[j]), 2) &&
           !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) {
 
-        stage_val_type = STAGE_VAL_BE;
+        afl->stage_val_type = STAGE_VAL_BE;
 
         *(u16*)(out_buf + i) = SWAP16(interesting_16[j]);
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
     }
 
@@ -3305,19 +3264,19 @@ skip_arith:
 
   }                                               /* for i = 0; i < len - 1 */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_INTEREST16] += stage_max;
+  afl->stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_INTEREST16] += afl->stage_max;
 
   if (len < 4) goto skip_interest;
 
   /* Setting 32-bit integers, both endians. */
 
-  stage_name = "interest 32/8";
-  stage_short = "int32";
-  stage_cur = 0;
-  stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
+  afl->stage_name = "interest 32/8";
+  afl->stage_short = "int32";
+  afl->stage_cur = 0;
+  afl->stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2);
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -3330,16 +3289,16 @@ skip_arith:
     if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] &&
         !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) {
 
-      stage_max -= sizeof(interesting_32) >> 1;
+      afl->stage_max -= sizeof(interesting_32) >> 1;
       continue;
 
     }
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
 
-      stage_cur_val = interesting_32[j];
+      afl->stage_cur_val = interesting_32[j];
 
       /* Skip if this could be a product of a bitflip, arithmetics,
          or word interesting value insertion. */
@@ -3348,31 +3307,31 @@ skip_arith:
           !could_be_arith(orig, interesting_32[j], 4) &&
           !could_be_interest(orig, interesting_32[j], 4, 0)) {
 
-        stage_val_type = STAGE_VAL_LE;
+        afl->stage_val_type = STAGE_VAL_LE;
 
         *(u32*)(out_buf + i) = interesting_32[j];
 
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
       if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) &&
           !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) &&
           !could_be_arith(orig, SWAP32(interesting_32[j]), 4) &&
           !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) {
 
-        stage_val_type = STAGE_VAL_BE;
+        afl->stage_val_type = STAGE_VAL_BE;
 
         *(u32*)(out_buf + i) = SWAP32(interesting_32[j]);
-        if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
-        ++stage_cur;
+        if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
+        ++afl->stage_cur;
 
       } else
 
-        --stage_max;
+        --afl->stage_max;
 
     }
 
@@ -3380,10 +3339,10 @@ skip_arith:
 
   }                                               /* for i = 0; i < len - 3 */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_INTEREST32] += stage_max;
+  afl->stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_INTEREST32] += afl->stage_max;
 
 skip_interest:
 
@@ -3391,16 +3350,16 @@ skip_interest:
    * DICTIONARY STUFF *
    ********************/
 
-  if (!extras_cnt) goto skip_user_extras;
+  if (!afl->extras_cnt) goto skip_user_extras;
 
   /* Overwrite with user-supplied extras. */
 
-  stage_name = "user extras (over)";
-  stage_short = "ext_UO";
-  stage_cur = 0;
-  stage_max = extras_cnt * len;
+  afl->stage_name = "user extras (over)";
+  afl->stage_short = "ext_UO";
+  afl->stage_cur = 0;
+  afl->stage_max = afl->extras_cnt * len;
 
-  stage_val_type = STAGE_VAL_NONE;
+  afl->stage_val_type = STAGE_VAL_NONE;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -3408,36 +3367,36 @@ skip_interest:
 
     u32 last_len = 0;
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
     /* Extras are sorted by size, from smallest to largest. This means
        that we don't have to worry about restoring the buffer in
        between writes at a particular offset determined by the outer
        loop. */
 
-    for (j = 0; j < extras_cnt; ++j) {
+    for (j = 0; j < afl->extras_cnt; ++j) {
 
-      /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also
+      /* Skip extras probabilistically if afl->extras_cnt > MAX_DET_EXTRAS. Also
          skip them if there's no room to insert the payload, if the token
          is redundant, or if its entire span has no bytes set in the effector
          map. */
 
-      if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) ||
-          extras[j].len > len - i ||
-          !memcmp(extras[j].data, out_buf + i, extras[j].len) ||
-          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) {
+      if ((afl->extras_cnt > MAX_DET_EXTRAS && UR(afl, afl->extras_cnt) >= MAX_DET_EXTRAS) ||
+          afl->extras[j].len > len - i ||
+          !memcmp(afl->extras[j].data, out_buf + i, afl->extras[j].len) ||
+          !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, afl->extras[j].len))) {
 
-        --stage_max;
+        --afl->stage_max;
         continue;
 
       }
 
-      last_len = extras[j].len;
-      memcpy(out_buf + i, extras[j].data, last_len);
+      last_len = afl->extras[j].len;
+      memcpy(out_buf + i, afl->extras[j].data, last_len);
 
-      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-      ++stage_cur;
+      ++afl->stage_cur;
 
     }
 
@@ -3446,17 +3405,17 @@ skip_interest:
 
   }                                                   /* for i = 0; i < len */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_EXTRAS_UO] += stage_max;
+  afl->stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_EXTRAS_UO] += afl->stage_max;
 
   /* Insertion of user-supplied extras. */
 
-  stage_name = "user extras (insert)";
-  stage_short = "ext_UI";
-  stage_cur = 0;
-  stage_max = extras_cnt * (len + 1);
+  afl->stage_name = "user extras (insert)";
+  afl->stage_short = "ext_UI";
+  afl->stage_cur = 0;
+  afl->stage_max = afl->extras_cnt * (len + 1);
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -3464,31 +3423,31 @@ skip_interest:
 
   for (i = 0; i <= len; ++i) {
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
-    for (j = 0; j < extras_cnt; ++j) {
+    for (j = 0; j < afl->extras_cnt; ++j) {
 
-      if (len + extras[j].len > MAX_FILE) {
+      if (len + afl->extras[j].len > MAX_FILE) {
 
-        --stage_max;
+        --afl->stage_max;
         continue;
 
       }
 
       /* Insert token */
-      memcpy(ex_tmp + i, extras[j].data, extras[j].len);
+      memcpy(ex_tmp + i, afl->extras[j].data, afl->extras[j].len);
 
       /* Copy tail */
-      memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);
+      memcpy(ex_tmp + i + afl->extras[j].len, out_buf + i, len - i);
 
-      if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) {
+      if (common_fuzz_stuff(afl, ex_tmp, len + afl->extras[j].len)) {
 
         ck_free(ex_tmp);
         goto abandon_entry;
 
       }
 
-      ++stage_cur;
+      ++afl->stage_cur;
 
     }
 
@@ -3499,21 +3458,21 @@ skip_interest:
 
   ck_free(ex_tmp);
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_EXTRAS_UI] += stage_max;
+  afl->stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_EXTRAS_UI] += afl->stage_max;
 
 skip_user_extras:
 
-  if (!a_extras_cnt) goto skip_extras;
+  if (!afl->a_extras_cnt) goto skip_extras;
 
-  stage_name = "auto extras (over)";
-  stage_short = "ext_AO";
-  stage_cur = 0;
-  stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len;
+  afl->stage_name = "auto extras (over)";
+  afl->stage_short = "ext_AO";
+  afl->stage_cur = 0;
+  afl->stage_max = MIN(afl->a_extras_cnt, USE_AUTO_EXTRAS) * len;
 
-  stage_val_type = STAGE_VAL_NONE;
+  afl->stage_val_type = STAGE_VAL_NONE;
 
   orig_hit_cnt = new_hit_cnt;
 
@@ -3521,28 +3480,28 @@ skip_user_extras:
 
     u32 last_len = 0;
 
-    stage_cur_byte = i;
+    afl->stage_cur_byte = i;
 
-    for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
+    for (j = 0; j < MIN(afl->a_extras_cnt, USE_AUTO_EXTRAS); ++j) {
 
       /* See the comment in the earlier code; extras are sorted by size. */
 
-      if (a_extras[j].len > len - i ||
-          !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) ||
+      if (afl->a_extras[j].len > len - i ||
+          !memcmp(afl->a_extras[j].data, out_buf + i, afl->a_extras[j].len) ||
           !memchr(eff_map + EFF_APOS(i), 1,
-                  EFF_SPAN_ALEN(i, a_extras[j].len))) {
+                  EFF_SPAN_ALEN(i, afl->a_extras[j].len))) {
 
-        --stage_max;
+        --afl->stage_max;
         continue;
 
       }
 
-      last_len = a_extras[j].len;
-      memcpy(out_buf + i, a_extras[j].data, last_len);
+      last_len = afl->a_extras[j].len;
+      memcpy(out_buf + i, afl->a_extras[j].data, last_len);
 
-      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;
 
-      ++stage_cur;
+      ++afl->stage_cur;
 
     }
 
@@ -3551,10 +3510,10 @@ skip_user_extras:
 
   }                                                   /* for i = 0; i < len */
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_EXTRAS_AO] += stage_max;
+  afl->stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_EXTRAS_AO] += afl->stage_max;
 
 skip_extras:
 
@@ -3562,7 +3521,7 @@ skip_extras:
      we're properly done with deterministic steps and can mark it as such
      in the .state/ directory. */
 
-  if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
+  if (!afl->queue_cur->passed_det) mark_as_det_done(afl, afl->queue_cur);
 
   /****************
    * RANDOM HAVOC *
@@ -3571,50 +3530,48 @@ skip_extras:
 havoc_stage:
 pacemaker_fuzzing:
 
-  stage_cur_byte = -1;
+  afl->stage_cur_byte = -1;
 
   /* The havoc stage mutation code is also invoked when splicing files; if the
      splice_cycle variable is set, generate different descriptions and such. */
 
   if (!splice_cycle) {
 
-    stage_name = MOpt_globals.havoc_stagename;
-    stage_short = MOpt_globals.havoc_stagenameshort;
-    stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
-                havoc_div / 100;
+    afl->stage_name = MOpt_globals.havoc_stagename;
+    afl->stage_short = MOpt_globals.havoc_stagenameshort;
+    afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score /
+                afl->havoc_div / 100;
 
   } else {
 
-    static u8 tmp[32];
-
     perf_score = orig_perf;
 
-    sprintf(tmp, MOpt_globals.splice_stageformat, splice_cycle);
-    stage_name = tmp;
-    stage_short = MOpt_globals.splice_stagenameshort;
-    stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+    snprintf(afl->stage_name_buf64, 64, MOpt_globals.splice_stageformat, splice_cycle);
+    afl->stage_name = afl->stage_name_buf64;
+    afl->stage_short = MOpt_globals.splice_stagenameshort;
+    afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100;
 
   }
 
   s32 temp_len_puppet;
   cur_ms_lv = get_cur_time();
 
-  // for (; swarm_now < swarm_num; ++swarm_now)
+  // for (; afl->swarm_now < swarm_num; ++afl->swarm_now)
   {
 
-    if (key_puppet == 1) {
+    if (afl->key_puppet == 1) {
 
-      if (unlikely(orig_hit_cnt_puppet == 0)) {
+      if (unlikely(afl->orig_hit_cnt_puppet == 0)) {
 
-        orig_hit_cnt_puppet = queued_paths + unique_crashes;
-        last_limit_time_start = get_cur_time();
-        SPLICE_CYCLES_puppet =
-            (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+        afl->orig_hit_cnt_puppet = afl->queued_paths + afl->unique_crashes;
+        afl->last_limit_time_start = get_cur_time();
+        afl->SPLICE_CYCLES_puppet =
+            (UR(afl, SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
              SPLICE_CYCLES_puppet_low);
 
       }
 
-    }                                                 /* if key_puppet == 1 */
+    }                                                 /* if afl->key_puppet == 1 */
 
     {
 
@@ -3622,7 +3579,7 @@ pacemaker_fuzzing:
     havoc_stage_puppet:
 #endif
 
-      stage_cur_byte = -1;
+      afl->stage_cur_byte = -1;
 
       /* The havoc stage mutation code is also invoked when splicing files; if
          the splice_cycle variable is set, generate different descriptions and
@@ -3630,35 +3587,34 @@ pacemaker_fuzzing:
 
       if (!splice_cycle) {
 
-        stage_name = MOpt_globals.havoc_stagename;
-        stage_short = MOpt_globals.havoc_stagenameshort;
-        stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-                    perf_score / havoc_div / 100;
+        afl->stage_name = MOpt_globals.havoc_stagename;
+        afl->stage_short = MOpt_globals.havoc_stagenameshort;
+        afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+                    perf_score / afl->havoc_div / 100;
 
       } else {
 
-        static u8 tmp[32];
         perf_score = orig_perf;
-        sprintf(tmp, MOpt_globals.splice_stageformat, splice_cycle);
-        stage_name = tmp;
-        stage_short = MOpt_globals.splice_stagenameshort;
-        stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+        snprintf(afl->stage_name_buf64, 64, MOpt_globals.splice_stageformat, splice_cycle);
+        afl->stage_name = afl->stage_name_buf64;
+        afl->stage_short = MOpt_globals.splice_stagenameshort;
+        afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100;
 
       }
 
-      if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
+      if (afl->stage_max < HAVOC_MIN) afl->stage_max = HAVOC_MIN;
 
       temp_len = len;
 
-      orig_hit_cnt = queued_paths + unique_crashes;
+      orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-      havoc_queued = queued_paths;
+      havoc_queued = afl->queued_paths;
 
-      for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+      for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-        u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
+        u32 use_stacking = 1 << (1 + UR(afl, HAVOC_STACK_POW2));
 
-        stage_cur_val = use_stacking;
+        afl->stage_cur_val = use_stacking;
 
         for (i = 0; i < operator_num; ++i) {
 
@@ -3668,17 +3624,17 @@ pacemaker_fuzzing:
 
         for (i = 0; i < use_stacking; ++i) {
 
-          switch (select_algorithm()) {
+          switch (select_algorithm(afl)) {
 
             case 0:
               /* Flip a single bit somewhere. Spooky! */
-              FLIP_BIT(out_buf, UR(temp_len << 3));
+              FLIP_BIT(out_buf, UR(afl, temp_len << 3));
               MOpt_globals.cycles_v2[STAGE_FLIP1] += 1;
               break;
 
             case 1:
               if (temp_len < 2) break;
-              temp_len_puppet = UR((temp_len << 3) - 1);
+              temp_len_puppet = UR(afl, (temp_len << 3) - 1);
               FLIP_BIT(out_buf, temp_len_puppet);
               FLIP_BIT(out_buf, temp_len_puppet + 1);
               MOpt_globals.cycles_v2[STAGE_FLIP2] += 1;
@@ -3686,7 +3642,7 @@ pacemaker_fuzzing:
 
             case 2:
               if (temp_len < 2) break;
-              temp_len_puppet = UR((temp_len << 3) - 3);
+              temp_len_puppet = UR(afl, (temp_len << 3) - 3);
               FLIP_BIT(out_buf, temp_len_puppet);
               FLIP_BIT(out_buf, temp_len_puppet + 1);
               FLIP_BIT(out_buf, temp_len_puppet + 2);
@@ -3696,55 +3652,55 @@ pacemaker_fuzzing:
 
             case 3:
               if (temp_len < 4) break;
-              out_buf[UR(temp_len)] ^= 0xFF;
+              out_buf[UR(afl, temp_len)] ^= 0xFF;
               MOpt_globals.cycles_v2[STAGE_FLIP8] += 1;
               break;
 
             case 4:
               if (temp_len < 8) break;
-              *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF;
+              *(u16*)(out_buf + UR(afl, temp_len - 1)) ^= 0xFFFF;
               MOpt_globals.cycles_v2[STAGE_FLIP16] += 1;
               break;
 
             case 5:
               if (temp_len < 8) break;
-              *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF;
+              *(u32*)(out_buf + UR(afl, temp_len - 3)) ^= 0xFFFFFFFF;
               MOpt_globals.cycles_v2[STAGE_FLIP32] += 1;
               break;
 
             case 6:
-              out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
-              out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
+              out_buf[UR(afl, temp_len)] -= 1 + UR(afl, ARITH_MAX);
+              out_buf[UR(afl, temp_len)] += 1 + UR(afl, ARITH_MAX);
               MOpt_globals.cycles_v2[STAGE_ARITH8] += 1;
               break;
 
             case 7:
               /* Randomly subtract from word, random endian. */
               if (temp_len < 8) break;
-              if (UR(2)) {
+              if (UR(afl, 2)) {
 
-                u32 pos = UR(temp_len - 1);
-                *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+                u32 pos = UR(afl, temp_len - 1);
+                *(u16*)(out_buf + pos) -= 1 + UR(afl, ARITH_MAX);
 
               } else {
 
-                u32 pos = UR(temp_len - 1);
-                u16 num = 1 + UR(ARITH_MAX);
+                u32 pos = UR(afl, temp_len - 1);
+                u16 num = 1 + UR(afl, ARITH_MAX);
                 *(u16*)(out_buf + pos) =
                     SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num);
 
               }
 
               /* Randomly add to word, random endian. */
-              if (UR(2)) {
+              if (UR(afl, 2)) {
 
-                u32 pos = UR(temp_len - 1);
-                *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX);
+                u32 pos = UR(afl, temp_len - 1);
+                *(u16*)(out_buf + pos) += 1 + UR(afl, ARITH_MAX);
 
               } else {
 
-                u32 pos = UR(temp_len - 1);
-                u16 num = 1 + UR(ARITH_MAX);
+                u32 pos = UR(afl, temp_len - 1);
+                u16 num = 1 + UR(afl, ARITH_MAX);
                 *(u16*)(out_buf + pos) =
                     SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num);
 
@@ -3756,15 +3712,15 @@ pacemaker_fuzzing:
             case 8:
               /* Randomly subtract from dword, random endian. */
               if (temp_len < 8) break;
-              if (UR(2)) {
+              if (UR(afl, 2)) {
 
-                u32 pos = UR(temp_len - 3);
-                *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX);
+                u32 pos = UR(afl, temp_len - 3);
+                *(u32*)(out_buf + pos) -= 1 + UR(afl, ARITH_MAX);
 
               } else {
 
-                u32 pos = UR(temp_len - 3);
-                u32 num = 1 + UR(ARITH_MAX);
+                u32 pos = UR(afl, temp_len - 3);
+                u32 num = 1 + UR(afl, ARITH_MAX);
                 *(u32*)(out_buf + pos) =
                     SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num);
 
@@ -3772,15 +3728,15 @@ pacemaker_fuzzing:
 
               /* Randomly add to dword, random endian. */
               // if (temp_len < 4) break;
-              if (UR(2)) {
+              if (UR(afl, 2)) {
 
-                u32 pos = UR(temp_len - 3);
-                *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX);
+                u32 pos = UR(afl, temp_len - 3);
+                *(u32*)(out_buf + pos) += 1 + UR(afl, ARITH_MAX);
 
               } else {
 
-                u32 pos = UR(temp_len - 3);
-                u32 num = 1 + UR(ARITH_MAX);
+                u32 pos = UR(afl, temp_len - 3);
+                u32 num = 1 + UR(afl, ARITH_MAX);
                 *(u32*)(out_buf + pos) =
                     SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num);
 
@@ -3792,22 +3748,22 @@ pacemaker_fuzzing:
             case 9:
               /* Set byte to interesting value. */
               if (temp_len < 4) break;
-              out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
+              out_buf[UR(afl, temp_len)] = interesting_8[UR(afl, sizeof(interesting_8))];
               MOpt_globals.cycles_v2[STAGE_INTEREST8] += 1;
               break;
 
             case 10:
               /* Set word to interesting value, randomly choosing endian. */
               if (temp_len < 8) break;
-              if (UR(2)) {
+              if (UR(afl, 2)) {
 
-                *(u16*)(out_buf + UR(temp_len - 1)) =
-                    interesting_16[UR(sizeof(interesting_16) >> 1)];
+                *(u16*)(out_buf + UR(afl, temp_len - 1)) =
+                    interesting_16[UR(afl, sizeof(interesting_16) >> 1)];
 
               } else {
 
-                *(u16*)(out_buf + UR(temp_len - 1)) =
-                    SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]);
+                *(u16*)(out_buf + UR(afl, temp_len - 1)) =
+                    SWAP16(interesting_16[UR(afl, sizeof(interesting_16) >> 1)]);
 
               }
 
@@ -3819,15 +3775,15 @@ pacemaker_fuzzing:
 
               if (temp_len < 8) break;
 
-              if (UR(2)) {
+              if (UR(afl, 2)) {
 
-                *(u32*)(out_buf + UR(temp_len - 3)) =
-                    interesting_32[UR(sizeof(interesting_32) >> 2)];
+                *(u32*)(out_buf + UR(afl, temp_len - 3)) =
+                    interesting_32[UR(afl, sizeof(interesting_32) >> 2)];
 
               } else {
 
-                *(u32*)(out_buf + UR(temp_len - 3)) =
-                    SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]);
+                *(u32*)(out_buf + UR(afl, temp_len - 3)) =
+                    SWAP32(interesting_32[UR(afl, sizeof(interesting_32) >> 2)]);
 
               }
 
@@ -3840,7 +3796,7 @@ pacemaker_fuzzing:
                  why not. We use XOR with 1-255 to eliminate the
                  possibility of a no-op. */
 
-              out_buf[UR(temp_len)] ^= 1 + UR(255);
+              out_buf[UR(afl, temp_len)] ^= 1 + UR(afl, 255);
               MOpt_globals.cycles_v2[STAGE_RANDOMBYTE] += 1;
               break;
 
@@ -3856,9 +3812,9 @@ pacemaker_fuzzing:
 
               /* Don't delete too much. */
 
-              del_len = choose_block_len(temp_len - 1);
+              del_len = choose_block_len(afl, temp_len - 1);
 
-              del_from = UR(temp_len - del_len + 1);
+              del_from = UR(afl, temp_len - del_len + 1);
 
               memmove(out_buf + del_from, out_buf + del_from + del_len,
                       temp_len - del_from - del_len);
@@ -3876,23 +3832,23 @@ pacemaker_fuzzing:
                 /* Clone bytes (75%) or insert a block of constant bytes (25%).
                  */
 
-                u8  actually_clone = UR(4);
+                u8  actually_clone = UR(afl, 4);
                 u32 clone_from, clone_to, clone_len;
                 u8* new_buf;
 
                 if (actually_clone) {
 
-                  clone_len = choose_block_len(temp_len);
-                  clone_from = UR(temp_len - clone_len + 1);
+                  clone_len = choose_block_len(afl, temp_len);
+                  clone_from = UR(afl, temp_len - clone_len + 1);
 
                 } else {
 
-                  clone_len = choose_block_len(HAVOC_BLK_XL);
+                  clone_len = choose_block_len(afl, HAVOC_BLK_XL);
                   clone_from = 0;
 
                 }
 
-                clone_to = UR(temp_len);
+                clone_to = UR(afl, temp_len);
 
                 new_buf = ck_alloc_nozero(temp_len + clone_len);
 
@@ -3906,7 +3862,7 @@ pacemaker_fuzzing:
                   memcpy(new_buf + clone_to, out_buf + clone_from, clone_len);
                 else
                   memset(new_buf + clone_to,
-                         UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len);
+                         UR(afl, 2) ? UR(afl, 256) : out_buf[UR(afl, temp_len)], clone_len);
 
                 /* Tail */
                 memcpy(new_buf + clone_to + clone_len, out_buf + clone_to,
@@ -3930,12 +3886,12 @@ pacemaker_fuzzing:
 
               if (temp_len < 2) break;
 
-              copy_len = choose_block_len(temp_len - 1);
+              copy_len = choose_block_len(afl, temp_len - 1);
 
-              copy_from = UR(temp_len - copy_len + 1);
-              copy_to = UR(temp_len - copy_len + 1);
+              copy_from = UR(afl, temp_len - copy_len + 1);
+              copy_to = UR(afl, temp_len - copy_len + 1);
 
-              if (UR(4)) {
+              if (UR(afl, 4)) {
 
                 if (copy_from != copy_to)
                   memmove(out_buf + copy_to, out_buf + copy_from, copy_len);
@@ -3943,7 +3899,7 @@ pacemaker_fuzzing:
               } else
 
                 memset(out_buf + copy_to,
-                       UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len);
+                       UR(afl, 2) ? UR(afl, 256) : out_buf[UR(afl, temp_len)], copy_len);
               MOpt_globals.cycles_v2[STAGE_OverWrite75] += 1;
               break;
 
@@ -3955,9 +3911,9 @@ pacemaker_fuzzing:
 
         *MOpt_globals.pTime += 1;
 
-        u64 temp_total_found = queued_paths + unique_crashes;
+        u64 temp_total_found = afl->queued_paths + afl->unique_crashes;
 
-        if (common_fuzz_stuff(argv, out_buf, temp_len))
+        if (common_fuzz_stuff(afl, out_buf, temp_len))
           goto abandon_entry_puppet;
 
         /* out_buf might have been mangled a bit, so let's restore it to its
@@ -3970,24 +3926,24 @@ pacemaker_fuzzing:
         /* If we're finding new stuff, let's run for a bit longer, limits
            permitting. */
 
-        if (queued_paths != havoc_queued) {
+        if (afl->queued_paths != havoc_queued) {
 
-          if (perf_score <= havoc_max_mult * 100) {
+          if (perf_score <= afl->havoc_max_mult * 100) {
 
-            stage_max *= 2;
+            afl->stage_max *= 2;
             perf_score *= 2;
 
           }
 
-          havoc_queued = queued_paths;
+          havoc_queued = afl->queued_paths;
 
         }
 
-        if (unlikely(queued_paths + unique_crashes > temp_total_found)) {
+        if (unlikely(afl->queued_paths + afl->unique_crashes > temp_total_found)) {
 
           u64 temp_temp_puppet =
-              queued_paths + unique_crashes - temp_total_found;
-          total_puppet_find = total_puppet_find + temp_temp_puppet;
+              afl->queued_paths + afl->unique_crashes - temp_total_found;
+          afl->total_puppet_find = afl->total_puppet_find + temp_temp_puppet;
           for (i = 0; i < operator_num; ++i) {
 
             if (MOpt_globals.cycles_v2[i] > MOpt_globals.cycles_v3[i])
@@ -3997,21 +3953,21 @@ pacemaker_fuzzing:
 
         }                                                             /* if */
 
-      }        /* for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { */
+      }        /* for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) { */
 
-      new_hit_cnt = queued_paths + unique_crashes;
+      new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
       if (MOpt_globals.is_pilot_mode) {
 
         if (!splice_cycle) {
 
-          stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt;
-          stage_cycles[STAGE_HAVOC] += stage_max;
+          afl->stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt;
+          afl->stage_cycles[STAGE_HAVOC] += afl->stage_max;
 
         } else {
 
-          stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt;
-          stage_cycles[STAGE_SPLICE] += stage_max;
+          afl->stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt;
+          afl->stage_cycles[STAGE_SPLICE] += afl->stage_max;
 
         }
 
@@ -4025,8 +3981,8 @@ pacemaker_fuzzing:
 
     retry_splicing_puppet:
 
-      if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet &&
-          queued_paths > 1 && queue_cur->len > 1) {
+      if (afl->use_splicing && splice_cycle++ < afl->SPLICE_CYCLES_puppet &&
+          afl->queued_paths > 1 && afl->queue_cur->len > 1) {
 
         struct queue_entry* target;
         u32                 tid, split_at;
@@ -4040,7 +3996,7 @@ pacemaker_fuzzing:
 
           ck_free(in_buf);
           in_buf = orig_in;
-          len = queue_cur->len;
+          len = afl->queue_cur->len;
 
         }
 
@@ -4049,12 +4005,12 @@ pacemaker_fuzzing:
 
         do {
 
-          tid = UR(queued_paths);
+          tid = UR(afl, afl->queued_paths);
 
-        } while (tid == current_entry);
+        } while (tid == afl->current_entry);
 
-        splicing_with = tid;
-        target = queue;
+        afl->splicing_with = tid;
+        target = afl->queue;
 
         while (tid >= 100) {
 
@@ -4068,10 +4024,10 @@ pacemaker_fuzzing:
 
         /* Make sure that the target has a reasonable length. */
 
-        while (target && (target->len < 2 || target == queue_cur)) {
+        while (target && (target->len < 2 || target == afl->queue_cur)) {
 
           target = target->next;
-          ++splicing_with;
+          ++afl->splicing_with;
 
         }
 
@@ -4104,7 +4060,7 @@ pacemaker_fuzzing:
 
         /* Split somewhere between the first and last differing byte. */
 
-        split_at = f_diff + UR(l_diff - f_diff);
+        split_at = f_diff + UR(afl, l_diff - f_diff);
 
         /* Do the thing. */
 
@@ -4126,40 +4082,40 @@ pacemaker_fuzzing:
     abandon_entry:
     abandon_entry_puppet:
 
-      if (splice_cycle >= SPLICE_CYCLES_puppet)
-        SPLICE_CYCLES_puppet =
-            (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
+      if (splice_cycle >= afl->SPLICE_CYCLES_puppet)
+        afl->SPLICE_CYCLES_puppet =
+            (UR(afl, SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) +
              SPLICE_CYCLES_puppet_low);
 
-      splicing_with = -1;
+      afl->splicing_with = -1;
 
-      /* Update pending_not_fuzzed count if we made it through the calibration
+      /* Update afl->pending_not_fuzzed count if we made it through the calibration
          cycle and have not seen this entry before. */
 
-      // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) {
+      // if (!afl->stop_soon && !afl->queue_cur->cal_failed && !afl->queue_cur->was_fuzzed) {
 
-      //   queue_cur->was_fuzzed = 1;
-      //   --pending_not_fuzzed;
-      //   if (queue_cur->favored) --pending_favored;
+      //   afl->queue_cur->was_fuzzed = 1;
+      //   --afl->pending_not_fuzzed;
+      //   if (afl->queue_cur->favored) --afl->pending_favored;
       // }
 
-      munmap(orig_in, queue_cur->len);
+      munmap(orig_in, afl->queue_cur->len);
 
       if (in_buf != orig_in) ck_free(in_buf);
       ck_free(out_buf);
       ck_free(eff_map);
 
-      if (key_puppet == 1) {
+      if (afl->key_puppet == 1) {
 
-        if (unlikely(queued_paths + unique_crashes >
-                     ((queued_paths + unique_crashes) * limit_time_bound +
-                      orig_hit_cnt_puppet))) {
+        if (unlikely(afl->queued_paths + afl->unique_crashes >
+                     ((afl->queued_paths + afl->unique_crashes) * limit_time_bound +
+                      afl->orig_hit_cnt_puppet))) {
 
-          key_puppet = 0;
+          afl->key_puppet = 0;
           cur_ms_lv = get_cur_time();
-          new_hit_cnt = queued_paths + unique_crashes;
-          orig_hit_cnt_puppet = 0;
-          last_limit_time_start = 0;
+          new_hit_cnt = afl->queued_paths + afl->unique_crashes;
+          afl->orig_hit_cnt_puppet = 0;
+          afl->last_limit_time_start = 0;
 
         }
 
@@ -4167,16 +4123,16 @@ pacemaker_fuzzing:
 
       if (unlikely(*MOpt_globals.pTime > MOpt_globals.period)) {
 
-        total_pacemaker_time += *MOpt_globals.pTime;
+        afl->total_pacemaker_time += *MOpt_globals.pTime;
         *MOpt_globals.pTime = 0;
-        temp_puppet_find = total_puppet_find;
-        new_hit_cnt = queued_paths + unique_crashes;
+        afl->temp_puppet_find = afl->total_puppet_find;
+        new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
         if (MOpt_globals.is_pilot_mode) {
 
-          swarm_fitness[swarm_now] =
-              (double)(total_puppet_find - temp_puppet_find) /
-              ((double)(tmp_pilot_time) / period_pilot_tmp);
+          afl->swarm_fitness[afl->swarm_now] =
+              (double)(afl->total_puppet_find - afl->temp_puppet_find) /
+              ((double)(afl->tmp_pilot_time) / afl->period_pilot_tmp);
 
         }
 
@@ -4192,10 +4148,10 @@ pacemaker_fuzzing:
                   (double)(MOpt_globals.finds_v2[i] - MOpt_globals.finds[i]) /
                   (double)(MOpt_globals.cycles_v2[i] - MOpt_globals.cycles[i]);
 
-            if (eff_best[swarm_now][i] < temp_eff) {
+            if (afl->eff_best[afl->swarm_now][i] < temp_eff) {
 
-              eff_best[swarm_now][i] = temp_eff;
-              L_best[swarm_now][i] = x_now[swarm_now][i];
+              afl->eff_best[afl->swarm_now][i] = temp_eff;
+              afl->L_best[afl->swarm_now][i] = afl->x_now[afl->swarm_now][i];
 
             }
 
@@ -4209,50 +4165,50 @@ pacemaker_fuzzing:
 
         if (MOpt_globals.is_pilot_mode) {
 
-          swarm_now = swarm_now + 1;
-          if (swarm_now == swarm_num) {
+          afl->swarm_now = afl->swarm_now + 1;
+          if (afl->swarm_now == swarm_num) {
 
-            key_module = 1;
+            afl->key_module = 1;
             for (i = 0; i < operator_num; ++i) {
 
-              core_operator_cycles_puppet_v2[i] =
-                  core_operator_cycles_puppet[i];
-              core_operator_cycles_puppet_v3[i] =
-                  core_operator_cycles_puppet[i];
-              core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i];
+              afl->core_operator_cycles_puppet_v2[i] =
+                  afl->core_operator_cycles_puppet[i];
+              afl->core_operator_cycles_puppet_v3[i] =
+                  afl->core_operator_cycles_puppet[i];
+              afl->core_operator_finds_puppet_v2[i] = afl->core_operator_finds_puppet[i];
 
             }
 
             double swarm_eff = 0.0;
-            swarm_now = 0;
+            afl->swarm_now = 0;
             for (i = 0; i < swarm_num; ++i) {
 
-              if (swarm_fitness[i] > swarm_eff) {
+              if (afl->swarm_fitness[i] > swarm_eff) {
 
-                swarm_eff = swarm_fitness[i];
-                swarm_now = i;
+                swarm_eff = afl->swarm_fitness[i];
+                afl->swarm_now = i;
 
               }
 
             }
 
-            if (swarm_now < 0 || swarm_now > swarm_num - 1)
-              PFATAL("swarm_now error number  %d", swarm_now);
+            if (afl->swarm_now < 0 || afl->swarm_now > swarm_num - 1)
+              PFATAL("swarm_now error number  %d", afl->swarm_now);
 
-          }                                    /* if swarm_now == swarm_num */
+          }                                    /* if afl->swarm_now == swarm_num */
 
-          /* adjust pointers dependent on 'swarm_now' */
-          MOpt_globals_pilot.finds = stage_finds_puppet[swarm_now];
-          MOpt_globals_pilot.finds_v2 = stage_finds_puppet_v2[swarm_now];
-          MOpt_globals_pilot.cycles = stage_cycles_puppet[swarm_now];
-          MOpt_globals_pilot.cycles_v2 = stage_cycles_puppet_v2[swarm_now];
-          MOpt_globals_pilot.cycles_v3 = stage_cycles_puppet_v3[swarm_now];
+          /* adjust pointers dependent on 'afl->swarm_now' */
+          afl->mopt_globals_pilot.finds = afl->stage_finds_puppet[afl->swarm_now];
+          afl->mopt_globals_pilot.finds_v2 = afl->stage_finds_puppet_v2[afl->swarm_now];
+          afl->mopt_globals_pilot.cycles = afl->stage_cycles_puppet[afl->swarm_now];
+          afl->mopt_globals_pilot.cycles_v2 = afl->stage_cycles_puppet_v2[afl->swarm_now];
+          afl->mopt_globals_pilot.cycles_v3 = afl->stage_cycles_puppet_v3[afl->swarm_now];
 
         } else {
 
-          key_module = 2;
+          afl->key_module = 2;
 
-          old_hit_count = new_hit_cnt;
+          afl->old_hit_count = new_hit_cnt;
 
         }                                                  /* if pilot_mode */
 
@@ -4268,37 +4224,41 @@ pacemaker_fuzzing:
 
 #undef FLIP_BIT
 
-#define pilot_fuzzing(a) common_fuzzing((a), MOpt_globals_pilot)
+u8 core_fuzzing(afl_state_t *afl) {
+  return mopt_common_fuzzing(afl, afl->mopt_globals_core);
+}
 
-#define core_fuzzing(a) common_fuzzing((a), MOpt_globals_core)
+u8 pilot_fuzzing(afl_state_t *afl) {
+  return mopt_common_fuzzing(afl, afl->mopt_globals_pilot);
+}
 
-void pso_updating(void) {
+void pso_updating(afl_state_t *afl) {
 
-  g_now += 1;
-  if (g_now > g_max) g_now = 0;
-  w_now = (w_init - w_end) * (g_max - g_now) / (g_max) + w_end;
+  afl->g_now += 1;
+  if (afl->g_now > afl->g_max) afl->g_now = 0;
+  afl->w_now = (afl->w_init - afl->w_end) * (afl->g_max - afl->g_now) / (afl->g_max) + afl->w_end;
   int tmp_swarm, i, j;
   u64 temp_operator_finds_puppet = 0;
   for (i = 0; i < operator_num; ++i) {
 
-    operator_finds_puppet[i] = core_operator_finds_puppet[i];
+    afl->operator_finds_puppet[i] = afl->core_operator_finds_puppet[i];
 
     for (j = 0; j < swarm_num; ++j) {
 
-      operator_finds_puppet[i] =
-          operator_finds_puppet[i] + stage_finds_puppet[j][i];
+      afl->operator_finds_puppet[i] =
+          afl->operator_finds_puppet[i] + afl->stage_finds_puppet[j][i];
 
     }
 
     temp_operator_finds_puppet =
-        temp_operator_finds_puppet + operator_finds_puppet[i];
+        temp_operator_finds_puppet + afl->operator_finds_puppet[i];
 
   }
 
   for (i = 0; i < operator_num; ++i) {
 
-    if (operator_finds_puppet[i])
-      G_best[i] = (double)((double)(operator_finds_puppet[i]) /
+    if (afl->operator_finds_puppet[i])
+      afl->G_best[i] = (double)((double)(afl->operator_finds_puppet[i]) /
                            (double)(temp_operator_finds_puppet));
 
   }
@@ -4308,39 +4268,39 @@ void pso_updating(void) {
     double x_temp = 0.0;
     for (i = 0; i < operator_num; ++i) {
 
-      probability_now[tmp_swarm][i] = 0.0;
-      v_now[tmp_swarm][i] =
-          w_now * v_now[tmp_swarm][i] +
-          RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) +
-          RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
-      x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
-      if (x_now[tmp_swarm][i] > v_max)
-        x_now[tmp_swarm][i] = v_max;
-      else if (x_now[tmp_swarm][i] < v_min)
-        x_now[tmp_swarm][i] = v_min;
-      x_temp += x_now[tmp_swarm][i];
+      afl->probability_now[tmp_swarm][i] = 0.0;
+      afl->v_now[tmp_swarm][i] =
+          afl->w_now * afl->v_now[tmp_swarm][i] +
+          RAND_C * (afl->L_best[tmp_swarm][i] - afl->x_now[tmp_swarm][i]) +
+          RAND_C * (afl->G_best[i] - afl->x_now[tmp_swarm][i]);
+      afl->x_now[tmp_swarm][i] += afl->v_now[tmp_swarm][i];
+      if (afl->x_now[tmp_swarm][i] > v_max)
+        afl->x_now[tmp_swarm][i] = v_max;
+      else if (afl->x_now[tmp_swarm][i] < v_min)
+        afl->x_now[tmp_swarm][i] = v_min;
+      x_temp += afl->x_now[tmp_swarm][i];
 
     }
 
     for (i = 0; i < operator_num; ++i) {
 
-      x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
+      afl->x_now[tmp_swarm][i] = afl->x_now[tmp_swarm][i] / x_temp;
       if (likely(i != 0))
-        probability_now[tmp_swarm][i] =
-            probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
+        afl->probability_now[tmp_swarm][i] =
+            afl->probability_now[tmp_swarm][i - 1] + afl->x_now[tmp_swarm][i];
       else
-        probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
+        afl->probability_now[tmp_swarm][i] = afl->x_now[tmp_swarm][i];
 
     }
 
-    if (probability_now[tmp_swarm][operator_num - 1] < 0.99 ||
-        probability_now[tmp_swarm][operator_num - 1] > 1.01)
+    if (afl->probability_now[tmp_swarm][operator_num - 1] < 0.99 ||
+        afl->probability_now[tmp_swarm][operator_num - 1] > 1.01)
       FATAL("ERROR probability");
 
   }
 
-  swarm_now = 0;
-  key_module = 0;
+  afl->swarm_now = 0;
+  afl->key_module = 0;
 
 }
 
@@ -4348,18 +4308,18 @@ void pso_updating(void) {
    to fuzz_one_original. All documentation references to fuzz_one therefore
    mean fuzz_one_original */
 
-u8 fuzz_one(char** argv) {
+u8 fuzz_one(afl_state_t *afl) {
 
   int key_val_lv = 0;
 
 #ifdef _AFL_DOCUMENT_MUTATIONS
-  if (do_document == 0) {
+  if (afl->do_document == 0) {
 
-    char* fn = alloc_printf("%s/mutations", out_dir);
+    char* fn = alloc_printf("%s/mutations", afl->out_dir);
     if (fn) {
 
-      do_document = mkdir(fn, 0700);  // if it exists we do not care
-      do_document = 1;
+      afl->do_document = mkdir(fn, 0700);  // if it exists we do not care
+      afl->do_document = 1;
       ck_free(fn);
 
     } else
@@ -4368,29 +4328,28 @@ u8 fuzz_one(char** argv) {
 
   } else {
 
-    do_document = 2;
-    stop_soon = 2;
+    afl->do_document = 2;
+    afl->stop_soon = 2;
 
   }
 
 #endif
 
-  if (limit_time_sig == 0) {
+  if (afl->limit_time_sig == 0) {
 
-    key_val_lv = fuzz_one_original(argv);
+    key_val_lv = fuzz_one_original(afl);
 
   } else {
 
-    if (key_module == 0)
-      key_val_lv = pilot_fuzzing(argv);
-    else if (key_module == 1)
-      key_val_lv = core_fuzzing(argv);
-    else if (key_module == 2)
-      pso_updating();
+    if (afl->key_module == 0)
+      key_val_lv = pilot_fuzzing(afl);
+    else if (afl->key_module == 1)
+      key_val_lv = core_fuzzing(afl);
+    else if (afl->key_module == 2)
+      pso_updating(afl);
 
   }
 
   return key_val_lv;
 
 }
-
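The afl-fuzz-one.c hunks above all apply one mechanical rule: a former global (or file-static) becomes a field of afl_state_t, and every function that read or wrote it now receives the state as its first argument, which is also why UR(x) becomes UR(afl, x) throughout. A minimal sketch of that rule, using hypothetical names (counter, bump) that are not part of this commit:

    /* before: implicit shared state
     *   extern u64 counter;
     *   void bump(void) { ++counter; }
     */
    static void bump(afl_state_t *afl) {   /* state threaded explicitly           */
      ++afl->counter;                      /* hypothetical field, illustration only */
    }
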
diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c
index 28b101f3..195fc6f3 100644
--- a/src/afl-fuzz-python.c
+++ b/src/afl-fuzz-python.c
@@ -28,7 +28,7 @@
 /* Python stuff */
 #ifdef USE_PYTHON
 
-int init_py_module(u8* module_name) {
+int init_py_module(afl_state_t *afl, u8* module_name) {
 
   if (!module_name) return 1;
 
@@ -40,14 +40,17 @@ int init_py_module(u8* module_name) {
   PyObject* py_name = PyString_FromString(module_name);
 #endif
 
-  py_module = PyImport_Import(py_name);
+  afl->py_module = PyImport_Import(py_name);
   Py_DECREF(py_name);
 
-  if (py_module != NULL) {
+  PyObject *py_module = afl->py_module;
+  PyObject **py_functions = afl->py_functions;
+
+  if (afl->py_module != NULL) {
 
     u8 py_notrim = 0, py_idx;
-    py_functions[PY_FUNC_INIT] = PyObject_GetAttrString(py_module, "init");
-    py_functions[PY_FUNC_FUZZ] = PyObject_GetAttrString(py_module, "fuzz");
+    py_functions[PY_FUNC_INIT] = PyObject_GetAttrString(afl->py_module, "init");
+    py_functions[PY_FUNC_FUZZ] = PyObject_GetAttrString(afl->py_module, "fuzz");
     py_functions[PY_FUNC_PRE_SAVE] =
         PyObject_GetAttrString(py_module, "pre_save");
     py_functions[PY_FUNC_INIT_TRIM] =
@@ -124,15 +127,15 @@ int init_py_module(u8* module_name) {
 
 }
 
-void finalize_py_module() {
+void finalize_py_module(afl_state_t *afl) {
 
-  if (py_module != NULL) {
+  if (afl->py_module != NULL) {
 
     u32 i;
     for (i = 0; i < PY_FUNC_COUNT; ++i)
-      Py_XDECREF(py_functions[i]);
+      Py_XDECREF(afl->py_functions[i]);
 
-    Py_DECREF(py_module);
+    Py_DECREF(afl->py_module);
 
   }
 
@@ -140,8 +143,7 @@ void finalize_py_module() {
 
 }
 
-void init_py(unsigned int seed) {
-
+void init_py(afl_state_t *afl, unsigned int seed) {
   PyObject *py_args, *py_value;
 
   /* Provide the init function a seed for the Python RNG */
@@ -162,7 +164,7 @@ void init_py(unsigned int seed) {
 
   PyTuple_SetItem(py_args, 0, py_value);
 
-  py_value = PyObject_CallObject(py_functions[PY_FUNC_INIT], py_args);
+  py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_INIT], py_args);
 
   Py_DECREF(py_args);
 
@@ -176,8 +178,8 @@ void init_py(unsigned int seed) {
 
 }
 
-size_t fuzz_py(u8** buf, size_t buf_size, u8* add_buf, size_t add_buf_size,
-               size_t max_size) {
+size_t fuzz_py(afl_state_t *afl, u8** buf, size_t buf_size, u8* add_buf, 
+               size_t add_buf_size, size_t max_size) {
 
   size_t    mutated_size;
   PyObject *py_args, *py_value;
@@ -220,7 +222,7 @@ size_t fuzz_py(u8** buf, size_t buf_size, u8* add_buf, size_t add_buf_size,
 
   PyTuple_SetItem(py_args, 2, py_value);
 
-  py_value = PyObject_CallObject(py_functions[PY_FUNC_FUZZ], py_args);
+  py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_FUZZ], py_args);
 
   Py_DECREF(py_args);
 
@@ -242,7 +244,7 @@ size_t fuzz_py(u8** buf, size_t buf_size, u8* add_buf, size_t add_buf_size,
 
 }
 
-size_t pre_save_py(u8* buf, size_t buf_size, u8** out_buf) {
+size_t pre_save_py(afl_state_t *afl, u8* buf, size_t buf_size, u8** out_buf) {
 
   size_t    out_buf_size;
   PyObject *py_args, *py_value;
@@ -257,7 +259,7 @@ size_t pre_save_py(u8* buf, size_t buf_size, u8** out_buf) {
 
   PyTuple_SetItem(py_args, 0, py_value);
 
-  py_value = PyObject_CallObject(py_functions[PY_FUNC_PRE_SAVE], py_args);
+  py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_PRE_SAVE], py_args);
 
   Py_DECREF(py_args);
 
@@ -278,7 +280,7 @@ size_t pre_save_py(u8* buf, size_t buf_size, u8** out_buf) {
 
 }
 
-u32 init_trim_py(u8* buf, size_t buf_size) {
+u32 init_trim_py(afl_state_t *afl, u8* buf, size_t buf_size) {
 
   PyObject *py_args, *py_value;
 
@@ -293,7 +295,7 @@ u32 init_trim_py(u8* buf, size_t buf_size) {
 
   PyTuple_SetItem(py_args, 0, py_value);
 
-  py_value = PyObject_CallObject(py_functions[PY_FUNC_INIT_TRIM], py_args);
+  py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_INIT_TRIM], py_args);
   Py_DECREF(py_args);
 
   if (py_value != NULL) {
@@ -315,7 +317,7 @@ u32 init_trim_py(u8* buf, size_t buf_size) {
 
 }
 
-u32 post_trim_py(u8 success) {
+u32 post_trim_py(afl_state_t *afl, u8 success) {
 
   PyObject *py_args, *py_value;
 
@@ -331,7 +333,7 @@ u32 post_trim_py(u8 success) {
 
   PyTuple_SetItem(py_args, 0, py_value);
 
-  py_value = PyObject_CallObject(py_functions[PY_FUNC_POST_TRIM], py_args);
+  py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_POST_TRIM], py_args);
   Py_DECREF(py_args);
 
   if (py_value != NULL) {
@@ -353,12 +355,12 @@ u32 post_trim_py(u8 success) {
 
 }
 
-void trim_py(u8** out_buf, size_t* out_buf_size) {
+void trim_py(afl_state_t *afl, u8** out_buf, size_t* out_buf_size) {
 
   PyObject *py_args, *py_value;
 
   py_args = PyTuple_New(0);
-  py_value = PyObject_CallObject(py_functions[PY_FUNC_TRIM], py_args);
+  py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_TRIM], py_args);
   Py_DECREF(py_args);
 
   if (py_value != NULL) {
@@ -377,7 +379,7 @@ void trim_py(u8** out_buf, size_t* out_buf_size) {
 
 }
 
-size_t havoc_mutation_py(u8** buf, size_t buf_size, size_t max_size) {
+size_t havoc_mutation_py(afl_state_t *afl, u8** buf, size_t buf_size, size_t max_size) {
 
   size_t    mutated_size;
   PyObject *py_args, *py_value;
@@ -409,7 +411,7 @@ size_t havoc_mutation_py(u8** buf, size_t buf_size, size_t max_size) {
 
   PyTuple_SetItem(py_args, 1, py_value);
 
-  py_value = PyObject_CallObject(py_functions[PY_FUNC_HAVOC_MUTATION], py_args);
+  py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_HAVOC_MUTATION], py_args);
 
   Py_DECREF(py_args);
 
@@ -432,13 +434,13 @@ size_t havoc_mutation_py(u8** buf, size_t buf_size, size_t max_size) {
 
 }
 
-u8 havoc_mutation_probability_py(void) {
+u8 havoc_mutation_probability_py(afl_state_t *afl) {
 
   PyObject *py_args, *py_value;
 
   py_args = PyTuple_New(0);
-  py_value = PyObject_CallObject(
-      py_functions[PY_FUNC_HAVOC_MUTATION_PROBABILITY], py_args);
+  py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_HAVOC_MUTATION_PROBABILITY], 
+                                 py_args);
   Py_DECREF(py_args);
 
   if (py_value != NULL) {
@@ -456,7 +458,7 @@ u8 havoc_mutation_probability_py(void) {
 
 }
 
-u8 queue_get_py(const u8* filename) {
+u8 queue_get_py(afl_state_t *afl, const u8* filename) {
 
   PyObject *py_args, *py_value;
 
@@ -478,7 +480,7 @@ u8 queue_get_py(const u8* filename) {
   PyTuple_SetItem(py_args, 0, py_value);
 
   // Call Python function
-  py_value = PyObject_CallObject(py_functions[PY_FUNC_QUEUE_GET], py_args);
+  py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_QUEUE_GET], py_args);
   Py_DECREF(py_args);
 
   if (py_value != NULL) {
@@ -504,7 +506,7 @@ u8 queue_get_py(const u8* filename) {
 
 }
 
-void queue_new_entry_py(const u8* filename_new_queue,
+void queue_new_entry_py(afl_state_t *afl, const u8* filename_new_queue,
                         const u8* filename_orig_queue) {
 
   PyObject *py_args, *py_value;
@@ -547,8 +549,8 @@ void queue_new_entry_py(const u8* filename_new_queue,
   PyTuple_SetItem(py_args, 1, py_value);
 
   // Call
-  py_value =
-      PyObject_CallObject(py_functions[PY_FUNC_QUEUE_NEW_ENTRY], py_args);
+  py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_QUEUE_NEW_ENTRY],
+                                 py_args);
   Py_DECREF(py_args);
 
   if (py_value == NULL) {
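In afl-fuzz-python.c the same rule is applied to the Python bindings: the module handle and the resolved hook functions now live in the state (afl->py_module, afl->py_functions), so every wrapper gains an afl_state_t * parameter. A rough sketch of how a caller drives the refactored setup and teardown; py_example is a hypothetical helper and error handling is omitted:

    /* sketch only, not part of the commit */
    static void py_example(afl_state_t *afl, u8 *module_name, unsigned int seed) {
      init_py_module(afl, module_name);   /* import the module, resolve the hooks above */
      init_py(afl, seed);                 /* forward the RNG seed to the module's init() */
      /* ... fuzzing then reaches fuzz_py(), trim_py(), etc. via afl->py_functions ... */
      finalize_py_module(afl);            /* drop the Python references again           */
    }
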
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index ad9dad13..c95889f5 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -28,12 +28,12 @@
    .state file to avoid repeating deterministic fuzzing when resuming aborted
    scans. */
 
-void mark_as_det_done(struct queue_entry* q) {
+void mark_as_det_done(afl_state_t *afl, struct queue_entry* q) {
 
   u8* fn = strrchr(q->fname, '/');
   s32 fd;
 
-  fn = alloc_printf("%s/queue/.state/deterministic_done/%s", out_dir, fn + 1);
+  fn = alloc_printf("%s/queue/.state/deterministic_done/%s", afl->out_dir, fn + 1);
 
   fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
   if (fd < 0) PFATAL("Unable to create '%s'", fn);
@@ -48,12 +48,12 @@ void mark_as_det_done(struct queue_entry* q) {
 /* Mark as variable. Create symlinks if possible to make it easier to examine
    the files. */
 
-void mark_as_variable(struct queue_entry* q) {
+void mark_as_variable(afl_state_t *afl, struct queue_entry* q) {
 
   u8 *fn = strrchr(q->fname, '/') + 1, *ldest;
 
   ldest = alloc_printf("../../%s", fn);
-  fn = alloc_printf("%s/queue/.state/variable_behavior/%s", out_dir, fn);
+  fn = alloc_printf("%s/queue/.state/variable_behavior/%s", afl->out_dir, fn);
 
   if (symlink(ldest, fn)) {
 
@@ -73,7 +73,7 @@ void mark_as_variable(struct queue_entry* q) {
 /* Mark / unmark as redundant (edge-only). This is not used for restoring state,
    but may be useful for post-processing datasets. */
 
-void mark_as_redundant(struct queue_entry* q, u8 state) {
+void mark_as_redundant(afl_state_t *afl, struct queue_entry* q, u8 state) {
 
   u8* fn;
 
@@ -82,7 +82,7 @@ void mark_as_redundant(struct queue_entry* q, u8 state) {
   q->fs_redundant = state;
 
   fn = strrchr(q->fname, '/');
-  fn = alloc_printf("%s/queue/.state/redundant_edges/%s", out_dir, fn + 1);
+  fn = alloc_printf("%s/queue/.state/redundant_edges/%s", afl->out_dir, fn + 1);
 
   if (state) {
 
@@ -104,49 +104,49 @@ void mark_as_redundant(struct queue_entry* q, u8 state) {
 
 /* Append new test case to the queue. */
 
-void add_to_queue(u8* fname, u32 len, u8 passed_det) {
+void add_to_queue(afl_state_t *afl, u8* fname, u32 len, u8 passed_det) {
 
   struct queue_entry* q = ck_alloc(sizeof(struct queue_entry));
 
   q->fname = fname;
   q->len = len;
-  q->depth = cur_depth + 1;
+  q->depth = afl->cur_depth + 1;
   q->passed_det = passed_det;
   q->n_fuzz = 1;
 
-  if (q->depth > max_depth) max_depth = q->depth;
+  if (q->depth > afl->max_depth) afl->max_depth = q->depth;
 
-  if (queue_top) {
+  if (afl->queue_top) {
 
-    queue_top->next = q;
-    queue_top = q;
+    afl->queue_top->next = q;
+    afl->queue_top = q;
 
   } else
 
-    q_prev100 = queue = queue_top = q;
+    afl->q_prev100 = afl->queue = afl->queue_top = q;
 
-  ++queued_paths;
-  ++pending_not_fuzzed;
+  ++afl->queued_paths;
+  ++afl->pending_not_fuzzed;
 
-  cycles_wo_finds = 0;
+  afl->cycles_wo_finds = 0;
 
-  if (!(queued_paths % 100)) {
+  if (!(afl->queued_paths % 100)) {
 
-    q_prev100->next_100 = q;
-    q_prev100 = q;
+    afl->q_prev100->next_100 = q;
+    afl->q_prev100 = q;
 
   }
 
-  last_path_time = get_cur_time();
+  afl->last_path_time = get_cur_time();
 
-  if (mutator && mutator->afl_custom_queue_new_entry) {
+  if (afl->mutator && afl->mutator->afl_custom_queue_new_entry) {
 
     u8* fname_orig = NULL;
 
     /* At the initialization stage, queue_cur is NULL */
-    if (queue_cur) fname_orig = queue_cur->fname;
+    if (afl->queue_cur) fname_orig = afl->queue_cur->fname;
 
-    mutator->afl_custom_queue_new_entry(fname, fname_orig);
+    afl->mutator->afl_custom_queue_new_entry(afl, fname, fname_orig);
 
   }
 
@@ -154,9 +154,9 @@ void add_to_queue(u8* fname, u32 len, u8 passed_det) {
 
 /* Destroy the entire queue. */
 
-void destroy_queue(void) {
+void destroy_queue(afl_state_t *afl) {
 
-  struct queue_entry *q = queue, *n;
+  struct queue_entry *q = afl->queue, *n;
 
   while (q) {
 
@@ -176,28 +176,28 @@ void destroy_queue(void) {
    seen in the bitmap so far, and focus on fuzzing them at the expense of
    the rest.
 
-   The first step of the process is to maintain a list of top_rated[] entries
+   The first step of the process is to maintain a list of afl->top_rated[] entries
    for every byte in the bitmap. We win that slot if there is no previous
    contender, or if the contender has a more favorable speed x size factor. */
 
-void update_bitmap_score(struct queue_entry* q) {
+void update_bitmap_score(afl_state_t *afl, struct queue_entry* q) {
 
   u32 i;
   u64 fav_factor = q->exec_us * q->len;
   u64 fuzz_p2 = next_p2(q->n_fuzz);
 
-  /* For every byte set in trace_bits[], see if there is a previous winner,
+  /* For every byte set in afl->fsrv.trace_bits[], see if there is a previous winner,
      and how it compares to us. */
 
   for (i = 0; i < MAP_SIZE; ++i)
 
-    if (trace_bits[i]) {
+    if (afl->fsrv.trace_bits[i]) {
 
-      if (top_rated[i]) {
+      if (afl->top_rated[i]) {
 
         /* Faster-executing or smaller test cases are favored. */
-        u64 top_rated_fuzz_p2 = next_p2(top_rated[i]->n_fuzz);
-        u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len;
+        u64 top_rated_fuzz_p2 = next_p2(afl->top_rated[i]->n_fuzz);
+        u64 top_rated_fav_factor = afl->top_rated[i]->exec_us * afl->top_rated[i]->len;
 
         if (fuzz_p2 > top_rated_fuzz_p2) {
 
@@ -209,15 +209,15 @@ void update_bitmap_score(struct queue_entry* q) {
 
         }
 
-        if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue;
+        if (fav_factor > afl->top_rated[i]->exec_us * afl->top_rated[i]->len) continue;
 
         /* Looks like we're going to win. Decrease ref count for the
-           previous winner, discard its trace_bits[] if necessary. */
+           previous winner, discard its afl->fsrv.trace_bits[] if necessary. */
 
-        if (!--top_rated[i]->tc_ref) {
+        if (!--afl->top_rated[i]->tc_ref) {
 
-          ck_free(top_rated[i]->trace_mini);
-          top_rated[i]->trace_mini = 0;
+          ck_free(afl->top_rated[i]->trace_mini);
+          afl->top_rated[i]->trace_mini = 0;
 
         }
 
@@ -225,44 +225,44 @@ void update_bitmap_score(struct queue_entry* q) {
 
       /* Insert ourselves as the new winner. */
 
-      top_rated[i] = q;
+      afl->top_rated[i] = q;
       ++q->tc_ref;
 
       if (!q->trace_mini) {
 
         q->trace_mini = ck_alloc(MAP_SIZE >> 3);
-        minimize_bits(q->trace_mini, trace_bits);
+        minimize_bits(q->trace_mini, afl->fsrv.trace_bits);
 
       }
 
-      score_changed = 1;
+      afl->score_changed = 1;
 
     }
 
 }
 
 /* The second part of the mechanism discussed above is a routine that
-   goes over top_rated[] entries, and then sequentially grabs winners for
+   goes over afl->top_rated[] entries, and then sequentially grabs winners for
    previously-unseen bytes (temp_v) and marks them as favored, at least
    until the next run. The favored entries are given more air time during
    all fuzzing steps. */
 
-void cull_queue(void) {
+void cull_queue(afl_state_t *afl) {
 
   struct queue_entry* q;
   static u8           temp_v[MAP_SIZE >> 3];
   u32                 i;
 
-  if (dumb_mode || !score_changed) return;
+  if (afl->dumb_mode || !afl->score_changed) return;
 
-  score_changed = 0;
+  afl->score_changed = 0;
 
   memset(temp_v, 255, MAP_SIZE >> 3);
 
-  queued_favored = 0;
-  pending_favored = 0;
+  afl->queued_favored = 0;
+  afl->pending_favored = 0;
 
-  q = queue;
+  q = afl->queue;
 
   while (q) {
 
@@ -272,32 +272,32 @@ void cull_queue(void) {
   }
 
   /* Let's see if anything in the bitmap isn't captured in temp_v.
-     If yes, and if it has a top_rated[] contender, let's use it. */
+     If yes, and if it has a afl->top_rated[] contender, let's use it. */
 
   for (i = 0; i < MAP_SIZE; ++i)
-    if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {
+    if (afl->top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {
 
       u32 j = MAP_SIZE >> 3;
 
       /* Remove all bits belonging to the current entry from temp_v. */
 
       while (j--)
-        if (top_rated[i]->trace_mini[j])
-          temp_v[j] &= ~top_rated[i]->trace_mini[j];
+        if (afl->top_rated[i]->trace_mini[j])
+          temp_v[j] &= ~afl->top_rated[i]->trace_mini[j];
 
-      top_rated[i]->favored = 1;
-      ++queued_favored;
+      afl->top_rated[i]->favored = 1;
+      ++afl->queued_favored;
 
-      if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed)
-        ++pending_favored;
+      if (afl->top_rated[i]->fuzz_level == 0 || !afl->top_rated[i]->was_fuzzed)
+        ++afl->pending_favored;
 
     }
 
-  q = queue;
+  q = afl->queue;
 
   while (q) {
 
-    mark_as_redundant(q, !q->favored);
+    mark_as_redundant(afl, q, !q->favored);
     q = q->next;
 
   }
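
With the globals gone, the culling pass is invoked with the state object from the main loop. A minimal usage sketch (the surrounding loop is illustrative, not lifted from the patch):

    /* cull_queue() returns immediately unless afl->score_changed was set by
       update_bitmap_score(), so calling it once per iteration is cheap. */
    while (!afl->stop_soon) {

      cull_queue(afl);                              /* was: cull_queue(); */
      /* ... pick afl->queue_cur, fuzz it, advance to the next entry ... */

    }
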
@@ -308,10 +308,10 @@ void cull_queue(void) {
    A helper function for fuzz_one(). Maybe some of these constants should
    go into config.h. */
 
-u32 calculate_score(struct queue_entry* q) {
+u32 calculate_score(afl_state_t *afl, struct queue_entry* q) {
 
-  u32 avg_exec_us = total_cal_us / total_cal_cycles;
-  u32 avg_bitmap_size = total_bitmap_size / total_bitmap_entries;
+  u32 avg_exec_us = afl->total_cal_us / afl->total_cal_cycles;
+  u32 avg_bitmap_size = afl->total_bitmap_size / afl->total_bitmap_entries;
   u32 perf_score = 100;
 
   /* Adjust score based on execution speed of this path, compared to the
@@ -391,7 +391,7 @@ u32 calculate_score(struct queue_entry* q) {
   u32 n_paths, fuzz_mu;
   u32 factor = 1;
 
-  switch (schedule) {
+  switch (afl->schedule) {
 
     case EXPLORE: break;
 
@@ -401,7 +401,7 @@ u32 calculate_score(struct queue_entry* q) {
       fuzz_total = 0;
       n_paths = 0;
 
-      struct queue_entry* queue_it = queue;
+      struct queue_entry* queue_it = afl->queue;
       while (queue_it) {
 
         fuzz_total += queue_it->n_fuzz;
@@ -451,7 +451,7 @@ u32 calculate_score(struct queue_entry* q) {
   perf_score *= factor / POWER_BETA;
 
   // MOpt mode
-  if (limit_time_sig != 0 && max_depth - q->depth < 3)
+  if (afl->limit_time_sig != 0 && afl->max_depth - q->depth < 3)
     perf_score *= 2;
   else if (perf_score < 1)
     perf_score =
@@ -459,7 +459,7 @@ u32 calculate_score(struct queue_entry* q) {
 
   /* Make sure that we don't go over limit. */
 
-  if (perf_score > havoc_max_mult * 100) perf_score = havoc_max_mult * 100;
+  if (perf_score > afl->havoc_max_mult * 100) perf_score = afl->havoc_max_mult * 100;
 
   return perf_score;
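
calculate_score() now reads the calibration averages, the power schedule and the MOpt settings from the state object instead of from globals. A sketch of a call site (HAVOC_CYCLES is the config.h constant; the exact scaling used in fuzz_one() differs slightly):

    u32 perf_score  = calculate_score(afl, afl->queue_cur);
    u32 havoc_steps = HAVOC_CYCLES * perf_score / 100;   /* illustrative use */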
 
diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c
index d6f117f6..560ec419 100644
--- a/src/afl-fuzz-redqueen.c
+++ b/src/afl-fuzz-redqueen.c
@@ -27,8 +27,6 @@
 #include "afl-fuzz.h"
 #include "cmplog.h"
 
-static char** its_argv;
-
 ///// Colorization
 
 struct range {
@@ -86,24 +84,24 @@ struct range* pop_biggest_range(struct range** ranges) {
 
 }
 
-u8 get_exec_checksum(u8* buf, u32 len, u32* cksum) {
+static u8 get_exec_checksum(afl_state_t *afl, u8* buf, u32 len, u32* cksum) {
 
-  if (unlikely(common_fuzz_stuff(its_argv, buf, len))) return 1;
+  if (unlikely(common_fuzz_stuff(afl, buf, len))) return 1;
 
-  *cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+  *cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
   return 0;
 
 }
 
-static void rand_replace(u8* buf, u32 len) {
+static void rand_replace(afl_state_t *afl, u8* buf, u32 len) {
 
   u32 i;
   for (i = 0; i < len; ++i)
-    buf[i] = UR(256);
+    buf[i] = UR(afl, 256);
 
 }
 
-u8 colorization(u8* buf, u32 len, u32 exec_cksum) {
+static u8 colorization(afl_state_t *afl, u8* buf, u32 len, u32 exec_cksum) {
 
   struct range* ranges = add_range(NULL, 0, len);
   u8*           backup = ck_alloc_nozero(len);
@@ -111,24 +109,24 @@ u8 colorization(u8* buf, u32 len, u32 exec_cksum) {
   u8 needs_write = 0;
 
   u64 orig_hit_cnt, new_hit_cnt;
-  orig_hit_cnt = queued_paths + unique_crashes;
+  orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_name = "colorization";
-  stage_short = "colorization";
-  stage_max = 1000;
+  afl->stage_name = "colorization";
+  afl->stage_short = "colorization";
+  afl->stage_max = 1000;
 
   struct range* rng;
-  stage_cur = 0;
-  while ((rng = pop_biggest_range(&ranges)) != NULL && stage_cur < stage_max) {
+  afl->stage_cur = 0;
+  while ((rng = pop_biggest_range(&ranges)) != NULL && afl->stage_cur < afl->stage_max) {
 
     u32 s = rng->end - rng->start;
     if (s == 0) goto empty_range;
 
     memcpy(backup, buf + rng->start, s);
-    rand_replace(buf + rng->start, s);
+    rand_replace(afl, buf + rng->start, s);
 
     u32 cksum;
-    if (unlikely(get_exec_checksum(buf, len, &cksum))) goto checksum_fail;
+    if (unlikely(get_exec_checksum(afl, buf, len, &cksum))) goto checksum_fail;
 
     if (cksum != exec_cksum) {
 
@@ -142,15 +140,15 @@ u8 colorization(u8* buf, u32 len, u32 exec_cksum) {
 
   empty_range:
     ck_free(rng);
-    ++stage_cur;
+    ++afl->stage_cur;
 
   }
 
-  if (stage_cur < stage_max) queue_cur->fully_colorized = 1;
+  if (afl->stage_cur < afl->stage_max) afl->queue_cur->fully_colorized = 1;
 
-  new_hit_cnt = queued_paths + unique_crashes;
-  stage_finds[STAGE_COLORIZATION] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_COLORIZATION] += stage_cur;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
+  afl->stage_finds[STAGE_COLORIZATION] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_COLORIZATION] += afl->stage_cur;
   ck_free(backup);
 
   while (ranges) {
@@ -167,21 +165,21 @@ u8 colorization(u8* buf, u32 len, u32 exec_cksum) {
 
     s32 fd;
 
-    if (no_unlink) {
+    if (afl->no_unlink) {
 
-      fd = open(queue_cur->fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);
+      fd = open(afl->queue_cur->fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 
     } else {
 
-      unlink(queue_cur->fname);                            /* ignore errors */
-      fd = open(queue_cur->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);
+      unlink(afl->queue_cur->fname);                            /* ignore errors */
+      fd = open(afl->queue_cur->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);
 
     }
 
-    if (fd < 0) PFATAL("Unable to create '%s'", queue_cur->fname);
+    if (fd < 0) PFATAL("Unable to create '%s'", afl->queue_cur->fname);
 
-    ck_write(fd, buf, len, queue_cur->fname);
-    queue_cur->len = len;  // no-op, just to be 100% safe
+    ck_write(fd, buf, len, afl->queue_cur->fname);
+    afl->queue_cur->len = len;  // no-op, just to be 100% safe
 
     close(fd);
 
@@ -206,15 +204,15 @@ checksum_fail:
 
 ///// Input to State replacement
 
-u8 its_fuzz(u8* buf, u32 len, u8* status) {
+static u8 its_fuzz(afl_state_t *afl, u8* buf, u32 len, u8* status) {
 
   u64 orig_hit_cnt, new_hit_cnt;
 
-  orig_hit_cnt = queued_paths + unique_crashes;
+  orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  if (unlikely(common_fuzz_stuff(its_argv, buf, len))) return 1;
+  if (unlikely(common_fuzz_stuff(afl, buf, len))) return 1;
 
-  new_hit_cnt = queued_paths + unique_crashes;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
   if (unlikely(new_hit_cnt != orig_hit_cnt))
     *status = 1;
@@ -225,7 +223,7 @@ u8 its_fuzz(u8* buf, u32 len, u8* status) {
 
 }
 
-u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
+static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
                        u8* orig_buf, u8* buf, u32 len, u8 do_reverse,
                        u8* status) {
 
@@ -246,14 +244,14 @@ u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
     if (its_len >= 8 && *buf_64 == pattern) {  // && *o_buf_64 == pattern) {
 
       *buf_64 = repl;
-      if (unlikely(its_fuzz(buf, len, status))) return 1;
+      if (unlikely(its_fuzz(afl, buf, len, status))) return 1;
       *buf_64 = pattern;
 
     }
 
     // reverse encoding
     if (do_reverse)
-      if (unlikely(cmp_extend_encoding(h, SWAP64(pattern), SWAP64(repl), idx,
+      if (unlikely(cmp_extend_encoding(afl, h, SWAP64(pattern), SWAP64(repl), idx,
                                        orig_buf, buf, len, 0, status)))
         return 1;
 
@@ -265,14 +263,14 @@ u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
         *buf_32 == (u32)pattern) {  // && *o_buf_32 == (u32)pattern) {
 
       *buf_32 = (u32)repl;
-      if (unlikely(its_fuzz(buf, len, status))) return 1;
+      if (unlikely(its_fuzz(afl, buf, len, status))) return 1;
       *buf_32 = pattern;
 
     }
 
     // reverse encoding
     if (do_reverse)
-      if (unlikely(cmp_extend_encoding(h, SWAP32(pattern), SWAP32(repl), idx,
+      if (unlikely(cmp_extend_encoding(afl, h, SWAP32(pattern), SWAP32(repl), idx,
                                        orig_buf, buf, len, 0, status)))
         return 1;
 
@@ -284,14 +282,14 @@ u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
         *buf_16 == (u16)pattern) {  // && *o_buf_16 == (u16)pattern) {
 
       *buf_16 = (u16)repl;
-      if (unlikely(its_fuzz(buf, len, status))) return 1;
+      if (unlikely(its_fuzz(afl, buf, len, status))) return 1;
       *buf_16 = (u16)pattern;
 
     }
 
     // reverse encoding
     if (do_reverse)
-      if (unlikely(cmp_extend_encoding(h, SWAP16(pattern), SWAP16(repl), idx,
+      if (unlikely(cmp_extend_encoding(afl, h, SWAP16(pattern), SWAP16(repl), idx,
                                        orig_buf, buf, len, 0, status)))
         return 1;
 
@@ -302,7 +300,7 @@ u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
     if (its_len >= 2 && *buf_8 == (u8)pattern) {// && *o_buf_8 == (u8)pattern) {
 
       *buf_8 = (u8)repl;
-      if (unlikely(its_fuzz(buf, len, status)))
+      if (unlikely(its_fuzz(afl, buf, len, status)))
         return 1;
       *buf_8 = (u8)pattern;
 
@@ -314,7 +312,7 @@ u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
 
 }
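
cmp_extend_encoding() tries the same substitution at 64-, 32-, 16- and 8-bit widths, plus the byte-swapped ("reverse") encodings. Reduced to a single width, the idea is (sketch only, bounds checks and the recursion trimmed):

    /* If the bytes at idx hold one operand of an observed comparison, write
       the other operand there, run the target once, then restore the bytes. */
    u64* spot = (u64*)(buf + idx);

    if (len - idx >= 8 && *spot == pattern) {

      *spot = repl;
      if (unlikely(its_fuzz(afl, buf, len, &status))) return 1;
      *spot = pattern;

    }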
 
-void try_to_add_to_dict(u64 v, u8 shape) {
+static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) {
 
   u8* b = (u8*)&v;
 
@@ -333,7 +331,7 @@ void try_to_add_to_dict(u64 v, u8 shape) {
 
   }
 
-  maybe_add_auto((u8*)&v, shape);
+  maybe_add_auto(afl, (u8*)&v, shape);
 
   u64 rev;
   switch (shape) {
@@ -341,24 +339,24 @@ void try_to_add_to_dict(u64 v, u8 shape) {
     case 1: break;
     case 2:
       rev = SWAP16((u16)v);
-      maybe_add_auto((u8*)&rev, shape);
+      maybe_add_auto(afl, (u8*)&rev, shape);
       break;
     case 4:
       rev = SWAP32((u32)v);
-      maybe_add_auto((u8*)&rev, shape);
+      maybe_add_auto(afl, (u8*)&rev, shape);
       break;
     case 8:
       rev = SWAP64(v);
-      maybe_add_auto((u8*)&rev, shape);
+      maybe_add_auto(afl, (u8*)&rev, shape);
       break;
 
   }
 
 }
 
-u8 cmp_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
+static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8* orig_buf, u8* buf, u32 len) {
 
-  struct cmp_header* h = &cmp_map->headers[key];
+  struct cmp_header* h = &afl->shm.cmp_map->headers[key];
   u32                i, j, idx;
 
   u32 loggeds = h->hits;
@@ -370,16 +368,16 @@ u8 cmp_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
 
   for (i = 0; i < loggeds; ++i) {
 
-    struct cmp_operands* o = &cmp_map->log[key][i];
+    struct cmp_operands* o = &afl->shm.cmp_map->log[key][i];
 
     // opt not in the paper
     for (j = 0; j < i; ++j)
-      if (cmp_map->log[key][j].v0 == o->v0 && cmp_map->log[key][i].v1 == o->v1)
+      if (afl->shm.cmp_map->log[key][j].v0 == o->v0 && afl->shm.cmp_map->log[key][i].v1 == o->v1)
         goto cmp_fuzz_next_iter;
 
     for (idx = 0; idx < len && fails < 8; ++idx) {
 
-      if (unlikely(cmp_extend_encoding(h, o->v0, o->v1, idx, orig_buf, buf, len,
+      if (unlikely(cmp_extend_encoding(afl, h, o->v0, o->v1, idx, orig_buf, buf, len,
                                        1, &status)))
         return 1;
       if (status == 2)
@@ -387,7 +385,7 @@ u8 cmp_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
       else if (status == 1)
         break;
 
-      if (unlikely(cmp_extend_encoding(h, o->v1, o->v0, idx, orig_buf, buf, len,
+      if (unlikely(cmp_extend_encoding(afl, h, o->v1, o->v0, idx, orig_buf, buf, len,
                                        1, &status)))
         return 1;
       if (status == 2)
@@ -400,13 +398,13 @@ u8 cmp_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
     // If failed, add to dictionary
     if (fails == 8) {
 
-      try_to_add_to_dict(o->v0, SHAPE_BYTES(h->shape));
-      try_to_add_to_dict(o->v1, SHAPE_BYTES(h->shape));
+      try_to_add_to_dict(afl, o->v0, SHAPE_BYTES(h->shape));
+      try_to_add_to_dict(afl, o->v1, SHAPE_BYTES(h->shape));
 
     }
 
   cmp_fuzz_next_iter:
-    stage_cur++;
+    afl->stage_cur++;
 
   }
 
@@ -414,7 +412,7 @@ u8 cmp_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
 
 }
 
-u8 rtn_extend_encoding(struct cmp_header* h, u8* pattern, u8* repl, u32 idx,
+static u8 rtn_extend_encoding(afl_state_t *afl, struct cmp_header* h, u8* pattern, u8* repl, u32 idx,
                        u8* orig_buf, u8* buf, u32 len, u8* status) {
 
   u32 i;
@@ -430,7 +428,7 @@ u8 rtn_extend_encoding(struct cmp_header* h, u8* pattern, u8* repl, u32 idx,
     if (pattern[idx + i] != buf[idx + i] || *status == 1) break;
 
     buf[idx + i] = repl[idx + i];
-    if (unlikely(its_fuzz(buf, len, status))) return 1;
+    if (unlikely(its_fuzz(afl, buf, len, status))) return 1;
 
   }
 
@@ -439,9 +437,9 @@ u8 rtn_extend_encoding(struct cmp_header* h, u8* pattern, u8* repl, u32 idx,
 
 }
 
-u8 rtn_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
+static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8* orig_buf, u8* buf, u32 len) {
 
-  struct cmp_header* h = &cmp_map->headers[key];
+  struct cmp_header* h = &afl->shm.cmp_map->headers[key];
   u32                i, j, idx;
 
   u32 loggeds = h->hits;
@@ -453,17 +451,17 @@ u8 rtn_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
 
   for (i = 0; i < loggeds; ++i) {
 
-    struct cmpfn_operands* o = &((struct cmpfn_operands*)cmp_map->log[key])[i];
+    struct cmpfn_operands* o = &((struct cmpfn_operands*)afl->shm.cmp_map->log[key])[i];
 
     // opt not in the paper
     for (j = 0; j < i; ++j)
-      if (!memcmp(&((struct cmpfn_operands*)cmp_map->log[key])[j], o,
+      if (!memcmp(&((struct cmpfn_operands*)afl->shm.cmp_map->log[key])[j], o,
                   sizeof(struct cmpfn_operands)))
         goto rtn_fuzz_next_iter;
 
     for (idx = 0; idx < len && fails < 8; ++idx) {
 
-      if (unlikely(rtn_extend_encoding(h, o->v0, o->v1, idx, orig_buf, buf, len,
+      if (unlikely(rtn_extend_encoding(afl, h, o->v0, o->v1, idx, orig_buf, buf, len,
                                        &status)))
         return 1;
       if (status == 2)
@@ -471,7 +469,7 @@ u8 rtn_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
       else if (status == 1)
         break;
 
-      if (unlikely(rtn_extend_encoding(h, o->v1, o->v0, idx, orig_buf, buf, len,
+      if (unlikely(rtn_extend_encoding(afl, h, o->v1, o->v0, idx, orig_buf, buf, len,
                                        &status)))
         return 1;
       if (status == 2)
@@ -484,13 +482,13 @@ u8 rtn_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
     // If failed, add to dictionary
     if (fails == 8) {
 
-      maybe_add_auto(o->v0, SHAPE_BYTES(h->shape));
-      maybe_add_auto(o->v1, SHAPE_BYTES(h->shape));
+      maybe_add_auto(afl, o->v0, SHAPE_BYTES(h->shape));
+      maybe_add_auto(afl, o->v1, SHAPE_BYTES(h->shape));
 
     }
 
   rtn_fuzz_next_iter:
-    stage_cur++;
+    afl->stage_cur++;
 
   }
 
@@ -500,51 +498,50 @@ u8 rtn_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
 
 ///// Input to State stage
 
-// queue_cur->exec_cksum
-u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len,
+// afl->queue_cur->exec_cksum
+u8 input_to_state_stage(afl_state_t *afl, u8* orig_buf, u8* buf, u32 len,
                         u32 exec_cksum) {
 
   u8 r = 1;
-  its_argv = argv;
 
-  if (unlikely(colorization(buf, len, exec_cksum))) return 1;
+  if (unlikely(colorization(afl, buf, len, exec_cksum))) return 1;
 
-  // do it manually, forkserver clear only trace_bits
-  memset(cmp_map->headers, 0, sizeof(cmp_map->headers));
+  // do it manually, the forkserver only clears afl->fsrv.trace_bits
+  memset(afl->shm.cmp_map->headers, 0, sizeof(afl->shm.cmp_map->headers));
 
-  if (unlikely(common_fuzz_cmplog_stuff(argv, buf, len))) return 1;
+  if (unlikely(common_fuzz_cmplog_stuff(afl, buf, len))) return 1;
 
   u64 orig_hit_cnt, new_hit_cnt;
-  u64 orig_execs = total_execs;
-  orig_hit_cnt = queued_paths + unique_crashes;
+  u64 orig_execs = afl->total_execs;
+  orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
 
-  stage_name = "input-to-state";
-  stage_short = "its";
-  stage_max = 0;
-  stage_cur = 0;
+  afl->stage_name = "input-to-state";
+  afl->stage_short = "its";
+  afl->stage_max = 0;
+  afl->stage_cur = 0;
 
   u32 k;
   for (k = 0; k < CMP_MAP_W; ++k) {
 
-    if (!cmp_map->headers[k].hits) continue;
-    if (cmp_map->headers[k].type == CMP_TYPE_INS)
-      stage_max += MIN(cmp_map->headers[k].hits, CMP_MAP_H);
+    if (!afl->shm.cmp_map->headers[k].hits) continue;
+    if (afl->shm.cmp_map->headers[k].type == CMP_TYPE_INS)
+      afl->stage_max += MIN(afl->shm.cmp_map->headers[k].hits, CMP_MAP_H);
     else
-      stage_max += MIN(cmp_map->headers[k].hits, CMP_MAP_RTN_H);
+      afl->stage_max += MIN(afl->shm.cmp_map->headers[k].hits, CMP_MAP_RTN_H);
 
   }
 
   for (k = 0; k < CMP_MAP_W; ++k) {
 
-    if (!cmp_map->headers[k].hits) continue;
+    if (!afl->shm.cmp_map->headers[k].hits) continue;
 
-    if (cmp_map->headers[k].type == CMP_TYPE_INS) {
+    if (afl->shm.cmp_map->headers[k].type == CMP_TYPE_INS) {
 
-      if (unlikely(cmp_fuzz(k, orig_buf, buf, len))) goto exit_its;
+      if (unlikely(cmp_fuzz(afl, k, orig_buf, buf, len))) goto exit_its;
 
     } else {
 
-      if (unlikely(rtn_fuzz(k, orig_buf, buf, len))) goto exit_its;
+      if (unlikely(rtn_fuzz(afl, k, orig_buf, buf, len))) goto exit_its;
 
     }
 
@@ -555,9 +552,9 @@ u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len,
 exit_its:
   memcpy(orig_buf, buf, len);
 
-  new_hit_cnt = queued_paths + unique_crashes;
-  stage_finds[STAGE_ITS] += new_hit_cnt - orig_hit_cnt;
-  stage_cycles[STAGE_ITS] += total_execs - orig_execs;
+  new_hit_cnt = afl->queued_paths + afl->unique_crashes;
+  afl->stage_finds[STAGE_ITS] += new_hit_cnt - orig_hit_cnt;
+  afl->stage_cycles[STAGE_ITS] += afl->total_execs - orig_execs;
 
   return r;
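
With the static its_argv gone, the whole stage hangs off the state object. A plausible call site in fuzz_one() looks like this (sketch; the guard and the abandon_entry label are illustrative):

    if (afl->shm.cmplog_mode && !afl->queue_cur->fully_colorized) {

      if (input_to_state_stage(afl, orig_in, out_buf, len,
                               afl->queue_cur->exec_cksum))
        goto abandon_entry;

    }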
 
diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c
index f0ba2fe8..4ff59f99 100644
--- a/src/afl-fuzz-run.c
+++ b/src/afl-fuzz-run.c
@@ -26,9 +26,9 @@
 #include "afl-fuzz.h"
 
 /* Execute target application, monitoring for timeouts. Return status
-   information. The called program will update trace_bits[]. */
+   information. The called program will update afl->fsrv.trace_bits[]. */
 
-u8 run_target(char** argv, u32 timeout) {
+u8 run_target(afl_state_t *afl, u32 timeout) {
 
   static struct itimerval it;
   static u32              prev_timed_out = 0;
@@ -37,13 +37,13 @@ u8 run_target(char** argv, u32 timeout) {
   int status = 0;
   u32 tb4;
 
-  child_timed_out = 0;
+  afl->fsrv.child_timed_out = 0;
 
-  /* After this memset, trace_bits[] are effectively volatile, so we
+  /* After this memset, afl->fsrv.trace_bits[] are effectively volatile, so we
      must prevent any earlier operations from venturing into that
      territory. */
 
-  memset(trace_bits, 0, MAP_SIZE);
+  memset(afl->fsrv.trace_bits, 0, MAP_SIZE);
   MEM_BARRIER();
 
   /* If we're running in "dumb" mode, we can't rely on the fork server
@@ -51,19 +51,19 @@ u8 run_target(char** argv, u32 timeout) {
      execve(). There is a bit of code duplication between here and
      init_forkserver(), but c'est la vie. */
 
-  if (dumb_mode == 1 || no_forkserver) {
+  if (afl->dumb_mode == 1 || afl->no_forkserver) {
 
-    child_pid = fork();
+    afl->fsrv.child_pid = fork();
 
-    if (child_pid < 0) PFATAL("fork() failed");
+    if (afl->fsrv.child_pid < 0) PFATAL("fork() failed");
 
-    if (!child_pid) {
+    if (!afl->fsrv.child_pid) {
 
       struct rlimit r;
 
-      if (mem_limit) {
+      if (afl->fsrv.mem_limit) {
 
-        r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
+        r.rlim_max = r.rlim_cur = ((rlim_t)afl->fsrv.mem_limit) << 20;
 
 #ifdef RLIMIT_AS
 
@@ -81,33 +81,33 @@ u8 run_target(char** argv, u32 timeout) {
 
       setrlimit(RLIMIT_CORE, &r);                          /* Ignore errors */
 
-      /* Isolate the process and configure standard descriptors. If out_file is
-         specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
+      /* Isolate the process and configure standard descriptors. If afl->fsrv.out_file is
+         specified, stdin is /dev/null; otherwise, afl->fsrv.out_fd is cloned instead. */
 
       setsid();
 
-      dup2(dev_null_fd, 1);
-      dup2(dev_null_fd, 2);
+      dup2(afl->fsrv.dev_null_fd, 1);
+      dup2(afl->fsrv.dev_null_fd, 2);
 
-      if (out_file) {
+      if (afl->fsrv.out_file) {
 
-        dup2(dev_null_fd, 0);
+        dup2(afl->fsrv.dev_null_fd, 0);
 
       } else {
 
-        dup2(out_fd, 0);
-        close(out_fd);
+        dup2(afl->fsrv.out_fd, 0);
+        close(afl->fsrv.out_fd);
 
       }
 
       /* On Linux, would be faster to use O_CLOEXEC. Maybe TODO. */
 
-      close(dev_null_fd);
-      close(out_dir_fd);
+      close(afl->fsrv.dev_null_fd);
+      close(afl->fsrv.out_dir_fd);
 #ifndef HAVE_ARC4RANDOM
-      close(dev_urandom_fd);
+      close(afl->fsrv.dev_urandom_fd);
 #endif
-      close(fileno(plot_file));
+      close(fileno(afl->fsrv.plot_file));
 
       /* Set sane defaults for ASAN if nothing else specified. */
 
@@ -122,12 +122,12 @@ u8 run_target(char** argv, u32 timeout) {
                              "symbolize=0:"
                              "msan_track_origins=0", 0);
 
-      execv(target_path, argv);
+      execv(afl->fsrv.target_path, afl->argv);
 
       /* Use a distinctive bitmap value to tell the parent about execv()
          falling through. */
 
-      *(u32*)trace_bits = EXEC_FAIL_SIG;
+      *(u32*)afl->fsrv.trace_bits = EXEC_FAIL_SIG;
       exit(0);
 
     }
@@ -139,21 +139,21 @@ u8 run_target(char** argv, u32 timeout) {
     /* In non-dumb mode, we have the fork server up and running, so simply
        tell it to have at it, and then read back PID. */
 
-    if ((res = write(fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
+    if ((res = write(afl->fsrv.fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
 
-      if (stop_soon) return 0;
+      if (afl->stop_soon) return 0;
       RPFATAL(res, "Unable to request new process from fork server (OOM?)");
 
     }
 
-    if ((res = read(fsrv_st_fd, &child_pid, 4)) != 4) {
+    if ((res = read(afl->fsrv.fsrv_st_fd, &afl->fsrv.child_pid, 4)) != 4) {
 
-      if (stop_soon) return 0;
+      if (afl->stop_soon) return 0;
       RPFATAL(res, "Unable to request new process from fork server (OOM?)");
 
     }
 
-    if (child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
+    if (afl->fsrv.child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
 
   }
 
@@ -165,19 +165,19 @@ u8 run_target(char** argv, u32 timeout) {
 
   setitimer(ITIMER_REAL, &it, NULL);
 
-  /* The SIGALRM handler simply kills the child_pid and sets child_timed_out. */
+  /* The SIGALRM handler simply kills the afl->fsrv.child_pid and sets afl->fsrv.child_timed_out. */
 
-  if (dumb_mode == 1 || no_forkserver) {
+  if (afl->dumb_mode == 1 || afl->no_forkserver) {
 
-    if (waitpid(child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
+    if (waitpid(afl->fsrv.child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
 
   } else {
 
     s32 res;
 
-    if ((res = read(fsrv_st_fd, &status, 4)) != 4) {
+    if ((res = read(afl->fsrv.fsrv_st_fd, &status, 4)) != 4) {
 
-      if (stop_soon) return 0;
+      if (afl->stop_soon) return 0;
       SAYF(
           "\n" cLRD "[-] " cRST
           "Unable to communicate with fork server. Some possible reasons:\n\n"
@@ -196,50 +196,50 @@ u8 run_target(char** argv, u32 timeout) {
           "\n\n"
           "If all else fails you can disable the fork server via "
           "AFL_NO_FORKSRV=1.\n",
-          mem_limit);
+          afl->fsrv.mem_limit);
       RPFATAL(res, "Unable to communicate with fork server");
 
     }
 
   }
 
-  if (!WIFSTOPPED(status)) child_pid = 0;
+  if (!WIFSTOPPED(status)) afl->fsrv.child_pid = 0;
 
   getitimer(ITIMER_REAL, &it);
   exec_ms =
       (u64)timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000);
-  if (slowest_exec_ms < exec_ms) slowest_exec_ms = exec_ms;
+  if (afl->slowest_exec_ms < exec_ms) afl->slowest_exec_ms = exec_ms;
 
   it.it_value.tv_sec = 0;
   it.it_value.tv_usec = 0;
 
   setitimer(ITIMER_REAL, &it, NULL);
 
-  ++total_execs;
+  ++afl->total_execs;
 
-  /* Any subsequent operations on trace_bits must not be moved by the
-     compiler below this point. Past this location, trace_bits[] behave
+  /* Any subsequent operations on afl->fsrv.trace_bits must not be moved by the
+     compiler below this point. Past this location, afl->fsrv.trace_bits[] behave
      very normally and do not have to be treated as volatile. */
 
   MEM_BARRIER();
 
-  tb4 = *(u32*)trace_bits;
+  tb4 = *(u32*)afl->fsrv.trace_bits;
 
 #ifdef WORD_SIZE_64
-  classify_counts((u64*)trace_bits);
+  classify_counts((u64*)afl->fsrv.trace_bits);
 #else
-  classify_counts((u32*)trace_bits);
+  classify_counts((u32*)afl->fsrv.trace_bits);
 #endif                                                     /* ^WORD_SIZE_64 */
 
-  prev_timed_out = child_timed_out;
+  prev_timed_out = afl->fsrv.child_timed_out;
 
   /* Report outcome to caller. */
 
-  if (WIFSIGNALED(status) && !stop_soon) {
+  if (WIFSIGNALED(status) && !afl->stop_soon) {
 
-    kill_signal = WTERMSIG(status);
+    afl->kill_signal = WTERMSIG(status);
 
-    if (child_timed_out && kill_signal == SIGKILL) return FAULT_TMOUT;
+    if (afl->fsrv.child_timed_out && afl->kill_signal == SIGKILL) return FAULT_TMOUT;
 
     return FAULT_CRASH;
 
@@ -248,31 +248,31 @@ u8 run_target(char** argv, u32 timeout) {
   /* A somewhat nasty hack for MSAN, which doesn't support abort_on_error and
      must use a special exit code. */
 
-  if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
+  if (afl->fsrv.uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
 
-    kill_signal = 0;
+    afl->kill_signal = 0;
     return FAULT_CRASH;
 
   }
 
-  if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG)
+  if ((afl->dumb_mode == 1 || afl->no_forkserver) && tb4 == EXEC_FAIL_SIG)
     return FAULT_ERROR;
 
   return FAULT_NONE;
 
 }
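
run_target() keeps the same fault codes; only the plumbing changed, with the child PID, the timeout flag and the trace bitmap now living in afl->fsrv. A sketch of the usual call pattern:

    write_to_testcase(afl, mem, len);
    fault = run_target(afl, afl->fsrv.exec_tmout);

    switch (fault) {

      case FAULT_TMOUT: /* killed by the SIGALRM handler                   */ break;
      case FAULT_CRASH: /* fatal signal, or MSAN's special exit code       */ break;
      case FAULT_ERROR: /* execv() fell through (dumb mode, no forkserver) */ break;
      default:          /* FAULT_NONE: go inspect afl->fsrv.trace_bits     */ break;

    }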
 
-/* Write modified data to file for testing. If out_file is set, the old file
-   is unlinked and a new one is created. Otherwise, out_fd is rewound and
+/* Write modified data to file for testing. If afl->fsrv.out_file is set, the old file
+   is unlinked and a new one is created. Otherwise, afl->fsrv.out_fd is rewound and
    truncated. */
 
-void write_to_testcase(void* mem, u32 len) {
+void write_to_testcase(afl_state_t *afl, void* mem, u32 len) {
 
-  s32 fd = out_fd;
+  s32 fd = afl->fsrv.out_fd;
 
 #ifdef _AFL_DOCUMENT_MUTATIONS
   s32   doc_fd;
-  char* fn = alloc_printf("%s/mutations/%09u:%s", out_dir, document_counter++,
+  char* fn = alloc_printf("%s/mutations/%09u:%s", afl->out_dir, afl->document_counter++,
                           describe_op(0));
   if (fn != NULL) {
 
@@ -290,39 +290,39 @@ void write_to_testcase(void* mem, u32 len) {
 
 #endif
 
-  if (out_file) {
+  if (afl->fsrv.out_file) {
 
-    if (no_unlink) {
+    if (afl->no_unlink) {
 
-      fd = open(out_file, O_WRONLY | O_CREAT | O_TRUNC, 0600);
+      fd = open(afl->fsrv.out_file, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 
     } else {
 
-      unlink(out_file);                                   /* Ignore errors. */
-      fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
+      unlink(afl->fsrv.out_file);                                   /* Ignore errors. */
+      fd = open(afl->fsrv.out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
 
     }
 
-    if (fd < 0) PFATAL("Unable to create '%s'", out_file);
+    if (fd < 0) PFATAL("Unable to create '%s'", afl->fsrv.out_file);
 
   } else
 
     lseek(fd, 0, SEEK_SET);
 
-  if (mutator && mutator->afl_custom_pre_save) {
+  if (afl->mutator && afl->mutator->afl_custom_pre_save) {
 
     u8*    new_data;
-    size_t new_size = mutator->afl_custom_pre_save(mem, len, &new_data);
-    ck_write(fd, new_data, new_size, out_file);
+    size_t new_size = afl->mutator->afl_custom_pre_save(afl, mem, len, &new_data);
+    ck_write(fd, new_data, new_size, afl->fsrv.out_file);
     ck_free(new_data);
 
   } else {
 
-    ck_write(fd, mem, len, out_file);
+    ck_write(fd, mem, len, afl->fsrv.out_file);
 
   }
 
-  if (!out_file) {
+  if (!afl->fsrv.out_file) {
 
     if (ftruncate(fd, len)) PFATAL("ftruncate() failed");
     lseek(fd, 0, SEEK_SET);
@@ -335,36 +335,36 @@ void write_to_testcase(void* mem, u32 len) {
 
 /* The same, but with an adjustable gap. Used for trimming. */
 
-void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
+static void write_with_gap(afl_state_t *afl, void* mem, u32 len, u32 skip_at, u32 skip_len) {
 
-  s32 fd = out_fd;
+  s32 fd = afl->fsrv.out_fd;
   u32 tail_len = len - skip_at - skip_len;
 
-  if (out_file) {
+  if (afl->fsrv.out_file) {
 
-    if (no_unlink) {
+    if (afl->no_unlink) {
 
-      fd = open(out_file, O_WRONLY | O_CREAT | O_TRUNC, 0600);
+      fd = open(afl->fsrv.out_file, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 
     } else {
 
-      unlink(out_file);                                   /* Ignore errors. */
-      fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
+      unlink(afl->fsrv.out_file);                                   /* Ignore errors. */
+      fd = open(afl->fsrv.out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
 
     }
 
-    if (fd < 0) PFATAL("Unable to create '%s'", out_file);
+    if (fd < 0) PFATAL("Unable to create '%s'", afl->fsrv.out_file);
 
   } else
 
     lseek(fd, 0, SEEK_SET);
 
-  if (skip_at) ck_write(fd, mem, skip_at, out_file);
+  if (skip_at) ck_write(fd, mem, skip_at, afl->fsrv.out_file);
 
   u8* memu8 = mem;
-  if (tail_len) ck_write(fd, memu8 + skip_at + skip_len, tail_len, out_file);
+  if (tail_len) ck_write(fd, memu8 + skip_at + skip_len, tail_len, afl->fsrv.out_file);
 
-  if (!out_file) {
+  if (!afl->fsrv.out_file) {
 
     if (ftruncate(fd, len - skip_len)) PFATAL("ftruncate() failed");
     lseek(fd, 0, SEEK_SET);
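
The afl_custom_pre_save branch in write_to_testcase() above is the hook a custom mutator uses to rewrite the buffer right before it reaches the target, typically to fix up length fields or checksums. A minimal sketch of such a hook, with hypothetical internals; the signature follows the call made above:

    /* Returns the number of bytes to write and hands back a buffer that the
       caller ck_free()s afterwards. */
    size_t afl_custom_pre_save(afl_state_t* afl, u8* buf, size_t len,
                               u8** out_buf) {

      u8* copy = ck_alloc(len);
      memcpy(copy, buf, len);
      /* ... e.g. recompute a header checksum over copy here ... */
      *out_buf = copy;
      return len;

    }
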
@@ -379,7 +379,7 @@ void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
    to warn about flaky or otherwise problematic test cases early on; and when
    new paths are discovered to detect variable behavior and so on. */
 
-u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 handicap,
+u8 calibrate_case(afl_state_t *afl, struct queue_entry* q, u8* use_mem, u32 handicap,
                   u8 from_queue) {
 
   static u8 first_trace[MAP_SIZE];
@@ -389,61 +389,61 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 handicap,
 
   u64 start_us, stop_us;
 
-  s32 old_sc = stage_cur, old_sm = stage_max;
-  u32 use_tmout = exec_tmout;
-  u8* old_sn = stage_name;
+  s32 old_sc = afl->stage_cur, old_sm = afl->stage_max;
+  u32 use_tmout = afl->fsrv.exec_tmout;
+  u8* old_sn = afl->stage_name;
 
   /* Be a bit more generous about timeouts when resuming sessions, or when
      trying to calibrate already-added finds. This helps avoid trouble due
      to intermittent latency. */
 
-  if (!from_queue || resuming_fuzz)
+  if (!from_queue || afl->resuming_fuzz)
     use_tmout =
-        MAX(exec_tmout + CAL_TMOUT_ADD, exec_tmout * CAL_TMOUT_PERC / 100);
+        MAX(afl->fsrv.exec_tmout + CAL_TMOUT_ADD, afl->fsrv.exec_tmout * CAL_TMOUT_PERC / 100);
 
   ++q->cal_failed;
 
-  stage_name = "calibration";
-  stage_max = fast_cal ? 3 : CAL_CYCLES;
+  afl->stage_name = "calibration";
+  afl->stage_max = afl->fast_cal ? 3 : CAL_CYCLES;
 
   /* Make sure the forkserver is up before we do anything, and let's not
      count its spin-up time toward binary calibration. */
 
-  if (dumb_mode != 1 && !no_forkserver && !forksrv_pid) init_forkserver(argv);
-  if (dumb_mode != 1 && !no_forkserver && !cmplog_forksrv_pid && cmplog_mode)
-    init_cmplog_forkserver(argv);
+  if (afl->dumb_mode != 1 && !afl->no_forkserver && !afl->fsrv.fsrv_pid) afl_fsrv_start(&afl->fsrv, afl->argv);
+  if (afl->dumb_mode != 1 && !afl->no_forkserver && !afl->cmplog_fsrv_pid && afl->shm.cmplog_mode)
+    init_cmplog_forkserver(afl);
 
-  if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE);
+  if (q->exec_cksum) memcpy(first_trace, afl->fsrv.trace_bits, MAP_SIZE);
 
   start_us = get_cur_time_us();
 
-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
     u32 cksum;
 
-    if (!first_run && !(stage_cur % stats_update_freq)) show_stats();
+    if (!first_run && !(afl->stage_cur % afl->stats_update_freq)) show_stats(afl);
 
-    write_to_testcase(use_mem, q->len);
+    write_to_testcase(afl, use_mem, q->len);
 
-    fault = run_target(argv, use_tmout);
+    fault = run_target(afl, use_tmout);
 
-    /* stop_soon is set by the handler for Ctrl+C. When it's pressed,
+    /* afl->stop_soon is set by the handler for Ctrl+C. When it's pressed,
        we want to bail out quickly. */
 
-    if (stop_soon || fault != crash_mode) goto abort_calibration;
+    if (afl->stop_soon || fault != afl->crash_mode) goto abort_calibration;
 
-    if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) {
+    if (!afl->dumb_mode && !afl->stage_cur && !count_bytes(afl->fsrv.trace_bits)) {
 
       fault = FAULT_NOINST;
       goto abort_calibration;
 
     }
 
-    cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+    cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
 
     if (q->exec_cksum != cksum) {
 
-      u8 hnb = has_new_bits(virgin_bits);
+      u8 hnb = has_new_bits(afl, afl->virgin_bits);
       if (hnb > new_bits) new_bits = hnb;
 
       if (q->exec_cksum) {
@@ -452,10 +452,10 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 handicap,
 
         for (i = 0; i < MAP_SIZE; ++i) {
 
-          if (!var_bytes[i] && first_trace[i] != trace_bits[i]) {
+          if (!afl->var_bytes[i] && first_trace[i] != afl->fsrv.trace_bits[i]) {
 
-            var_bytes[i] = 1;
-            stage_max = CAL_CYCLES_LONG;
+            afl->var_bytes[i] = 1;
+            afl->stage_max = CAL_CYCLES_LONG;
 
           }
 
@@ -466,7 +466,7 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 handicap,
       } else {
 
         q->exec_cksum = cksum;
-        memcpy(first_trace, trace_bits, MAP_SIZE);
+        memcpy(first_trace, afl->fsrv.trace_bits, MAP_SIZE);
 
       }
 
@@ -476,34 +476,34 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 handicap,
 
   stop_us = get_cur_time_us();
 
-  total_cal_us += stop_us - start_us;
-  total_cal_cycles += stage_max;
+  afl->total_cal_us += stop_us - start_us;
+  afl->total_cal_cycles += afl->stage_max;
 
   /* OK, let's collect some stats about the performance of this test case.
      This is used for fuzzing air time calculations in calculate_score(). */
 
-  q->exec_us = (stop_us - start_us) / stage_max;
-  q->bitmap_size = count_bytes(trace_bits);
+  q->exec_us = (stop_us - start_us) / afl->stage_max;
+  q->bitmap_size = count_bytes(afl->fsrv.trace_bits);
   q->handicap = handicap;
   q->cal_failed = 0;
 
-  total_bitmap_size += q->bitmap_size;
-  ++total_bitmap_entries;
+  afl->total_bitmap_size += q->bitmap_size;
+  ++afl->total_bitmap_entries;
 
-  update_bitmap_score(q);
+  update_bitmap_score(afl, q);
 
   /* If this case didn't result in new output from the instrumentation, tell
      parent. This is a non-critical problem, but something to warn the user
      about. */
 
-  if (!dumb_mode && first_run && !fault && !new_bits) fault = FAULT_NOBITS;
+  if (!afl->dumb_mode && first_run && !fault && !new_bits) fault = FAULT_NOBITS;
 
 abort_calibration:
 
   if (new_bits == 2 && !q->has_new_cov) {
 
     q->has_new_cov = 1;
-    ++queued_with_cov;
+    ++afl->queued_with_cov;
 
   }
 
@@ -511,22 +511,22 @@ abort_calibration:
 
   if (var_detected) {
 
-    var_byte_count = count_bytes(var_bytes);
+    afl->var_byte_count = count_bytes(afl->var_bytes);
 
     if (!q->var_behavior) {
 
-      mark_as_variable(q);
-      ++queued_variable;
+      mark_as_variable(afl, q);
+      ++afl->queued_variable;
 
     }
 
   }
 
-  stage_name = old_sn;
-  stage_cur = old_sc;
-  stage_max = old_sm;
+  afl->stage_name = old_sn;
+  afl->stage_cur = old_sc;
+  afl->stage_max = old_sm;
 
-  if (!first_run) show_stats();
+  if (!first_run) show_stats(afl);
 
   return fault;
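
Callers of calibrate_case(), i.e. the dry run over the seed corpus and save_if_interesting() for fresh finds, now pass the state along as well. A sketch of the latter, where q is the entry just added to the queue and the handicap is the number of queue cycles it missed:

    res = calibrate_case(afl, q, mem, afl->queue_cycle - 1, 0);

    if (res == FAULT_ERROR) FATAL("Unable to execute target application");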
 
@@ -534,17 +534,17 @@ abort_calibration:
 
 /* Grab interesting test cases from other fuzzers. */
 
-void sync_fuzzers(char** argv) {
+void sync_fuzzers(afl_state_t *afl) {
 
   DIR*           sd;
   struct dirent* sd_ent;
   u32            sync_cnt = 0;
 
-  sd = opendir(sync_dir);
-  if (!sd) PFATAL("Unable to open '%s'", sync_dir);
+  sd = opendir(afl->sync_dir);
+  if (!sd) PFATAL("Unable to open '%s'", afl->sync_dir);
 
-  stage_max = stage_cur = 0;
-  cur_depth = 0;
+  afl->stage_max = afl->stage_cur = 0;
+  afl->cur_depth = 0;
 
   /* Look at the entries created for every other fuzzer in the sync directory.
    */
@@ -562,11 +562,11 @@ void sync_fuzzers(char** argv) {
 
     /* Skip dot files and our own output directory. */
 
-    if (sd_ent->d_name[0] == '.' || !strcmp(sync_id, sd_ent->d_name)) continue;
+    if (sd_ent->d_name[0] == '.' || !strcmp(afl->sync_id, sd_ent->d_name)) continue;
 
     /* Skip anything that doesn't have a queue/ subdirectory. */
 
-    qd_path = alloc_printf("%s/%s/queue", sync_dir, sd_ent->d_name);
+    qd_path = alloc_printf("%s/%s/queue", afl->sync_dir, sd_ent->d_name);
 
     if (!(qd = opendir(qd_path))) {
 
@@ -577,7 +577,7 @@ void sync_fuzzers(char** argv) {
 
     /* Retrieve the ID of the last seen test case. */
 
-    qd_synced_path = alloc_printf("%s/.synced/%s", out_dir, sd_ent->d_name);
+    qd_synced_path = alloc_printf("%s/.synced/%s", afl->out_dir, sd_ent->d_name);
 
     id_fd = open(qd_synced_path, O_RDWR | O_CREAT, 0600);
 
@@ -590,9 +590,9 @@ void sync_fuzzers(char** argv) {
     /* Show stats */
 
     sprintf(stage_tmp, "sync %u", ++sync_cnt);
-    stage_name = stage_tmp;
-    stage_cur = 0;
-    stage_max = 0;
+    afl->stage_name = stage_tmp;
+    afl->stage_cur = 0;
+    afl->stage_max = 0;
 
     /* For every file queued by this fuzzer, parse ID and see if we have looked
        at it before; exec a test case if not. */
@@ -604,13 +604,13 @@ void sync_fuzzers(char** argv) {
       struct stat st;
 
       if (qd_ent->d_name[0] == '.' ||
-          sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 ||
-          syncing_case < min_accept)
+          sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &afl->syncing_case) != 1 ||
+          afl->syncing_case < min_accept)
         continue;
 
       /* OK, sounds like a new one. Let's give it a try. */
 
-      if (syncing_case >= next_min_accept) next_min_accept = syncing_case + 1;
+      if (afl->syncing_case >= next_min_accept) next_min_accept = afl->syncing_case + 1;
 
       path = alloc_printf("%s/%s", qd_path, qd_ent->d_name);
 
@@ -639,19 +639,19 @@ void sync_fuzzers(char** argv) {
         /* See what happens. We rely on save_if_interesting() to catch major
            errors and save the test case. */
 
-        write_to_testcase(mem, st.st_size);
+        write_to_testcase(afl, mem, st.st_size);
 
-        fault = run_target(argv, exec_tmout);
+        fault = run_target(afl, afl->fsrv.exec_tmout);
 
-        if (stop_soon) goto close_sync;
+        if (afl->stop_soon) goto close_sync;
 
-        syncing_party = sd_ent->d_name;
-        queued_imported += save_if_interesting(argv, mem, st.st_size, fault);
-        syncing_party = 0;
+        afl->syncing_party = sd_ent->d_name;
+        afl->queued_imported += save_if_interesting(afl, mem, st.st_size, fault);
+        afl->syncing_party = 0;
 
         munmap(mem, st.st_size);
 
-        if (!(stage_cur++ % stats_update_freq)) show_stats();
+        if (!(afl->stage_cur++ % afl->stats_update_freq)) show_stats(afl);
 
       }
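
The pass above only replays cases it has not pulled from a peer before; the per-peer high-water mark lives in out_dir/.synced/<fuzzer-id>. A sketch of that bookkeeping (simplified, error handling omitted):

    u32 min_accept = 0, next_min_accept = 0;

    /* Where did we leave off with this peer last time? */
    if (read(id_fd, &min_accept, sizeof(u32)) == sizeof(u32))
      next_min_accept = min_accept;

    /* ... replay every case whose CASE_PREFIX id is >= min_accept,
       bumping next_min_accept as we go ... */

    lseek(id_fd, 0, SEEK_SET);
    ck_write(id_fd, &next_min_accept, sizeof(u32), qd_synced_path);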
 
@@ -678,11 +678,11 @@ void sync_fuzzers(char** argv) {
    trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of
    file size, to keep the stage short and sweet. */
 
-u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
+u8 trim_case(afl_state_t *afl, struct queue_entry* q, u8* in_buf) {
 
   /* Custom mutator trimmer */
-  if (mutator && mutator->afl_custom_trim)
-    return trim_case_custom(argv, q, in_buf);
+  if (afl->mutator && afl->mutator->afl_custom_trim)
+    return trim_case_custom(afl, q, in_buf);
 
   static u8 tmp[64];
   static u8 clean_trace[MAP_SIZE];
@@ -698,8 +698,8 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
 
   if (q->len < 5) return 0;
 
-  stage_name = tmp;
-  bytes_trim_in += q->len;
+  afl->stage_name = tmp;
+  afl->bytes_trim_in += q->len;
 
   /* Select initial chunk len, starting with large steps. */
 
@@ -716,24 +716,24 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
 
     sprintf(tmp, "trim %s/%s", DI(remove_len), DI(remove_len));
 
-    stage_cur = 0;
-    stage_max = q->len / remove_len;
+    afl->stage_cur = 0;
+    afl->stage_max = q->len / remove_len;
 
     while (remove_pos < q->len) {
 
       u32 trim_avail = MIN(remove_len, q->len - remove_pos);
       u32 cksum;
 
-      write_with_gap(in_buf, q->len, remove_pos, trim_avail);
+      write_with_gap(afl, in_buf, q->len, remove_pos, trim_avail);
 
-      fault = run_target(argv, exec_tmout);
-      ++trim_execs;
+      fault = run_target(afl, afl->fsrv.exec_tmout);
+      ++afl->trim_execs;
 
-      if (stop_soon || fault == FAULT_ERROR) goto abort_trimming;
+      if (afl->stop_soon || fault == FAULT_ERROR) goto abort_trimming;
 
       /* Note that we don't keep track of crashes or hangs here; maybe TODO? */
 
-      cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+      cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
 
       /* If the deletion had no impact on the trace, make it permanent. This
          isn't perfect for variable-path inputs, but we're just making a
@@ -756,7 +756,7 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
         if (!needs_write) {
 
           needs_write = 1;
-          memcpy(clean_trace, trace_bits, MAP_SIZE);
+          memcpy(clean_trace, afl->fsrv.trace_bits, MAP_SIZE);
 
         }
 
@@ -766,8 +766,8 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
 
       /* Since this can be slow, update the screen every now and then. */
 
-      if (!(trim_exec++ % stats_update_freq)) show_stats();
-      ++stage_cur;
+      if (!(trim_exec++ % afl->stats_update_freq)) show_stats(afl);
+      ++afl->stage_cur;
 
     }
 
@@ -782,7 +782,7 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
 
     s32 fd;
 
-    if (no_unlink) {
+    if (afl->no_unlink) {
 
       fd = open(q->fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 
@@ -798,14 +798,14 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
     ck_write(fd, in_buf, q->len, q->fname);
     close(fd);
 
-    memcpy(trace_bits, clean_trace, MAP_SIZE);
-    update_bitmap_score(q);
+    memcpy(afl->fsrv.trace_bits, clean_trace, MAP_SIZE);
+    update_bitmap_score(afl, q);
 
   }
 
 abort_trimming:
 
-  bytes_trim_out += q->len;
+  afl->bytes_trim_out += q->len;
   return fault;
 
 }
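
The power-of-two schedule mentioned in the comment above trim_case() translates into the following step sizes (TRIM_START_STEPS, TRIM_END_STEPS and TRIM_MIN_BYTES come from config.h); a sketch:

    /* Chunk sizes shrink by powers of two, from roughly len/16 down to
       len/1024, never below TRIM_MIN_BYTES. A deletion is kept whenever the
       trace checksum stays unchanged. */
    u32 len_p2     = next_p2(q->len);
    u32 remove_len = MAX(len_p2 / TRIM_START_STEPS, TRIM_MIN_BYTES);
    u32 min_len    = MAX(len_p2 / TRIM_END_STEPS, TRIM_MIN_BYTES);

    while (remove_len >= min_len) {

      /* ... call write_with_gap() + run_target() for each remove_len-sized
         chunk and compare hash32() of the trace against q->exec_cksum ... */
      remove_len >>= 1;

    }
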
@@ -814,53 +814,53 @@ abort_trimming:
    error conditions, returning 1 if it's time to bail out. This is
    a helper function for fuzz_one(). */
 
-u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) {
+u8 common_fuzz_stuff(afl_state_t *afl, u8* out_buf, u32 len) {
 
   u8 fault;
 
-  if (post_handler) {
+  if (afl->post_handler) {
 
-    out_buf = post_handler(out_buf, &len);
+    out_buf = afl->post_handler(out_buf, &len);
     if (!out_buf || !len) return 0;
 
   }
 
-  write_to_testcase(out_buf, len);
+  write_to_testcase(afl, out_buf, len);
 
-  fault = run_target(argv, exec_tmout);
+  fault = run_target(afl, afl->fsrv.exec_tmout);
 
-  if (stop_soon) return 1;
+  if (afl->stop_soon) return 1;
 
   if (fault == FAULT_TMOUT) {
 
-    if (subseq_tmouts++ > TMOUT_LIMIT) {
+    if (afl->subseq_tmouts++ > TMOUT_LIMIT) {
 
-      ++cur_skipped_paths;
+      ++afl->cur_skipped_paths;
       return 1;
 
     }
 
   } else
 
-    subseq_tmouts = 0;
+    afl->subseq_tmouts = 0;
 
   /* Users can hit us with SIGUSR1 to request the current input
      to be abandoned. */
 
-  if (skip_requested) {
+  if (afl->skip_requested) {
 
-    skip_requested = 0;
-    ++cur_skipped_paths;
+    afl->skip_requested = 0;
+    ++afl->cur_skipped_paths;
     return 1;
 
   }
 
   /* This handles FAULT_ERROR for us: */
 
-  queued_discovered += save_if_interesting(argv, out_buf, len, fault);
+  afl->queued_discovered += save_if_interesting(afl, out_buf, len, fault);
 
-  if (!(stage_cur % stats_update_freq) || stage_cur + 1 == stage_max)
-    show_stats();
+  if (!(afl->stage_cur % afl->stats_update_freq) || afl->stage_cur + 1 == afl->stage_max)
+    show_stats(afl);
 
   return 0;
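
A sketch of how the deterministic and havoc stages drive this helper after the change; a non-zero return means the current entry should be abandoned (the abandon_entry label is illustrative):

    for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {

      /* ... mutate out_buf in place for this step ... */

      if (common_fuzz_stuff(afl, out_buf, len)) goto abandon_entry;

    }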
 
diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
index 1b763c01..c9a1dc86 100644
--- a/src/afl-fuzz-stats.c
+++ b/src/afl-fuzz-stats.c
@@ -27,12 +27,12 @@
 
 /* Update stats file for unattended monitoring. */
 
-void write_stats_file(double bitmap_cvg, double stability, double eps) {
+void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability, double eps) {
 
   static double        last_bcvg, last_stab, last_eps;
   static struct rusage rus;
 
-  u8*   fn = alloc_printf("%s/fuzzer_stats", out_dir);
+  u8*   fn = alloc_printf("%s/fuzzer_stats", afl->out_dir);
   s32   fd;
   FILE* f;
 
@@ -99,28 +99,28 @@ void write_stats_file(double bitmap_cvg, double stability, double eps) {
       "\n"
       "target_mode       : %s%s%s%s%s%s%s%s\n"
       "command_line      : %s\n",
-      start_time / 1000, get_cur_time() / 1000, getpid(),
-      queue_cycle ? (queue_cycle - 1) : 0, total_execs,
-      /*eps,*/ total_execs / ((double)(get_cur_time() - start_time) / 1000),
-      queued_paths, queued_favored, queued_discovered, queued_imported,
-      max_depth, current_entry, pending_favored, pending_not_fuzzed,
-      queued_variable, stability, bitmap_cvg, unique_crashes, unique_hangs,
-      last_path_time / 1000, last_crash_time / 1000, last_hang_time / 1000,
-      total_execs - last_crash_execs, exec_tmout, slowest_exec_ms,
+      afl->start_time / 1000, get_cur_time() / 1000, getpid(),
+      afl->queue_cycle ? (afl->queue_cycle - 1) : 0, afl->total_execs,
+      /*eps,*/ afl->total_execs / ((double)(get_cur_time() - afl->start_time) / 1000),
+      afl->queued_paths, afl->queued_favored, afl->queued_discovered, afl->queued_imported,
+      afl->max_depth, afl->current_entry, afl->pending_favored, afl->pending_not_fuzzed,
+      afl->queued_variable, stability, bitmap_cvg, afl->unique_crashes, afl->unique_hangs,
+      afl->last_path_time / 1000, afl->last_crash_time / 1000, afl->last_hang_time / 1000,
+      afl->total_execs - afl->last_crash_execs, afl->fsrv.exec_tmout, afl->slowest_exec_ms,
 #ifdef __APPLE__
       (unsigned long int)(rus.ru_maxrss >> 20),
 #else
       (unsigned long int)(rus.ru_maxrss >> 10),
 #endif
-      use_banner, unicorn_mode ? "unicorn" : "", qemu_mode ? "qemu " : "",
-      dumb_mode ? " dumb " : "", no_forkserver ? "no_forksrv " : "",
-      crash_mode ? "crash " : "", persistent_mode ? "persistent " : "",
-      deferred_mode ? "deferred " : "",
-      (unicorn_mode || qemu_mode || dumb_mode || no_forkserver || crash_mode ||
-       persistent_mode || deferred_mode)
+      afl->use_banner, afl->unicorn_mode ? "unicorn" : "", afl->qemu_mode ? "qemu " : "",
+      afl->dumb_mode ? " dumb " : "", afl->no_forkserver ? "no_fsrv " : "",
+      afl->crash_mode ? "crash " : "", afl->persistent_mode ? "persistent " : "",
+      afl->deferred_mode ? "deferred " : "",
+      (afl->unicorn_mode || afl->qemu_mode || afl->dumb_mode || afl->no_forkserver || afl->crash_mode ||
+       afl->persistent_mode || afl->deferred_mode)
           ? ""
           : "default",
-      orig_cmdline);
+      afl->orig_cmdline);
   /* ignore errors */
 
   fclose(f);
@@ -129,61 +129,61 @@ void write_stats_file(double bitmap_cvg, double stability, double eps) {
 
 /* Update the plot file if there is a reason to. */
 
-void maybe_update_plot_file(double bitmap_cvg, double eps) {
+void maybe_update_plot_file(afl_state_t *afl, double bitmap_cvg, double eps) {
 
   static u32 prev_qp, prev_pf, prev_pnf, prev_ce, prev_md;
   static u64 prev_qc, prev_uc, prev_uh;
 
-  if (prev_qp == queued_paths && prev_pf == pending_favored &&
-      prev_pnf == pending_not_fuzzed && prev_ce == current_entry &&
-      prev_qc == queue_cycle && prev_uc == unique_crashes &&
-      prev_uh == unique_hangs && prev_md == max_depth)
+  if (prev_qp == afl->queued_paths && prev_pf == afl->pending_favored &&
+      prev_pnf == afl->pending_not_fuzzed && prev_ce == afl->current_entry &&
+      prev_qc == afl->queue_cycle && prev_uc == afl->unique_crashes &&
+      prev_uh == afl->unique_hangs && prev_md == afl->max_depth)
     return;
 
-  prev_qp = queued_paths;
-  prev_pf = pending_favored;
-  prev_pnf = pending_not_fuzzed;
-  prev_ce = current_entry;
-  prev_qc = queue_cycle;
-  prev_uc = unique_crashes;
-  prev_uh = unique_hangs;
-  prev_md = max_depth;
+  prev_qp = afl->queued_paths;
+  prev_pf = afl->pending_favored;
+  prev_pnf = afl->pending_not_fuzzed;
+  prev_ce = afl->current_entry;
+  prev_qc = afl->queue_cycle;
+  prev_uc = afl->unique_crashes;
+  prev_uh = afl->unique_hangs;
+  prev_md = afl->max_depth;
 
   /* Fields in the file:
 
-     unix_time, cycles_done, cur_path, paths_total, paths_not_fuzzed,
-     favored_not_fuzzed, unique_crashes, unique_hangs, max_depth,
+     unix_time, afl->cycles_done, cur_path, paths_total, paths_not_fuzzed,
+     favored_not_fuzzed, afl->unique_crashes, afl->unique_hangs, afl->max_depth,
      execs_per_sec */
 
-  fprintf(plot_file,
+  fprintf(afl->fsrv.plot_file,
           "%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f\n",
-          get_cur_time() / 1000, queue_cycle - 1, current_entry, queued_paths,
-          pending_not_fuzzed, pending_favored, bitmap_cvg, unique_crashes,
-          unique_hangs, max_depth, eps);                   /* ignore errors */
+          get_cur_time() / 1000, afl->queue_cycle - 1, afl->current_entry, afl->queued_paths,
+          afl->pending_not_fuzzed, afl->pending_favored, bitmap_cvg, afl->unique_crashes,
+          afl->unique_hangs, afl->max_depth, eps);                   /* ignore errors */
 
-  fflush(plot_file);
+  fflush(afl->fsrv.plot_file);
 
 }
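
For reference, the column names in plot_data itself are unchanged by this commit; a record produced by the fprintf() above looks like the following (made-up numbers, purely to show the column order):

    # unix_time, cycles_done, cur_path, paths_total, pending_total, pending_favs,
    # map_size, unique_crashes, unique_hangs, max_depth, execs_per_sec
    1583750000, 3, 117, 542, 208, 11, 4.31%, 0, 2, 9, 1843.71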
 
 /* Check terminal dimensions after resize. */
 
-static void check_term_size(void) {
+static void check_term_size(afl_state_t *afl) {
 
   struct winsize ws;
 
-  term_too_small = 0;
+  afl->term_too_small = 0;
 
   if (ioctl(1, TIOCGWINSZ, &ws)) return;
 
   if (ws.ws_row == 0 || ws.ws_col == 0) return;
-  if (ws.ws_row < 24 || ws.ws_col < 79) term_too_small = 1;
+  if (ws.ws_row < 24 || ws.ws_col < 79) afl->term_too_small = 1;
 
 }
 
-/* A spiffy retro stats screen! This is called every stats_update_freq
+/* A spiffy retro stats screen! This is called every afl->stats_update_freq
    execve() calls, plus in several other circumstances. */
 
-void show_stats(void) {
+void show_stats(afl_state_t *afl) {
 
   static u64    last_stats_ms, last_plot_ms, last_ms, last_execs;
   static double avg_exec;
@@ -203,18 +203,18 @@ void show_stats(void) {
 
   /* Check if we're past the 10 minute mark. */
 
-  if (cur_ms - start_time > 10 * 60 * 1000) run_over10m = 1;
+  if (cur_ms - afl->start_time > 10 * 60 * 1000) afl->run_over10m = 1;
 
   /* Calculate smoothed exec speed stats. */
 
   if (!last_execs) {
 
-    avg_exec = ((double)total_execs) * 1000 / (cur_ms - start_time);
+    avg_exec = ((double)afl->total_execs) * 1000 / (cur_ms - afl->start_time);
 
   } else {
 
     double cur_avg =
-        ((double)(total_execs - last_execs)) * 1000 / (cur_ms - last_ms);
+        ((double)(afl->total_execs - last_execs)) * 1000 / (cur_ms - last_ms);
 
     /* If there is a dramatic (5x+) jump in speed, reset the indicator
        more quickly. */
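
Concretely, the smoothing that follows in the elided lines is a plain exponential moving average (AVG_SMOOTHING is the config.h factor), reset outright when the instantaneous rate jumps by 5x or more; a sketch:

    if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec) avg_exec = cur_avg;

    avg_exec = avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) +
               cur_avg  * (1.0 / AVG_SMOOTHING);
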
@@ -227,20 +227,20 @@ void show_stats(void) {
   }
 
   last_ms = cur_ms;
-  last_execs = total_execs;
+  last_execs = afl->total_execs;
 
   /* Tell the callers when to contact us (as measured in execs). */
 
-  stats_update_freq = avg_exec / (UI_TARGET_HZ * 10);
-  if (!stats_update_freq) stats_update_freq = 1;
+  afl->stats_update_freq = avg_exec / (UI_TARGET_HZ * 10);
+  if (!afl->stats_update_freq) afl->stats_update_freq = 1;
 
   /* Do some bitmap stats. */
 
-  t_bytes = count_non_255_bytes(virgin_bits);
+  t_bytes = count_non_255_bytes(afl->virgin_bits);
   t_byte_ratio = ((double)t_bytes * 100) / MAP_SIZE;
 
   if (t_bytes)
-    stab_ratio = 100 - ((double)var_byte_count) * 100 / t_bytes;
+    stab_ratio = 100 - ((double)afl->var_byte_count) * 100 / t_bytes;
   else
     stab_ratio = 100;
 
@@ -249,9 +249,9 @@ void show_stats(void) {
   if (cur_ms - last_stats_ms > STATS_UPDATE_SEC * 1000) {
 
     last_stats_ms = cur_ms;
-    write_stats_file(t_byte_ratio, stab_ratio, avg_exec);
-    save_auto();
-    write_bitmap();
+    write_stats_file(afl, t_byte_ratio, stab_ratio, avg_exec);
+    save_auto(afl);
+    write_bitmap(afl);
 
   }
 
@@ -260,40 +260,40 @@ void show_stats(void) {
   if (cur_ms - last_plot_ms > PLOT_UPDATE_SEC * 1000) {
 
     last_plot_ms = cur_ms;
-    maybe_update_plot_file(t_byte_ratio, avg_exec);
+    maybe_update_plot_file(afl, t_byte_ratio, avg_exec);
 
   }
 
   /* Honor AFL_EXIT_WHEN_DONE and AFL_BENCH_UNTIL_CRASH. */
 
-  if (!dumb_mode && cycles_wo_finds > 100 && !pending_not_fuzzed &&
+  if (!afl->dumb_mode && afl->cycles_wo_finds > 100 && !afl->pending_not_fuzzed &&
       get_afl_env("AFL_EXIT_WHEN_DONE"))
-    stop_soon = 2;
+    afl->stop_soon = 2;
 
-  if (total_crashes && get_afl_env("AFL_BENCH_UNTIL_CRASH")) stop_soon = 2;
+  if (afl->total_crashes && get_afl_env("AFL_BENCH_UNTIL_CRASH")) afl->stop_soon = 2;
 
   /* If we're not on TTY, bail out. */
 
-  if (not_on_tty) return;
+  if (afl->not_on_tty) return;
 
   /* Compute some mildly useful bitmap stats. */
 
-  t_bits = (MAP_SIZE << 3) - count_bits(virgin_bits);
+  t_bits = (MAP_SIZE << 3) - count_bits(afl->virgin_bits);
 
   /* Now, for the visuals... */
 
-  if (clear_screen) {
+  if (afl->clear_screen) {
 
     SAYF(TERM_CLEAR CURSOR_HIDE);
-    clear_screen = 0;
+    afl->clear_screen = 0;
 
-    check_term_size();
+    check_term_size(afl);
 
   }
 
   SAYF(TERM_HOME);
 
-  if (term_too_small) {
+  if (afl->term_too_small) {
 
     SAYF(cBRI
          "Your terminal is too small to display the UI.\n"
@@ -305,20 +305,20 @@ void show_stats(void) {
 
   /* Let's start by drawing a centered banner. */
 
-  banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner) +
-               strlen(power_name) + 3 + 5;
+  banner_len = (afl->crash_mode ? 24 : 22) + strlen(VERSION) + strlen(afl->use_banner) +
+               strlen(afl->power_name) + 3 + 5;
   banner_pad = (79 - banner_len) / 2;
   memset(tmp, ' ', banner_pad);
 
 #ifdef HAVE_AFFINITY
   sprintf(tmp + banner_pad,
           "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]" cBLU " {%d}",
-          crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
-          use_banner, power_name, cpu_aff);
+          afl->crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
+          afl->use_banner, afl->power_name, afl->cpu_aff);
 #else
   sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]",
-          crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
-          use_banner, power_name);
+          afl->crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
+          afl->use_banner, afl->power_name);
 #endif                                                     /* HAVE_AFFINITY */
 
   SAYF("\n%s\n", tmp);
@@ -341,26 +341,26 @@ void show_stats(void) {
        " process timing " bSTG bH30 bH5 bH bHB bH bSTOP cCYA
        " overall results " bSTG bH2 bH2 bRT "\n");
 
-  if (dumb_mode) {
+  if (afl->dumb_mode) {
 
     strcpy(tmp, cRST);
 
   } else {
 
-    u64 min_wo_finds = (cur_ms - last_path_time) / 1000 / 60;
+    u64 min_wo_finds = (cur_ms - afl->last_path_time) / 1000 / 60;
 
     /* First queue cycle: don't stop now! */
-    if (queue_cycle == 1 || min_wo_finds < 15)
+    if (afl->queue_cycle == 1 || min_wo_finds < 15)
       strcpy(tmp, cMGN);
     else
 
         /* Subsequent cycles, but we're still making finds. */
-        if (cycles_wo_finds < 25 || min_wo_finds < 30)
+        if (afl->cycles_wo_finds < 25 || min_wo_finds < 30)
       strcpy(tmp, cYEL);
     else
 
         /* No finds for a long time and no test cases to try. */
-        if (cycles_wo_finds > 100 && !pending_not_fuzzed && min_wo_finds > 120)
+        if (afl->cycles_wo_finds > 100 && !afl->pending_not_fuzzed && min_wo_finds > 120)
       strcpy(tmp, cLGN);
 
     /* Default: cautiously OK to stop? */
@@ -371,20 +371,20 @@ void show_stats(void) {
 
   SAYF(bV bSTOP "        run time : " cRST "%-33s " bSTG bV bSTOP
                 "  cycles done : %s%-5s " bSTG              bV "\n",
-       DTD(cur_ms, start_time), tmp, DI(queue_cycle - 1));
+       DTD(cur_ms, afl->start_time), tmp, DI(afl->queue_cycle - 1));
 
   /* We want to warn people about not seeing new paths after a full cycle,
      except when resuming fuzzing or running in non-instrumented mode. */
 
-  if (!dumb_mode && (last_path_time || resuming_fuzz || queue_cycle == 1 ||
-                     in_bitmap || crash_mode)) {
+  if (!afl->dumb_mode && (afl->last_path_time || afl->resuming_fuzz || afl->queue_cycle == 1 ||
+                     afl->in_bitmap || afl->crash_mode)) {
 
     SAYF(bV bSTOP "   last new path : " cRST "%-33s ",
-         DTD(cur_ms, last_path_time));
+         DTD(cur_ms, afl->last_path_time));
 
   } else {
 
-    if (dumb_mode)
+    if (afl->dumb_mode)
 
       SAYF(bV bSTOP "   last new path : " cPIN "n/a" cRST
                     " (non-instrumented mode)       ");
@@ -397,24 +397,24 @@ void show_stats(void) {
   }
 
   SAYF(bSTG bV bSTOP "  total paths : " cRST "%-5s " bSTG bV "\n",
-       DI(queued_paths));
+       DI(afl->queued_paths));
 
   /* Highlight crashes in red if found, denote going over the KEEP_UNIQUE_CRASH
      limit with a '+' appended to the count. */
 
-  sprintf(tmp, "%s%s", DI(unique_crashes),
-          (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
+  sprintf(tmp, "%s%s", DI(afl->unique_crashes),
+          (afl->unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
 
   SAYF(bV bSTOP " last uniq crash : " cRST "%-33s " bSTG bV bSTOP
                 " uniq crashes : %s%-6s" bSTG               bV "\n",
-       DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST, tmp);
+       DTD(cur_ms, afl->last_crash_time), afl->unique_crashes ? cLRD : cRST, tmp);
 
-  sprintf(tmp, "%s%s", DI(unique_hangs),
-          (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
+  sprintf(tmp, "%s%s", DI(afl->unique_hangs),
+          (afl->unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
 
   SAYF(bV bSTOP "  last uniq hang : " cRST "%-33s " bSTG bV bSTOP
                 "   uniq hangs : " cRST "%-6s" bSTG         bV "\n",
-       DTD(cur_ms, last_hang_time), tmp);
+       DTD(cur_ms, afl->last_hang_time), tmp);
 
   SAYF(bVR bH bSTOP            cCYA
        " cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA
@@ -424,21 +424,21 @@ void show_stats(void) {
      together, but then cram them into a fixed-width field - so we need to
      put them in a temporary buffer first. */
 
-  sprintf(tmp, "%s%s%u (%0.01f%%)", DI(current_entry),
-          queue_cur->favored ? "." : "*", queue_cur->fuzz_level,
-          ((double)current_entry * 100) / queued_paths);
+  sprintf(tmp, "%s%s%u (%0.01f%%)", DI(afl->current_entry),
+          afl->queue_cur->favored ? "." : "*", afl->queue_cur->fuzz_level,
+          ((double)afl->current_entry * 100) / afl->queued_paths);
 
   SAYF(bV bSTOP "  now processing : " cRST "%-16s " bSTG bV bSTOP, tmp);
 
   sprintf(tmp, "%0.02f%% / %0.02f%%",
-          ((double)queue_cur->bitmap_size) * 100 / MAP_SIZE, t_byte_ratio);
+          ((double)afl->queue_cur->bitmap_size) * 100 / MAP_SIZE, t_byte_ratio);
 
   SAYF("    map density : %s%-21s" bSTG bV "\n",
-       t_byte_ratio > 70 ? cLRD : ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST),
+       t_byte_ratio > 70 ? cLRD : ((t_bytes < 200 && !afl->dumb_mode) ? cPIN : cRST),
        tmp);
 
-  sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths),
-          ((double)cur_skipped_paths * 100) / queued_paths);
+  sprintf(tmp, "%s (%0.02f%%)", DI(afl->cur_skipped_paths),
+          ((double)afl->cur_skipped_paths * 100) / afl->queued_paths);
 
   SAYF(bV bSTOP " paths timed out : " cRST "%-16s " bSTG bV, tmp);
 
@@ -450,47 +450,47 @@ void show_stats(void) {
        " stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA
        " findings in depth " bSTG bH10 bH5 bH2 bH2 bVL "\n");
 
-  sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored),
-          ((double)queued_favored) * 100 / queued_paths);
+  sprintf(tmp, "%s (%0.02f%%)", DI(afl->queued_favored),
+          ((double)afl->queued_favored) * 100 / afl->queued_paths);
 
   /* Yeah... it's still going on... halp? */
 
   SAYF(bV bSTOP "  now trying : " cRST "%-20s " bSTG bV bSTOP
                 " favored paths : " cRST "%-22s" bSTG   bV "\n",
-       stage_name, tmp);
+       afl->stage_name, tmp);
 
-  if (!stage_max) {
+  if (!afl->stage_max) {
 
-    sprintf(tmp, "%s/-", DI(stage_cur));
+    sprintf(tmp, "%s/-", DI(afl->stage_cur));
 
   } else {
 
-    sprintf(tmp, "%s/%s (%0.02f%%)", DI(stage_cur), DI(stage_max),
-            ((double)stage_cur) * 100 / stage_max);
+    sprintf(tmp, "%s/%s (%0.02f%%)", DI(afl->stage_cur), DI(afl->stage_max),
+            ((double)afl->stage_cur) * 100 / afl->stage_max);
 
   }
 
   SAYF(bV bSTOP " stage execs : " cRST "%-20s " bSTG bV bSTOP, tmp);
 
-  sprintf(tmp, "%s (%0.02f%%)", DI(queued_with_cov),
-          ((double)queued_with_cov) * 100 / queued_paths);
+  sprintf(tmp, "%s (%0.02f%%)", DI(afl->queued_with_cov),
+          ((double)afl->queued_with_cov) * 100 / afl->queued_paths);
 
   SAYF("  new edges on : " cRST "%-22s" bSTG bV "\n", tmp);
 
-  sprintf(tmp, "%s (%s%s unique)", DI(total_crashes), DI(unique_crashes),
-          (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
+  sprintf(tmp, "%s (%s%s unique)", DI(afl->total_crashes), DI(afl->unique_crashes),
+          (afl->unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
 
-  if (crash_mode) {
+  if (afl->crash_mode) {
 
     SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP
                   "   new crashes : %s%-22s" bSTG         bV "\n",
-         DI(total_execs), unique_crashes ? cLRD : cRST, tmp);
+         DI(afl->total_execs), afl->unique_crashes ? cLRD : cRST, tmp);
 
   } else {
 
     SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP
                   " total crashes : %s%-22s" bSTG         bV "\n",
-         DI(total_execs), unique_crashes ? cLRD : cRST, tmp);
+         DI(afl->total_execs), afl->unique_crashes ? cLRD : cRST, tmp);
 
   }
 
@@ -510,8 +510,8 @@ void show_stats(void) {
 
   }
 
-  sprintf(tmp, "%s (%s%s unique)", DI(total_tmouts), DI(unique_tmouts),
-          (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
+  sprintf(tmp, "%s (%s%s unique)", DI(afl->total_tmouts), DI(afl->unique_tmouts),
+          (afl->unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
 
   SAYF(bSTG bV bSTOP "  total tmouts : " cRST "%-22s" bSTG bV "\n", tmp);
 
@@ -521,68 +521,68 @@ void show_stats(void) {
        " fuzzing strategy yields " bSTG bH10 bHT bH10 bH5 bHB bH bSTOP cCYA
        " path geometry " bSTG bH5 bH2 bVL "\n");
 
-  if (skip_deterministic) {
+  if (afl->skip_deterministic) {
 
     strcpy(tmp, "n/a, n/a, n/a");
 
   } else {
 
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_FLIP1]),
-            DI(stage_cycles[STAGE_FLIP1]), DI(stage_finds[STAGE_FLIP2]),
-            DI(stage_cycles[STAGE_FLIP2]), DI(stage_finds[STAGE_FLIP4]),
-            DI(stage_cycles[STAGE_FLIP4]));
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_FLIP1]),
+            DI(afl->stage_cycles[STAGE_FLIP1]), DI(afl->stage_finds[STAGE_FLIP2]),
+            DI(afl->stage_cycles[STAGE_FLIP2]), DI(afl->stage_finds[STAGE_FLIP4]),
+            DI(afl->stage_cycles[STAGE_FLIP4]));
 
   }
 
   SAYF(bV bSTOP "   bit flips : " cRST "%-36s " bSTG bV bSTOP
                 "    levels : " cRST "%-10s" bSTG       bV "\n",
-       tmp, DI(max_depth));
+       tmp, DI(afl->max_depth));
 
-  if (!skip_deterministic)
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_FLIP8]),
-            DI(stage_cycles[STAGE_FLIP8]), DI(stage_finds[STAGE_FLIP16]),
-            DI(stage_cycles[STAGE_FLIP16]), DI(stage_finds[STAGE_FLIP32]),
-            DI(stage_cycles[STAGE_FLIP32]));
+  if (!afl->skip_deterministic)
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_FLIP8]),
+            DI(afl->stage_cycles[STAGE_FLIP8]), DI(afl->stage_finds[STAGE_FLIP16]),
+            DI(afl->stage_cycles[STAGE_FLIP16]), DI(afl->stage_finds[STAGE_FLIP32]),
+            DI(afl->stage_cycles[STAGE_FLIP32]));
 
   SAYF(bV bSTOP "  byte flips : " cRST "%-36s " bSTG bV bSTOP
                 "   pending : " cRST "%-10s" bSTG       bV "\n",
-       tmp, DI(pending_not_fuzzed));
+       tmp, DI(afl->pending_not_fuzzed));
 
-  if (!skip_deterministic)
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_ARITH8]),
-            DI(stage_cycles[STAGE_ARITH8]), DI(stage_finds[STAGE_ARITH16]),
-            DI(stage_cycles[STAGE_ARITH16]), DI(stage_finds[STAGE_ARITH32]),
-            DI(stage_cycles[STAGE_ARITH32]));
+  if (!afl->skip_deterministic)
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_ARITH8]),
+            DI(afl->stage_cycles[STAGE_ARITH8]), DI(afl->stage_finds[STAGE_ARITH16]),
+            DI(afl->stage_cycles[STAGE_ARITH16]), DI(afl->stage_finds[STAGE_ARITH32]),
+            DI(afl->stage_cycles[STAGE_ARITH32]));
 
   SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP
                 "  pend fav : " cRST "%-10s" bSTG       bV "\n",
-       tmp, DI(pending_favored));
+       tmp, DI(afl->pending_favored));
 
-  if (!skip_deterministic)
+  if (!afl->skip_deterministic)
     sprintf(
-        tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_INTEREST8]),
-        DI(stage_cycles[STAGE_INTEREST8]), DI(stage_finds[STAGE_INTEREST16]),
-        DI(stage_cycles[STAGE_INTEREST16]), DI(stage_finds[STAGE_INTEREST32]),
-        DI(stage_cycles[STAGE_INTEREST32]));
+        tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_INTEREST8]),
+        DI(afl->stage_cycles[STAGE_INTEREST8]), DI(afl->stage_finds[STAGE_INTEREST16]),
+        DI(afl->stage_cycles[STAGE_INTEREST16]), DI(afl->stage_finds[STAGE_INTEREST32]),
+        DI(afl->stage_cycles[STAGE_INTEREST32]));
 
   SAYF(bV bSTOP "  known ints : " cRST "%-36s " bSTG bV bSTOP
                 " own finds : " cRST "%-10s" bSTG       bV "\n",
-       tmp, DI(queued_discovered));
+       tmp, DI(afl->queued_discovered));
 
-  if (!skip_deterministic)
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_EXTRAS_UO]),
-            DI(stage_cycles[STAGE_EXTRAS_UO]), DI(stage_finds[STAGE_EXTRAS_UI]),
-            DI(stage_cycles[STAGE_EXTRAS_UI]), DI(stage_finds[STAGE_EXTRAS_AO]),
-            DI(stage_cycles[STAGE_EXTRAS_AO]));
+  if (!afl->skip_deterministic)
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_EXTRAS_UO]),
+            DI(afl->stage_cycles[STAGE_EXTRAS_UO]), DI(afl->stage_finds[STAGE_EXTRAS_UI]),
+            DI(afl->stage_cycles[STAGE_EXTRAS_UI]), DI(afl->stage_finds[STAGE_EXTRAS_AO]),
+            DI(afl->stage_cycles[STAGE_EXTRAS_AO]));
 
   SAYF(bV bSTOP "  dictionary : " cRST "%-36s " bSTG bV bSTOP
                 "  imported : " cRST "%-10s" bSTG       bV "\n",
-       tmp, sync_id ? DI(queued_imported) : (u8*)"n/a");
+       tmp, afl->sync_id ? DI(afl->queued_imported) : (u8*)"n/a");
 
-  sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_HAVOC]),
-          DI(stage_cycles[STAGE_HAVOC]), DI(stage_finds[STAGE_SPLICE]),
-          DI(stage_cycles[STAGE_SPLICE]), DI(stage_finds[STAGE_RADAMSA]),
-          DI(stage_cycles[STAGE_RADAMSA]));
+  sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_HAVOC]),
+          DI(afl->stage_cycles[STAGE_HAVOC]), DI(afl->stage_finds[STAGE_SPLICE]),
+          DI(afl->stage_cycles[STAGE_SPLICE]), DI(afl->stage_finds[STAGE_RADAMSA]),
+          DI(afl->stage_cycles[STAGE_RADAMSA]));
 
   SAYF(bV bSTOP "   havoc/rad : " cRST "%-36s " bSTG bV bSTOP, tmp);
 
@@ -592,51 +592,51 @@ void show_stats(void) {
     strcpy(tmp, "n/a");
 
   SAYF(" stability : %s%-10s" bSTG bV "\n",
-       (stab_ratio < 85 && var_byte_count > 40)
+       (stab_ratio < 85 && afl->var_byte_count > 40)
            ? cLRD
-           : ((queued_variable && (!persistent_mode || var_byte_count > 20))
+           : ((afl->queued_variable && (!afl->persistent_mode || afl->var_byte_count > 20))
                   ? cMGN
                   : cRST),
        tmp);
 
-  if (cmplog_mode) {
+  if (afl->shm.cmplog_mode) {
 
-    sprintf(tmp, "%s/%s, %s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_PYTHON]),
-            DI(stage_cycles[STAGE_PYTHON]),
-            DI(stage_finds[STAGE_CUSTOM_MUTATOR]),
-            DI(stage_cycles[STAGE_CUSTOM_MUTATOR]),
-            DI(stage_finds[STAGE_COLORIZATION]),
-            DI(stage_cycles[STAGE_COLORIZATION]), DI(stage_finds[STAGE_ITS]),
-            DI(stage_cycles[STAGE_ITS]));
+    sprintf(tmp, "%s/%s, %s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_PYTHON]),
+            DI(afl->stage_cycles[STAGE_PYTHON]),
+            DI(afl->stage_finds[STAGE_CUSTOM_MUTATOR]),
+            DI(afl->stage_cycles[STAGE_CUSTOM_MUTATOR]),
+            DI(afl->stage_finds[STAGE_COLORIZATION]),
+            DI(afl->stage_cycles[STAGE_COLORIZATION]), DI(afl->stage_finds[STAGE_ITS]),
+            DI(afl->stage_cycles[STAGE_ITS]));
 
     SAYF(bV bSTOP "   custom/rq : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n",
          tmp);
 
   } else {
 
-    sprintf(tmp, "%s/%s, %s/%s", DI(stage_finds[STAGE_PYTHON]),
-            DI(stage_cycles[STAGE_PYTHON]),
-            DI(stage_finds[STAGE_CUSTOM_MUTATOR]),
-            DI(stage_cycles[STAGE_CUSTOM_MUTATOR]));
+    sprintf(tmp, "%s/%s, %s/%s", DI(afl->stage_finds[STAGE_PYTHON]),
+            DI(afl->stage_cycles[STAGE_PYTHON]),
+            DI(afl->stage_finds[STAGE_CUSTOM_MUTATOR]),
+            DI(afl->stage_cycles[STAGE_CUSTOM_MUTATOR]));
 
     SAYF(bV bSTOP "   py/custom : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n",
          tmp);
 
   }
 
-  if (!bytes_trim_out) {
+  if (!afl->bytes_trim_out) {
 
     sprintf(tmp, "n/a, ");
 
   } else {
 
     sprintf(tmp, "%0.02f%%/%s, ",
-            ((double)(bytes_trim_in - bytes_trim_out)) * 100 / bytes_trim_in,
-            DI(trim_execs));
+            ((double)(afl->bytes_trim_in - afl->bytes_trim_out)) * 100 / afl->bytes_trim_in,
+            DI(afl->trim_execs));
 
   }
 
-  if (!blocks_eff_total) {
+  if (!afl->blocks_eff_total) {
 
     u8 tmp2[128];
 
@@ -648,17 +648,17 @@ void show_stats(void) {
     u8 tmp2[128];
 
     sprintf(tmp2, "%0.02f%%",
-            ((double)(blocks_eff_total - blocks_eff_select)) * 100 /
-                blocks_eff_total);
+            ((double)(afl->blocks_eff_total - afl->blocks_eff_select)) * 100 /
+                afl->blocks_eff_total);
 
     strcat(tmp, tmp2);
 
   }
 
-  if (mutator) {
+  if (afl->mutator) {
 
-    sprintf(tmp, "%s/%s", DI(stage_finds[STAGE_CUSTOM_MUTATOR]),
-            DI(stage_cycles[STAGE_CUSTOM_MUTATOR]));
+    sprintf(tmp, "%s/%s", DI(afl->stage_finds[STAGE_CUSTOM_MUTATOR]),
+            DI(afl->stage_cycles[STAGE_CUSTOM_MUTATOR]));
     SAYF(bV bSTOP " custom mut. : " cRST "%-36s " bSTG bV RESET_G1, tmp);
 
   } else {
@@ -669,27 +669,27 @@ void show_stats(void) {
 
   /* Provide some CPU utilization stats. */
 
-  if (cpu_core_count) {
+  if (afl->cpu_core_count) {
 
     double cur_runnable = get_runnable_processes();
-    u32    cur_utilization = cur_runnable * 100 / cpu_core_count;
+    u32    cur_utilization = cur_runnable * 100 / afl->cpu_core_count;
 
     u8* cpu_color = cCYA;
 
     /* If we could still run one or more processes, use green. */
 
-    if (cpu_core_count > 1 && cur_runnable + 1 <= cpu_core_count)
+    if (afl->cpu_core_count > 1 && cur_runnable + 1 <= afl->cpu_core_count)
       cpu_color = cLGN;
 
     /* If we're clearly oversubscribed, use red. */
 
-    if (!no_cpu_meter_red && cur_utilization >= 150) cpu_color = cLRD;
+    if (!afl->no_cpu_meter_red && cur_utilization >= 150) cpu_color = cLRD;
 
 #ifdef HAVE_AFFINITY
 
-    if (cpu_aff >= 0) {
+    if (afl->cpu_aff >= 0) {
 
-      SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, MIN(cpu_aff, 999),
+      SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, MIN(afl->cpu_aff, 999),
            cpu_color, MIN(cur_utilization, 999));
 
     } else {
@@ -723,15 +723,15 @@ void show_stats(void) {
    plus a bunch of warnings. Some calibration stuff also ended up here,
    along with several hardcoded constants. Maybe clean up eventually. */
 
-void show_init_stats(void) {
+void show_init_stats(afl_state_t *afl) {
 
-  struct queue_entry* q = queue;
+  struct queue_entry* q = afl->queue;
   u32                 min_bits = 0, max_bits = 0;
   u64                 min_us = 0, max_us = 0;
   u64                 avg_us = 0;
   u32                 max_len = 0;
 
-  if (total_cal_cycles) avg_us = total_cal_us / total_cal_cycles;
+  if (afl->total_cal_cycles) avg_us = afl->total_cal_us / afl->total_cal_cycles;
 
   while (q) {
 
@@ -749,20 +749,20 @@ void show_init_stats(void) {
 
   SAYF("\n");
 
-  if (avg_us > ((qemu_mode || unicorn_mode) ? 50000 : 10000))
+  if (avg_us > ((afl->qemu_mode || afl->unicorn_mode) ? 50000 : 10000))
     WARNF(cLRD "The target binary is pretty slow! See %s/perf_tips.md.",
           doc_path);
 
   /* Let's keep things moving with slow binaries. */
 
   if (avg_us > 50000)
-    havoc_div = 10;                                     /* 0-19 execs/sec   */
+    afl->havoc_div = 10;                                     /* 0-19 execs/sec   */
   else if (avg_us > 20000)
-    havoc_div = 5;                                      /* 20-49 execs/sec  */
+    afl->havoc_div = 5;                                      /* 20-49 execs/sec  */
   else if (avg_us > 10000)
-    havoc_div = 2;                                      /* 50-100 execs/sec */
+    afl->havoc_div = 2;                                      /* 50-100 execs/sec */
 
-  if (!resuming_fuzz) {
+  if (!afl->resuming_fuzz) {
 
     if (max_len > 50 * 1024)
       WARNF(cLRD "Some test cases are huge (%s) - see %s/perf_tips.md!",
@@ -771,14 +771,14 @@ void show_init_stats(void) {
       WARNF("Some test cases are big (%s) - see %s/perf_tips.md.", DMS(max_len),
             doc_path);
 
-    if (useless_at_start && !in_bitmap)
+    if (afl->useless_at_start && !afl->in_bitmap)
       WARNF(cLRD "Some test cases look useless. Consider using a smaller set.");
 
-    if (queued_paths > 100)
+    if (afl->queued_paths > 100)
       WARNF(cLRD
             "You probably have far too many input files! Consider trimming "
             "down.");
-    else if (queued_paths > 20)
+    else if (afl->queued_paths > 20)
       WARNF("You have lots of input files; try starting small.");
 
   }
@@ -789,12 +789,12 @@ void show_init_stats(void) {
       "%u favored, %u variable, %u total\n" cGRA "       Bitmap range : " cRST
       "%u to %u bits (average: %0.02f bits)\n" cGRA
       "        Exec timing : " cRST "%s to %s us (average: %s us)\n",
-      queued_favored, queued_variable, queued_paths, min_bits, max_bits,
-      ((double)total_bitmap_size) /
-          (total_bitmap_entries ? total_bitmap_entries : 1),
+      afl->queued_favored, afl->queued_variable, afl->queued_paths, min_bits, max_bits,
+      ((double)afl->total_bitmap_size) /
+          (afl->total_bitmap_entries ? afl->total_bitmap_entries : 1),
       DI(min_us), DI(max_us), DI(avg_us));
 
-  if (!timeout_given) {
+  if (!afl->timeout_given) {
 
     /* Figure out the appropriate timeout. The basic idea is: 5x average or
        1x max, rounded up to EXEC_TM_ROUND ms and capped at 1 second.
@@ -804,33 +804,33 @@ void show_init_stats(void) {
        our patience is wearing thin =) */
 
     if (avg_us > 50000)
-      exec_tmout = avg_us * 2 / 1000;
+      afl->fsrv.exec_tmout = avg_us * 2 / 1000;
     else if (avg_us > 10000)
-      exec_tmout = avg_us * 3 / 1000;
+      afl->fsrv.exec_tmout = avg_us * 3 / 1000;
     else
-      exec_tmout = avg_us * 5 / 1000;
+      afl->fsrv.exec_tmout = avg_us * 5 / 1000;
 
-    exec_tmout = MAX(exec_tmout, max_us / 1000);
-    exec_tmout = (exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;
+    afl->fsrv.exec_tmout = MAX(afl->fsrv.exec_tmout, max_us / 1000);
+    afl->fsrv.exec_tmout = (afl->fsrv.exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;
 
-    if (exec_tmout > EXEC_TIMEOUT) exec_tmout = EXEC_TIMEOUT;
+    if (afl->fsrv.exec_tmout > EXEC_TIMEOUT) afl->fsrv.exec_tmout = EXEC_TIMEOUT;
 
     ACTF("No -t option specified, so I'll use exec timeout of %u ms.",
-         exec_tmout);
+         afl->fsrv.exec_tmout);
 
-    timeout_given = 1;
+    afl->timeout_given = 1;
 
-  } else if (timeout_given == 3) {
+  } else if (afl->timeout_given == 3) {
 
-    ACTF("Applying timeout settings from resumed session (%u ms).", exec_tmout);
+    ACTF("Applying timeout settings from resumed session (%u ms).", afl->fsrv.exec_tmout);
 
   }
 
   /* In dumb mode, re-running every timing out test case with a generous time
      limit is very expensive, so let's select a more conservative default. */
 
-  if (dumb_mode && !get_afl_env("AFL_HANG_TMOUT"))
-    hang_tmout = MIN(EXEC_TIMEOUT, exec_tmout * 2 + 100);
+  if (afl->dumb_mode && !get_afl_env("AFL_HANG_TMOUT"))
+    afl->hang_tmout = MIN(EXEC_TIMEOUT, afl->fsrv.exec_tmout * 2 + 100);
 
   OKF("All set and ready to roll!");
 
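The show_init_stats() hunk above also spells out the default timeout heuristic: 2x, 3x or 5x the average calibration time depending on how slow the target is, never below the slowest observed execution, rounded up to an EXEC_TM_ROUND boundary and capped at EXEC_TIMEOUT. A small standalone sketch of that arithmetic follows; the concrete constant values are placeholders assumed from the usual config.h defaults, not something introduced by this patch.

#include <stdint.h>

/* Sketch of the default -t heuristic from show_init_stats() above.
   The two constants are assumptions for illustration only; the real
   values are defined in config.h. */
#define EXEC_TM_ROUND 20                /* assumed: round to 20 ms steps */
#define EXEC_TIMEOUT 1000               /* assumed: hard cap of 1000 ms  */

static uint32_t default_exec_tmout(uint64_t avg_us, uint64_t max_us) {

  uint64_t tmout;

  if (avg_us > 50000)
    tmout = avg_us * 2 / 1000;          /* very slow target: 2x average  */
  else if (avg_us > 10000)
    tmout = avg_us * 3 / 1000;          /* slow target: 3x average       */
  else
    tmout = avg_us * 5 / 1000;          /* fast target: 5x average       */

  if (tmout < max_us / 1000) tmout = max_us / 1000;  /* >= slowest case  */

  /* Round up to the next EXEC_TM_ROUND boundary, then cap. */
  tmout = (tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;
  if (tmout > EXEC_TIMEOUT) tmout = EXEC_TIMEOUT;

  return (uint32_t)tmout;

}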
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index 778ada9a..63035e36 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -25,6 +25,8 @@
 
 #include "afl-fuzz.h"
 
+u8 be_quiet = 0;
+
 static u8* get_libradamsa_path(u8* own_loc) {
 
   u8 *tmp, *cp, *rsl, *own_copy;
@@ -82,7 +84,7 @@ static u8* get_libradamsa_path(u8* own_loc) {
 
 /* Display usage hints. */
 
-static void usage(u8* argv0, int more_help) {
+static void usage(afl_state_t *afl, u8* argv0, int more_help) {
 
   SAYF(
       "\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n"
@@ -220,6 +222,7 @@ static int stricmp(char const* a, char const* b) {
 
 }
 
+
 /* Main entry point */
 
 int main(int argc, char** argv, char** envp) {
@@ -235,33 +238,41 @@ int main(int argc, char** argv, char** envp) {
   struct timeval  tv;
   struct timezone tz;
 
+  afl_state_t *afl = calloc(1, sizeof(afl_state_t));
+  if (!afl) {
+    FATAL("Could not create afl state");
+  }
+
+  afl_state_init(afl);
+  afl_fsrv_init(&afl->fsrv);
+
   SAYF(cCYA "afl-fuzz" VERSION cRST
             " based on afl by Michal Zalewski and a big online community\n");
-
-  doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;
+          
+  doc_path = access(DOC_PATH, F_OK) ? (u8 *)"docs" : doc_path;
 
   gettimeofday(&tv, &tz);
-  init_seed = tv.tv_sec ^ tv.tv_usec ^ getpid();
+  afl->init_seed = tv.tv_sec ^ tv.tv_usec ^ getpid();
 
   while ((opt = getopt(argc, argv,
                        "+c:i:I:o:f:m:t:T:dnCB:S:M:x:QNUWe:p:s:V:E:L:hRP:")) > 0)
 
     switch (opt) {
 
-      case 'I': infoexec = optarg; break;
+      case 'I': afl->infoexec = optarg; break;
 
       case 'c': {
 
-        cmplog_mode = 1;
-        cmplog_binary = ck_strdup(optarg);
+        afl->shm.cmplog_mode = 1;
+        afl->cmplog_binary = ck_strdup(optarg);
         break;
 
       }
 
       case 's': {
 
-        init_seed = strtoul(optarg, 0L, 10);
-        fixed_seed = 1;
+        afl->init_seed = strtoul(optarg, 0L, 10);
+        afl->fixed_seed = 1;
         break;
 
       }
@@ -270,29 +281,29 @@ int main(int argc, char** argv, char** envp) {
 
         if (!stricmp(optarg, "fast")) {
 
-          schedule = FAST;
+          afl->schedule = FAST;
 
         } else if (!stricmp(optarg, "coe")) {
 
-          schedule = COE;
+          afl->schedule = COE;
 
         } else if (!stricmp(optarg, "exploit")) {
 
-          schedule = EXPLOIT;
+          afl->schedule = EXPLOIT;
 
         } else if (!stricmp(optarg, "lin")) {
 
-          schedule = LIN;
+          afl->schedule = LIN;
 
         } else if (!stricmp(optarg, "quad")) {
 
-          schedule = QUAD;
+          afl->schedule = QUAD;
 
         } else if (!stricmp(optarg, "explore") || !stricmp(optarg, "default") ||
 
                    !stricmp(optarg, "normal") || !stricmp(optarg, "afl")) {
 
-          schedule = EXPLORE;
+          afl->schedule = EXPLORE;
 
         } else {
 
@@ -304,46 +315,46 @@ int main(int argc, char** argv, char** envp) {
 
       case 'e':
 
-        if (file_extension) FATAL("Multiple -e options not supported");
+        if (afl->file_extension) FATAL("Multiple -e options not supported");
 
-        file_extension = optarg;
+        afl->file_extension = optarg;
 
         break;
 
       case 'i':                                                /* input dir */
 
-        if (in_dir) FATAL("Multiple -i options not supported");
-        in_dir = optarg;
+        if (afl->in_dir) FATAL("Multiple -i options not supported");
+        afl->in_dir = optarg;
 
-        if (!strcmp(in_dir, "-")) in_place_resume = 1;
+        if (!strcmp(afl->in_dir, "-")) afl->in_place_resume = 1;
 
         break;
 
       case 'o':                                               /* output dir */
 
-        if (out_dir) FATAL("Multiple -o options not supported");
-        out_dir = optarg;
+        if (afl->out_dir) FATAL("Multiple -o options not supported");
+        afl->out_dir = optarg;
         break;
 
       case 'M': {                                         /* master sync ID */
 
         u8* c;
 
-        if (sync_id) FATAL("Multiple -S or -M options not supported");
-        sync_id = ck_strdup(optarg);
+        if (afl->sync_id) FATAL("Multiple -S or -M options not supported");
+        afl->sync_id = ck_strdup(optarg);
 
-        if ((c = strchr(sync_id, ':'))) {
+        if ((c = strchr(afl->sync_id, ':'))) {
 
           *c = 0;
 
-          if (sscanf(c + 1, "%u/%u", &master_id, &master_max) != 2 ||
-              !master_id || !master_max || master_id > master_max ||
-              master_max > 1000000)
+          if (sscanf(c + 1, "%u/%u", &afl->master_id, &afl->master_max) != 2 ||
+              !afl->master_id || !afl->master_max || afl->master_id > afl->master_max ||
+              afl->master_max > 1000000)
             FATAL("Bogus master ID passed to -M");
 
         }
 
-        force_deterministic = 1;
+        afl->force_deterministic = 1;
 
       }
 
@@ -351,15 +362,15 @@ int main(int argc, char** argv, char** envp) {
 
       case 'S':
 
-        if (sync_id) FATAL("Multiple -S or -M options not supported");
-        sync_id = ck_strdup(optarg);
+        if (afl->sync_id) FATAL("Multiple -S or -M options not supported");
+        afl->sync_id = ck_strdup(optarg);
         break;
 
       case 'f':                                              /* target file */
 
-        if (out_file) FATAL("Multiple -f options not supported");
-        out_file = optarg;
-        use_stdin = 0;
+        if (afl->fsrv.out_file) FATAL("Multiple -f options not supported");
+        afl->fsrv.out_file = ck_strdup(optarg);
+        afl->fsrv.use_stdin = 0;
         break;
 
       case 'x':                                               /* dictionary */
@@ -372,18 +383,18 @@ int main(int argc, char** argv, char** envp) {
 
         u8 suffix = 0;
 
-        if (timeout_given) FATAL("Multiple -t options not supported");
+        if (afl->timeout_given) FATAL("Multiple -t options not supported");
 
-        if (sscanf(optarg, "%u%c", &exec_tmout, &suffix) < 1 ||
+        if (sscanf(optarg, "%u%c", &afl->fsrv.exec_tmout, &suffix) < 1 ||
             optarg[0] == '-')
           FATAL("Bad syntax used for -t");
 
-        if (exec_tmout < 5) FATAL("Dangerously low value of -t");
+        if (afl->fsrv.exec_tmout < 5) FATAL("Dangerously low value of -t");
 
         if (suffix == '+')
-          timeout_given = 2;
+          afl->timeout_given = 2;
         else
-          timeout_given = 1;
+          afl->timeout_given = 1;
 
         break;
 
@@ -398,29 +409,29 @@ int main(int argc, char** argv, char** envp) {
 
         if (!strcmp(optarg, "none")) {
 
-          mem_limit = 0;
+          afl->fsrv.mem_limit = 0;
           break;
 
         }
 
-        if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
+        if (sscanf(optarg, "%llu%c", &afl->fsrv.mem_limit, &suffix) < 1 ||
             optarg[0] == '-')
           FATAL("Bad syntax used for -m");
 
         switch (suffix) {
 
-          case 'T': mem_limit *= 1024 * 1024; break;
-          case 'G': mem_limit *= 1024; break;
-          case 'k': mem_limit /= 1024; break;
+          case 'T': afl->fsrv.mem_limit *= 1024 * 1024; break;
+          case 'G': afl->fsrv.mem_limit *= 1024; break;
+          case 'k': afl->fsrv.mem_limit /= 1024; break;
           case 'M': break;
 
           default: FATAL("Unsupported suffix or bad syntax for -m");
 
         }
 
-        if (mem_limit < 5) FATAL("Dangerously low value of -m");
+        if (afl->fsrv.mem_limit < 5) FATAL("Dangerously low value of -m");
 
-        if (sizeof(rlim_t) == 4 && mem_limit > 2000)
+        if (sizeof(rlim_t) == 4 && afl->fsrv.mem_limit > 2000)
           FATAL("Value of -m out of range on 32-bit systems");
 
       }
@@ -429,9 +440,9 @@ int main(int argc, char** argv, char** envp) {
 
       case 'd':                                       /* skip deterministic */
 
-        if (skip_deterministic) FATAL("Multiple -d options not supported");
-        skip_deterministic = 1;
-        use_splicing = 1;
+        if (afl->skip_deterministic) FATAL("Multiple -d options not supported");
+        afl->skip_deterministic = 1;
+        afl->use_splicing = 1;
         break;
 
       case 'B':                                              /* load bitmap */
@@ -447,136 +458,136 @@ int main(int argc, char** argv, char** envp) {
            I only used this once or twice to get variants of a particular
            file, so I'm not making this an official setting. */
 
-        if (in_bitmap) FATAL("Multiple -B options not supported");
+        if (afl->in_bitmap) FATAL("Multiple -B options not supported");
 
-        in_bitmap = optarg;
-        read_bitmap(in_bitmap);
+        afl->in_bitmap = optarg;
+        read_bitmap(afl, afl->in_bitmap);
         break;
 
       case 'C':                                               /* crash mode */
 
-        if (crash_mode) FATAL("Multiple -C options not supported");
-        crash_mode = FAULT_CRASH;
+        if (afl->crash_mode) FATAL("Multiple -C options not supported");
+        afl->crash_mode = FAULT_CRASH;
         break;
 
       case 'n':                                                /* dumb mode */
 
-        if (dumb_mode) FATAL("Multiple -n options not supported");
+        if (afl->dumb_mode) FATAL("Multiple -n options not supported");
         if (get_afl_env("AFL_DUMB_FORKSRV"))
-          dumb_mode = 2;
+          afl->dumb_mode = 2;
         else
-          dumb_mode = 1;
+          afl->dumb_mode = 1;
 
         break;
 
       case 'T':                                                   /* banner */
 
-        if (use_banner) FATAL("Multiple -T options not supported");
-        use_banner = optarg;
+        if (afl->use_banner) FATAL("Multiple -T options not supported");
+        afl->use_banner = optarg;
         break;
 
       case 'Q':                                                /* QEMU mode */
 
-        if (qemu_mode) FATAL("Multiple -Q options not supported");
-        qemu_mode = 1;
+        if (afl->qemu_mode) FATAL("Multiple -Q options not supported");
+        afl->qemu_mode = 1;
 
-        if (!mem_limit_given) mem_limit = MEM_LIMIT_QEMU;
+        if (!mem_limit_given) afl->fsrv.mem_limit = MEM_LIMIT_QEMU;
 
         break;
 
       case 'N':                                             /* Unicorn mode */
 
-        if (no_unlink) FATAL("Multiple -N options not supported");
-        no_unlink = 1;
+        if (afl->no_unlink) FATAL("Multiple -N options not supported");
+        afl->no_unlink = 1;
 
         break;
 
       case 'U':                                             /* Unicorn mode */
 
-        if (unicorn_mode) FATAL("Multiple -U options not supported");
-        unicorn_mode = 1;
+        if (afl->unicorn_mode) FATAL("Multiple -U options not supported");
+        afl->unicorn_mode = 1;
 
-        if (!mem_limit_given) mem_limit = MEM_LIMIT_UNICORN;
+        if (!mem_limit_given) afl->fsrv.mem_limit = MEM_LIMIT_UNICORN;
 
         break;
 
       case 'W':                                           /* Wine+QEMU mode */
 
-        if (use_wine) FATAL("Multiple -W options not supported");
-        qemu_mode = 1;
-        use_wine = 1;
+        if (afl->use_wine) FATAL("Multiple -W options not supported");
+        afl->qemu_mode = 1;
+        afl->use_wine = 1;
 
-        if (!mem_limit_given) mem_limit = 0;
+        if (!mem_limit_given) afl->fsrv.mem_limit = 0;
 
         break;
 
       case 'V': {
 
-        most_time_key = 1;
-        if (sscanf(optarg, "%llu", &most_time) < 1 || optarg[0] == '-')
+        afl->most_time_key = 1;
+        if (sscanf(optarg, "%llu", &afl->most_time) < 1 || optarg[0] == '-')
           FATAL("Bad syntax used for -V");
 
       } break;
 
       case 'E': {
 
-        most_execs_key = 1;
-        if (sscanf(optarg, "%llu", &most_execs) < 1 || optarg[0] == '-')
+        afl->most_execs_key = 1;
+        if (sscanf(optarg, "%llu", &afl->most_execs) < 1 || optarg[0] == '-')
           FATAL("Bad syntax used for -E");
 
       } break;
 
       case 'L': {                                              /* MOpt mode */
 
-        if (limit_time_sig) FATAL("Multiple -L options not supported");
-        limit_time_sig = 1;
-        havoc_max_mult = HAVOC_MAX_MULT_MOPT;
+        if (afl->limit_time_sig) FATAL("Multiple -L options not supported");
+        afl->limit_time_sig = 1;
+        afl->havoc_max_mult = HAVOC_MAX_MULT_MOPT;
 
-        if (sscanf(optarg, "%llu", &limit_time_puppet) < 1 || optarg[0] == '-')
+        if (sscanf(optarg, "%llu", &afl->limit_time_puppet) < 1 || optarg[0] == '-')
           FATAL("Bad syntax used for -L");
 
-        u64 limit_time_puppet2 = limit_time_puppet * 60 * 1000;
+        u64 limit_time_puppet2 = afl->limit_time_puppet * 60 * 1000;
 
-        if (limit_time_puppet2 < limit_time_puppet)
+        if (limit_time_puppet2 < afl->limit_time_puppet)
           FATAL("limit_time overflow");
-        limit_time_puppet = limit_time_puppet2;
+        afl->limit_time_puppet = limit_time_puppet2;
 
-        SAYF("limit_time_puppet %llu\n", limit_time_puppet);
-        swarm_now = 0;
+        SAYF("limit_time_puppet %llu\n", afl->limit_time_puppet);
+        afl->swarm_now = 0;
 
-        if (limit_time_puppet == 0) key_puppet = 1;
+        if (afl->limit_time_puppet == 0) afl->key_puppet = 1;
 
         int i;
         int tmp_swarm = 0;
 
-        if (g_now > g_max) g_now = 0;
-        w_now = (w_init - w_end) * (g_max - g_now) / (g_max) + w_end;
+        if (afl->g_now > afl->g_max) afl->g_now = 0;
+        afl->w_now = (afl->w_init - afl->w_end) * (afl->g_max - afl->g_now) / (afl->g_max) + afl->w_end;
 
         for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) {
 
           double total_puppet_temp = 0.0;
-          swarm_fitness[tmp_swarm] = 0.0;
+          afl->swarm_fitness[tmp_swarm] = 0.0;
 
           for (i = 0; i < operator_num; ++i) {
 
-            stage_finds_puppet[tmp_swarm][i] = 0;
-            probability_now[tmp_swarm][i] = 0.0;
-            x_now[tmp_swarm][i] = ((double)(random() % 7000) * 0.0001 + 0.1);
-            total_puppet_temp += x_now[tmp_swarm][i];
-            v_now[tmp_swarm][i] = 0.1;
-            L_best[tmp_swarm][i] = 0.5;
-            G_best[i] = 0.5;
-            eff_best[tmp_swarm][i] = 0.0;
+            afl->stage_finds_puppet[tmp_swarm][i] = 0;
+            afl->probability_now[tmp_swarm][i] = 0.0;
+            afl->x_now[tmp_swarm][i] = ((double)(random() % 7000) * 0.0001 + 0.1);
+            total_puppet_temp += afl->x_now[tmp_swarm][i];
+            afl->v_now[tmp_swarm][i] = 0.1;
+            afl->L_best[tmp_swarm][i] = 0.5;
+            afl->G_best[i] = 0.5;
+            afl->eff_best[tmp_swarm][i] = 0.0;
 
           }
 
           for (i = 0; i < operator_num; ++i) {
 
-            stage_cycles_puppet_v2[tmp_swarm][i] =
-                stage_cycles_puppet[tmp_swarm][i];
-            stage_finds_puppet_v2[tmp_swarm][i] =
-                stage_finds_puppet[tmp_swarm][i];
-            x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / total_puppet_temp;
+            afl->stage_cycles_puppet_v2[tmp_swarm][i] =
+                afl->stage_cycles_puppet[tmp_swarm][i];
+            afl->stage_finds_puppet_v2[tmp_swarm][i] =
+                afl->stage_finds_puppet[tmp_swarm][i];
+            afl->x_now[tmp_swarm][i] = afl->x_now[tmp_swarm][i] / total_puppet_temp;
 
           }
 
@@ -584,47 +595,47 @@ int main(int argc, char** argv, char** envp) {
 
           for (i = 0; i < operator_num; ++i) {
 
-            probability_now[tmp_swarm][i] = 0.0;
-            v_now[tmp_swarm][i] =
-                w_now * v_now[tmp_swarm][i] +
-                RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) +
-                RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
+            afl->probability_now[tmp_swarm][i] = 0.0;
+            afl->v_now[tmp_swarm][i] =
+                afl->w_now * afl->v_now[tmp_swarm][i] +
+                RAND_C * (afl->L_best[tmp_swarm][i] - afl->x_now[tmp_swarm][i]) +
+                RAND_C * (afl->G_best[i] - afl->x_now[tmp_swarm][i]);
 
-            x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
+            afl->x_now[tmp_swarm][i] += afl->v_now[tmp_swarm][i];
 
-            if (x_now[tmp_swarm][i] > v_max)
-              x_now[tmp_swarm][i] = v_max;
-            else if (x_now[tmp_swarm][i] < v_min)
-              x_now[tmp_swarm][i] = v_min;
+            if (afl->x_now[tmp_swarm][i] > v_max)
+              afl->x_now[tmp_swarm][i] = v_max;
+            else if (afl->x_now[tmp_swarm][i] < v_min)
+              afl->x_now[tmp_swarm][i] = v_min;
 
-            x_temp += x_now[tmp_swarm][i];
+            x_temp += afl->x_now[tmp_swarm][i];
 
           }
 
           for (i = 0; i < operator_num; ++i) {
 
-            x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
+            afl->x_now[tmp_swarm][i] = afl->x_now[tmp_swarm][i] / x_temp;
             if (likely(i != 0))
-              probability_now[tmp_swarm][i] =
-                  probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
+              afl->probability_now[tmp_swarm][i] =
+                  afl->probability_now[tmp_swarm][i - 1] + afl->x_now[tmp_swarm][i];
             else
-              probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
+              afl->probability_now[tmp_swarm][i] = afl->x_now[tmp_swarm][i];
 
           }
 
-          if (probability_now[tmp_swarm][operator_num - 1] < 0.99 ||
-              probability_now[tmp_swarm][operator_num - 1] > 1.01)
+          if (afl->probability_now[tmp_swarm][operator_num - 1] < 0.99 ||
+              afl->probability_now[tmp_swarm][operator_num - 1] > 1.01)
             FATAL("ERROR probability");
 
         }
 
         for (i = 0; i < operator_num; ++i) {
 
-          core_operator_finds_puppet[i] = 0;
-          core_operator_finds_puppet_v2[i] = 0;
-          core_operator_cycles_puppet[i] = 0;
-          core_operator_cycles_puppet_v2[i] = 0;
-          core_operator_cycles_puppet_v3[i] = 0;
+          afl->core_operator_finds_puppet[i] = 0;
+          afl->core_operator_finds_puppet_v2[i] = 0;
+          afl->core_operator_cycles_puppet[i] = 0;
+          afl->core_operator_cycles_puppet_v2[i] = 0;
+          afl->core_operator_cycles_puppet_v3[i] = 0;
 
         }
 
@@ -634,10 +645,10 @@ int main(int argc, char** argv, char** envp) {
 
       case 'R':
 
-        if (use_radamsa)
-          use_radamsa = 2;
+        if (afl->use_radamsa)
+          afl->use_radamsa = 2;
         else
-          use_radamsa = 1;
+          afl->use_radamsa = 1;
 
         break;
 
@@ -646,8 +657,8 @@ int main(int argc, char** argv, char** envp) {
 
     }
 
-  if (optind == argc || !in_dir || !out_dir || show_help)
-    usage(argv[0], show_help);
+  if (optind == argc || !afl->in_dir || !afl->out_dir || show_help)
+    usage(afl, argv[0], show_help);
 
   OKF("afl++ is maintained by Marc \"van Hauser\" Heuse, Heiko \"hexcoder\" "
       "Eißfeldt, Andrea Fioraldi and Dominik Maier");
@@ -658,19 +669,19 @@ int main(int argc, char** argv, char** envp) {
   OKF("afl-tmin fork server patch from github.com/nccgroup/TriforceAFL");
   OKF("MOpt Mutator from github.com/puppet-meteor/MOpt-AFL");
 
-  if (sync_id && force_deterministic && getenv("AFL_CUSTOM_MUTATOR_ONLY"))
+  if (afl->sync_id && afl->force_deterministic && getenv("AFL_CUSTOM_MUTATOR_ONLY"))
     WARNF(
         "Using -M master with the AFL_CUSTOM_MUTATOR_ONLY mutator options will "
         "result in no deterministic mutations being done!");
 
   check_environment_vars(envp);
 
-  if (fixed_seed) OKF("Running with fixed seed: %u", (u32)init_seed);
-  srandom((u32)init_seed);
+  if (afl->fixed_seed) OKF("Running with fixed seed: %u", (u32)afl->init_seed);
+  srandom((u32)afl->init_seed);
 
-  if (use_radamsa) {
+  if (afl->use_radamsa) {
 
-    if (limit_time_sig)
+    if (afl->limit_time_sig)
       FATAL(
           "MOpt and Radamsa are mutually exclusive. We accept pull requests "
           "that integrates MOpt with the optional mutators "
@@ -685,9 +696,9 @@ int main(int argc, char** argv, char** envp) {
     if (!handle) FATAL("Failed to dlopen() libradamsa");
 
     void (*radamsa_init_ptr)(void) = dlsym(handle, "radamsa_init");
-    radamsa_mutate_ptr = dlsym(handle, "radamsa");
+    afl->radamsa_mutate_ptr = dlsym(handle, "radamsa");
 
-    if (!radamsa_init_ptr || !radamsa_mutate_ptr)
+    if (!radamsa_init_ptr || !afl->radamsa_mutate_ptr)
       FATAL("Failed to dlsym() libradamsa");
 
     /* radamsa_init installs some signal handlers, call it before
@@ -700,27 +711,27 @@ int main(int argc, char** argv, char** envp) {
   setup_signal_handlers();
   check_asan_opts();
 
-  power_name = power_names[schedule];
+  afl->power_name = power_names[afl->schedule];
 
-  if (sync_id) fix_up_sync();
+  if (afl->sync_id) fix_up_sync(afl);
 
-  if (!strcmp(in_dir, out_dir))
+  if (!strcmp(afl->in_dir, afl->out_dir))
     FATAL("Input and output directories can't be the same");
 
-  if (dumb_mode) {
+  if (afl->dumb_mode) {
 
-    if (crash_mode) FATAL("-C and -n are mutually exclusive");
-    if (qemu_mode) FATAL("-Q and -n are mutually exclusive");
-    if (unicorn_mode) FATAL("-U and -n are mutually exclusive");
+    if (afl->crash_mode) FATAL("-C and -n are mutually exclusive");
+    if (afl->qemu_mode) FATAL("-Q and -n are mutually exclusive");
+    if (afl->unicorn_mode) FATAL("-U and -n are mutually exclusive");
 
   }
 
-  if (get_afl_env("AFL_DISABLE_TRIM")) disable_trim = 1;
+  if (get_afl_env("AFL_DISABLE_TRIM")) afl->disable_trim = 1;
 
   if (getenv("AFL_NO_UI") && getenv("AFL_FORCE_UI"))
     FATAL("AFL_NO_UI and AFL_FORCE_UI are mutually exclusive");
 
-  if (strchr(argv[optind], '/') == NULL && !unicorn_mode)
+  if (strchr(argv[optind], '/') == NULL && !afl->unicorn_mode)
     WARNF(cLRD
           "Target binary called without a prefixed path, make sure you are "
           "fuzzing the right binary: " cRST "%s",
@@ -728,7 +739,7 @@ int main(int argc, char** argv, char** envp) {
 
   ACTF("Getting to work...");
 
-  switch (schedule) {
+  switch (afl->schedule) {
 
     case FAST: OKF("Using exponential power schedule (FAST)"); break;
     case COE: OKF("Using cut-off exponential power schedule (COE)"); break;
@@ -744,27 +755,28 @@ int main(int argc, char** argv, char** envp) {
 
   }
 
-  if (get_afl_env("AFL_NO_FORKSRV")) no_forkserver = 1;
-  if (get_afl_env("AFL_NO_CPU_RED")) no_cpu_meter_red = 1;
-  if (get_afl_env("AFL_NO_ARITH")) no_arith = 1;
-  if (get_afl_env("AFL_SHUFFLE_QUEUE")) shuffle_queue = 1;
-  if (get_afl_env("AFL_FAST_CAL")) fast_cal = 1;
+  if (get_afl_env("AFL_NO_FORKSRV")) afl->no_forkserver = 1;
+  if (get_afl_env("AFL_NO_CPU_RED")) afl->no_cpu_meter_red = 1;
+  if (get_afl_env("AFL_NO_ARITH")) afl->no_arith = 1;
+  if (get_afl_env("AFL_SHUFFLE_QUEUE")) afl->shuffle_queue = 1;
+  if (get_afl_env("AFL_FAST_CAL")) afl->fast_cal = 1;
 
   if (get_afl_env("AFL_AUTORESUME")) {
 
-    autoresume = 1;
-    if (in_place_resume) SAYF("AFL_AUTORESUME has no effect for '-i -'");
+    afl->autoresume = 1;
+    if (afl->in_place_resume)
+      SAYF("AFL_AUTORESUME has no effect for '-i -'");
 
   }
 
   if (get_afl_env("AFL_HANG_TMOUT")) {
 
-    hang_tmout = atoi(getenv("AFL_HANG_TMOUT"));
-    if (!hang_tmout) FATAL("Invalid value of AFL_HANG_TMOUT");
+    afl->hang_tmout = atoi(getenv("AFL_HANG_TMOUT"));
+    if (!afl->hang_tmout) FATAL("Invalid value of AFL_HANG_TMOUT");
 
   }
 
-  if (dumb_mode == 2 && no_forkserver)
+  if (afl->dumb_mode == 2 && afl->no_forkserver)
     FATAL("AFL_DUMB_FORKSRV and AFL_NO_FORKSRV are mutually exclusive");
 
   if (getenv("LD_PRELOAD"))
@@ -774,7 +786,7 @@ int main(int argc, char** argv, char** envp) {
 
   if (get_afl_env("AFL_PRELOAD")) {
 
-    if (qemu_mode) {
+    if (afl->qemu_mode) {
 
       u8* qemu_preload = getenv("QEMU_SET_ENV");
       u8* afl_preload = getenv("AFL_PRELOAD");
@@ -813,78 +825,79 @@ int main(int argc, char** argv, char** envp) {
   if (getenv("AFL_LD_PRELOAD"))
     FATAL("Use AFL_PRELOAD instead of AFL_LD_PRELOAD");
 
-  save_cmdline(argc, argv);
+  save_cmdline(afl, argc, argv);
 
-  fix_up_banner(argv[optind]);
+  fix_up_banner(afl, argv[optind]);
 
-  check_if_tty();
-  if (get_afl_env("AFL_FORCE_UI")) not_on_tty = 0;
+  check_if_tty(afl);
+  if (get_afl_env("AFL_FORCE_UI")) afl->not_on_tty = 0;
 
   if (get_afl_env("AFL_CAL_FAST")) {
 
     /* Use less calibration cycles, for slow applications */
-    cal_cycles = 3;
-    cal_cycles_long = 5;
+    afl->cal_cycles = 3;
+    afl->cal_cycles_long = 5;
 
   }
 
-  if (get_afl_env("AFL_DEBUG")) debug = 1;
+  if (get_afl_env("AFL_DEBUG")) afl->debug = 1;
 
   if (get_afl_env("AFL_CUSTOM_MUTATOR_ONLY")) {
 
     /* This ensures we don't proceed to havoc/splice */
-    custom_only = 1;
+    afl->custom_only = 1;
 
     /* Ensure we also skip all deterministic steps */
-    skip_deterministic = 1;
+    afl->skip_deterministic = 1;
 
   }
 
-  get_core_count();
+  get_core_count(afl);
 
 #ifdef HAVE_AFFINITY
-  bind_to_free_cpu();
+  bind_to_free_cpu(afl);
 #endif                                                     /* HAVE_AFFINITY */
 
   check_crash_handling();
-  check_cpu_governor();
+  check_cpu_governor(afl);
+
+  afl->fsrv.trace_bits = afl_shm_init(&afl->shm, MAP_SIZE, afl->dumb_mode);
 
-  setup_post();
-  setup_shm(dumb_mode);
+  setup_post(afl);
 
-  if (!in_bitmap) memset(virgin_bits, 255, MAP_SIZE);
-  memset(virgin_tmout, 255, MAP_SIZE);
-  memset(virgin_crash, 255, MAP_SIZE);
+  if (!afl->in_bitmap) memset(afl->virgin_bits, 255, MAP_SIZE);
+  memset(afl->virgin_tmout, 255, MAP_SIZE);
+  memset(afl->virgin_crash, 255, MAP_SIZE);
 
   init_count_class16();
 
-  setup_dirs_fds();
+  setup_dirs_fds(afl);
 
-  setup_custom_mutator();
+  setup_custom_mutator(afl);
 
-  setup_cmdline_file(argv + optind);
+  setup_cmdline_file(afl, argv + optind);
 
-  read_testcases();
-  load_auto();
+  read_testcases(afl);
+  load_auto(afl);
 
-  pivot_inputs();
+  pivot_inputs(afl);
 
-  if (extras_dir) load_extras(extras_dir);
+  if (extras_dir) load_extras(afl, extras_dir);
 
-  if (!timeout_given) find_timeout();
+  if (!afl->timeout_given) find_timeout(afl);
 
-  if ((tmp_dir = get_afl_env("AFL_TMPDIR")) != NULL && !in_place_resume) {
+  if ((afl->tmp_dir = get_afl_env("AFL_TMPDIR")) != NULL && !afl->in_place_resume) {
 
-    char tmpfile[file_extension
-                     ? strlen(tmp_dir) + 1 + 10 + 1 + strlen(file_extension) + 1
-                     : strlen(tmp_dir) + 1 + 10 + 1];
-    if (file_extension) {
+    char tmpfile[afl->file_extension
+                     ? strlen(afl->tmp_dir) + 1 + 10 + 1 + strlen(afl->file_extension) + 1
+                     : strlen(afl->tmp_dir) + 1 + 10 + 1];
+    if (afl->file_extension) {
 
-      sprintf(tmpfile, "%s/.cur_input.%s", tmp_dir, file_extension);
+      sprintf(tmpfile, "%s/.cur_input.%s", afl->tmp_dir, afl->file_extension);
 
     } else {
 
-      sprintf(tmpfile, "%s/.cur_input", tmp_dir);
+      sprintf(tmpfile, "%s/.cur_input", afl->tmp_dir);
 
     }
 
@@ -897,32 +910,32 @@ int main(int argc, char** argv, char** envp) {
 
   } else
 
-    tmp_dir = out_dir;
+    afl->tmp_dir = afl->out_dir;
 
   /* If we don't have a file name chosen yet, use a safe default. */
 
-  if (!out_file) {
+  if (!afl->fsrv.out_file) {
 
     u32 i = optind + 1;
     while (argv[i]) {
 
       u8* aa_loc = strstr(argv[i], "@@");
 
-      if (aa_loc && !out_file) {
+      if (aa_loc && !afl->fsrv.out_file) {
 
-        use_stdin = 0;
+        afl->fsrv.use_stdin = 0;
 
-        if (file_extension) {
+        if (afl->file_extension) {
 
-          out_file = alloc_printf("%s/.cur_input.%s", tmp_dir, file_extension);
+          afl->fsrv.out_file = alloc_printf("%s/.cur_input.%s", afl->tmp_dir, afl->file_extension);
 
         } else {
 
-          out_file = alloc_printf("%s/.cur_input", tmp_dir);
+          afl->fsrv.out_file = alloc_printf("%s/.cur_input", afl->tmp_dir);
 
         }
 
-        detect_file_args(argv + optind + 1, out_file);
+        detect_file_args(argv + optind + 1, afl->fsrv.out_file, afl->fsrv.use_stdin);
         break;
 
       }
@@ -933,90 +946,93 @@ int main(int argc, char** argv, char** envp) {
 
   }
 
-  if (!out_file) setup_stdio_file();
+  if (!afl->fsrv.out_file) setup_stdio_file(afl);
 
-  if (cmplog_binary) {
+  if (afl->cmplog_binary) {
 
-    if (limit_time_sig)
+    if (afl->limit_time_sig)
       FATAL(
           "MOpt and CmpLog are mutually exclusive. We accept pull requests "
           "that integrates MOpt with the optional mutators "
           "(custom/radamsa/redquenn/...).");
 
-    if (unicorn_mode)
+    if (afl->unicorn_mode)
       FATAL("CmpLog and Unicorn mode are not compatible at the moment, sorry");
-    if (!qemu_mode) check_binary(cmplog_binary);
+    if (!afl->qemu_mode) check_binary(afl, afl->cmplog_binary);
 
   }
 
-  check_binary(argv[optind]);
+  check_binary(afl, argv[optind]);
 
-  start_time = get_cur_time();
+  afl->start_time = get_cur_time();
 
-  if (qemu_mode) {
+  if (afl->qemu_mode) {
 
-    if (use_wine)
-      use_argv = get_wine_argv(argv[0], argv + optind, argc - optind);
+    if (afl->use_wine)
+      use_argv = get_wine_argv(argv[0], &afl->fsrv.target_path, argc - optind, argv + optind);
     else
-      use_argv = get_qemu_argv(argv[0], argv + optind, argc - optind);
+      use_argv = get_qemu_argv(argv[0], &afl->fsrv.target_path, argc - optind, argv + optind);
 
-  } else
+  } else {
 
     use_argv = argv + optind;
 
-  perform_dry_run(use_argv);
+  }
+
+  afl->argv = use_argv; 
+  perform_dry_run(afl);
 
-  cull_queue();
+  cull_queue(afl);
 
-  show_init_stats();
+  show_init_stats(afl);
 
-  seek_to = find_start_position();
+  seek_to = find_start_position(afl);
 
-  write_stats_file(0, 0, 0);
-  maybe_update_plot_file(0, 0);
-  save_auto();
+  write_stats_file(afl, 0, 0, 0);
+  maybe_update_plot_file(afl, 0, 0);
+  save_auto(afl);
 
-  if (stop_soon) goto stop_fuzzing;
+  if (afl->stop_soon) goto stop_fuzzing;
 
   /* Woop woop woop */
 
-  if (!not_on_tty) {
+  if (!afl->not_on_tty) {
 
     sleep(4);
-    start_time += 4000;
-    if (stop_soon) goto stop_fuzzing;
+    afl->start_time += 4000;
+    if (afl->stop_soon) goto stop_fuzzing;
 
   }
 
   // real start time, we reset, so this works correctly with -V
-  start_time = get_cur_time();
+  afl->start_time = get_cur_time();
 
   while (1) {
 
     u8 skipped_fuzz;
 
-    cull_queue();
+    cull_queue(afl);
 
-    if (!queue_cur) {
+    if (!afl->queue_cur) {
 
-      ++queue_cycle;
-      current_entry = 0;
-      cur_skipped_paths = 0;
-      queue_cur = queue;
+      ++afl->queue_cycle;
+      afl->current_entry = 0;
+      afl->cur_skipped_paths = 0;
+      afl->queue_cur = afl->queue;
 
       while (seek_to) {
 
-        ++current_entry;
+        ++afl->current_entry;
         --seek_to;
-        queue_cur = queue_cur->next;
+        afl->queue_cur = afl->queue_cur->next;
 
       }
 
-      show_stats();
+      show_stats(afl);
 
-      if (not_on_tty) {
+      if (afl->not_on_tty) {
 
-        ACTF("Entering queue cycle %llu.", queue_cycle);
+        ACTF("Entering queue cycle %llu.", afl->queue_cycle);
         fflush(stdout);
 
       }
@@ -1024,58 +1040,58 @@ int main(int argc, char** argv, char** envp) {
       /* If we had a full queue cycle with no new finds, try
          recombination strategies next. */
 
-      if (queued_paths == prev_queued) {
+      if (afl->queued_paths == prev_queued) {
 
-        if (use_splicing)
-          ++cycles_wo_finds;
+        if (afl->use_splicing)
+          ++afl->cycles_wo_finds;
         else
-          use_splicing = 1;
+          afl->use_splicing = 1;
 
       } else
 
-        cycles_wo_finds = 0;
+        afl->cycles_wo_finds = 0;
 
-      prev_queued = queued_paths;
+      prev_queued = afl->queued_paths;
 
-      if (sync_id && queue_cycle == 1 && get_afl_env("AFL_IMPORT_FIRST"))
-        sync_fuzzers(use_argv);
+      if (afl->sync_id && afl->queue_cycle == 1 && get_afl_env("AFL_IMPORT_FIRST"))
+        sync_fuzzers(afl);
 
     }
 
-    skipped_fuzz = fuzz_one(use_argv);
+    skipped_fuzz = fuzz_one(afl);
 
-    if (!stop_soon && sync_id && !skipped_fuzz) {
+    if (!afl->stop_soon && afl->sync_id && !skipped_fuzz) {
 
-      if (!(sync_interval_cnt++ % SYNC_INTERVAL)) sync_fuzzers(use_argv);
+      if (!(sync_interval_cnt++ % SYNC_INTERVAL)) sync_fuzzers(afl);
 
     }
 
-    if (!stop_soon && exit_1) stop_soon = 2;
+    if (!afl->stop_soon && exit_1) afl->stop_soon = 2;
 
-    if (stop_soon) break;
+    if (afl->stop_soon) break;
 
-    queue_cur = queue_cur->next;
-    ++current_entry;
+    afl->queue_cur = afl->queue_cur->next;
+    ++afl->current_entry;
 
-    if (most_time_key == 1) {
+    if (afl->most_time_key == 1) {
 
       u64 cur_ms_lv = get_cur_time();
-      if (most_time * 1000 < cur_ms_lv - start_time) {
+      if (afl->most_time * 1000 < cur_ms_lv - afl->start_time) {
 
-        most_time_key = 2;
-        stop_soon = 2;
+        afl->most_time_key = 2;
+        afl->stop_soon = 2;
         break;
 
       }
 
     }
 
-    if (most_execs_key == 1) {
+    if (afl->most_execs_key == 1) {
 
-      if (most_execs <= total_execs) {
+      if (afl->most_execs <= afl->total_execs) {
 
-        most_execs_key = 2;
-        stop_soon = 2;
+        afl->most_execs_key = 2;
+        afl->stop_soon = 2;
         break;
 
       }
@@ -1084,7 +1100,7 @@ int main(int argc, char** argv, char** envp) {
 
   }
 
-  if (queue_cur) show_stats();
+  if (afl->queue_cur) show_stats(afl);
 
   /*
    * ATTENTION - the following 10 lines were copied from a PR to Google's afl
@@ -1096,35 +1112,35 @@ int main(int argc, char** argv, char** envp) {
    */
   /* if we stopped programmatically, we kill the forkserver and the current
      runner. if we stopped manually, this is done by the signal handler */
-  if (stop_soon == 2) {
+  if (afl->stop_soon == 2) {
 
-    if (child_pid > 0) kill(child_pid, SIGKILL);
-    if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL);
-    if (cmplog_child_pid > 0) kill(cmplog_child_pid, SIGKILL);
-    if (cmplog_forksrv_pid > 0) kill(cmplog_forksrv_pid, SIGKILL);
+    if (afl->fsrv.child_pid > 0) kill(afl->fsrv.child_pid, SIGKILL);
+    if (afl->fsrv.fsrv_pid > 0) kill(afl->fsrv.fsrv_pid, SIGKILL);
+    if (afl->cmplog_child_pid > 0) kill(afl->cmplog_child_pid, SIGKILL);
+    if (afl->cmplog_fsrv_pid > 0) kill(afl->cmplog_fsrv_pid, SIGKILL);
     /* Now that we've killed the forkserver, we wait for it to be able to get
      * rusage stats. */
-    if (waitpid(forksrv_pid, NULL, 0) <= 0) { WARNF("error waitpid\n"); }
+    if (waitpid(afl->fsrv.fsrv_pid, NULL, 0) <= 0) { WARNF("error waitpid\n"); }
 
   }
 
-  write_bitmap();
-  write_stats_file(0, 0, 0);
-  maybe_update_plot_file(0, 0);
-  save_auto();
+  write_bitmap(afl);
+  write_stats_file(afl, 0, 0, 0);
+  maybe_update_plot_file(afl, 0, 0);
+  save_auto(afl);
 
 stop_fuzzing:
 
   SAYF(CURSOR_SHOW cLRD "\n\n+++ Testing aborted %s +++\n" cRST,
-       stop_soon == 2 ? "programmatically" : "by user");
+       afl->stop_soon == 2 ? "programmatically" : "by user");
 
-  if (most_time_key == 2) SAYF(cYEL "[!] " cRST "Time limit was reached\n");
-  if (most_execs_key == 2)
+  if (afl->most_time_key == 2) SAYF(cYEL "[!] " cRST "Time limit was reached\n");
+  if (afl->most_execs_key == 2)
     SAYF(cYEL "[!] " cRST "Execution limit was reached\n");
 
   /* Running for more than 30 minutes but still doing first cycle? */
 
-  if (queue_cycle == 1 && get_cur_time() - start_time > 30 * 60 * 1000) {
+  if (afl->queue_cycle == 1 && get_cur_time() - afl->start_time > 30 * 60 * 1000) {
 
     SAYF("\n" cYEL "[!] " cRST
          "Stopped during the first cycle, results may be incomplete.\n"
@@ -1133,12 +1149,17 @@ stop_fuzzing:
 
   }
 
-  fclose(plot_file);
-  destroy_queue();
-  destroy_extras();
-  ck_free(target_path);
-  ck_free(sync_id);
-  destroy_custom_mutator();
+  fclose(afl->fsrv.plot_file);
+  destroy_queue(afl);
+  destroy_extras(afl);
+  destroy_custom_mutator(afl);
+  afl_shm_deinit(&afl->shm);
+  afl_fsrv_deinit(&afl->fsrv);
+  if (afl->orig_cmdline) ck_free(afl->orig_cmdline);
+  ck_free(afl->fsrv.target_path);
+  ck_free(afl->fsrv.out_file);
+  ck_free(afl->sync_id);
+  ck_free(afl);
 
   alloc_report();
 
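Note on the loop logic above: the "full queue cycle with no new finds" comment corresponds to a small piece of bookkeeping that is easy to miss in the rename noise. A condensed, commented sketch (not a literal excerpt; prev_queued, skipped_fuzz and sync_interval_cnt are locals of the surrounding loop):

  if (afl->queued_paths == prev_queued) {

    /* A whole pass over the queue produced nothing new: enable splicing
       first, then start counting consecutive dry cycles. */
    if (afl->use_splicing) ++afl->cycles_wo_finds;
    else afl->use_splicing = 1;

  } else

    afl->cycles_wo_finds = 0;

  prev_queued = afl->queued_paths;

  /* Optionally import other fuzzers' finds before the first cycle ... */
  if (afl->sync_id && afl->queue_cycle == 1 && get_afl_env("AFL_IMPORT_FIRST"))
    sync_fuzzers(afl);

  skipped_fuzz = fuzz_one(afl);      /* mutate the current queue entry */

  /* ... and afterwards every SYNC_INTERVAL-th iteration. */
  if (!afl->stop_soon && afl->sync_id && !skipped_fuzz)
    if (!(sync_interval_cnt++ % SYNC_INTERVAL)) sync_fuzzers(afl);
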
diff --git a/src/afl-sharedmem.c b/src/afl-sharedmem.c
index 49ccae2a..bb49b6bd 100644
--- a/src/afl-sharedmem.c
+++ b/src/afl-sharedmem.c
@@ -36,9 +36,11 @@
 #include "hash.h"
 #include "sharedmem.h"
 #include "cmplog.h"
+#include "list.h"
 
 #include <stdio.h>
 #include <unistd.h>
+#include <stdbool.h>
 #include <stdlib.h>
 #include <string.h>
 #include <time.h>
@@ -59,114 +61,116 @@
 #include <sys/shm.h>
 #endif
 
-extern unsigned char *trace_bits;
+list_t shm_list = {0};
 
-#ifdef USEMMAP
-/* ================ Proteas ================ */
-int            g_shm_fd = -1;
-unsigned char *g_shm_base = NULL;
-char           g_shm_file_path[L_tmpnam];
-/* ========================================= */
-#else
-static s32 shm_id;                     /* ID of the SHM region              */
-static s32 cmplog_shm_id;
-#endif
+/* Get rid of shared memory. */
 
-int             cmplog_mode;
-struct cmp_map *cmp_map;
+void afl_shm_deinit(sharedmem_t *shm) {
 
-/* Get rid of shared memory (atexit handler). */
-
-void remove_shm(void) {
+  list_remove(&shm_list, shm);
 
 #ifdef USEMMAP
-  if (g_shm_base != NULL) {
+  if (shm->map != NULL) {
 
-    munmap(g_shm_base, MAP_SIZE);
-    g_shm_base = NULL;
+    munmap(shm->map, shm->size_alloc);
+    shm->map = NULL;
 
   }
 
-  if (g_shm_fd != -1) {
+  if (shm->g_shm_fd != -1) {
 
-    close(g_shm_fd);
-    g_shm_fd = -1;
+    close(shm->g_shm_fd);
+    shm->g_shm_fd = -1;
 
   }
 
 #else
-  shmctl(shm_id, IPC_RMID, NULL);
-  if (cmplog_mode) shmctl(cmplog_shm_id, IPC_RMID, NULL);
+  shmctl(shm->shm_id, IPC_RMID, NULL);
+  if (shm->cmplog_mode) shmctl(shm->cmplog_shm_id, IPC_RMID, NULL);
 #endif
 
+  shm->map = NULL;
+
 }
 
-/* Configure shared memory. */
+/* At exit, remove all leftover maps */
+
+void afl_shm_atexit(void) {
+
+  LIST_FOREACH(&shm_list, sharedmem_t, { afl_shm_deinit(el); });
+
+}
+
+/* Configure shared memory. 
+   Returns a pointer to shm->map for ease of use.
+*/
+
+u8 *afl_shm_init(sharedmem_t *shm, size_t map_size, unsigned char dumb_mode) {
 
-void setup_shm(unsigned char dumb_mode) {
+  shm->size_alloc = shm->size_used = map_size;
+
+  shm->map = NULL;
 
 #ifdef USEMMAP
-  /* generate random file name for multi instance */
 
-  /* thanks to f*cking glibc we can not use tmpnam securely, it generates a
-   * security warning that cannot be suppressed */
-  /* so we do this worse workaround */
-  snprintf(g_shm_file_path, L_tmpnam, "/afl_%d_%ld", getpid(), random());
+  shm->g_shm_fd = -1;
+
+  /* ======
+  generate random file name for multi instance
+
+  thanks to f*cking glibc we can not use tmpnam securely, it generates a
+  security warning that cannot be suppressed
+  so we do this worse workaround */
+  snprintf(shm->g_shm_file_path, L_tmpnam, "/afl_%d_%ld", getpid(), random());
 
   /* create the shared memory segment as if it was a file */
-  g_shm_fd = shm_open(g_shm_file_path, O_CREAT | O_RDWR | O_EXCL, 0600);
-  if (g_shm_fd == -1) { PFATAL("shm_open() failed"); }
+  shm->g_shm_fd = shm_open(shm->g_shm_file_path, O_CREAT | O_RDWR | O_EXCL, 0600);
+  if (shm->g_shm_fd == -1) { PFATAL("shm_open() failed"); }
 
   /* configure the size of the shared memory segment */
-  if (ftruncate(g_shm_fd, MAP_SIZE)) {
+  if (ftruncate(shm->g_shm_fd, map_size)) {
 
     PFATAL("setup_shm(): ftruncate() failed");
 
   }
 
   /* map the shared memory segment to the address space of the process */
-  g_shm_base =
-      mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, g_shm_fd, 0);
-  if (g_shm_base == MAP_FAILED) {
+  shm->map =
+      mmap(0, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm->g_shm_fd, 0);
+  if (shm->map == MAP_FAILED) {
 
-    close(g_shm_fd);
-    g_shm_fd = -1;
+    close(shm->g_shm_fd);
+    shm->g_shm_fd = -1;
     PFATAL("mmap() failed");
 
   }
 
-  atexit(remove_shm);
-
   /* If somebody is asking us to fuzz instrumented binaries in dumb mode,
      we don't want them to detect instrumentation, since we won't be sending
      fork server commands. This should be replaced with better auto-detection
      later on, perhaps? */
 
-  if (!dumb_mode) setenv(SHM_ENV_VAR, g_shm_file_path, 1);
-
-  trace_bits = g_shm_base;
+  if (!dumb_mode) setenv(SHM_ENV_VAR, shm->g_shm_file_path, 1);
 
-  if (trace_bits == -1 || !trace_bits) PFATAL("mmap() failed");
+  if (shm->map == (void *)-1 || !shm->map) PFATAL("mmap() failed");
 
 #else
   u8 *shm_str;
 
-  shm_id = shmget(IPC_PRIVATE, MAP_SIZE, IPC_CREAT | IPC_EXCL | 0600);
+  shm->shm_id = shmget(IPC_PRIVATE, map_size, IPC_CREAT | IPC_EXCL | 0600);
 
-  if (shm_id < 0) PFATAL("shmget() failed");
+  if (shm->shm_id < 0) PFATAL("shmget() failed");
 
-  if (cmplog_mode) {
+  if (shm->cmplog_mode) {
 
-    cmplog_shm_id = shmget(IPC_PRIVATE, sizeof(struct cmp_map),
+    shm->cmplog_shm_id = shmget(IPC_PRIVATE, sizeof(struct cmp_map),
                            IPC_CREAT | IPC_EXCL | 0600);
 
-    if (cmplog_shm_id < 0) PFATAL("shmget() failed");
+    if (shm->cmplog_shm_id < 0) PFATAL("shmget() failed");
 
   }
 
-  atexit(remove_shm);
-
-  shm_str = alloc_printf("%d", shm_id);
+  shm_str = alloc_printf("%d", shm->shm_id);
 
   /* If somebody is asking us to fuzz instrumented binaries in dumb mode,
      we don't want them to detect instrumentation, since we won't be sending
@@ -177,9 +181,9 @@ void setup_shm(unsigned char dumb_mode) {
 
   ck_free(shm_str);
 
-  if (cmplog_mode) {
+  if (shm->cmplog_mode) {
 
-    shm_str = alloc_printf("%d", cmplog_shm_id);
+    shm_str = alloc_printf("%d", shm->cmplog_shm_id);
 
     if (!dumb_mode) setenv(CMPLOG_SHM_ENV_VAR, shm_str, 1);
 
@@ -187,19 +191,24 @@ void setup_shm(unsigned char dumb_mode) {
 
   }
 
-  trace_bits = shmat(shm_id, NULL, 0);
+  shm->map = shmat(shm->shm_id, NULL, 0);
 
-  if (trace_bits == (void *)-1 || !trace_bits) PFATAL("shmat() failed");
+  if (shm->map == (void *)-1 || !shm->map) PFATAL("shmat() failed");
 
-  if (cmplog_mode) {
+  if (shm->cmplog_mode) {
 
-    cmp_map = shmat(cmplog_shm_id, NULL, 0);
+    shm->cmp_map = shmat(shm->cmplog_shm_id, NULL, 0);
 
-    if (cmp_map == (void *)-1 || !cmp_map) PFATAL("shmat() failed");
+    if (shm->cmp_map == (void *)-1 || !shm->cmp_map) PFATAL("shmat() failed");
 
   }
 
+
 #endif
 
-}
+  list_append(&shm_list, shm);
+  atexit(afl_shm_atexit);
 
+  return shm->map;
+
+}
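The net effect of the changes in this file: shared memory becomes an explicit, caller-owned sharedmem_t instead of file-scope globals plus a hard-wired atexit handler. A minimal usage sketch, mirroring how afl-showmap and afl-tmin call it further down (MAP_SIZE is the usual config.h constant; failures abort via PFATAL inside afl_shm_init):

  sharedmem_t shm = {0};

  /* Map MAP_SIZE bytes of coverage bitmap; dumb_mode == 0 also exports
     SHM_ENV_VAR so an instrumented target can attach to the same region. */
  u8 *trace_bits = afl_shm_init(&shm, MAP_SIZE, 0);

  /* ... run the target, inspect trace_bits ... */

  afl_shm_deinit(&shm);  /* leftover maps are also cleaned up by afl_shm_atexit() */
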
diff --git a/src/afl-showmap.c b/src/afl-showmap.c
index ffdb67e4..0f0d19c7 100644
--- a/src/afl-showmap.c
+++ b/src/afl-showmap.c
@@ -59,50 +59,28 @@
 #include <sys/types.h>
 #include <sys/resource.h>
 
-u8* trace_bits;                        /* SHM with instrumentation bitmap   */
+u8    be_quiet;
 
-s32 forksrv_pid,                       /* PID of the fork server            */
-    child_pid;                         /* PID of the tested program         */
-
-s32 fsrv_ctl_fd,                       /* Fork server control pipe (write)  */
-    fsrv_st_fd;                        /* Fork server status pipe (read)    */
-
-s32 out_fd;                            /* Persistent fd for stdin_file      */
-s32 dev_null_fd = -1;                  /* FD to /dev/null                   */
-
-s32   out_fd = -1, out_dir_fd = -1, dev_urandom_fd = -1;
-FILE* plot_file;
-u8    uses_asan, be_quiet;
-
-u8* trace_bits;                        /* SHM with instrumentation bitmap   */
-
-u8 *out_file,                          /* Trace output file                 */
-    *stdin_file,                       /* stdin file                        */
+u8  *stdin_file,                       /* stdin file                        */
     *in_dir,                           /* input folder                      */
     *doc_path,                         /* Path to docs                      */
-        *at_file = NULL;               /* Substitution string for @@        */
+    *at_file = NULL;                   /* Substitution string for @@        */
 
 static u8* in_data;                    /* Input data                        */
 
-u32 exec_tmout;                        /* Exec timeout (ms)                 */
-
 static u32 total, highest;             /* tuple content information         */
 
 static u32 in_len,                     /* Input data length                 */
     arg_offset, total_execs;           /* Total number of execs             */
 
-u64 mem_limit = MEM_LIMIT;             /* Memory limit (MB)                 */
-
 u8 quiet_mode,                         /* Hide non-essential messages?      */
     edges_only,                        /* Ignore hit counts?                */
     raw_instr_output,                  /* Do not apply AFL filters          */
     cmin_mode,                         /* Generate output in afl-cmin mode? */
     binary_mode,                       /* Write output as a binary map      */
-    use_stdin = 1,                     /* use stdin - unused here           */
     keep_cores;                        /* Allow coredumps?                  */
 
 static volatile u8 stop_soon,          /* Ctrl-C pressed?                   */
-    child_timed_out,                   /* Child timed out?                  */
     child_crashed;                     /* Child crashed?                    */
 
 static u8 qemu_mode;
@@ -168,7 +146,7 @@ static void at_exit_handler(void) {
 
 /* Write results. */
 
-static u32 write_results_to_file(u8* out_file) {
+static u32 write_results_to_file(afl_forkserver_t *fsrv) {
 
   s32 fd;
   u32 i, ret = 0;
@@ -176,30 +154,30 @@ static u32 write_results_to_file(u8* out_file) {
   u8 cco = !!getenv("AFL_CMIN_CRASHES_ONLY"),
      caa = !!getenv("AFL_CMIN_ALLOW_ANY");
 
-  if (!strncmp(out_file, "/dev/", 5)) {
+  if (!strncmp(fsrv->out_file, "/dev/", 5)) {
 
-    fd = open(out_file, O_WRONLY, 0600);
-    if (fd < 0) PFATAL("Unable to open '%s'", out_file);
+    fd = open(fsrv->out_file, O_WRONLY, 0600);
+    if (fd < 0) PFATAL("Unable to open '%s'", fsrv->out_file);
 
-  } else if (!strcmp(out_file, "-")) {
+  } else if (!strcmp(fsrv->out_file, "-")) {
 
     fd = dup(1);
     if (fd < 0) PFATAL("Unable to open stdout");
 
   } else {
 
-    unlink(out_file);                                      /* Ignore errors */
-    fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
-    if (fd < 0) PFATAL("Unable to create '%s'", out_file);
+    unlink(fsrv->out_file);                                      /* Ignore errors */
+    fd = open(fsrv->out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
+    if (fd < 0) PFATAL("Unable to create '%s'", fsrv->out_file);
 
   }
 
   if (binary_mode) {
 
     for (i = 0; i < MAP_SIZE; i++)
-      if (trace_bits[i]) ret++;
+      if (fsrv->trace_bits[i]) ret++;
 
-    ck_write(fd, trace_bits, MAP_SIZE, out_file);
+    ck_write(fd, fsrv->trace_bits, MAP_SIZE, fsrv->out_file);
     close(fd);
 
   } else {
@@ -210,22 +188,22 @@ static u32 write_results_to_file(u8* out_file) {
 
     for (i = 0; i < MAP_SIZE; i++) {
 
-      if (!trace_bits[i]) continue;
+      if (!fsrv->trace_bits[i]) continue;
       ret++;
 
-      total += trace_bits[i];
-      if (highest < trace_bits[i]) highest = trace_bits[i];
+      total += fsrv->trace_bits[i];
+      if (highest < fsrv->trace_bits[i]) highest = fsrv->trace_bits[i];
 
       if (cmin_mode) {
 
-        if (child_timed_out) break;
+        if (fsrv->child_timed_out) break;
         if (!caa && child_crashed != cco) break;
 
-        fprintf(f, "%u%u\n", trace_bits[i], i);
+        fprintf(f, "%u%u\n", fsrv->trace_bits[i], i);
 
       } else
 
-        fprintf(f, "%06u:%u\n", i, trace_bits[i]);
+        fprintf(f, "%06u:%u\n", i, fsrv->trace_bits[i]);
 
     }
 
@@ -239,9 +217,9 @@ static u32 write_results_to_file(u8* out_file) {
 
 /* Write results. */
 
-static u32 write_results(void) {
+static u32 write_results(afl_forkserver_t *fsrv) {
 
-  return write_results_to_file(out_file);
+  return write_results_to_file(fsrv);
 
 }
 
@@ -269,69 +247,69 @@ static s32 write_to_file(u8* path, u8* mem, u32 len) {
    is unlinked and a new one is created. Otherwise, out_fd is rewound and
    truncated. */
 
-static void write_to_testcase(void* mem, u32 len) {
+static void write_to_testcase(afl_forkserver_t *fsrv, void* mem, u32 len) {
 
-  lseek(out_fd, 0, SEEK_SET);
-  ck_write(out_fd, mem, len, out_file);
-  if (ftruncate(out_fd, len)) PFATAL("ftruncate() failed");
-  lseek(out_fd, 0, SEEK_SET);
+  lseek(fsrv->out_fd, 0, SEEK_SET);
+  ck_write(fsrv->out_fd, mem, len, fsrv->out_file);
+  if (ftruncate(fsrv->out_fd, len)) PFATAL("ftruncate() failed");
+  lseek(fsrv->out_fd, 0, SEEK_SET);
 
 }
 
 /* Execute target application. Returns 0 if the changes are a dud, or
    1 if they should be kept. */
 
-static u8 run_target_forkserver(char** argv, u8* mem, u32 len) {
+static u8 run_target_forkserver(afl_forkserver_t *fsrv, char** argv, u8* mem, u32 len) {
 
   static struct itimerval it;
   static u32              prev_timed_out = 0;
   int                     status = 0;
 
-  memset(trace_bits, 0, MAP_SIZE);
+  memset(fsrv->trace_bits, 0, MAP_SIZE);
   MEM_BARRIER();
 
-  write_to_testcase(mem, len);
+  write_to_testcase(fsrv, mem, len);
 
   s32 res;
 
   /* we have the fork server up and running, so simply
      tell it to have at it, and then read back PID. */
 
-  if ((res = write(fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
+  if ((res = write(fsrv->fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
 
     if (stop_soon) return 0;
     RPFATAL(res, "Unable to request new process from fork server (OOM?)");
 
   }
 
-  if ((res = read(fsrv_st_fd, &child_pid, 4)) != 4) {
+  if ((res = read(fsrv->fsrv_st_fd, &fsrv->child_pid, 4)) != 4) {
 
     if (stop_soon) return 0;
     RPFATAL(res, "Unable to request new process from fork server (OOM?)");
 
   }
 
-  if (child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
+  if (fsrv->child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
 
   /* Configure timeout, wait for child, cancel timeout. */
 
-  if (exec_tmout) {
+  if (fsrv->exec_tmout) {
 
-    it.it_value.tv_sec = (exec_tmout / 1000);
-    it.it_value.tv_usec = (exec_tmout % 1000) * 1000;
+    it.it_value.tv_sec = (fsrv->exec_tmout / 1000);
+    it.it_value.tv_usec = (fsrv->exec_tmout % 1000) * 1000;
 
   }
 
   setitimer(ITIMER_REAL, &it, NULL);
 
-  if ((res = read(fsrv_st_fd, &status, 4)) != 4) {
+  if ((res = read(fsrv->fsrv_st_fd, &status, 4)) != 4) {
 
     if (stop_soon) return 0;
     RPFATAL(res, "Unable to communicate with fork server (OOM?)");
 
   }
 
-  child_pid = 0;
+  fsrv->child_pid = 0;
   it.it_value.tv_sec = 0;
   it.it_value.tv_usec = 0;
 
@@ -341,24 +319,24 @@ static u8 run_target_forkserver(char** argv, u8* mem, u32 len) {
 
   /* Clean up bitmap, analyze exit condition, etc. */
 
-  if (*(u32*)trace_bits == EXEC_FAIL_SIG)
+  if (*(u32*)fsrv->trace_bits == EXEC_FAIL_SIG)
     FATAL("Unable to execute '%s'", argv[0]);
 
-  classify_counts(trace_bits,
+  classify_counts(fsrv->trace_bits,
                   binary_mode ? count_class_binary : count_class_human);
   total_execs++;
 
   if (stop_soon) {
 
     SAYF(cRST cLRD "\n+++ afl-showmap folder mode aborted by user +++\n" cRST);
-    close(write_to_file(out_file, in_data, in_len));
+    close(write_to_file(fsrv->out_file, in_data, in_len));
     exit(1);
 
   }
 
   /* Always discard inputs that time out. */
 
-  if (child_timed_out) { return 0; }
+  if (fsrv->child_timed_out) { return 0; }
 
   /* Handle crashing inputs depending on current mode. */
 
@@ -401,7 +379,7 @@ u32 read_file(u8* in_file) {
 
 /* Execute target application. */
 
-static void run_target(char** argv) {
+static void run_target(afl_forkserver_t *fsrv, char** argv) {
 
   static struct itimerval it;
   int                     status = 0;
@@ -410,11 +388,11 @@ static void run_target(char** argv) {
 
   MEM_BARRIER();
 
-  child_pid = fork();
+  fsrv->child_pid = fork();
 
-  if (child_pid < 0) PFATAL("fork() failed");
+  if (fsrv->child_pid < 0) PFATAL("fork() failed");
 
-  if (!child_pid) {
+  if (!fsrv->child_pid) {
 
     struct rlimit r;
 
@@ -424,7 +402,7 @@ static void run_target(char** argv) {
 
       if (fd < 0 || dup2(fd, 1) < 0 || dup2(fd, 2) < 0) {
 
-        *(u32*)trace_bits = EXEC_FAIL_SIG;
+        *(u32*)fsrv->trace_bits = EXEC_FAIL_SIG;
         PFATAL("Descriptor initialization failed");
 
       }
@@ -433,9 +411,9 @@ static void run_target(char** argv) {
 
     }
 
-    if (mem_limit) {
+    if (fsrv->mem_limit) {
 
-      r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
+      r.rlim_max = r.rlim_cur = ((rlim_t)fsrv->mem_limit) << 20;
 
 #ifdef RLIMIT_AS
 
@@ -460,28 +438,28 @@ static void run_target(char** argv) {
 
     setsid();
 
-    execv(target_path, argv);
+    execv(fsrv->target_path, argv);
 
-    *(u32*)trace_bits = EXEC_FAIL_SIG;
+    *(u32*)fsrv->trace_bits = EXEC_FAIL_SIG;
     exit(0);
 
   }
 
   /* Configure timeout, wait for child, cancel timeout. */
 
-  if (exec_tmout) {
+  if (fsrv->exec_tmout) {
 
-    child_timed_out = 0;
-    it.it_value.tv_sec = (exec_tmout / 1000);
-    it.it_value.tv_usec = (exec_tmout % 1000) * 1000;
+    fsrv->child_timed_out = 0;
+    it.it_value.tv_sec = (fsrv->exec_tmout / 1000);
+    it.it_value.tv_usec = (fsrv->exec_tmout % 1000) * 1000;
 
   }
 
   setitimer(ITIMER_REAL, &it, NULL);
 
-  if (waitpid(child_pid, &status, 0) <= 0) FATAL("waitpid() failed");
+  if (waitpid(fsrv->child_pid, &status, 0) <= 0) FATAL("waitpid() failed");
 
-  child_pid = 0;
+  fsrv->child_pid = 0;
   it.it_value.tv_sec = 0;
   it.it_value.tv_usec = 0;
   setitimer(ITIMER_REAL, &it, NULL);
@@ -490,19 +468,19 @@ static void run_target(char** argv) {
 
   /* Clean up bitmap, analyze exit condition, etc. */
 
-  if (*(u32*)trace_bits == EXEC_FAIL_SIG)
+  if (*(u32*)fsrv->trace_bits == EXEC_FAIL_SIG)
     FATAL("Unable to execute '%s'", argv[0]);
 
-  classify_counts(trace_bits,
+  classify_counts(fsrv->trace_bits,
                   binary_mode ? count_class_binary : count_class_human);
 
   if (!quiet_mode) SAYF(cRST "-- Program output ends --\n");
 
-  if (!child_timed_out && !stop_soon && WIFSIGNALED(status)) child_crashed = 1;
+  if (!fsrv->child_timed_out && !stop_soon && WIFSIGNALED(status)) child_crashed = 1;
 
   if (!quiet_mode) {
 
-    if (child_timed_out)
+    if (fsrv->child_timed_out)
       SAYF(cLRD "\n+++ Program timed out +++\n" cRST);
     else if (stop_soon)
       SAYF(cLRD "\n+++ Program aborted by user +++\n" cRST);
@@ -514,13 +492,14 @@ static void run_target(char** argv) {
 
 }
 
+extern afl_forkserver_t *fsrv_glob;
+
 /* Handle Ctrl-C and the like. */
 
 static void handle_stop_sig(int sig) {
 
   stop_soon = 1;
-
-  if (child_pid > 0) kill(child_pid, SIGKILL);
+  afl_fsrv_killall();
 
 }
 
@@ -667,16 +646,16 @@ static void usage(u8* argv0) {
 
 /* Find binary. */
 
-static void find_binary(u8* fname) {
+static void find_binary(afl_forkserver_t *fsrv, u8* fname) {
 
   u8*         env_path = 0;
   struct stat st;
 
   if (strchr(fname, '/') || !(env_path = getenv("PATH"))) {
 
-    target_path = ck_strdup(fname);
+    fsrv->target_path = ck_strdup(fname);
 
-    if (stat(target_path, &st) || !S_ISREG(st.st_mode) ||
+    if (stat(fsrv->target_path, &st) || !S_ISREG(st.st_mode) ||
         !(st.st_mode & 0111) || st.st_size < 4)
       FATAL("Program '%s' not found or not executable", fname);
 
@@ -699,22 +678,22 @@ static void find_binary(u8* fname) {
       env_path = delim;
 
       if (cur_elem[0])
-        target_path = alloc_printf("%s/%s", cur_elem, fname);
+        fsrv->target_path = alloc_printf("%s/%s", cur_elem, fname);
       else
-        target_path = ck_strdup(fname);
+        fsrv->target_path = ck_strdup(fname);
 
       ck_free(cur_elem);
 
-      if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
+      if (!stat(fsrv->target_path, &st) && S_ISREG(st.st_mode) &&
           (st.st_mode & 0111) && st.st_size >= 4)
         break;
 
-      ck_free(target_path);
-      target_path = 0;
+      ck_free(fsrv->target_path);
+      fsrv->target_path = 0;
 
     }
 
-    if (!target_path) FATAL("Program '%s' not found or not executable", fname);
+    if (!fsrv->target_path) FATAL("Program '%s' not found or not executable", fname);
 
   }
 
@@ -724,11 +703,16 @@ static void find_binary(u8* fname) {
 
 int main(int argc, char** argv, char** envp) {
 
+  //TODO: u64 mem_limit = MEM_LIMIT;             /* Memory limit (MB)                 */
+
   s32    opt, i;
   u8     mem_limit_given = 0, timeout_given = 0, unicorn_mode = 0, use_wine = 0;
   u32    tcnt = 0;
   char** use_argv;
 
+  afl_forkserver_t *fsrv = calloc(1, sizeof(afl_forkserver_t));
+  afl_fsrv_init(fsrv);
+
   doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;
 
   if (getenv("AFL_QUIET") != NULL) be_quiet = 1;
@@ -744,8 +728,8 @@ int main(int argc, char** argv, char** envp) {
 
       case 'o':
 
-        if (out_file) FATAL("Multiple -o options not supported");
-        out_file = optarg;
+        if (fsrv->out_file) FATAL("Multiple -o options not supported");
+        fsrv->out_file = optarg;
         break;
 
       case 'm': {
@@ -757,29 +741,29 @@ int main(int argc, char** argv, char** envp) {
 
         if (!strcmp(optarg, "none")) {
 
-          mem_limit = 0;
+          fsrv->mem_limit = 0;
           break;
 
         }
 
-        if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
+        if (sscanf(optarg, "%llu%c", &fsrv->mem_limit, &suffix) < 1 ||
             optarg[0] == '-')
           FATAL("Bad syntax used for -m");
 
         switch (suffix) {
 
-          case 'T': mem_limit *= 1024 * 1024; break;
-          case 'G': mem_limit *= 1024; break;
-          case 'k': mem_limit /= 1024; break;
+          case 'T': fsrv->mem_limit *= 1024 * 1024; break;
+          case 'G': fsrv->mem_limit *= 1024; break;
+          case 'k': fsrv->mem_limit /= 1024; break;
           case 'M': break;
 
           default: FATAL("Unsupported suffix or bad syntax for -m");
 
         }
 
-        if (mem_limit < 5) FATAL("Dangerously low value of -m");
+        if (fsrv->mem_limit < 5) FATAL("Dangerously low value of -m");
 
-        if (sizeof(rlim_t) == 4 && mem_limit > 2000)
+        if (sizeof(rlim_t) == 4 && fsrv->mem_limit > 2000)
           FATAL("Value of -m out of range on 32-bit systems");
 
       }
@@ -788,7 +772,7 @@ int main(int argc, char** argv, char** envp) {
 
       case 'f':  // only in here to avoid a compiler warning for use_stdin
 
-        use_stdin = 0;
+        fsrv->use_stdin = 0;
         FATAL("Option -f is not supported in afl-showmap");
 
         break;
@@ -800,9 +784,9 @@ int main(int argc, char** argv, char** envp) {
 
         if (strcmp(optarg, "none")) {
 
-          exec_tmout = atoi(optarg);
+          fsrv->exec_tmout = atoi(optarg);
 
-          if (exec_tmout < 20 || optarg[0] == '-')
+          if (fsrv->exec_tmout < 20 || optarg[0] == '-')
             FATAL("Dangerously low value of -t");
 
         }
@@ -839,7 +823,7 @@ int main(int argc, char** argv, char** envp) {
       case 'Q':
 
         if (qemu_mode) FATAL("Multiple -Q options not supported");
-        if (!mem_limit_given) mem_limit = MEM_LIMIT_QEMU;
+        if (!mem_limit_given) fsrv->mem_limit = MEM_LIMIT_QEMU;
 
         qemu_mode = 1;
         break;
@@ -847,7 +831,7 @@ int main(int argc, char** argv, char** envp) {
       case 'U':
 
         if (unicorn_mode) FATAL("Multiple -U options not supported");
-        if (!mem_limit_given) mem_limit = MEM_LIMIT_UNICORN;
+        if (!mem_limit_given) fsrv->mem_limit = MEM_LIMIT_UNICORN;
 
         unicorn_mode = 1;
         break;
@@ -858,7 +842,7 @@ int main(int argc, char** argv, char** envp) {
         qemu_mode = 1;
         use_wine = 1;
 
-        if (!mem_limit_given) mem_limit = 0;
+        if (!mem_limit_given) fsrv->mem_limit = 0;
 
         break;
 
@@ -892,32 +876,33 @@ int main(int argc, char** argv, char** envp) {
 
     }
 
-  if (optind == argc || !out_file) usage(argv[0]);
+  if (optind == argc || !fsrv->out_file) usage(argv[0]);
 
   check_environment_vars(envp);
 
-  setup_shm(0);
+  sharedmem_t shm = {0};
+  fsrv->trace_bits = afl_shm_init(&shm, MAP_SIZE, 0);
   setup_signal_handlers();
 
   set_up_environment();
 
-  find_binary(argv[optind]);
+  find_binary(fsrv, argv[optind]);
 
   if (!quiet_mode) {
 
     show_banner();
-    ACTF("Executing '%s'...", target_path);
+    ACTF("Executing '%s'...", fsrv->target_path);
 
   }
 
   if (in_dir) {
 
     if (at_file) PFATAL("Options -A and -i are mutually exclusive");
-    detect_file_args(argv + optind, "");
+    detect_file_args(argv + optind, "", fsrv->use_stdin);
 
   } else {
 
-    detect_file_args(argv + optind, at_file);
+    detect_file_args(argv + optind, at_file, fsrv->use_stdin);
 
   }
 
@@ -927,9 +912,9 @@ int main(int argc, char** argv, char** envp) {
   if (qemu_mode) {
 
     if (use_wine)
-      use_argv = get_wine_argv(argv[0], argv + optind, argc - optind);
+      use_argv = get_wine_argv(argv[0], &fsrv->target_path, argc - optind, argv + optind);
     else
-      use_argv = get_qemu_argv(argv[0], argv + optind, argc - optind);
+      use_argv = get_qemu_argv(argv[0], &fsrv->target_path, argc - optind, argv + optind);
 
   } else
 
@@ -945,14 +930,14 @@ int main(int argc, char** argv, char** envp) {
     struct stat statbuf;
 #endif
 
-    dev_null_fd = open("/dev/null", O_RDWR);
-    if (dev_null_fd < 0) PFATAL("Unable to open /dev/null");
+    fsrv->dev_null_fd = open("/dev/null", O_RDWR);
+    if (fsrv->dev_null_fd < 0) PFATAL("Unable to open /dev/null");
 
     if (!(dir_in = opendir(in_dir))) PFATAL("cannot open directory %s", in_dir);
 
-    if (!(dir_out = opendir(out_file)))
-      if (mkdir(out_file, 0700))
-        PFATAL("cannot create output directory %s", out_file);
+    if (!(dir_out = opendir(fsrv->out_file)))
+      if (mkdir(fsrv->out_file, 0700))
+        PFATAL("cannot create output directory %s", fsrv->out_file);
 
     u8* use_dir = ".";
 
@@ -966,15 +951,15 @@ int main(int argc, char** argv, char** envp) {
     stdin_file = alloc_printf("%s/.afl-showmap-temp-%u", use_dir, getpid());
     unlink(stdin_file);
     atexit(at_exit_handler);
-    out_fd = open(stdin_file, O_RDWR | O_CREAT | O_EXCL, 0600);
-    if (out_fd < 0) PFATAL("Unable to create '%s'", out_file);
+    fsrv->out_fd = open(stdin_file, O_RDWR | O_CREAT | O_EXCL, 0600);
+    if (fsrv->out_fd < 0) PFATAL("Unable to create '%s'", stdin_file);
 
     if (arg_offset) argv[arg_offset] = stdin_file;
 
     if (get_afl_env("AFL_DEBUG")) {
 
       int i = optind;
-      SAYF(cMGN "[D]" cRST " %s:", target_path);
+      SAYF(cMGN "[D]" cRST " %s:", fsrv->target_path);
       while (argv[i] != NULL)
         SAYF(" \"%s\"", argv[i++]);
       SAYF("\n");
@@ -983,7 +968,7 @@ int main(int argc, char** argv, char** envp) {
 
     }
 
-    init_forkserver(use_argv);
+    afl_fsrv_start(fsrv, use_argv);
 
     while (done == 0 && (dir_ent = readdir(dir_in))) {
 
@@ -1000,13 +985,13 @@ int main(int argc, char** argv, char** envp) {
       if (-1 == stat(infile, &statbuf) || !S_ISREG(statbuf.st_mode)) continue;
 #endif
 
-      snprintf(outfile, sizeof(outfile), "%s/%s", out_file, dir_ent->d_name);
+      snprintf(outfile, sizeof(outfile), "%s/%s", fsrv->out_file, dir_ent->d_name);
 
       if (read_file(infile)) {
 
-        run_target_forkserver(use_argv, in_data, in_len);
+        run_target_forkserver(fsrv, use_argv, in_data, in_len);
         ck_free(in_data);
-        tcnt = write_results_to_file(outfile);
+        tcnt = write_results_to_file(fsrv);
 
       }
 
@@ -1016,8 +1001,8 @@ int main(int argc, char** argv, char** envp) {
 
   } else {
 
-    run_target(use_argv);
-    tcnt = write_results();
+    run_target(fsrv, use_argv);
+    tcnt = write_results(fsrv);
 
   }
 
@@ -1025,7 +1010,7 @@ int main(int argc, char** argv, char** envp) {
 
     if (!tcnt) FATAL("No instrumentation detected" cRST);
     OKF("Captured %u tuples (highest value %u, total values %u) in '%s'." cRST,
-        tcnt, highest, total, out_file);
+        tcnt, highest, total, fsrv->out_file);
 
   }
 
@@ -1036,6 +1021,12 @@ int main(int argc, char** argv, char** envp) {
 
   }
 
+  afl_shm_deinit(&shm);
+
+  u8 child_timed_out = fsrv->child_timed_out;
+  afl_fsrv_deinit(fsrv);
+  free(fsrv);
+
   exit(child_crashed * 2 + child_timed_out);
 
 }
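afl-showmap follows the same ownership pattern for the fork server: one heap-allocated afl_forkserver_t is threaded through every helper instead of a pile of globals. A condensed lifecycle sketch as wired up above (out_path stands in for the -o argument; afl_fsrv_init is assumed to fill in defaults such as mem_limit and exec_tmout, which is why the option parser only sets overrides):

  afl_forkserver_t *fsrv = calloc(1, sizeof(afl_forkserver_t));
  afl_fsrv_init(fsrv);                       /* assumed: sets default limits        */

  sharedmem_t shm = {0};
  fsrv->trace_bits = afl_shm_init(&shm, MAP_SIZE, 0);
  fsrv->out_file   = out_path;               /* placeholder for the -o argument     */

  afl_fsrv_start(fsrv, use_argv);            /* replaces the old init_forkserver()  */
  /* ... run_target_forkserver(fsrv, use_argv, data, len) for each input ... */

  afl_shm_deinit(&shm);
  afl_fsrv_deinit(fsrv);
  free(fsrv);
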
diff --git a/src/afl-tmin.c b/src/afl-tmin.c
index 31296cb5..6ff77cfd 100644
--- a/src/afl-tmin.c
+++ b/src/afl-tmin.c
@@ -58,22 +58,12 @@
 #include <sys/types.h>
 #include <sys/resource.h>
 
-s32 forksrv_pid,                        /* PID of the fork server           */
-    child_pid;                          /* PID of the tested program        */
-
-s32 fsrv_ctl_fd,                        /* Fork server control pipe (write) */
-    fsrv_st_fd;                         /* Fork server status pipe (read)   */
-
-u8*        trace_bits;                 /* SHM with instrumentation bitmap   */
 static u8* mask_bitmap;                /* Mask for trace bits (-B)          */
 
 u8 *in_file,                           /* Minimizer input test case         */
     *output_file,                      /* Minimizer output file             */
-    *out_file,                         /* Targeted program input file       */
     *doc_path;                         /* Path to docs                      */
 
-s32 out_fd;                           /* Persistent fd for out_file         */
-
 static u8* in_data;                    /* Input data for trimming           */
 
 static u32 in_len,                     /* Input data length                 */
@@ -82,18 +72,13 @@ static u32 in_len,                     /* Input data length                 */
     missed_hangs,                      /* Misses due to hangs               */
     missed_crashes,                    /* Misses due to crashes             */
     missed_paths;                      /* Misses due to exec path diffs     */
-u32 exec_tmout = EXEC_TIMEOUT;         /* Exec timeout (ms)                 */
-
-u64 mem_limit = MEM_LIMIT;             /* Memory limit (MB)                 */
-
-s32 dev_null_fd = -1;                  /* FD to /dev/null                   */
 
 u8 crash_mode,                         /* Crash-centric mode?               */
     hang_mode,                         /* Minimize as long as it hangs      */
     exit_crash,                        /* Treat non-zero exit as crash?     */
     edges_only,                        /* Ignore hit counts?                */
     exact_mode,                        /* Require path match for crashes?   */
-    be_quiet, use_stdin = 1;           /* Use stdin for program input?      */
+    be_quiet; 
 
 static volatile u8 stop_soon;          /* Ctrl-C pressed?                   */
 
@@ -174,9 +159,9 @@ static void apply_mask(u32* mem, u32* mask) {
 
 /* See if any bytes are set in the bitmap. */
 
-static inline u8 anything_set(void) {
+static inline u8 anything_set(afl_forkserver_t *fsrv) {
 
-  u32* ptr = (u32*)trace_bits;
+  u32* ptr = (u32*)fsrv->trace_bits;
   u32  i = (MAP_SIZE >> 2);
 
   while (i--)
@@ -186,11 +171,9 @@ static inline u8 anything_set(void) {
 
 }
 
-/* Get rid of temp files (atexit handler). */
-
 static void at_exit_handler(void) {
 
-  if (out_file) unlink(out_file);                          /* Ignore errors */
+  afl_fsrv_killall();
 
 }
 
@@ -243,25 +226,25 @@ static s32 write_to_file(u8* path, u8* mem, u32 len) {
    is unlinked and a new one is created. Otherwise, out_fd is rewound and
    truncated. */
 
-static void write_to_testcase(void* mem, u32 len) {
+static void write_to_testcase(afl_forkserver_t *fsrv, void* mem, u32 len) {
 
-  s32 fd = out_fd;
+  s32 fd = fsrv->out_fd;
 
-  if (!use_stdin) {
+  if (!fsrv->use_stdin) {
 
-    unlink(out_file);                                     /* Ignore errors. */
+    unlink(fsrv->out_file);                                     /* Ignore errors. */
 
-    fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
+    fd = open(fsrv->out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
 
-    if (fd < 0) PFATAL("Unable to create '%s'", out_file);
+    if (fd < 0) PFATAL("Unable to create '%s'", fsrv->out_file);
 
   } else
 
     lseek(fd, 0, SEEK_SET);
 
-  ck_write(fd, mem, len, out_file);
+  ck_write(fd, mem, len, fsrv->out_file);
 
-  if (use_stdin) {
+  if (fsrv->use_stdin) {
 
     if (ftruncate(fd, len)) PFATAL("ftruncate() failed");
     lseek(fd, 0, SEEK_SET);
@@ -355,7 +338,7 @@ static void init_forkserver(char **argv) {
     close(st_pipe[0]);
     close(st_pipe[1]);
 
-    execv(target_path, argv);
+    execv(fsrv->target_path, argv);
 
     *(u32*)trace_bits = EXEC_FAIL_SIG;
     exit(0);
@@ -420,7 +403,7 @@ static void init_forkserver(char **argv) {
 /* Execute target application. Returns 0 if the changes are a dud, or
    1 if they should be kept. */
 
-static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
+static u8 run_target(afl_forkserver_t *fsrv, char** argv, u8* mem, u32 len, u8 first_run) {
 
   static struct itimerval it;
   static u32              prev_timed_out = 0;
@@ -428,53 +411,53 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
 
   u32 cksum;
 
-  child_timed_out = 0;
+  fsrv->child_timed_out = 0;
 
-  memset(trace_bits, 0, MAP_SIZE);
+  memset(fsrv->trace_bits, 0, MAP_SIZE);
   MEM_BARRIER();
 
-  write_to_testcase(mem, len);
+  write_to_testcase(fsrv, mem, len);
 
   s32 res;
 
   /* we have the fork server up and running, so simply
      tell it to have at it, and then read back PID. */
 
-  if ((res = write(fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
+  if ((res = write(fsrv->fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
 
     if (stop_soon) return 0;
     RPFATAL(res, "Unable to request new process from fork server (OOM?)");
 
   }
 
-  if ((res = read(fsrv_st_fd, &child_pid, 4)) != 4) {
+  if ((res = read(fsrv->fsrv_st_fd, &fsrv->child_pid, 4)) != 4) {
 
     if (stop_soon) return 0;
     RPFATAL(res, "Unable to request new process from fork server (OOM?)");
 
   }
 
-  if (child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
+  if (fsrv->child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
 
   /* Configure timeout, wait for child, cancel timeout. */
 
-  if (exec_tmout) {
+  if (fsrv->exec_tmout) {
 
-    it.it_value.tv_sec = (exec_tmout / 1000);
-    it.it_value.tv_usec = (exec_tmout % 1000) * 1000;
+    it.it_value.tv_sec = (fsrv->exec_tmout / 1000);
+    it.it_value.tv_usec = (fsrv->exec_tmout % 1000) * 1000;
 
   }
 
   setitimer(ITIMER_REAL, &it, NULL);
 
-  if ((res = read(fsrv_st_fd, &status, 4)) != 4) {
+  if ((res = read(fsrv->fsrv_st_fd, &status, 4)) != 4) {
 
     if (stop_soon) return 0;
     RPFATAL(res, "Unable to communicate with fork server (OOM?)");
 
   }
 
-  child_pid = 0;
+  fsrv->child_pid = 0;
   it.it_value.tv_sec = 0;
   it.it_value.tv_usec = 0;
 
@@ -484,13 +467,13 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
 
   /* Clean up bitmap, analyze exit condition, etc. */
 
-  if (*(u32*)trace_bits == EXEC_FAIL_SIG)
+  if (*(u32*)fsrv->trace_bits == EXEC_FAIL_SIG)
     FATAL("Unable to execute '%s'", argv[0]);
 
   if (!hang_mode) {
 
-    classify_counts(trace_bits);
-    apply_mask((u32*)trace_bits, (u32*)mask_bitmap);
+    classify_counts(fsrv->trace_bits);
+    apply_mask((u32*)fsrv->trace_bits, (u32*)mask_bitmap);
 
   }
 
@@ -508,7 +491,7 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
 
   if (hang_mode) {
 
-    if (child_timed_out) return 1;
+    if (fsrv->child_timed_out) return 1;
 
     if (WIFSIGNALED(status) ||
         (WIFEXITED(status) && WEXITSTATUS(status) == MSAN_ERROR) ||
@@ -526,7 +509,7 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
 
   }
 
-  if (child_timed_out) {
+  if (fsrv->child_timed_out) {
 
     missed_hangs++;
     return 0;
@@ -565,7 +548,7 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
 
   }
 
-  cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+  cksum = hash32(fsrv->trace_bits, MAP_SIZE, HASH_CONST);
 
   if (first_run) orig_cksum = cksum;
 
@@ -589,7 +572,7 @@ static u32 next_p2(u32 val) {
 
 /* Actually minimize! */
 
-static void minimize(char** argv) {
+static void minimize(afl_forkserver_t *fsrv, char** argv) {
 
   static u32 alpha_map[256];
 
@@ -624,7 +607,7 @@ static void minimize(char** argv) {
       memset(tmp_buf + set_pos, '0', use_len);
 
       u8 res;
-      res = run_target(argv, tmp_buf, in_len, 0);
+      res = run_target(fsrv, argv, tmp_buf, in_len, 0);
 
       if (res) {
 
@@ -697,7 +680,7 @@ next_del_blksize:
     /* Tail */
     memcpy(tmp_buf + del_pos, in_data + del_pos + del_len, tail_len);
 
-    res = run_target(argv, tmp_buf, del_pos + tail_len, 0);
+    res = run_target(fsrv, argv, tmp_buf, del_pos + tail_len, 0);
 
     if (res) {
 
@@ -760,7 +743,7 @@ next_del_blksize:
     for (r = 0; r < in_len; r++)
       if (tmp_buf[r] == i) tmp_buf[r] = '0';
 
-    res = run_target(argv, tmp_buf, in_len, 0);
+    res = run_target(fsrv, argv, tmp_buf, in_len, 0);
 
     if (res) {
 
@@ -796,7 +779,7 @@ next_del_blksize:
     if (orig == '0') continue;
     tmp_buf[i] = '0';
 
-    res = run_target(argv, tmp_buf, in_len, 0);
+    res = run_target(fsrv, argv, tmp_buf, in_len, 0);
 
     if (res) {
 
@@ -851,21 +834,20 @@ finalize_all:
 static void handle_stop_sig(int sig) {
 
   stop_soon = 1;
-
-  if (child_pid > 0) kill(child_pid, SIGKILL);
+  afl_fsrv_killall();
 
 }
 
 /* Do basic preparations - persistent fds, filenames, etc. */
 
-static void set_up_environment(void) {
+static void set_up_environment(afl_forkserver_t *fsrv) {
 
   u8* x;
 
-  dev_null_fd = open("/dev/null", O_RDWR);
-  if (dev_null_fd < 0) PFATAL("Unable to open /dev/null");
+  fsrv->dev_null_fd = open("/dev/null", O_RDWR);
+  if (fsrv->dev_null_fd < 0) PFATAL("Unable to open /dev/null");
 
-  if (!out_file) {
+  if (!fsrv->out_file) {
 
     u8* use_dir = ".";
 
@@ -876,15 +858,15 @@ static void set_up_environment(void) {
 
     }
 
-    out_file = alloc_printf("%s/.afl-tmin-temp-%u", use_dir, getpid());
+    fsrv->out_file = alloc_printf("%s/.afl-tmin-temp-%u", use_dir, getpid());
 
   }
 
-  unlink(out_file);
+  unlink(fsrv->out_file);
 
-  out_fd = open(out_file, O_RDWR | O_CREAT | O_EXCL, 0600);
+  fsrv->out_fd = open(fsrv->out_file, O_RDWR | O_CREAT | O_EXCL, 0600);
 
-  if (out_fd < 0) PFATAL("Unable to create '%s'", out_file);
+  if (fsrv->out_fd < 0) PFATAL("Unable to create '%s'", fsrv->out_file);
 
   /* Set sane defaults... */
 
@@ -1041,16 +1023,16 @@ static void usage(u8* argv0) {
 
 /* Find binary. */
 
-static void find_binary(u8* fname) {
+static void find_binary(afl_forkserver_t *fsrv, u8* fname) {
 
   u8*         env_path = 0;
   struct stat st;
 
   if (strchr(fname, '/') || !(env_path = getenv("PATH"))) {
 
-    target_path = ck_strdup(fname);
+    fsrv->target_path = ck_strdup(fname);
 
-    if (stat(target_path, &st) || !S_ISREG(st.st_mode) ||
+    if (stat(fsrv->target_path, &st) || !S_ISREG(st.st_mode) ||
         !(st.st_mode & 0111) || st.st_size < 4)
       FATAL("Program '%s' not found or not executable", fname);
 
@@ -1073,22 +1055,22 @@ static void find_binary(u8* fname) {
       env_path = delim;
 
       if (cur_elem[0])
-        target_path = alloc_printf("%s/%s", cur_elem, fname);
+        fsrv->target_path = alloc_printf("%s/%s", cur_elem, fname);
       else
-        target_path = ck_strdup(fname);
+        fsrv->target_path = ck_strdup(fname);
 
       ck_free(cur_elem);
 
-      if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
+      if (!stat(fsrv->target_path, &st) && S_ISREG(st.st_mode) &&
           (st.st_mode & 0111) && st.st_size >= 4)
         break;
 
-      ck_free(target_path);
-      target_path = 0;
+      ck_free(fsrv->target_path);
+      fsrv->target_path = NULL;
 
     }
 
-    if (!target_path) FATAL("Program '%s' not found or not executable", fname);
+    if (!fsrv->target_path) FATAL("Program '%s' not found or not executable", fname);
 
   }
 
@@ -1116,6 +1098,9 @@ int main(int argc, char** argv, char** envp) {
   u8     mem_limit_given = 0, timeout_given = 0, unicorn_mode = 0, use_wine = 0;
   char** use_argv;
 
+  afl_forkserver_t *fsrv = calloc(1, sizeof(afl_forkserver_t));
+  afl_fsrv_init(fsrv);
+
   doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;
 
   SAYF(cCYA "afl-tmin" VERSION cRST " by Michal Zalewski\n");
@@ -1138,9 +1123,9 @@ int main(int argc, char** argv, char** envp) {
 
       case 'f':
 
-        if (out_file) FATAL("Multiple -f options not supported");
-        use_stdin = 0;
-        out_file = optarg;
+        if (fsrv->out_file) FATAL("Multiple -f options not supported");
+        fsrv->use_stdin = 0;
+        fsrv->out_file = optarg;
         break;
 
       case 'e':
@@ -1166,29 +1151,29 @@ int main(int argc, char** argv, char** envp) {
 
         if (!strcmp(optarg, "none")) {
 
-          mem_limit = 0;
+          fsrv->mem_limit = 0;
           break;
 
         }
 
-        if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
+        if (sscanf(optarg, "%llu%c", &fsrv->mem_limit, &suffix) < 1 ||
             optarg[0] == '-')
           FATAL("Bad syntax used for -m");
 
         switch (suffix) {
 
-          case 'T': mem_limit *= 1024 * 1024; break;
-          case 'G': mem_limit *= 1024; break;
-          case 'k': mem_limit /= 1024; break;
+          case 'T': fsrv->mem_limit *= 1024 * 1024; break;
+          case 'G': fsrv->mem_limit *= 1024; break;
+          case 'k': fsrv->mem_limit /= 1024; break;
           case 'M': break;
 
           default: FATAL("Unsupported suffix or bad syntax for -m");
 
         }
 
-        if (mem_limit < 5) FATAL("Dangerously low value of -m");
+        if (fsrv->mem_limit < 5) FATAL("Dangerously low value of -m");
 
-        if (sizeof(rlim_t) == 4 && mem_limit > 2000)
+        if (sizeof(rlim_t) == 4 && fsrv->mem_limit > 2000)
           FATAL("Value of -m out of range on 32-bit systems");
 
       }
@@ -1200,9 +1185,9 @@ int main(int argc, char** argv, char** envp) {
         if (timeout_given) FATAL("Multiple -t options not supported");
         timeout_given = 1;
 
-        exec_tmout = atoi(optarg);
+        fsrv->exec_tmout = atoi(optarg);
 
-        if (exec_tmout < 10 || optarg[0] == '-')
+        if (fsrv->exec_tmout < 10 || optarg[0] == '-')
           FATAL("Dangerously low value of -t");
 
         break;
@@ -1210,7 +1195,7 @@ int main(int argc, char** argv, char** envp) {
       case 'Q':
 
         if (qemu_mode) FATAL("Multiple -Q options not supported");
-        if (!mem_limit_given) mem_limit = MEM_LIMIT_QEMU;
+        if (!mem_limit_given) fsrv->mem_limit = MEM_LIMIT_QEMU;
 
         qemu_mode = 1;
         break;
@@ -1218,7 +1203,7 @@ int main(int argc, char** argv, char** envp) {
       case 'U':
 
         if (unicorn_mode) FATAL("Multiple -Q options not supported");
-        if (!mem_limit_given) mem_limit = MEM_LIMIT_UNICORN;
+        if (!mem_limit_given) fsrv->mem_limit = MEM_LIMIT_UNICORN;
 
         unicorn_mode = 1;
         break;
@@ -1229,7 +1214,7 @@ int main(int argc, char** argv, char** envp) {
         qemu_mode = 1;
         use_wine = 1;
 
-        if (!mem_limit_given) mem_limit = 0;
+        if (!mem_limit_given) fsrv->mem_limit = 0;
 
         break;
 
@@ -1275,21 +1260,24 @@ int main(int argc, char** argv, char** envp) {
   if (optind == argc || !in_file || !output_file) usage(argv[0]);
 
   check_environment_vars(envp);
-  setup_shm(0);
+
+  sharedmem_t shm = {0};
+  fsrv->trace_bits = afl_shm_init(&shm, MAP_SIZE, 0);
+
   atexit(at_exit_handler);
   setup_signal_handlers();
 
-  set_up_environment();
+  set_up_environment(fsrv);
 
-  find_binary(argv[optind]);
-  detect_file_args(argv + optind, out_file);
+  find_binary(fsrv, argv[optind]);
+  detect_file_args(argv + optind, fsrv->out_file, fsrv->use_stdin);
 
   if (qemu_mode) {
 
     if (use_wine)
-      use_argv = get_wine_argv(argv[0], argv + optind, argc - optind);
+      use_argv = get_wine_argv(argv[0], &fsrv->target_path, argc - optind, argv + optind);
     else
-      use_argv = get_qemu_argv(argv[0], argv + optind, argc - optind);
+      use_argv = get_qemu_argv(argv[0], &fsrv->target_path, argc - optind, argv + optind);
 
   } else
 
@@ -1308,20 +1296,20 @@ int main(int argc, char** argv, char** envp) {
 
   read_initial_file();
 
-  init_forkserver(use_argv);
+  afl_fsrv_start(fsrv, use_argv);
 
   ACTF("Performing dry run (mem limit = %llu MB, timeout = %u ms%s)...",
-       mem_limit, exec_tmout, edges_only ? ", edges only" : "");
+       fsrv->mem_limit, fsrv->exec_tmout, edges_only ? ", edges only" : "");
 
-  run_target(use_argv, in_data, in_len, 1);
+  run_target(fsrv, use_argv, in_data, in_len, 1);
 
-  if (hang_mode && !child_timed_out)
+  if (hang_mode && !fsrv->child_timed_out)
     FATAL(
         "Target binary did not time out but hang minimization mode "
         "(-H) was set (-t %u).",
-        exec_tmout);
+        fsrv->exec_tmout);
 
-  if (child_timed_out && !hang_mode)
+  if (fsrv->child_timed_out && !hang_mode)
     FATAL(
         "Target binary times out (adjusting -t may help). Use -H to minimize a "
         "hang.");
@@ -1335,7 +1323,7 @@ int main(int argc, char** argv, char** envp) {
     OKF("Program terminates normally, minimizing in " cCYA "instrumented" cRST
         " mode.");
 
-    if (!anything_set()) FATAL("No instrumentation detected.");
+    if (!anything_set(fsrv)) FATAL("No instrumentation detected.");
 
   } else {
 
@@ -1345,17 +1333,22 @@ int main(int argc, char** argv, char** envp) {
 
   }
 
-  minimize(use_argv);
+  minimize(fsrv, use_argv);
 
   ACTF("Writing output to '%s'...", output_file);
 
-  unlink(out_file);
-  out_file = NULL;
+  unlink(fsrv->out_file);
+  fsrv->out_file = NULL;
 
   close(write_to_file(output_file, in_data, in_len));
 
   OKF("We're done here. Have a nice day!\n");
 
+
+  afl_shm_deinit(&shm);
+  afl_fsrv_deinit(fsrv);
+  free(fsrv);
+
   exit(0);
 
 }
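One detail shared by both standalone tools: neither tracks child PIDs for cleanup anymore. The SIGINT handler and (in afl-tmin) the atexit hook both call into the forkserver module, which is assumed to keep a list of all live fork servers and kill their children in one go. The pattern, as it appears in afl-tmin above:

  static void handle_stop_sig(int sig) {

    stop_soon = 1;
    afl_fsrv_killall();   /* kill the children of every registered fork server */

  }

  static void at_exit_handler(void) {

    afl_fsrv_killall();   /* same cleanup on normal exit */

  }
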
diff --git a/unicorn_mode/samples/c/harness.c b/unicorn_mode/samples/c/harness.c
index eb226f9a..18c59c3f 100644
--- a/unicorn_mode/samples/c/harness.c
+++ b/unicorn_mode/samples/c/harness.c
@@ -209,10 +209,10 @@ int main(int argc, char **argv, char **envp) {
     // reserve some space for our input data
     mem_map_checked(uc, INPUT_LOCATION, INPUT_SIZE_MAX, UC_PROT_READ);
 
-    // build a "dummy" argv with lenth 2 at 0x10000: 
+    // build a "dummy" argv with length 2 at 0x10000: 
     // 0x10000 argv[0]  NULL
     // 0x10008 argv[1]  (char *)0x10016 --. points to the next offset.
-    // 0x10016 argv[1][0], ...          <-^ contains the acutal input data. (INPUT_LOCATION + INPUT_OFFSET)
+    // 0x10016 argv[1][0], ...          <-^ contains the actual input data. (INPUT_LOCATION + INPUT_OFFSET)
 
     uc_mem_write(uc, 0x10008, "\x16\x00\x01", 3); // little endian of 0x10016, see above