author     van Hauser <vh@thc.org>          2020-06-17 15:05:14 +0200
committer  GitHub <noreply@github.com>      2020-06-17 15:05:14 +0200
commit     0dc9967984df3f9c250d4c27b6de1cbd4cac3eb3 (patch)
tree       ca6096f1d22ba87c262bdeaf57455520ef3143e2 /src
parent     12bdefe00e38cdc3dd8cb028eeac325ab2e94e16 (diff)
parent     889e54eab858b1928f74a8c179b32275b62f2286 (diff)
download   afl++-0dc9967984df3f9c250d4c27b6de1cbd4cac3eb3.tar.gz
Merge pull request #403 from AFLplusplus/dev
push to master
Diffstat (limited to 'src')
-rw-r--r--  src/afl-analyze.c                           11
-rw-r--r--  src/afl-forkserver.c                        84
-rw-r--r--  src/afl-fuzz-bitmap.c                       30
-rw-r--r--  src/afl-fuzz-init.c                         72
-rw-r--r--  src/afl-fuzz-mutators.c                      4
-rw-r--r--  src/afl-fuzz-one.c                         174
-rw-r--r--  src/afl-fuzz-queue.c                        26
-rw-r--r--  src/afl-fuzz-redqueen.c                     14
-rw-r--r--  src/afl-fuzz-run.c                          38
-rw-r--r--  src/afl-fuzz-state.c                         2
-rw-r--r--  src/afl-fuzz-stats.c                        19
-rw-r--r--  src/afl-fuzz.c                              32
-rw-r--r--  src/afl-gcc.c                                2
-rw-r--r--  src/afl-gotcpu.c                            16
-rw-r--r--  src/afl-performance.c                      144
-rw-r--r--  src/afl-sharedmem.c                          2
-rw-r--r--  src/afl-showmap.c                            4
-rw-r--r--  src/afl-tmin.c                               9
-rw-r--r--  src/third_party/libradamsa/libradamsa.c      9
19 files changed, 563 insertions, 129 deletions
diff --git a/src/afl-analyze.c b/src/afl-analyze.c
index 900fbeb1..f9ba8860 100644
--- a/src/afl-analyze.c
+++ b/src/afl-analyze.c
@@ -51,7 +51,9 @@
 
 #include <sys/wait.h>
 #include <sys/time.h>
-#include <sys/shm.h>
+#ifndef USEMMAP
+  #include <sys/shm.h>
+#endif
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/resource.h>
@@ -66,11 +68,12 @@ static u8 *in_file,                    /* Analyzer input test case          */
 static u8 *in_data;                    /* Input data for analysis           */
 
 static u32 in_len,                     /* Input data length                 */
-    orig_cksum,                        /* Original checksum                 */
     total_execs,                       /* Total number of execs             */
     exec_hangs,                        /* Total number of hangs             */
     exec_tmout = EXEC_TIMEOUT;         /* Exec timeout (ms)                 */
 
+static u64 orig_cksum;                 /* Original checksum                 */
+
 static u64 mem_limit = MEM_LIMIT;      /* Memory limit (MB)                 */
 
 static s32 dev_null_fd = -1;           /* FD to /dev/null                   */
@@ -222,7 +225,7 @@ static u32 analyze_run_target(char **argv, u8 *mem, u32 len, u8 first_run) {
   int                     status = 0;
 
   s32 prog_in_fd;
-  u32 cksum;
+  u64 cksum;
 
   memset(trace_bits, 0, map_size);
   MEM_BARRIER();
@@ -321,7 +324,7 @@ static u32 analyze_run_target(char **argv, u8 *mem, u32 len, u8 first_run) {
 
   }
 
-  cksum = hash32(trace_bits, map_size, HASH_CONST);
+  cksum = hash64(trace_bits, map_size, HASH_CONST);
 
   /* We don't actually care if the target is crashing or not,
      except that when it does, the checksum should be different. */
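
The afl-analyze changes above are part of a tree-wide switch from 32-bit to 64-bit coverage checksums (hash32/hash64 are thin wrappers over XXH64, added in afl-performance.c further down). A minimal sketch of the idea, with illustrative names (map_cksum, same_path and HASH_SEED are not project identifiers):

    #include <stdint.h>
    #include "xxh3.h"              /* bundled xxHash header, as in afl-performance.c */

    #define HASH_SEED 0xa5b35705   /* illustrative fixed seed */

    /* 64-bit checksum over the coverage map, like the new hash64(). */
    static uint64_t map_cksum(const uint8_t *trace_bits, uint32_t map_size) {

      return XXH64(trace_bits, map_size, HASH_SEED);

    }

    /* Two executions followed the same path iff their checksums match;
       widening to 64 bits makes accidental collisions far less likely. */
    static int same_path(const uint8_t *a, const uint8_t *b, uint32_t map_size) {

      return map_cksum(a, map_size) == map_cksum(b, map_size);

    }
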
diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c
index a549e471..b2734335 100644
--- a/src/afl-forkserver.c
+++ b/src/afl-forkserver.c
@@ -32,6 +32,7 @@
 #include "common.h"
 #include "list.h"
 #include "forkserver.h"
+#include "hash.h"
 
 #include <stdio.h>
 #include <unistd.h>
@@ -70,9 +71,8 @@ void afl_fsrv_init(afl_forkserver_t *fsrv) {
   fsrv->out_fd = -1;
   fsrv->out_dir_fd = -1;
   fsrv->dev_null_fd = -1;
-#ifndef HAVE_ARC4RANDOM
   fsrv->dev_urandom_fd = -1;
-#endif
+
   /* Settings */
   fsrv->use_stdin = 1;
   fsrv->no_unlink = 0;
@@ -103,9 +103,7 @@ void afl_fsrv_init_dup(afl_forkserver_t *fsrv_to, afl_forkserver_t *from) {
   fsrv_to->map_size = from->map_size;
   fsrv_to->support_shmem_fuzz = from->support_shmem_fuzz;
 
-#ifndef HAVE_ARC4RANDOM
   fsrv_to->dev_urandom_fd = from->dev_urandom_fd;
-#endif
 
   // These are forkserver specific.
   fsrv_to->out_dir_fd = -1;
@@ -131,7 +129,8 @@ static u32 read_s32_timed(s32 fd, s32 *buf, u32 timeout_ms,
   FD_ZERO(&readfds);
   FD_SET(fd, &readfds);
   struct timeval timeout;
-  size_t         len = 4;
+  int            sret;
+  ssize_t        len_read;
 
   timeout.tv_sec = (timeout_ms / 1000);
   timeout.tv_usec = (timeout_ms % 1000) * 1000;
@@ -140,33 +139,52 @@ static u32 read_s32_timed(s32 fd, s32 *buf, u32 timeout_ms,
 #endif
 
   /* set exceptfds as well to return when a child exited/closed the pipe. */
-  int sret = select(fd + 1, &readfds, NULL, NULL, &timeout);
+restart_select:
+  sret = select(fd + 1, &readfds, NULL, NULL, &timeout);
+
+  if (likely(sret > 0)) {
+
+  restart_read:
+    len_read = read(fd, (u8 *)buf, 4);
+
+    if (likely(len_read == 4)) {  // for speed we put this first
+
+#if defined(__linux__)
+      u32 exec_ms = MIN(
+          timeout_ms,
+          ((u64)timeout_ms - (timeout.tv_sec * 1000 + timeout.tv_usec / 1000)));
+#else
+      u32 exec_ms = MIN(timeout_ms, get_cur_time_us() - read_start);
+#endif
+
+      // ensure to report 1 ms has passed (0 is an error)
+      return exec_ms > 0 ? exec_ms : 1;
+
+    } else if (unlikely(len_read == -1 && errno == EINTR)) {
+
+      goto restart_read;
+
+    } else if (unlikely(len_read < 4)) {
 
-  if (!sret) {
+      return 0;
+
+    }
+
+  } else if (unlikely(!sret)) {
 
     *buf = -1;
     return timeout_ms + 1;
 
-  } else if (sret < 0) {
+  } else if (unlikely(sret < 0)) {
+
+    if (likely(errno == EINTR)) goto restart_select;
 
     *buf = -1;
     return 0;
 
   }
 
-  ssize_t len_read = read(fd, ((u8 *)buf), len);
-  if (len_read < len) { return 0; }
-
-#if defined(__linux__)
-  u32 exec_ms =
-      MIN(timeout_ms,
-          ((u64)timeout_ms - (timeout.tv_sec * 1000 + timeout.tv_usec / 1000)));
-#else
-  u32 exec_ms = MIN(timeout_ms, get_cur_time_us() - read_start);
-#endif
-
-  // ensure to report 1 ms has passed (0 is an error)
-  return exec_ms > 0 ? exec_ms : 1;
+  return 0;  // not reached
 
 }
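
The rewritten read_s32_timed() above restarts both select() and read() when they are interrupted (EINTR) and, on Linux, derives the child's runtime from the remaining time that select() writes back into the timeout. A standalone sketch of the same pattern, with the non-Linux timing branch and the MIN clamp omitted; read_u32_timed is an illustrative name, not the project's function:

    #include <errno.h>
    #include <stdint.h>
    #include <sys/select.h>
    #include <unistd.h>

    /* Read one 4-byte status word from fd, waiting at most timeout_ms.
       Returns elapsed ms (>= 1), timeout_ms + 1 on timeout, 0 on error. */
    static uint32_t read_u32_timed(int fd, int32_t *buf, uint32_t timeout_ms) {

      fd_set         readfds;
      struct timeval tv;
      int            sret;
      ssize_t        r;

      FD_ZERO(&readfds);
      FD_SET(fd, &readfds);
      tv.tv_sec = timeout_ms / 1000;
      tv.tv_usec = (timeout_ms % 1000) * 1000;

    restart_select:
      sret = select(fd + 1, &readfds, NULL, NULL, &tv);

      if (sret > 0) {

      restart_read:
        r = read(fd, buf, 4);
        if (r == -1 && errno == EINTR) goto restart_read;
        if (r != 4) return 0;                 /* short read: pipe closed */

        /* Linux-only: select() rewrites tv with the time *not* slept. */
        uint32_t left_ms = tv.tv_sec * 1000 + tv.tv_usec / 1000;
        uint32_t exec_ms = timeout_ms > left_ms ? timeout_ms - left_ms : 0;
        return exec_ms > 0 ? exec_ms : 1;     /* report at least 1 ms */

      }

      if (sret == 0) return timeout_ms + 1;   /* timeout, child still runs */
      if (errno == EINTR) goto restart_select;
      return 0;                               /* hard select() error */

    }
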
 
@@ -400,9 +418,8 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
 
     close(fsrv->out_dir_fd);
     close(fsrv->dev_null_fd);
-#ifndef HAVE_ARC4RANDOM
     close(fsrv->dev_urandom_fd);
-#endif
+
     if (fsrv->plot_file != NULL) { fclose(fsrv->plot_file); }
 
     /* This should improve performance a bit, since it stops the linker from
@@ -445,6 +462,13 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
 
   /* PARENT PROCESS */
 
+  char pid_buf[16];
+  sprintf(pid_buf, "%d", fsrv->fsrv_pid);
+  if (fsrv->cmplog_binary)
+    setenv("__AFL_TARGET_PID2", pid_buf, 1);
+  else
+    setenv("__AFL_TARGET_PID1", pid_buf, 1);
+
   /* Close the unneeded endpoints. */
 
   close(ctl_pipe[0]);
@@ -837,8 +861,18 @@ void afl_fsrv_write_to_testcase(afl_forkserver_t *fsrv, u8 *buf, size_t len) {
 
     *fsrv->shmem_fuzz_len = len;
     memcpy(fsrv->shmem_fuzz, buf, len);
-    // printf("test case len: %u [0]:0x%02x\n", *fsrv->shmem_fuzz_len, buf[0]);
-    // fflush(stdout);
+#ifdef _DEBUG
+    fprintf(stderr, "FS crc: %016llx len: %u\n",
+            (unsigned long long)hash64(fsrv->shmem_fuzz, *fsrv->shmem_fuzz_len,
+                                       0xa5b35705),
+            *fsrv->shmem_fuzz_len);
+    fprintf(stderr, "SHM :");
+    for (int i = 0; i < *fsrv->shmem_fuzz_len; i++)
+      fprintf(stderr, "%02x", fsrv->shmem_fuzz[i]);
+    fprintf(stderr, "\nORIG:");
+    for (int i = 0; i < *fsrv->shmem_fuzz_len; i++)
+      fprintf(stderr, "%02x", buf[i]);
+    fprintf(stderr, "\n");
+#endif
 
   } else {
 
diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c
index 5b98be9e..a6d0c994 100644
--- a/src/afl-fuzz-bitmap.c
+++ b/src/afl-fuzz-bitmap.c
@@ -542,23 +542,31 @@ u8 save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
   u8  hnb = '\0';
   s32 fd;
   u8  keeping = 0, res;
+  u64 cksum = 0;
 
   u8 fn[PATH_MAX];
 
   /* Update path frequency. */
-  u32 cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
 
-  struct queue_entry *q = afl->queue;
-  while (q) {
+  /* Generating a hash on every input is super expensive. Bad idea and should
+     only be used for special schedules */
+  if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE)) {
 
-    if (q->exec_cksum == cksum) {
+    cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
 
-      q->n_fuzz = q->n_fuzz + 1;
-      break;
+    struct queue_entry *q = afl->queue;
+    while (q) {
 
-    }
+      if (q->exec_cksum == cksum) {
 
-    q = q->next;
+        q->n_fuzz = q->n_fuzz + 1;
+        break;
+
+      }
+
+      q = q->next;
+
+    }
 
   }
 
@@ -595,7 +603,11 @@ u8 save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
 
     }
 
-    afl->queue_top->exec_cksum = cksum;
+    if (cksum)
+      afl->queue_top->exec_cksum = cksum;
+    else
+      afl->queue_top->exec_cksum =
+          hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
 
     /* Try to calibrate inline; this also calls update_bitmap_score() when
        successful. */
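
As the comment above notes, hashing the whole trace map on every execution is expensive, so save_if_interesting() now computes the path-frequency checksum only for the FAST..RARE schedules and reuses it (when non-zero) for the new queue entry. A condensed sketch of that compute-once/reuse pattern, assuming afl-fuzz.h is included; maybe_path_cksum is an illustrative helper, not part of the patch:

    /* Hash the trace only for schedules that track path frequencies;
       return 0 so the caller knows nothing was computed yet. */
    static u64 maybe_path_cksum(afl_state_t *afl) {

      if (afl->schedule >= FAST && afl->schedule <= RARE)
        return hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);

      return 0;

    }

    /* ... later, when the input is kept:
       q->exec_cksum = cksum ? cksum
                             : hash64(trace_bits, map_size, HASH_CONST);  */
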
diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c
index 4184fa6b..ee96c73c 100644
--- a/src/afl-fuzz-init.c
+++ b/src/afl-fuzz-init.c
@@ -37,6 +37,8 @@ void bind_to_free_cpu(afl_state_t *afl) {
   cpu_set_t c;
   #elif defined(__NetBSD__)
   cpuset_t *         c;
+  #elif defined(__sun)
+  psetid_t            c;
   #endif
 
   u8  cpu_used[4096] = {0};
@@ -181,6 +183,56 @@ void bind_to_free_cpu(afl_state_t *afl) {
   }
 
   ck_free(procs);
+  #elif defined(__sun)
+  kstat_named_t *n;
+  kstat_ctl_t *  m;
+  kstat_t *      k;
+  cpu_stat_t     cs;
+  u32            ncpus;
+
+  m = kstat_open();
+
+  if (!m) FATAL("kstat_open failed");
+
+  k = kstat_lookup(m, "unix", 0, "system_misc");
+
+  if (!k) {
+
+    kstat_close(m);
+    return;
+
+  }
+
+  if (kstat_read(m, k, NULL)) {
+
+    kstat_close(m);
+    return;
+
+  }
+
+  n = kstat_data_lookup(k, "ncpus");
+  ncpus = n->value.i32;
+
+  if (ncpus > sizeof(cpu_used)) ncpus = sizeof(cpu_used);
+
+  for (i = 0; i < ncpus; i++) {
+
+    k = kstat_lookup(m, "cpu_stat", i, NULL);
+    if (kstat_read(m, k, &cs)) {
+
+      kstat_close(m);
+      return;
+
+    }
+
+    if (cs.cpu_sysinfo.cpu[CPU_IDLE] > 0) continue;
+
+    if (cs.cpu_sysinfo.cpu[CPU_USER] > 0 || cs.cpu_sysinfo.cpu[CPU_KERNEL] > 0)
+      cpu_used[i] = 1;
+
+  }
+
+  kstat_close(m);
   #else
     #warning \
         "For this platform we do not have free CPU binding code yet. If possible, please supply a PR to https://github.com/AFLplusplus/AFLplusplus"
@@ -189,7 +241,7 @@ void bind_to_free_cpu(afl_state_t *afl) {
   size_t cpu_start = 0;
 
   try:
-  #ifndef __ANDROID__
+  #if !defined(__ANDROID__)
     for (i = cpu_start; i < afl->cpu_core_count; i++) {
 
       if (!cpu_used[i]) { break; }
@@ -228,6 +280,9 @@ void bind_to_free_cpu(afl_state_t *afl) {
   c = cpuset_create();
   if (c == NULL) PFATAL("cpuset_create failed");
   cpuset_set(i, c);
+  #elif defined(__sun)
+pset_create(&c);
+if (pset_assign(c, i, NULL)) PFATAL("pset_assign failed");
   #endif
 
   #if defined(__linux__)
@@ -271,6 +326,19 @@ if (pthread_setaffinity_np(pthread_self(), cpuset_size(c), c)) {
 }
 
 cpuset_destroy(c);
+  #elif defined(__sun)
+if (pset_bind(c, P_PID, getpid(), NULL)) {
+
+  if (cpu_start == afl->cpu_core_count)
+    PFATAL("pset_bind failed for cpu %d, exit", i);
+  WARNF("pthread_setaffinity failed to CPU %d, trying next CPU", i);
+  cpu_start++;
+  goto try
+    ;
+
+}
+
+pset_destroy(c);
   #else
   // this will need something for other platforms
   // TODO: Solaris/Illumos has processor_bind ... might worth a try
@@ -1473,10 +1541,8 @@ void setup_dirs_fds(afl_state_t *afl) {
   afl->fsrv.dev_null_fd = open("/dev/null", O_RDWR);
   if (afl->fsrv.dev_null_fd < 0) { PFATAL("Unable to open /dev/null"); }
 
-#ifndef HAVE_ARC4RANDOM
   afl->fsrv.dev_urandom_fd = open("/dev/urandom", O_RDONLY);
   if (afl->fsrv.dev_urandom_fd < 0) { PFATAL("Unable to open /dev/urandom"); }
-#endif
 
   /* Gnuplot output file. */
 
diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c
index 29e10d02..f149bb4c 100644
--- a/src/afl-fuzz-mutators.c
+++ b/src/afl-fuzz-mutators.c
@@ -272,7 +272,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf,
     sprintf(afl->stage_name_buf, "ptrim %s",
             u_stringify_int(val_buf, trim_exec));
 
-    u32 cksum;
+    u64 cksum;
 
     size_t retlen = mutator->afl_custom_trim(mutator->data, &retbuf);
 
@@ -295,7 +295,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf,
 
     if (afl->stop_soon || fault == FSRV_RUN_ERROR) { goto abort_trimming; }
 
-    cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+    cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
 
     if (cksum == q->exec_cksum) {
 
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index 578ac584..e42a323d 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -29,10 +29,14 @@
 
 static int select_algorithm(afl_state_t *afl) {
 
-  int i_puppet, j_puppet;
+  int i_puppet, j_puppet = 0, operator_number = operator_num;
+
+  if (!afl->extras_cnt && !afl->a_extras_cnt) operator_number -= 2;
+
+  double range_sele =
+      (double)afl->probability_now[afl->swarm_now][operator_number - 1];
+  double sele = ((double)(rand_below(afl, 10000) * 0.0001 * range_sele));
 
-  double sele = ((double)(rand_below(afl, 10000)) * 0.0001);
-  j_puppet = 0;
   for (i_puppet = 0; i_puppet < operator_num; ++i_puppet) {
 
     if (unlikely(i_puppet == 0)) {
@@ -52,8 +56,10 @@ static int select_algorithm(afl_state_t *afl) {
 
   }
 
-  if (j_puppet == 1 &&
-      sele < afl->probability_now[afl->swarm_now][i_puppet - 1]) {
+  if ((j_puppet == 1 &&
+       sele < afl->probability_now[afl->swarm_now][i_puppet - 1]) ||
+      (i_puppet + 1 < operator_num &&
+       sele > afl->probability_now[afl->swarm_now][i_puppet + 1])) {
 
     FATAL("error select_algorithm");
 
@@ -364,8 +370,8 @@ u8 fuzz_one_original(afl_state_t *afl) {
 
   s32 len, fd, temp_len, i, j;
   u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
-  u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt;
-  u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
+  u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt = 0, prev_cksum;
+  u32 splice_cycle = 0, perf_score = 100, orig_perf, eff_cnt = 1;
 
   u8 ret_val = 1, doing_det = 0;
 
@@ -566,12 +572,11 @@ u8 fuzz_one_original(afl_state_t *afl) {
      if it has gone through deterministic testing in earlier, resumed runs
      (passed_det). */
 
-  if (afl->skip_deterministic ||
-      ((!afl->queue_cur->passed_det) &&
-       perf_score < (afl->queue_cur->depth * 30 <= afl->havoc_max_mult * 100
-                         ? afl->queue_cur->depth * 30
-                         : afl->havoc_max_mult * 100)) ||
-      afl->queue_cur->passed_det) {
+  if (likely(afl->queue_cur->passed_det) || likely(afl->skip_deterministic) ||
+      likely(perf_score <
+             (afl->queue_cur->depth * 30 <= afl->havoc_max_mult * 100
+                  ? afl->queue_cur->depth * 30
+                  : afl->havoc_max_mult * 100))) {
 
     goto custom_mutator_stage;
 
@@ -653,7 +658,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
 
     if (!afl->non_instrumented_mode && (afl->stage_cur & 7) == 7) {
 
-      u32 cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+      u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
 
       if (afl->stage_cur == afl->stage_max - 1 && cksum == prev_cksum) {
 
@@ -821,14 +826,14 @@ u8 fuzz_one_original(afl_state_t *afl) {
 
     if (!eff_map[EFF_APOS(afl->stage_cur)]) {
 
-      u32 cksum;
+      u64 cksum;
 
       /* If in non-instrumented mode or if the file is very short, just flag
          everything without wasting time on checksums. */
 
       if (!afl->non_instrumented_mode && len >= EFF_MIN_LEN) {
 
-        cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+        cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
 
       } else {
 
@@ -2230,7 +2235,7 @@ havoc_stage:
         case 16: {
 
           u32 use_extra, extra_len, insert_at = rand_below(afl, temp_len + 1);
-          u8 *new_buf;
+          u8 *ptr;
 
           /* Insert an extra. Do the same dice-rolling stuff as for the
              previous case. */
@@ -2239,44 +2244,27 @@ havoc_stage:
 
             use_extra = rand_below(afl, afl->a_extras_cnt);
             extra_len = afl->a_extras[use_extra].len;
-
-            if (temp_len + extra_len >= MAX_FILE) { break; }
-
-            new_buf =
-                ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + extra_len);
-
-            /* Head */
-            memcpy(new_buf, out_buf, insert_at);
-
-            /* Inserted part */
-            memcpy(new_buf + insert_at, afl->a_extras[use_extra].data,
-                   extra_len);
+            ptr = afl->a_extras[use_extra].data;
 
           } else {
 
             use_extra = rand_below(afl, afl->extras_cnt);
             extra_len = afl->extras[use_extra].len;
+            ptr = afl->extras[use_extra].data;
 
-            if (temp_len + extra_len >= MAX_FILE) { break; }
-
-            new_buf =
-                ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + extra_len);
-
-            /* Head */
-            memcpy(new_buf, out_buf, insert_at);
+          }
 
-            /* Inserted part */
-            memcpy(new_buf + insert_at, afl->extras[use_extra].data, extra_len);
+          if (temp_len + extra_len >= MAX_FILE) { break; }
 
-          }
+          out_buf = ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len);
 
           /* Tail */
-          memcpy(new_buf + insert_at + extra_len, out_buf + insert_at,
-                 temp_len - insert_at);
+          memmove(out_buf + insert_at + extra_len, out_buf + insert_at,
+                  temp_len - insert_at);
+
+          /* Inserted part */
+          memcpy(out_buf + insert_at, ptr, extra_len);
 
-          swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch));
-          out_buf = new_buf;
-          new_buf = NULL;
           temp_len += extra_len;
 
           break;
@@ -2539,8 +2527,8 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
 
   s32 len, fd, temp_len, i, j;
   u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
-  u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv;
-  u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
+  u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt = 0, cur_ms_lv, prev_cksum;
+  u32 splice_cycle = 0, perf_score = 100, orig_perf, eff_cnt = 1;
 
   u8 ret_val = 1, doing_det = 0;
 
@@ -2806,7 +2794,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
 
     if (!afl->non_instrumented_mode && (afl->stage_cur & 7) == 7) {
 
-      u32 cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+      u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
 
       if (afl->stage_cur == afl->stage_max - 1 && cksum == prev_cksum) {
 
@@ -2974,14 +2962,14 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
 
     if (!eff_map[EFF_APOS(afl->stage_cur)]) {
 
-      u32 cksum;
+      u64 cksum;
 
       /* If in non-instrumented mode or if the file is very short, just flag
          everything without wasting time on checksums. */
 
       if (!afl->non_instrumented_mode && len >= EFF_MIN_LEN) {
 
-        cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+        cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
 
       } else {
 
@@ -4208,6 +4196,94 @@ pacemaker_fuzzing:
 
             }                                                    /* case 15 */
 
+              /* Values 16 and 17 can be selected only if there are any extras
+                 present in the dictionaries. */
+
+            case 16: {
+
+              /* Overwrite bytes with an extra. */
+
+              if (!afl->extras_cnt ||
+                  (afl->a_extras_cnt && rand_below(afl, 2))) {
+
+                /* No user-specified extras or odds in our favor. Let's use an
+                  auto-detected one. */
+
+                u32 use_extra = rand_below(afl, afl->a_extras_cnt);
+                u32 extra_len = afl->a_extras[use_extra].len;
+
+                if (extra_len > temp_len) break;
+
+                u32 insert_at = rand_below(afl, temp_len - extra_len + 1);
+                memcpy(out_buf + insert_at, afl->a_extras[use_extra].data,
+                       extra_len);
+
+              } else {
+
+                /* No auto extras or odds in our favor. Use the dictionary. */
+
+                u32 use_extra = rand_below(afl, afl->extras_cnt);
+                u32 extra_len = afl->extras[use_extra].len;
+
+                if (extra_len > temp_len) break;
+
+                u32 insert_at = rand_below(afl, temp_len - extra_len + 1);
+                memcpy(out_buf + insert_at, afl->extras[use_extra].data,
+                       extra_len);
+
+              }
+
+              afl->stage_cycles_puppet_v2[afl->swarm_now]
+                                         [STAGE_OverWriteExtra] += 1;
+
+              break;
+
+            }
+
+              /* Insert an extra. */
+
+            case 17: {
+
+              u32 use_extra, extra_len,
+                  insert_at = rand_below(afl, temp_len + 1);
+              u8 *ptr;
+
+              /* Insert an extra. Do the same dice-rolling stuff as for the
+                previous case. */
+
+              if (!afl->extras_cnt ||
+                  (afl->a_extras_cnt && rand_below(afl, 2))) {
+
+                use_extra = rand_below(afl, afl->a_extras_cnt);
+                extra_len = afl->a_extras[use_extra].len;
+                ptr = afl->a_extras[use_extra].data;
+
+              } else {
+
+                use_extra = rand_below(afl, afl->extras_cnt);
+                extra_len = afl->extras[use_extra].len;
+                ptr = afl->extras[use_extra].data;
+
+              }
+
+              if (temp_len + extra_len >= MAX_FILE) break;
+
+              out_buf = ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len);
+
+              /* Tail */
+              memmove(out_buf + insert_at + extra_len, out_buf + insert_at,
+                      temp_len - insert_at);
+
+              /* Inserted part */
+              memcpy(out_buf + insert_at, ptr, extra_len);
+
+              temp_len += extra_len;
+              afl->stage_cycles_puppet_v2[afl->swarm_now][STAGE_InsertExtra] +=
+                  1;
+              break;
+
+            }
+
           }                                    /* switch select_algorithm() */
 
         }                                      /* for i=0; i < use_stacking */
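
Both the havoc case 16 above and the new MOpt case 17 now insert a dictionary extra in place: grow the output buffer, memmove the tail out of the way, then memcpy the extra into the gap, instead of assembling head/extra/tail in a scratch buffer and swapping. A generic, self-contained sketch of that insertion, using plain realloc in place of ck_maybe_grow (insert_extra is an illustrative name):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Insert `extra` (extra_len bytes) into *buf at insert_at, growing
       the buffer in place. Returns the new length, or 0 on OOM. */
    static size_t insert_extra(uint8_t **buf, size_t len, size_t insert_at,
                               const uint8_t *extra, size_t extra_len) {

      uint8_t *p = realloc(*buf, len + extra_len);    /* grow, keep prefix */
      if (!p) return 0;
      *buf = p;

      /* Tail: shift everything after the insertion point to the right.
         memmove() is required because source and destination overlap. */
      memmove(p + insert_at + extra_len, p + insert_at, len - insert_at);

      /* Inserted part. */
      memcpy(p + insert_at, extra, extra_len);

      return len + extra_len;

    }
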
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index ea7f57e2..0e8c8e47 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -194,9 +194,14 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
 
   u32 i;
   u64 fav_factor;
-  u64 fuzz_p2 = next_pow2(q->n_fuzz);
+  u64 fuzz_p2;
 
-  if (afl->schedule == MMOPT || afl->schedule == RARE ||
+  if (unlikely(afl->schedule >= FAST))
+    fuzz_p2 = next_pow2(q->n_fuzz);
+  else
+    fuzz_p2 = q->fuzz_level;
+
+  if (unlikely(afl->schedule == MMOPT || afl->schedule == RARE) ||
       unlikely(afl->fixed_seed)) {
 
     fav_factor = q->len << 2;
@@ -217,9 +222,13 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
 
         /* Faster-executing or smaller test cases are favored. */
         u64 top_rated_fav_factor;
-        u64 top_rated_fuzz_p2 = next_pow2(afl->top_rated[i]->n_fuzz);
+        u64 top_rated_fuzz_p2;
+        if (unlikely(afl->schedule >= FAST))
+          top_rated_fuzz_p2 = next_pow2(afl->top_rated[i]->n_fuzz);
+        else
+          top_rated_fuzz_p2 = afl->top_rated[i]->fuzz_level;
 
-        if (afl->schedule == MMOPT || afl->schedule == RARE ||
+        if (unlikely(afl->schedule == MMOPT || afl->schedule == RARE) ||
             unlikely(afl->fixed_seed)) {
 
           top_rated_fav_factor = afl->top_rated[i]->len << 2;
@@ -241,7 +250,7 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
 
         }
 
-        if (afl->schedule == MMOPT || afl->schedule == RARE ||
+        if (unlikely(afl->schedule == MMOPT || afl->schedule == RARE) ||
             unlikely(afl->fixed_seed)) {
 
           if (fav_factor > afl->top_rated[i]->len << 2) { continue; }
@@ -593,9 +602,12 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
 
   }
 
-  if (factor > MAX_FACTOR) { factor = MAX_FACTOR; }
+  if (unlikely(afl->schedule >= FAST)) {
+
+    if (factor > MAX_FACTOR) { factor = MAX_FACTOR; }
+    perf_score *= factor / POWER_BETA;
 
-  perf_score *= factor / POWER_BETA;
+  }
 
   // MOpt mode
   if (afl->limit_time_sig != 0 && afl->max_depth - q->depth < 3) {
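
update_bitmap_score() above rounds n_fuzz up to a power of two only for the FAST..RARE schedules and falls back to fuzz_level otherwise. For reference, a power-of-two rounding helper typically looks like the sketch below (the project ships its own next_pow2; this is only an illustration):

    #include <stdint.h>

    /* Smallest power of two >= val (returns 1 for val <= 1). */
    static uint64_t next_pow2_sketch(uint64_t val) {

      uint64_t out = 1;
      while (out < val)
        out <<= 1;

      return out;

    }
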
diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c
index 7621d180..43850eb5 100644
--- a/src/afl-fuzz-redqueen.c
+++ b/src/afl-fuzz-redqueen.c
@@ -89,11 +89,11 @@ static struct range *pop_biggest_range(struct range **ranges) {
 
 }
 
-static u8 get_exec_checksum(afl_state_t *afl, u8 *buf, u32 len, u32 *cksum) {
+static u8 get_exec_checksum(afl_state_t *afl, u8 *buf, u32 len, u64 *cksum) {
 
   if (unlikely(common_fuzz_stuff(afl, buf, len))) { return 1; }
 
-  *cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+  *cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
   return 0;
 
 }
@@ -109,7 +109,7 @@ static void rand_replace(afl_state_t *afl, u8 *buf, u32 len) {
 
 }
 
-static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u32 exec_cksum) {
+static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum) {
 
   struct range *ranges = add_range(NULL, 0, len);
   u8 *          backup = ck_alloc_nozero(len);
@@ -137,7 +137,7 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u32 exec_cksum) {
       memcpy(backup, buf + rng->start, s);
       rand_replace(afl, buf + rng->start, s);
 
-      u32 cksum;
+      u64 cksum;
       u64 start_us = get_cur_time_us();
       if (unlikely(get_exec_checksum(afl, buf, len, &cksum))) {
 
@@ -180,7 +180,7 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u32 exec_cksum) {
   while (ranges) {
 
     rng = ranges;
-    ranges = ranges->next;
+    ranges = rng->next;
     ck_free(rng);
     rng = NULL;
 
@@ -224,7 +224,7 @@ checksum_fail:
   while (ranges) {
 
     rng = ranges;
-    ranges = ranges->next;
+    ranges = rng->next;
     ck_free(rng);
     rng = NULL;
 
@@ -695,7 +695,7 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) {
 
 // afl->queue_cur->exec_cksum
 u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len,
-                        u32 exec_cksum) {
+                        u64 exec_cksum) {
 
   u8 r = 1;
   if (afl->orig_cmp_map == NULL) {
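
The two cleanup loops in colorization() above walk and free the range list; the change simply reads the successor through the saved node pointer before freeing it. The general walk-and-free pattern, as a self-contained sketch (range_sketch is a stand-in type, not struct range):

    #include <stdlib.h>

    struct range_sketch {

      struct range_sketch *next;

    };

    /* Free a singly-linked list: save the head, advance through the saved
       pointer, then free — never dereference a node after free(). */
    static void free_ranges(struct range_sketch *head) {

      while (head) {

        struct range_sketch *node = head;
        head = node->next;
        free(node);

      }

    }
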
diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c
index a85e00fe..a1e8417f 100644
--- a/src/afl-fuzz-run.c
+++ b/src/afl-fuzz-run.c
@@ -142,7 +142,35 @@ static void write_with_gap(afl_state_t *afl, void *mem, u32 len, u32 skip_at,
   s32 fd = afl->fsrv.out_fd;
   u32 tail_len = len - skip_at - skip_len;
 
-  if (afl->fsrv.out_file) {
+  if (afl->fsrv.shmem_fuzz) {
+
+    if (skip_at) { memcpy(afl->fsrv.shmem_fuzz, mem, skip_at); }
+
+    if (tail_len) {
+
+      memcpy(afl->fsrv.shmem_fuzz + skip_at, (u8 *)mem + skip_at + skip_len,
+             tail_len);
+
+    }
+
+    *afl->fsrv.shmem_fuzz_len = len - skip_len;
+
+#ifdef _DEBUG
+    fprintf(stderr, "FS crc: %016llx len: %u\n",
+            (unsigned long long)hash64(afl->fsrv.shmem_fuzz,
+                                       *afl->fsrv.shmem_fuzz_len, 0xa5b35705),
+            *afl->fsrv.shmem_fuzz_len);
+    fprintf(stderr, "SHM :");
+    for (u32 i = 0; i < *afl->fsrv.shmem_fuzz_len; i++)
+      fprintf(stderr, "%02x", afl->fsrv.shmem_fuzz[i]);
+    fprintf(stderr, "\nORIG:");
+    for (u32 i = 0; i < *afl->fsrv.shmem_fuzz_len; i++)
+      fprintf(stderr, "%02x", ((u8 *)mem)[i]);
+    fprintf(stderr, "\n");
+#endif
+
+    return;
+
+  } else if (afl->fsrv.out_file) {
 
     if (afl->no_unlink) {
 
@@ -256,7 +284,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
 
   for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
 
-    u32 cksum;
+    u64 cksum;
 
     if (!first_run && !(afl->stage_cur % afl->stats_update_freq)) {
 
@@ -281,7 +309,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
 
     }
 
-    cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+    cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
     if (q->exec_cksum != cksum) {
 
       hnb = has_new_bits(afl, afl->virgin_bits);
@@ -646,7 +674,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
     while (remove_pos < q->len) {
 
       u32 trim_avail = MIN(remove_len, q->len - remove_pos);
-      u32 cksum;
+      u64 cksum;
 
       write_with_gap(afl, in_buf, q->len, remove_pos, trim_avail);
 
@@ -658,7 +686,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
       /* Note that we don't keep track of crashes or hangs here; maybe TODO?
        */
 
-      cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+      cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
 
       /* If the deletion had no impact on the trace, make it permanent. This
          isn't perfect for variable-path inputs, but we're just making a
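
write_with_gap() above gains a shared-memory path: when the target maps a fuzzing buffer, the head and the tail around the trimmed-out gap are copied straight into it and the length word is updated, skipping the temp file entirely. A self-contained sketch of just the gap copy, detached from the forkserver plumbing (copy_with_gap is an illustrative name):

    #include <stdint.h>
    #include <string.h>

    /* Copy `mem` (len bytes) into shmem, skipping skip_len bytes starting
       at skip_at; *shmem_len receives the resulting length. */
    static void copy_with_gap(uint8_t *shmem, uint32_t *shmem_len,
                              const uint8_t *mem, uint32_t len,
                              uint32_t skip_at, uint32_t skip_len) {

      uint32_t tail_len = len - skip_at - skip_len;

      if (skip_at) memcpy(shmem, mem, skip_at);                /* head */
      if (tail_len)
        memcpy(shmem + skip_at, mem + skip_at + skip_len,      /* tail */
               tail_len);

      *shmem_len = len - skip_len;

    }
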
diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c
index 99863103..f1474f33 100644
--- a/src/afl-fuzz-state.c
+++ b/src/afl-fuzz-state.c
@@ -124,9 +124,7 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) {
 
   afl->stats_update_freq = 1;
 
-#ifndef HAVE_ARC4RANDOM
   afl->fsrv.dev_urandom_fd = -1;
-#endif
   afl->fsrv.dev_null_fd = -1;
 
   afl->fsrv.child_pid = -1;
diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
index 374b2411..28473c0c 100644
--- a/src/afl-fuzz-stats.c
+++ b/src/afl-fuzz-stats.c
@@ -31,7 +31,9 @@
 void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,
                       double eps) {
 
+#ifndef __HAIKU__
   struct rusage rus;
+#endif
 
   unsigned long long int cur_time = get_cur_time();
   u8                     fn[PATH_MAX];
@@ -65,7 +67,9 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,
 
   }
 
+#ifndef __HAIKU__
   if (getrusage(RUSAGE_CHILDREN, &rus)) { rus.ru_maxrss = 0; }
+#endif
 
   fprintf(
       f,
@@ -119,12 +123,21 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,
       afl->last_path_time / 1000, afl->last_crash_time / 1000,
       afl->last_hang_time / 1000, afl->fsrv.total_execs - afl->last_crash_execs,
       afl->fsrv.exec_tmout, afl->slowest_exec_ms,
-#ifdef __APPLE__
+#ifndef __HAIKU__
+  #ifdef __APPLE__
       (unsigned long int)(rus.ru_maxrss >> 20),
-#else
+  #else
       (unsigned long int)(rus.ru_maxrss >> 10),
+  #endif
+#else
+      -1UL,
+#endif
+#ifdef HAVE_AFFINITY
+      afl->cpu_aff,
+#else
+      -1,
 #endif
-      afl->cpu_aff, t_bytes, afl->var_byte_count, afl->use_banner,
+      t_bytes, afl->var_byte_count, afl->use_banner,
       afl->unicorn_mode ? "unicorn" : "", afl->fsrv.qemu_mode ? "qemu " : "",
       afl->non_instrumented_mode ? " non_instrumented " : "",
       afl->no_forkserver ? "no_fsrv " : "", afl->crash_mode ? "crash " : "",
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index d5fed9f2..cefcd73f 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -31,6 +31,21 @@
 extern u64 time_spent_working;
 #endif
 
+static void at_exit() {
+
+  int   i;
+  char *ptr = getenv("__AFL_TARGET_PID1");
+
+  if (ptr && *ptr && (i = atoi(ptr)) > 0) kill(i, SIGKILL);
+
+  ptr = getenv("__AFL_TARGET_PID2");
+
+  if (ptr && *ptr && (i = atoi(ptr)) > 0) kill(i, SIGKILL);
+
+  // anything else? shared memory?
+
+}
+
 static u8 *get_libradamsa_path(u8 *own_loc) {
 
   u8 *tmp, *cp, *rsl, *own_copy;
@@ -231,7 +246,7 @@ static int stricmp(char const *a, char const *b) {
   for (;; ++a, ++b) {
 
     int d;
-    d = tolower(*a) - tolower(*b);
+    d = tolower((int)*a) - tolower((int)*b);
     if (d != 0 || !*a) { return d; }
 
   }
@@ -819,8 +834,17 @@ int main(int argc, char **argv_orig, char **envp) {
 
   }
 
-  srandom((u32)afl->init_seed);
-  srand((u32)afl->init_seed);  // in case it is a different implementation
+  if (afl->init_seed) {
+
+    afl->rand_seed[0] = afl->init_seed;
+    afl->rand_seed[1] = afl->init_seed ^ 0x1234567890abcdef;
+    afl->rand_seed[2] = afl->init_seed & 0x0123456789abcdef;
+    afl->rand_seed[3] = afl->init_seed | 0x01abcde43f567908;
+
+  }
+
+  // srandom((u32)afl->init_seed);
+  // srand((u32)afl->init_seed);  // in case it is a different implementation
 
   if (afl->use_radamsa) {
 
@@ -1234,6 +1258,8 @@ int main(int argc, char **argv_orig, char **envp) {
 
   }
 
+  atexit(at_exit);
+
   perform_dry_run(afl);
 
   cull_queue(afl);
diff --git a/src/afl-gcc.c b/src/afl-gcc.c
index 7eb01c0c..b8ff7e77 100644
--- a/src/afl-gcc.c
+++ b/src/afl-gcc.c
@@ -335,7 +335,7 @@ static void edit_params(u32 argc, char **argv) {
 
   }
 
-#ifdef USEMMAP
+#if defined(USEMMAP) && !defined(__HAIKU__)
   cc_params[cc_par_cnt++] = "-lrt";
 #endif
 
diff --git a/src/afl-gotcpu.c b/src/afl-gotcpu.c
index 43b3196b..bd0f7de6 100644
--- a/src/afl-gotcpu.c
+++ b/src/afl-gotcpu.c
@@ -54,7 +54,7 @@
 #include "common.h"
 
 #if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
-    defined(__APPLE__) || defined(__DragonFly__)
+    defined(__APPLE__) || defined(__DragonFly__) || defined(__sun)
   #define HAVE_AFFINITY 1
   #if defined(__FreeBSD__) || defined(__DragonFly__)
     #include <pthread.h>
@@ -70,6 +70,8 @@
     #include <pthread.h>
     #include <mach/thread_act.h>
     #include <mach/thread_policy.h>
+  #elif defined(__sun)
+    #include <sys/pset.h>
   #endif
 #endif               /* __linux__ || __FreeBSD__ || __NetBSD__ || __APPLE__ */
 
@@ -181,6 +183,12 @@ int main(int argc, char **argv) {
       if (thread_policy_set(native_thread, THREAD_AFFINITY_POLICY,
                             (thread_policy_t)&c, 1) != KERN_SUCCESS)
         PFATAL("thread_policy_set failed");
+  #elif defined(__sun)
+      psetid_t c;
+
+      if (pset_create(&c)) PFATAL("pset_create failed");
+
+      if (pset_assign(c, i, NULL)) PFATAL("pset_assign failed");
   #endif
 
   #if defined(__FreeBSD__) || defined(__DragonFly__)
@@ -195,6 +203,12 @@ int main(int argc, char **argv) {
       cpuset_destroy(c);
   #endif
 
+  #if defined(__sun)
+      if (pset_bind(c, P_PID, getpid(), NULL)) PFATAL("pset_bind failed");
+
+      pset_destroy(c);
+  #endif
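
The new __sun branch above pins the process with a processor set: pset_create() a set, pset_assign() the chosen CPU into it, then pset_bind() the calling process (afl-fuzz-init.c follows the same sequence and retries the next CPU on failure). A minimal error-checked sketch of that sequence, assuming <sys/pset.h>; bind_to_cpu_sun is an illustrative wrapper:

    #include <sys/pset.h>
    #include <unistd.h>

    /* Bind the calling process to a single CPU via a processor set.
       Returns 0 on success, -1 on any failure. */
    static int bind_to_cpu_sun(int cpu) {

      psetid_t set;

      if (pset_create(&set)) return -1;            /* new empty set     */
      if (pset_assign(set, cpu, NULL)) goto fail;  /* move CPU into set */
      if (pset_bind(set, P_PID, getpid(), NULL))   /* bind this process */
        goto fail;

      /* The set is left alive here; pset_destroy() would release it. */
      return 0;

    fail:
      pset_destroy(set);
      return -1;

    }
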
+
   #if defined(__linux__)
       if (sched_setaffinity(0, sizeof(c), &c)) {
 
diff --git a/src/afl-performance.c b/src/afl-performance.c
new file mode 100644
index 00000000..8efefcd8
--- /dev/null
+++ b/src/afl-performance.c
@@ -0,0 +1,144 @@
+/*
+   Written in 2019 by David Blackman and Sebastiano Vigna (vigna@acm.org)
+
+   To the extent possible under law, the author has dedicated all copyright
+   and related and neighboring rights to this software to the public domain
+   worldwide. This software is distributed without any warranty.
+
+   See <http://creativecommons.org/publicdomain/zero/1.0/>.
+
+   This is xoshiro256++ 1.0, one of our all-purpose, rock-solid generators.
+   It has excellent (sub-ns) speed, a state (256 bits) that is large
+   enough for any parallel application, and it passes all tests we are
+   aware of.
+
+   For generating just floating-point numbers, xoshiro256+ is even faster.
+
+   The state must be seeded so that it is not everywhere zero. If you have
+   a 64-bit seed, we suggest to seed a splitmix64 generator and use its
+   output to fill s[].
+*/
+
+#include <stdint.h>
+#include "afl-fuzz.h"
+#include "types.h"
+#include "xxh3.h"
+
+/* we use xoshiro256++ instead of rand/random because it is 10x faster and has
+   better randomness properties. */
+
+static inline uint64_t rotl(const uint64_t x, int k) {
+
+  return (x << k) | (x >> (64 - k));
+
+}
+
+uint64_t rand_next(afl_state_t *afl) {
+
+  const uint64_t result =
+      rotl(afl->rand_seed[0] + afl->rand_seed[3], 23) + afl->rand_seed[0];
+
+  const uint64_t t = afl->rand_seed[1] << 17;
+
+  afl->rand_seed[2] ^= afl->rand_seed[0];
+  afl->rand_seed[3] ^= afl->rand_seed[1];
+  afl->rand_seed[1] ^= afl->rand_seed[2];
+  afl->rand_seed[0] ^= afl->rand_seed[3];
+
+  afl->rand_seed[2] ^= t;
+
+  afl->rand_seed[3] = rotl(afl->rand_seed[3], 45);
+
+  return result;
+
+}
+
+/* This is the jump function for the generator. It is equivalent
+   to 2^128 calls to rand_next(); it can be used to generate 2^128
+   non-overlapping subsequences for parallel computations. */
+
+void jump(afl_state_t *afl) {
+
+  static const uint64_t JUMP[] = {0x180ec6d33cfd0aba, 0xd5a61266f0c9392c,
+                                  0xa9582618e03fc9aa, 0x39abdc4529b1661c};
+  int                   i, b;
+  uint64_t              s0 = 0;
+  uint64_t              s1 = 0;
+  uint64_t              s2 = 0;
+  uint64_t              s3 = 0;
+  for (i = 0; i < sizeof JUMP / sizeof *JUMP; i++)
+    for (b = 0; b < 64; b++) {
+
+      if (JUMP[i] & UINT64_C(1) << b) {
+
+        s0 ^= afl->rand_seed[0];
+        s1 ^= afl->rand_seed[1];
+        s2 ^= afl->rand_seed[2];
+        s3 ^= afl->rand_seed[3];
+
+      }
+
+      rand_next(afl);
+
+    }
+
+  afl->rand_seed[0] = s0;
+  afl->rand_seed[1] = s1;
+  afl->rand_seed[2] = s2;
+  afl->rand_seed[3] = s3;
+
+}
+
+/* This is the long-jump function for the generator. It is equivalent to
+   2^192 calls to rand_next(); it can be used to generate 2^64 starting points,
+   from each of which jump() will generate 2^64 non-overlapping
+   subsequences for parallel distributed computations. */
+
+void long_jump(afl_state_t *afl) {
+
+  static const uint64_t LONG_JUMP[] = {0x76e15d3efefdcbbf, 0xc5004e441c522fb3,
+                                       0x77710069854ee241, 0x39109bb02acbe635};
+
+  int      i, b;
+  uint64_t s0 = 0;
+  uint64_t s1 = 0;
+  uint64_t s2 = 0;
+  uint64_t s3 = 0;
+  for (i = 0; i < sizeof LONG_JUMP / sizeof *LONG_JUMP; i++)
+    for (b = 0; b < 64; b++) {
+
+      if (LONG_JUMP[i] & UINT64_C(1) << b) {
+
+        s0 ^= afl->rand_seed[0];
+        s1 ^= afl->rand_seed[1];
+        s2 ^= afl->rand_seed[2];
+        s3 ^= afl->rand_seed[3];
+
+      }
+
+      rand_next(afl);
+
+    }
+
+  afl->rand_seed[0] = s0;
+  afl->rand_seed[1] = s1;
+  afl->rand_seed[2] = s2;
+  afl->rand_seed[3] = s3;
+
+}
+
+/* we switch from afl's murmur implementation to xxh3 as it is 30% faster -
+   and get 64 bit hashes instead of just 32 bit. Less collisions! :-) */
+
+u32 inline hash32(const void *key, u32 len, u32 seed) {
+
+  return (u32)XXH64(key, len, seed);
+
+}
+
+u64 inline hash64(const void *key, u32 len, u64 seed) {
+
+  return XXH64(key, len, seed);
+
+}
+
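
The xoshiro256++ header comment above recommends expanding a single 64-bit seed into the four state words with a splitmix64 generator (the afl-fuzz.c hunk earlier fills rand_seed[] with ad-hoc xor/and/or constants instead). A sketch of that recommended seeding, using the standard public-domain splitmix64 step; seed_rand is an illustrative name, and the rand_seed[4] layout matches afl_state_t:

    #include <stdint.h>

    /* One splitmix64 step: good enough to stretch a 64-bit seed. */
    static uint64_t splitmix64(uint64_t *x) {

      uint64_t z = (*x += 0x9e3779b97f4a7c15ULL);
      z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9ULL;
      z = (z ^ (z >> 27)) * 0x94d049bb133111ebULL;
      return z ^ (z >> 31);

    }

    /* Fill the four xoshiro256++ state words from one seed value. */
    static void seed_rand(uint64_t seed, uint64_t rand_seed[4]) {

      for (int i = 0; i < 4; i++)
        rand_seed[i] = splitmix64(&seed);

    }

Since each splitmix64 step is a bijection, at most one of the four words can come out zero, which also satisfies the header's requirement that the state must not be everywhere zero.
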
diff --git a/src/afl-sharedmem.c b/src/afl-sharedmem.c
index 63013435..f8bbebc8 100644
--- a/src/afl-sharedmem.c
+++ b/src/afl-sharedmem.c
@@ -145,7 +145,7 @@ u8 *afl_shm_init(sharedmem_t *shm, size_t map_size,
 
   if (!non_instrumented_mode) setenv(SHM_ENV_VAR, shm->g_shm_file_path, 1);
 
-  if (shm->map == -1 || !shm->map) PFATAL("mmap() failed");
+  if (shm->map == (void *)-1 || !shm->map) PFATAL("mmap() failed");
 
 #else
   u8 *shm_str;
diff --git a/src/afl-showmap.c b/src/afl-showmap.c
index 560c8cf6..7b46cd2b 100644
--- a/src/afl-showmap.c
+++ b/src/afl-showmap.c
@@ -56,7 +56,9 @@
 
 #include <sys/wait.h>
 #include <sys/time.h>
-#include <sys/shm.h>
+#ifndef USEMMAP
+  #include <sys/shm.h>
+#endif
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/resource.h>
diff --git a/src/afl-tmin.c b/src/afl-tmin.c
index 091e5177..9df5112b 100644
--- a/src/afl-tmin.c
+++ b/src/afl-tmin.c
@@ -54,7 +54,9 @@
 
 #include <sys/wait.h>
 #include <sys/time.h>
-#include <sys/shm.h>
+#ifndef USEMMAP
+  #include <sys/shm.h>
+#endif
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/resource.h>
@@ -67,12 +69,13 @@ static u8 *in_file,                    /* Minimizer input test case         */
 static u8 *in_data;                    /* Input data for trimming           */
 
 static u32 in_len,                     /* Input data length                 */
-    orig_cksum,                        /* Original checksum                 */
     missed_hangs,                      /* Misses due to hangs               */
     missed_crashes,                    /* Misses due to crashes             */
     missed_paths,                      /* Misses due to exec path diffs     */
     map_size = MAP_SIZE;
 
+static u64 orig_cksum;                 /* Original checksum                 */
+
 static u8 crash_mode,                  /* Crash-centric mode?               */
     hang_mode,                         /* Minimize as long as it hangs      */
     exit_crash,                        /* Treat non-zero exit as crash?     */
@@ -300,7 +303,7 @@ static u8 tmin_run_target(afl_forkserver_t *fsrv, char **argv, u8 *mem, u32 len,
 
   if (ret == FSRV_RUN_NOINST) { FATAL("Binary not instrumented?"); }
 
-  u32 cksum = hash32(fsrv->trace_bits, fsrv->map_size, HASH_CONST);
+  u64 cksum = hash64(fsrv->trace_bits, fsrv->map_size, HASH_CONST);
 
   if (first_run) { orig_cksum = cksum; }
 
diff --git a/src/third_party/libradamsa/libradamsa.c b/src/third_party/libradamsa/libradamsa.c
index 4f5515e5..37c986e9 100644
--- a/src/third_party/libradamsa/libradamsa.c
+++ b/src/third_party/libradamsa/libradamsa.c
@@ -2413,9 +2413,12 @@ static word prim_sys(word op, word a, word b, word c) {
 #endif
                                                                            O_DSYNC, O_EXCL,
             O_NOCTTY, O_NOFOLLOW, O_NONBLOCK, O_RSYNC, O_SYNC, O_TRUNC, O_TTY_INIT, O_ACCMODE,
-            FD_CLOEXEC, F_DUPFD, F_DUPFD_CLOEXEC, F_GETFD, F_SETFD, F_GETFL, F_SETFL, F_GETOWN,
-            F_SETOWN, F_GETLK, F_SETLK, F_SETLKW, F_RDLCK, F_UNLCK, F_WRLCK, CLOCK_MONOTONIC,
-            CLOCK_PROCESS_CPUTIME_ID, CLOCK_REALTIME, CLOCK_THREAD_CPUTIME_ID
+            FD_CLOEXEC, F_DUPFD, F_DUPFD_CLOEXEC, F_GETFD, F_SETFD, F_GETFL, F_SETFL,
+            F_GETLK, F_SETLK, F_SETLKW, F_RDLCK, F_UNLCK, F_WRLCK, CLOCK_MONOTONIC,
+            CLOCK_PROCESS_CPUTIME_ID, CLOCK_REALTIME, CLOCK_THREAD_CPUTIME_ID,
+#if !defined __HAIKU__
+            F_GETOWN, F_SETOWN
+#endif
          };
          return onum(sysconst[immval(a) % (sizeof sysconst / W)], 0); }
       case 9: /* return process variables */