path: root/src/afl-fuzz-bitmap.c
author     van Hauser <vh@thc.org>  2019-09-02 17:40:23 +0200
committer  van Hauser <vh@thc.org>  2019-09-02 17:40:23 +0200
commit     1652831f1de2fcf13184162503bb764bd610914c (patch)
tree       11612f5023c53d40ca8e771f73bc9723cc6f149f /src/afl-fuzz-bitmap.c
parent     39c4bb7a49d22c66b3cb613fb13329ec7a6dc16d (diff)
afl-fuzz-src/* -> src/afl-fuzz* rename
Diffstat (limited to 'src/afl-fuzz-bitmap.c')
-rw-r--r--  src/afl-fuzz-bitmap.c  410
1 file changed, 410 insertions, 0 deletions
diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c
new file mode 100644
index 00000000..6cd9852f
--- /dev/null
+++ b/src/afl-fuzz-bitmap.c
@@ -0,0 +1,410 @@
+/*
+   american fuzzy lop - fuzzer code
+   --------------------------------
+
+   Written and maintained by Michal Zalewski <lcamtuf@google.com>
+
+   Forkserver design by Jann Horn <jannhorn@googlemail.com>
+
+   Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This is the real deal: the program takes an instrumented binary and
+   attempts a variety of basic fuzzing tricks, paying close attention to
+   how they affect the execution path.
+
+ */
+
+#include "afl-fuzz.h"
+
+/* Write bitmap to file. The bitmap is useful mostly for the secret
+   -B option, to focus a separate fuzzing session on a particular
+   interesting input without rediscovering all the others. */
+
+void write_bitmap(void) {
+
+  u8* fname;
+  s32 fd;
+
+  if (!bitmap_changed) return;
+  bitmap_changed = 0;
+
+  fname = alloc_printf("%s/fuzz_bitmap", out_dir);
+  fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);
+
+  if (fd < 0) PFATAL("Unable to open '%s'", fname);
+
+  ck_write(fd, virgin_bits, MAP_SIZE, fname);
+
+  close(fd);
+  ck_free(fname);
+
+}
+
+
+/* Read bitmap from file. This is for the -B option again. */
+
+void read_bitmap(u8* fname) {
+
+  s32 fd = open(fname, O_RDONLY);
+
+  if (fd < 0) PFATAL("Unable to open '%s'", fname);
+
+  ck_read(fd, virgin_bits, MAP_SIZE, fname);
+
+  close(fd);
+
+}
+
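+/* Usage note (illustrative, not part of this commit): the bitmap written by
+   write_bitmap() can be handed to a fresh session via -B, e.g.
+
+     afl-fuzz -B fuzz_bitmap -i in -o out2 -- ./target @@
+
+   so that session treats already-covered tuples as uninteresting. The
+   in/out2/target names are placeholders. */
+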
+
+/* Check if the current execution path brings anything new to the table.
+   Update virgin bits to reflect the finds. Returns 1 if the only change is
+   the hit-count for a particular tuple; 2 if there are new tuples seen. 
+   Updates the map, so subsequent calls will always return 0.
+
+   This function is called after every exec() on a fairly large buffer, so
+   it needs to be fast. We do this in 32-bit and 64-bit flavors. */
+
+u8 has_new_bits(u8* virgin_map) {
+
+#ifdef __x86_64__
+
+  u64* current = (u64*)trace_bits;
+  u64* virgin  = (u64*)virgin_map;
+
+  u32  i = (MAP_SIZE >> 3);
+
+#else
+
+  u32* current = (u32*)trace_bits;
+  u32* virgin  = (u32*)virgin_map;
+
+  u32  i = (MAP_SIZE >> 2);
+
+#endif /* ^__x86_64__ */
+
+  u8   ret = 0;
+
+  while (i--) {
+
+    /* Optimize for (*current & *virgin) == 0 - i.e., no bits in the current
+       bitmap that have not already been cleared from the virgin map - since
+       this will almost always be the case. */
+
+    if (unlikely(*current) && unlikely(*current & *virgin)) {
+
+      if (likely(ret < 2)) {
+
+        u8* cur = (u8*)current;
+        u8* vir = (u8*)virgin;
+
+        /* Looks like we have not found any new bytes yet; see if any non-zero
+           bytes in current[] are pristine in virgin[]. */
+
+#ifdef __x86_64__
+
+        if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
+            (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) ||
+            (cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) ||
+            (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) ret = 2;
+        else ret = 1;
+
+#else
+
+        if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
+            (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) ret = 2;
+        else ret = 1;
+
+#endif /* ^__x86_64__ */
+
+      }
+
+      *virgin &= ~*current;
+
+    }
+
+    ++current;
+    ++virgin;
+
+  }
+
+  if (ret && virgin_map == virgin_bits) bitmap_changed = 1;
+
+  return ret;
+
+}
+
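+/* Illustrative sketch (not part of this commit) of the contract described
+   above: has_new_bits() clears every bit it reports from the virgin map, so
+   re-checking the same, unchanged trace is guaranteed to return 0. */
+
+static void example_has_new_bits_contract(void) {
+
+  u8 first  = has_new_bits(virgin_bits);   /* 0, 1 or 2 */
+  u8 second = has_new_bits(virgin_bits);   /* always 0: bits just cleared */
+
+  (void)first; (void)second;               /* sketch only; never called */
+
+}
+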
+
+/* Count the number of bits set in the provided bitmap. Used for the status
+   screen several times every second; does not have to be fast. */
+
+u32 count_bits(u8* mem) {
+
+  u32* ptr = (u32*)mem;
+  u32  i   = (MAP_SIZE >> 2);
+  u32  ret = 0;
+
+  while (i--) {
+
+    u32 v = *(ptr++);
+
+    /* This gets called on the inverse, virgin bitmap; optimize for sparse
+       data. */
+
+    if (v == 0xffffffff) {
+      ret += 32;
+      continue;
+    }
+
+    v -= ((v >> 1) & 0x55555555);
+    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
+    ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24;
+
+  }
+
+  return ret;
+
+}
+
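+/* Bit-twiddling note (illustrative, not part of this commit): the three
+   masked-shift lines above are the classic SWAR popcount - add adjacent bit
+   pairs, then nibbles, then multiply by 0x01010101 so the four byte sums
+   accumulate into the top byte. A naive per-bit reference that must agree
+   with count_bits() on any map: */
+
+static u32 naive_count_bits(u8* mem) {
+
+  u32 i, ret = 0;
+
+  for (i = 0; i < MAP_SIZE; i++) {
+
+    u8 v = mem[i];
+
+    while (v) { ret += v & 1; v >>= 1; }
+
+  }
+
+  return ret;
+
+}
+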
+
+#define FF(_b)  (0xff << ((_b) << 3))
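+/* FF(b) masks byte lane b of a u32: FF(0) == 0x000000ff, FF(1) == 0x0000ff00,
+   FF(2) == 0x00ff0000, FF(3) == 0xff000000. */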
+
+/* Count the number of bytes set in the bitmap. Called fairly sporadically,
+   mostly to update the status screen or calibrate and examine confirmed
+   new paths. */
+
+u32 count_bytes(u8* mem) {
+
+  u32* ptr = (u32*)mem;
+  u32  i   = (MAP_SIZE >> 2);
+  u32  ret = 0;
+
+  while (i--) {
+
+    u32 v = *(ptr++);
+
+    if (!v) continue;
+    if (v & FF(0)) ++ret;
+    if (v & FF(1)) ++ret;
+    if (v & FF(2)) ++ret;
+    if (v & FF(3)) ++ret;
+
+  }
+
+  return ret;
+
+}
+
+
+/* Count the number of non-255 bytes set in the bitmap. Used strictly for
+   the status screen; called several times per second or so. */
+
+u32 count_non_255_bytes(u8* mem) {
+
+  u32* ptr = (u32*)mem;
+  u32  i   = (MAP_SIZE >> 2);
+  u32  ret = 0;
+
+  while (i--) {
+
+    u32 v = *(ptr++);
+
+    /* This is called on the virgin bitmap, so optimize for the most likely
+       case. */
+
+    if (v == 0xffffffff) continue;
+    if ((v & FF(0)) != FF(0)) ++ret;
+    if ((v & FF(1)) != FF(1)) ++ret;
+    if ((v & FF(2)) != FF(2)) ++ret;
+    if ((v & FF(3)) != FF(3)) ++ret;
+
+  }
+
+  return ret;
+
+}
+
+
+/* Destructively simplify trace by eliminating hit count information
+   and replacing it with 0x80 or 0x01 depending on whether the tuple
+   is hit or not. Called on every new crash or timeout; should be
+   reasonably fast. */
+
+const u8 simplify_lookup[256] = { 
+
+  [0]         = 1,
+  [1 ... 255] = 128
+
+};
+
+#ifdef __x86_64__
+
+void simplify_trace(u64* mem) {
+
+  u32 i = MAP_SIZE >> 3;
+
+  while (i--) {
+
+    /* Optimize for sparse bitmaps. */
+
+    if (unlikely(*mem)) {
+
+      u8* mem8 = (u8*)mem;
+
+      mem8[0] = simplify_lookup[mem8[0]];
+      mem8[1] = simplify_lookup[mem8[1]];
+      mem8[2] = simplify_lookup[mem8[2]];
+      mem8[3] = simplify_lookup[mem8[3]];
+      mem8[4] = simplify_lookup[mem8[4]];
+      mem8[5] = simplify_lookup[mem8[5]];
+      mem8[6] = simplify_lookup[mem8[6]];
+      mem8[7] = simplify_lookup[mem8[7]];
+
+    } else *mem = 0x0101010101010101ULL;
+
+    ++mem;
+
+  }
+
+}
+
+#else
+
+void simplify_trace(u32* mem) {
+
+  u32 i = MAP_SIZE >> 2;
+
+  while (i--) {
+
+    /* Optimize for sparse bitmaps. */
+
+    if (unlikely(*mem)) {
+
+      u8* mem8 = (u8*)mem;
+
+      mem8[0] = simplify_lookup[mem8[0]];
+      mem8[1] = simplify_lookup[mem8[1]];
+      mem8[2] = simplify_lookup[mem8[2]];
+      mem8[3] = simplify_lookup[mem8[3]];
+
+    } else *mem = 0x01010101;
+
+    ++mem;
+  }
+
+}
+
+#endif /* ^__x86_64__ */
+
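+/* Worked example (illustrative, not part of this commit): a trace region of
+   { 0, 3, 200, 0 } simplifies to { 0x01, 0x80, 0x80, 0x01 }. Collapsing hit
+   counts to a hit/not-hit flag keeps crash and timeout deduplication from
+   treating mere loop-count differences as distinct findings. */
+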
+
+/* Destructively classify execution counts in a trace. This is used as a
+   preprocessing step for any newly acquired traces. Called on every exec,
+   must be fast. */
+
+static const u8 count_class_lookup8[256] = {
+
+  [0]           = 0,
+  [1]           = 1,
+  [2]           = 2,
+  [3]           = 4,
+  [4 ... 7]     = 8,
+  [8 ... 15]    = 16,
+  [16 ... 31]   = 32,
+  [32 ... 127]  = 64,
+  [128 ... 255] = 128
+
+};
+
+static u16 count_class_lookup16[65536];
+
+
+void init_count_class16(void) {
+
+  u32 b1, b2;
+
+  for (b1 = 0; b1 < 256; b1++) 
+    for (b2 = 0; b2 < 256; b2++)
+      count_class_lookup16[(b1 << 8) + b2] = 
+        (count_class_lookup8[b1] << 8) |
+        count_class_lookup8[b2];
+
+}
+
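+/* Worked example (illustrative, not part of this commit): the 16-bit table
+   bucketizes two adjacent map bytes per lookup, each byte independently, so
+   host byte order does not matter. Raw counts 3 and 4 pack into index 0x0304
+   and come back as 0x0408, i.e. 3 -> 4 and 4 -> 8 per count_class_lookup8[]: */
+
+static u16 example_classify_pair(void) {
+
+  init_count_class16();                  /* normally done once at startup */
+
+  return count_class_lookup16[0x0304];   /* == (4 << 8) | 8 == 0x0408 */
+
+}
+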
+
+#ifdef __x86_64__
+
+void classify_counts(u64* mem) {
+
+  u32 i = MAP_SIZE >> 3;
+
+  while (i--) {
+
+    /* Optimize for sparse bitmaps. */
+
+    if (unlikely(*mem)) {
+
+      u16* mem16 = (u16*)mem;
+
+      mem16[0] = count_class_lookup16[mem16[0]];
+      mem16[1] = count_class_lookup16[mem16[1]];
+      mem16[2] = count_class_lookup16[mem16[2]];
+      mem16[3] = count_class_lookup16[mem16[3]];
+
+    }
+
+    ++mem;
+
+  }
+
+}
+
+#else
+
+void classify_counts(u32* mem) {
+
+  u32 i = MAP_SIZE >> 2;
+
+  while (i--) {
+
+    /* Optimize for sparse bitmaps. */
+
+    if (unlikely(*mem)) {
+
+      u16* mem16 = (u16*)mem;
+
+      mem16[0] = count_class_lookup16[mem16[0]];
+      mem16[1] = count_class_lookup16[mem16[1]];
+
+    }
+
+    ++mem;
+
+  }
+
+}
+
+#endif /* ^__x86_64__ */
+
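+/* Rationale note (illustrative, not part of this commit): bucketing counts
+   into powers of two means a branch taken, say, 5 vs. 7 times lands in the
+   same class (8), so trivial loop-count jitter between runs is not reported
+   by has_new_bits() as new behavior. */
+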
+
+/* Compact trace bytes into a smaller bitmap. We effectively just drop the
+   count information here. This is called only sporadically, for some
+   new paths. */
+
+void minimize_bits(u8* dst, u8* src) {
+
+  u32 i = 0;
+
+  while (i < MAP_SIZE) {
+
+    if (*(src++)) dst[i >> 3] |= 1 << (i & 7);
+    ++i;
+
+  }
+
+}
+
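+/* Illustrative sketch (not part of this commit): dst is OR-ed into, never
+   cleared, so callers must pass zero-initialized memory of MAP_SIZE / 8
+   bytes; ck_alloc() zero-fills. The buffer name is hypothetical. */
+
+static void example_minimize(void) {
+
+  u8* compact = ck_alloc(MAP_SIZE >> 3);
+
+  minimize_bits(compact, trace_bits);
+
+  ck_free(compact);
+
+}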