diff --git a/qemu_mode/patches/afl-qemu-tcg-inl.h b/qemu_mode/patches/afl-qemu-tcg-inl.h
new file mode 100644
index 00000000..fab3d9e3
--- /dev/null
+++ b/qemu_mode/patches/afl-qemu-tcg-inl.h
@@ -0,0 +1,171 @@
+/*
+   american fuzzy lop - high-performance binary-only instrumentation
+   -----------------------------------------------------------------
+
+   Written by Andrew Griffiths <agriffiths@google.com> and
+              Michal Zalewski <lcamtuf@google.com>
+
+   Idea & design very much by Andrew Griffiths.
+
+   TCG instrumentation and block chaining support by Andrea Biondo
+                                      <andrea.biondo965@gmail.com>
+   QEMU 3.1.0 port and thread-safety by Andrea Fioraldi
+                                      <andreafioraldi@gmail.com>
+
+   Copyright 2015, 2016, 2017 Google Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This code is a shim patched into the separately-distributed source
+   code of QEMU 3.1.0. It leverages the built-in QEMU tracing functionality
+   to implement AFL-style instrumentation and to take care of the remaining
+   parts of the AFL fork server logic.
+
+   The resulting QEMU binary is essentially a standalone instrumentation
+   tool; for an example of how to leverage it for other purposes, you can
+   have a look at afl-showmap.c.
+
+ */
+
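+/* The instrumentation callback itself; it is defined in the accompanying
+   AFL patches (see afl-qemu-cpu-inl.h) and updates the coverage bitmap. */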
+void afl_maybe_log(void* cur_loc);
+
+/* Note: we convert the 64-bit args to 32-bit and do some alignment
+   and endian swap. Maybe it would be better to do the alignment
+   and endian swap in tcg_reg_alloc_call(). */
+void tcg_gen_afl_maybe_log_call(target_ulong cur_loc)
+{
+    int real_args, pi;
+    unsigned sizemask, flags;
+    TCGOp *op;
+
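+    /* Materialize cur_loc as a constant temp; it becomes the single
+       argument passed to afl_maybe_log. */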
+    TCGTemp *arg = tcgv_ptr_temp(tcg_const_tl(cur_loc));
+
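+    /* Describe the helper signature as tcg_gen_callN would: slot 0 encodes
+       the (void) return value, slot 1 the single pointer argument. */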
+    flags = 0;
+    sizemask = dh_sizemask(void, 0) | dh_sizemask(ptr, 1);
+
+#if defined(__sparc__) && !defined(__arch64__) \
+    && !defined(CONFIG_TCG_INTERPRETER)
+    /* We have 64-bit values in one register, but need to pass as two
+       separate parameters.  Split them.  */
+    int orig_sizemask = sizemask;
+    TCGTemp *split_args[MAX_OPC_PARAM];
+    TCGTemp **args = NULL; /* set to split_args if the argument is split */
+
+    if (sizemask != 0) {
+        real_args = 0;
+        int is_64bit = sizemask & (1 << 2);
+        if (is_64bit) {
+            TCGv_i64 orig = temp_tcgv_i64(arg);
+            TCGv_i32 h = tcg_temp_new_i32();
+            TCGv_i32 l = tcg_temp_new_i32();
+            tcg_gen_extr_i64_i32(l, h, orig);
+            split_args[real_args++] = tcgv_i32_temp(h);
+            split_args[real_args++] = tcgv_i32_temp(l);
+        } else {
+            split_args[real_args++] = arg;
+        }
+        args = split_args;
+        sizemask = 0;
+    }
+#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
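+    /* Hosts defining TCG_TARGET_EXTEND_ARGS want 32-bit values widened to
+       64 bits before the call, honoring the argument's signedness. */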
+    int ext_is_64bit = sizemask & (1 << 2);
+    int ext_is_signed = sizemask & (2 << 2);
+    if (!ext_is_64bit) {
+        TCGv_i64 temp = tcg_temp_new_i64();
+        TCGv_i64 orig = temp_tcgv_i64(arg);
+        if (ext_is_signed) {
+            tcg_gen_ext32s_i64(temp, orig);
+        } else {
+            tcg_gen_ext32u_i64(temp, orig);
+        }
+        arg = tcgv_i64_temp(temp);
+    }
+#endif /* TCG_TARGET_EXTEND_ARGS */
+
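+    /* Build the call op by hand, mirroring what tcg_gen_callN does for
+       registered helpers. */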
+    op = tcg_emit_op(INDEX_op_call);
+
+    pi = 0;
+
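+    /* A void helper has no output (return-value) call slots. */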
+    TCGOP_CALLO(op) = 0;
+
+    real_args = 0;
+    int is_64bit = sizemask & (1 << 2);
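+    /* On 32-bit hosts a 64-bit argument occupies two consecutive temps
+       and therefore two call slots. */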
+    if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
+        /* some targets want aligned 64 bit args */
+        if (real_args & 1) {
+            op->args[pi++] = TCG_CALL_DUMMY_ARG;
+            real_args++;
+        }
+#endif
+       /* If stack grows up, then we will be placing successive
+          arguments at lower addresses, which means we need to
+          reverse the order compared to how we would normally
+          treat either big or little-endian.  For those arguments
+          that will wind up in registers, this still works for
+          HPPA (the only current STACK_GROWSUP target) since the
+          argument registers are *also* allocated in decreasing
+          order.  If another such target is added, this logic may
+          have to get more complicated to differentiate between
+          stack arguments and register arguments.  */
+#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
+        op->args[pi++] = temp_arg(arg + 1);
+        op->args[pi++] = temp_arg(arg);
+#else
+        op->args[pi++] = temp_arg(arg);
+        op->args[pi++] = temp_arg(arg + 1);
+#endif
+        real_args += 2;
+    }
+
+    op->args[pi++] = temp_arg(arg);
+    real_args++;
+
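+    /* After the argument slots come the helper's address and the call
+       flags; the real argument count is recorded in TCGOP_CALLI. */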
+    op->args[pi++] = (uintptr_t)&afl_maybe_log;
+    op->args[pi++] = flags;
+    TCGOP_CALLI(op) = real_args;
+
+    /* Make sure the fields didn't overflow.  */
+    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
+    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
+
+#if defined(__sparc__) && !defined(__arch64__) \
+    && !defined(CONFIG_TCG_INTERPRETER)
+    /* Free all of the parts we allocated above.  */
+    real_args = 0;
+    int orig_is_64bit = orig_sizemask & (1 << 2);
+    if (orig_is_64bit) {
+        tcg_temp_free_internal(args[real_args++]);
+        tcg_temp_free_internal(args[real_args++]);
+    } else {
+        real_args++;
+    }
+    /* afl_maybe_log returns void, so orig_sizemask never has the return
+       bit set and there is no 32-bit return pair to reassemble here. */
+#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
+    if (!ext_is_64bit) {
+        tcg_temp_free_internal(arg);
+    }
+#endif /* TCG_TARGET_EXTEND_ARGS */
+}
+
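
For context, this entry point is driven once per translated basic block from
the translator-side patch. The sketch below shows how such a caller typically
looks; the names afl_gen_trace, afl_start_code, afl_end_code, afl_area_ptr
and MAP_SIZE follow the other AFL QEMU patches, and the body is illustrative
rather than a verbatim copy of the shipped code:

/* Called with the guest PC of each newly translated basic block. */
static void afl_gen_trace(target_ulong cur_loc) {

    /* Skip blocks outside the traced range, or runs without a shared map. */
    if (cur_loc > afl_end_code || cur_loc < afl_start_code || !afl_area_ptr)
        return;

    /* Classic AFL trick: scramble the PC into a bitmap index so that edges
       (prev_loc ^ cur_loc) spread evenly across the map. */
    cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
    cur_loc &= MAP_SIZE - 1;

    /* Emit the hand-built call op defined above into the current block. */
    tcg_gen_afl_maybe_log_call(cur_loc);
}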