author     Andrea Fioraldi <andreafioraldi@gmail.com>  2019-09-12 12:34:53 +0200
committer  Andrea Fioraldi <andreafioraldi@gmail.com>  2019-09-12 12:34:53 +0200
commit     6b40189045645938098772260ecda1c0bcbf6467 (patch)
tree       4e4c012155e4c7f78b4b9d93553f419bf9736fcc /qemu_mode/patches/afl-qemu-tcg-inl.h
parent     df379dfcf46941125bc6b8f9d3e2e1141b84e137 (diff)
download   afl++-6b40189045645938098772260ecda1c0bcbf6467.tar.gz
first version of persistent QEMU
Diffstat (limited to 'qemu_mode/patches/afl-qemu-tcg-inl.h')
-rw-r--r--  qemu_mode/patches/afl-qemu-tcg-inl.h  180
1 file changed, 180 insertions, 0 deletions
diff --git a/qemu_mode/patches/afl-qemu-tcg-inl.h b/qemu_mode/patches/afl-qemu-tcg-inl.h
index d45ffac9..2a0ddee1 100644
--- a/qemu_mode/patches/afl-qemu-tcg-inl.h
+++ b/qemu_mode/patches/afl-qemu-tcg-inl.h
@@ -191,6 +191,186 @@ void tcg_gen_afl_maybe_log_call(target_ulong cur_loc) {
 
 }
 
+/* Note: we convert the 64 bit args to 32 bit and do some alignment
+   and endian swap. Maybe it would be better to do the alignment
+   and endian swap in tcg_reg_alloc_call(). */
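+/* The note above is inherited from QEMU's tcg_gen_callN(); with nargs
+   fixed at 0, the argument-conversion paths below are compiled in but
+   never taken at runtime. */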
+void tcg_gen_afl_call0(void *func) {
+
+  int      i, real_args, nb_rets, pi;
+  unsigned sizemask, flags;
+  TCGOp *  op;
+
+  int       nargs = 0;
+  TCGTemp **args = NULL;
+
+  flags = 0;
+  sizemask = dh_sizemask(void, 0);
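+  /* Per QEMU's helper convention, sizemask packs two bits per value:
+     bits 0-1 describe the return value, and bits 2*(i+1) and 2*(i+1)+1
+     describe argument i ("is 64 bit" and "is signed").  For a void
+     helper with no arguments, dh_sizemask(void, 0) is simply 0. */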
+
+#if defined(__sparc__) && !defined(__arch64__) && \
+    !defined(CONFIG_TCG_INTERPRETER)
+  /* We have 64-bit values in one register, but need to pass as two
+     separate parameters.  Split them.  */
+  int      orig_sizemask = sizemask;
+  int      orig_nargs = nargs;
+  TCGv_i64 retl, reth;
+  TCGTemp *split_args[MAX_OPC_PARAM];
+
+  retl = NULL;
+  reth = NULL;
+  if (sizemask != 0) {
+
+    for (i = real_args = 0; i < nargs; ++i) {
+
+      int is_64bit = sizemask & (1 << (i + 1) * 2);
+      if (is_64bit) {
+
+        TCGv_i64 orig = temp_tcgv_i64(args[i]);
+        TCGv_i32 h = tcg_temp_new_i32();
+        TCGv_i32 l = tcg_temp_new_i32();
+        tcg_gen_extr_i64_i32(l, h, orig);
+        split_args[real_args++] = tcgv_i32_temp(h);
+        split_args[real_args++] = tcgv_i32_temp(l);
+
+      } else {
+
+        split_args[real_args++] = args[i];
+
+      }
+
+    }
+
+    nargs = real_args;
+    args = split_args;
+    sizemask = 0;
+
+  }
+
+#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
+  for (i = 0; i < nargs; ++i) {
+
+    int is_64bit = sizemask & (1 << (i + 1) * 2);
+    int is_signed = sizemask & (2 << (i + 1) * 2);
+    if (!is_64bit) {
+
+      TCGv_i64 temp = tcg_temp_new_i64();
+      TCGv_i64 orig = temp_tcgv_i64(args[i]);
+      if (is_signed) {
+
+        tcg_gen_ext32s_i64(temp, orig);
+
+      } else {
+
+        tcg_gen_ext32u_i64(temp, orig);
+
+      }
+
+      args[i] = tcgv_i64_temp(temp);
+
+    }
+
+  }
+
+#endif                                            /* TCG_TARGET_EXTEND_ARGS */
+
+  op = tcg_emit_op(INDEX_op_call);
+
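+  /* This variant calls a void helper, so the call op carries no
+     output arguments (nb_rets stays 0). */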
+  pi = 0;
+  nb_rets = 0;
+  TCGOP_CALLO(op) = nb_rets;
+
+  real_args = 0;
+  for (i = 0; i < nargs; i++) {
+
+    int is_64bit = sizemask & (1 << (i + 1) * 2);
+    if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
+
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
+      /* some targets want aligned 64 bit args */
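+      /* e.g. 32-bit ARM expects an i64 argument in an even/odd register
+         pair, so a dummy argument pads real_args to an even count. */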
+      if (real_args & 1) {
+
+        op->args[pi++] = TCG_CALL_DUMMY_ARG;
+        real_args++;
+
+      }
+
+#endif
+      /* If stack grows up, then we will be placing successive
+         arguments at lower addresses, which means we need to
+         reverse the order compared to how we would normally
+         treat either big or little-endian.  For those arguments
+         that will wind up in registers, this still works for
+         HPPA (the only current STACK_GROWSUP target) since the
+         argument registers are *also* allocated in decreasing
+         order.  If another such target is added, this logic may
+         have to get more complicated to differentiate between
+         stack arguments and register arguments.  */
+#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
+      op->args[pi++] = temp_arg(args[i] + 1);
+      op->args[pi++] = temp_arg(args[i]);
+#else
+      op->args[pi++] = temp_arg(args[i]);
+      op->args[pi++] = temp_arg(args[i] + 1);
+#endif
+      real_args += 2;
+      continue;
+
+    }
+
+    op->args[pi++] = temp_arg(args[i]);
+    real_args++;
+
+  }
+
+  op->args[pi++] = (uintptr_t)func;
+  op->args[pi++] = flags;
+  TCGOP_CALLI(op) = real_args;
+
+  /* Make sure the fields didn't overflow: TCGOP_CALLI() and
+     TCGOP_CALLO() are narrow bitfields in TCGOp, so the assignments
+     above could otherwise truncate silently.  */
+  tcg_debug_assert(TCGOP_CALLI(op) == real_args);
+  tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
+
+#if defined(__sparc__) && !defined(__arch64__) && \
+    !defined(CONFIG_TCG_INTERPRETER)
+  /* Free all of the parts we allocated above.  */
+  for (i = real_args = 0; i < orig_nargs; ++i) {
+
+    int is_64bit = orig_sizemask & (1 << (i + 1) * 2);
+    if (is_64bit) {
+
+      tcg_temp_free_internal(args[real_args++]);
+      tcg_temp_free_internal(args[real_args++]);
+
+    } else {
+
+      real_args++;
+
+    }
+
+  }
+
+  if (orig_sizemask & 1) {
+
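+    /* Unreachable in this zero-return variant: sizemask is
+       dh_sizemask(void, 0) == 0, so bit 0 is never set and the NULL
+       return temp below is never touched.  Kept for symmetry with
+       tcg_gen_callN(). */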
+    /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
+       Note that describing these as TCGv_i64 eliminates an unnecessary
+       zero-extension that tcg_gen_concat_i32_i64 would create.  */
+    tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
+    tcg_temp_free_i64(retl);
+    tcg_temp_free_i64(reth);
+
+  }
+
+#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
+  for (i = 0; i < nargs; ++i) {
+
+    int is_64bit = sizemask & (1 << (i + 1) * 2);
+    if (!is_64bit) { tcg_temp_free_internal(args[i]); }
+
+  }
+
+#endif                                            /* TCG_TARGET_EXTEND_ARGS */
+
+}
+
 void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
                                   TCGv_i64 arg1, TCGv_i64 arg2) {
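For context, a minimal sketch of how this zero-argument variant is meant to be used: emitting a call to a void(void) C hook from the translator. The hook name afl_persistent_loop follows the other qemu_mode persistent-mode patches; the exact call site below is an illustrative assumption, not part of this diff.

/* Illustrative only: emit a TCG call to a void(void) hook.  The hook
   name afl_persistent_loop is assumed to be declared elsewhere in the
   qemu_mode patches. */
extern void afl_persistent_loop(void);

static void afl_gen_persistent_hook(void) {

  /* Builds an INDEX_op_call op with no input arguments and no return
     value, as constructed by tcg_gen_afl_call0() above. */
  tcg_gen_afl_call0(&afl_persistent_loop);

}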