path: root/qemu_mode/patches/afl-qemu-tcg-inl.h
author    Andrea Fioraldi <andreafioraldi@gmail.com>  2019-09-02 18:49:43 +0200
committer Andrea Fioraldi <andreafioraldi@gmail.com>  2019-09-02 18:49:43 +0200
commit    b24639d0113e15933e749ea0f96abe3f25a134a0 (patch)
tree      4272020625c80c0d6982d3787bebc573c0da01b8 /qemu_mode/patches/afl-qemu-tcg-inl.h
parent    2ae4ca91b48407add0e940ee13bd8b385e319a7a (diff)
download  afl++-b24639d0113e15933e749ea0f96abe3f25a134a0.tar.gz
run code formatter
Diffstat (limited to 'qemu_mode/patches/afl-qemu-tcg-inl.h')
-rw-r--r--  qemu_mode/patches/afl-qemu-tcg-inl.h  522
1 file changed, 295 insertions, 227 deletions
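
This commit only re-runs the project's code formatter over the two call-emission helpers that AFL++ patches into QEMU's TCG, tcg_gen_afl_maybe_log_call() and tcg_gen_afl_compcov_log_call(); the logic itself is unchanged. Both helpers hand-build an INDEX_op_call op and consult a sizemask word to decide, per argument slot, whether the value is 64-bit and whether it is signed. As a reading aid, below is a minimal, self-contained C sketch of that bit layout as inferred from the checks in this file (1 << n*2 for "64-bit", 2 << n*2 for "signed", slot 0 being the return value). The helper names are hypothetical, and the sketch assumes dh_sizemask(i64, n) sets only the 64-bit bit for slot n.

#include <stdio.h>

/* Hypothetical helpers mirroring the sizemask checks in this file:
   for argument slot n (0 = return value, 1..N = parameters),
   bit 2*n     means "this slot is 64-bit",
   bit 2*n + 1 means "this slot is signed".                            */
static unsigned slot_is_64bit(unsigned sizemask, int n)  { return sizemask & (1u << (n * 2)); }
static unsigned slot_is_signed(unsigned sizemask, int n) { return sizemask & (2u << (n * 2)); }

int main(void) {
  /* void f(i64, i64, i64): slot 0 is the void return, slots 1..3 are
     unsigned 64-bit parameters, matching the sizemask built in
     tcg_gen_afl_compcov_log_call() under the stated assumption.       */
  unsigned sizemask = (1u << 2) | (1u << 4) | (1u << 6);
  for (int n = 0; n <= 3; n++)
    printf("slot %d: 64bit=%d signed=%d\n", n,
           !!slot_is_64bit(sizemask, n), !!slot_is_signed(sizemask, n));
  return 0;
}
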
diff --git a/qemu_mode/patches/afl-qemu-tcg-inl.h b/qemu_mode/patches/afl-qemu-tcg-inl.h
index a9c53b8c..d53a1ccf 100644
--- a/qemu_mode/patches/afl-qemu-tcg-inl.h
+++ b/qemu_mode/patches/afl-qemu-tcg-inl.h
@@ -31,275 +31,343 @@
 
  */
 
-void afl_maybe_log(void* cur_loc);
+void afl_maybe_log(void *cur_loc);
 
 /* Note: we convert the 64 bit args to 32 bit and do some alignment
    and endian swap. Maybe it would be better to do the alignment
    and endian swap in tcg_reg_alloc_call(). */
-void tcg_gen_afl_maybe_log_call(target_ulong cur_loc)
-{
-    int real_args, pi;
-    unsigned sizemask, flags;
-    TCGOp *op;
-
-    TCGTemp *arg = tcgv_i64_temp( tcg_const_tl(cur_loc) );
-
-    flags = 0;
-    sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1);
-
-#if defined(__sparc__) && !defined(__arch64__) \
-    && !defined(CONFIG_TCG_INTERPRETER)
-    /* We have 64-bit values in one register, but need to pass as two
-       separate parameters.  Split them.  */
-    int orig_sizemask = sizemask;
-    TCGv_i64 retl, reth;
-    TCGTemp *split_args[MAX_OPC_PARAM];
-
-    retl = NULL;
-    reth = NULL;
-    if (sizemask != 0) {
-        real_args = 0;
-        int is_64bit = sizemask & (1 << 2);
-        if (is_64bit) {
-            TCGv_i64 orig = temp_tcgv_i64(arg);
-            TCGv_i32 h = tcg_temp_new_i32();
-            TCGv_i32 l = tcg_temp_new_i32();
-            tcg_gen_extr_i64_i32(l, h, orig);
-            split_args[real_args++] = tcgv_i32_temp(h);
-            split_args[real_args++] = tcgv_i32_temp(l);
-        } else {
-            split_args[real_args++] = arg;
-        }
-        nargs = real_args;
-        args = split_args;
-        sizemask = 0;
+void tcg_gen_afl_maybe_log_call(target_ulong cur_loc) {
+
+  int      real_args, pi;
+  unsigned sizemask, flags;
+  TCGOp *  op;
+
+  TCGTemp *arg = tcgv_i64_temp(tcg_const_tl(cur_loc));
+
+  flags = 0;
+  sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1);
+
+#if defined(__sparc__) && !defined(__arch64__) && \
+    !defined(CONFIG_TCG_INTERPRETER)
+  /* We have 64-bit values in one register, but need to pass as two
+     separate parameters.  Split them.  */
+  int      orig_sizemask = sizemask;
+  TCGv_i64 retl, reth;
+  TCGTemp *split_args[MAX_OPC_PARAM];
+
+  retl = NULL;
+  reth = NULL;
+  if (sizemask != 0) {
+
+    real_args = 0;
+    int is_64bit = sizemask & (1 << 2);
+    if (is_64bit) {
+
+      TCGv_i64 orig = temp_tcgv_i64(arg);
+      TCGv_i32 h = tcg_temp_new_i32();
+      TCGv_i32 l = tcg_temp_new_i32();
+      tcg_gen_extr_i64_i32(l, h, orig);
+      split_args[real_args++] = tcgv_i32_temp(h);
+      split_args[real_args++] = tcgv_i32_temp(l);
+
+    } else {
+
+      split_args[real_args++] = arg;
+
     }
+
+    nargs = real_args;
+    args = split_args;
+    sizemask = 0;
+
+  }
+
 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
-    int is_64bit = sizemask & (1 << 2);
-    int is_signed = sizemask & (2 << 2);
-    if (!is_64bit) {
-        TCGv_i64 temp = tcg_temp_new_i64();
-        TCGv_i64 orig = temp_tcgv_i64(arg);
-        if (is_signed) {
-            tcg_gen_ext32s_i64(temp, orig);
-        } else {
-            tcg_gen_ext32u_i64(temp, orig);
-        }
-        arg = tcgv_i64_temp(temp);
+  int is_64bit = sizemask & (1 << 2);
+  int is_signed = sizemask & (2 << 2);
+  if (!is_64bit) {
+
+    TCGv_i64 temp = tcg_temp_new_i64();
+    TCGv_i64 orig = temp_tcgv_i64(arg);
+    if (is_signed) {
+
+      tcg_gen_ext32s_i64(temp, orig);
+
+    } else {
+
+      tcg_gen_ext32u_i64(temp, orig);
+
     }
+
+    arg = tcgv_i64_temp(temp);
+
+  }
+
 #endif /* TCG_TARGET_EXTEND_ARGS */
 
-    op = tcg_emit_op(INDEX_op_call);
+  op = tcg_emit_op(INDEX_op_call);
 
-    pi = 0;
+  pi = 0;
 
-    TCGOP_CALLO(op) = 0;
+  TCGOP_CALLO(op) = 0;
+
+  real_args = 0;
+  int is_64bit = sizemask & (1 << 2);
+  if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
 
-    real_args = 0;
-    int is_64bit = sizemask & (1 << 2);
-    if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
-        /* some targets want aligned 64 bit args */
-        if (real_args & 1) {
-            op->args[pi++] = TCG_CALL_DUMMY_ARG;
-            real_args++;
-        }
+    /* some targets want aligned 64 bit args */
+    if (real_args & 1) {
+
+      op->args[pi++] = TCG_CALL_DUMMY_ARG;
+      real_args++;
+
+    }
+
 #endif
-       /* If stack grows up, then we will be placing successive
-          arguments at lower addresses, which means we need to
-          reverse the order compared to how we would normally
-          treat either big or little-endian.  For those arguments
-          that will wind up in registers, this still works for
-          HPPA (the only current STACK_GROWSUP target) since the
-          argument registers are *also* allocated in decreasing
-          order.  If another such target is added, this logic may
-          have to get more complicated to differentiate between
-          stack arguments and register arguments.  */
+    /* If stack grows up, then we will be placing successive
+       arguments at lower addresses, which means we need to
+       reverse the order compared to how we would normally
+       treat either big or little-endian.  For those arguments
+       that will wind up in registers, this still works for
+       HPPA (the only current STACK_GROWSUP target) since the
+       argument registers are *also* allocated in decreasing
+       order.  If another such target is added, this logic may
+       have to get more complicated to differentiate between
+       stack arguments and register arguments.  */
 #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
-        op->args[pi++] = temp_arg(arg + 1);
-        op->args[pi++] = temp_arg(arg);
+    op->args[pi++] = temp_arg(arg + 1);
+    op->args[pi++] = temp_arg(arg);
 #else
-        op->args[pi++] = temp_arg(arg);
-        op->args[pi++] = temp_arg(arg + 1);
+    op->args[pi++] = temp_arg(arg);
+    op->args[pi++] = temp_arg(arg + 1);
 #endif
-        real_args += 2;
-    }
+    real_args += 2;
+
+  }
+
+  op->args[pi++] = temp_arg(arg);
+  real_args++;
+
+  op->args[pi++] = (uintptr_t)&afl_maybe_log;
+  op->args[pi++] = flags;
+  TCGOP_CALLI(op) = real_args;
+
+  /* Make sure the fields didn't overflow.  */
+  tcg_debug_assert(TCGOP_CALLI(op) == real_args);
+  tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
+
+#if defined(__sparc__) && !defined(__arch64__) && \
+    !defined(CONFIG_TCG_INTERPRETER)
+  /* Free all of the parts we allocated above.  */
+  real_args = 0;
+  int is_64bit = orig_sizemask & (1 << 2);
+  if (is_64bit) {
+
+    tcg_temp_free_internal(args[real_args++]);
+    tcg_temp_free_internal(args[real_args++]);
+
+  } else {
 
-    op->args[pi++] = temp_arg(arg);
     real_args++;
 
-    op->args[pi++] = (uintptr_t)&afl_maybe_log;
-    op->args[pi++] = flags;
-    TCGOP_CALLI(op) = real_args;
+  }
 
-    /* Make sure the fields didn't overflow.  */
-    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
-    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
+  if (orig_sizemask & 1) {
+
+    /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
+       Note that describing these as TCGv_i64 eliminates an unnecessary
+       zero-extension that tcg_gen_concat_i32_i64 would create.  */
+    tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
+    tcg_temp_free_i64(retl);
+    tcg_temp_free_i64(reth);
+
+  }
 
-#if defined(__sparc__) && !defined(__arch64__) \
-    && !defined(CONFIG_TCG_INTERPRETER)
-    /* Free all of the parts we allocated above.  */
-    real_args = 0;
-    int is_64bit = orig_sizemask & (1 << 2);
-    if (is_64bit) {
-        tcg_temp_free_internal(args[real_args++]);
-        tcg_temp_free_internal(args[real_args++]);
-    } else {
-        real_args++;
-    }
-    if (orig_sizemask & 1) {
-        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
-           Note that describing these as TCGv_i64 eliminates an unnecessary
-           zero-extension that tcg_gen_concat_i32_i64 would create.  */
-        tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
-        tcg_temp_free_i64(retl);
-        tcg_temp_free_i64(reth);
-    }
 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
-    int is_64bit = sizemask & (1 << 2);
-    if (!is_64bit) {
-        tcg_temp_free_internal(arg);
-    }
+  int is_64bit = sizemask & (1 << 2);
+  if (!is_64bit) { tcg_temp_free_internal(arg); }
 #endif /* TCG_TARGET_EXTEND_ARGS */
+
 }
 
-void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2)
-{
-    int i, real_args, nb_rets, pi;
-    unsigned sizemask, flags;
-    TCGOp *op;
-
-    const int nargs = 3;
-    TCGTemp *args[3] = { tcgv_i64_temp( tcg_const_tl(cur_loc) ),
-                         tcgv_i64_temp(arg1),
-                         tcgv_i64_temp(arg2) };
-
-    flags = 0;
-    sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1) |
-               dh_sizemask(i64, 2) | dh_sizemask(i64, 3);
-
-#if defined(__sparc__) && !defined(__arch64__) \
-    && !defined(CONFIG_TCG_INTERPRETER)
-    /* We have 64-bit values in one register, but need to pass as two
-       separate parameters.  Split them.  */
-    int orig_sizemask = sizemask;
-    int orig_nargs = nargs;
-    TCGv_i64 retl, reth;
-    TCGTemp *split_args[MAX_OPC_PARAM];
-
-    retl = NULL;
-    reth = NULL;
-    if (sizemask != 0) {
-        for (i = real_args = 0; i < nargs; ++i) {
-            int is_64bit = sizemask & (1 << (i+1)*2);
-            if (is_64bit) {
-                TCGv_i64 orig = temp_tcgv_i64(args[i]);
-                TCGv_i32 h = tcg_temp_new_i32();
-                TCGv_i32 l = tcg_temp_new_i32();
-                tcg_gen_extr_i64_i32(l, h, orig);
-                split_args[real_args++] = tcgv_i32_temp(h);
-                split_args[real_args++] = tcgv_i32_temp(l);
-            } else {
-                split_args[real_args++] = args[i];
-            }
-        }
-        nargs = real_args;
-        args = split_args;
-        sizemask = 0;
+void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
+                                  TCGv_i64 arg1, TCGv_i64 arg2) {
+
+  int      i, real_args, nb_rets, pi;
+  unsigned sizemask, flags;
+  TCGOp *  op;
+
+  const int nargs = 3;
+  TCGTemp *args[3] = {tcgv_i64_temp(tcg_const_tl(cur_loc)), tcgv_i64_temp(arg1),
+                      tcgv_i64_temp(arg2)};
+
+  flags = 0;
+  sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1) | dh_sizemask(i64, 2) |
+             dh_sizemask(i64, 3);
+
+#if defined(__sparc__) && !defined(__arch64__) && \
+    !defined(CONFIG_TCG_INTERPRETER)
+  /* We have 64-bit values in one register, but need to pass as two
+     separate parameters.  Split them.  */
+  int      orig_sizemask = sizemask;
+  int      orig_nargs = nargs;
+  TCGv_i64 retl, reth;
+  TCGTemp *split_args[MAX_OPC_PARAM];
+
+  retl = NULL;
+  reth = NULL;
+  if (sizemask != 0) {
+
+    for (i = real_args = 0; i < nargs; ++i) {
+
+      int is_64bit = sizemask & (1 << (i + 1) * 2);
+      if (is_64bit) {
+
+        TCGv_i64 orig = temp_tcgv_i64(args[i]);
+        TCGv_i32 h = tcg_temp_new_i32();
+        TCGv_i32 l = tcg_temp_new_i32();
+        tcg_gen_extr_i64_i32(l, h, orig);
+        split_args[real_args++] = tcgv_i32_temp(h);
+        split_args[real_args++] = tcgv_i32_temp(l);
+
+      } else {
+
+        split_args[real_args++] = args[i];
+
+      }
+
     }
+
+    nargs = real_args;
+    args = split_args;
+    sizemask = 0;
+
+  }
+
 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
-    for (i = 0; i < nargs; ++i) {
-        int is_64bit = sizemask & (1 << (i+1)*2);
-        int is_signed = sizemask & (2 << (i+1)*2);
-        if (!is_64bit) {
-            TCGv_i64 temp = tcg_temp_new_i64();
-            TCGv_i64 orig = temp_tcgv_i64(args[i]);
-            if (is_signed) {
-                tcg_gen_ext32s_i64(temp, orig);
-            } else {
-                tcg_gen_ext32u_i64(temp, orig);
-            }
-            args[i] = tcgv_i64_temp(temp);
-        }
+  for (i = 0; i < nargs; ++i) {
+
+    int is_64bit = sizemask & (1 << (i + 1) * 2);
+    int is_signed = sizemask & (2 << (i + 1) * 2);
+    if (!is_64bit) {
+
+      TCGv_i64 temp = tcg_temp_new_i64();
+      TCGv_i64 orig = temp_tcgv_i64(args[i]);
+      if (is_signed) {
+
+        tcg_gen_ext32s_i64(temp, orig);
+
+      } else {
+
+        tcg_gen_ext32u_i64(temp, orig);
+
+      }
+
+      args[i] = tcgv_i64_temp(temp);
+
     }
+
+  }
+
 #endif /* TCG_TARGET_EXTEND_ARGS */
 
-    op = tcg_emit_op(INDEX_op_call);
+  op = tcg_emit_op(INDEX_op_call);
 
-    pi = 0;
-    nb_rets = 0;
-    TCGOP_CALLO(op) = nb_rets;
+  pi = 0;
+  nb_rets = 0;
+  TCGOP_CALLO(op) = nb_rets;
+
+  real_args = 0;
+  for (i = 0; i < nargs; i++) {
+
+    int is_64bit = sizemask & (1 << (i + 1) * 2);
+    if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
 
-    real_args = 0;
-    for (i = 0; i < nargs; i++) {
-        int is_64bit = sizemask & (1 << (i+1)*2);
-        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
-            /* some targets want aligned 64 bit args */
-            if (real_args & 1) {
-                op->args[pi++] = TCG_CALL_DUMMY_ARG;
-                real_args++;
-            }
+      /* some targets want aligned 64 bit args */
+      if (real_args & 1) {
+
+        op->args[pi++] = TCG_CALL_DUMMY_ARG;
+        real_args++;
+
+      }
+
 #endif
-           /* If stack grows up, then we will be placing successive
-              arguments at lower addresses, which means we need to
-              reverse the order compared to how we would normally
-              treat either big or little-endian.  For those arguments
-              that will wind up in registers, this still works for
-              HPPA (the only current STACK_GROWSUP target) since the
-              argument registers are *also* allocated in decreasing
-              order.  If another such target is added, this logic may
-              have to get more complicated to differentiate between
-              stack arguments and register arguments.  */
+      /* If stack grows up, then we will be placing successive
+         arguments at lower addresses, which means we need to
+         reverse the order compared to how we would normally
+         treat either big or little-endian.  For those arguments
+         that will wind up in registers, this still works for
+         HPPA (the only current STACK_GROWSUP target) since the
+         argument registers are *also* allocated in decreasing
+         order.  If another such target is added, this logic may
+         have to get more complicated to differentiate between
+         stack arguments and register arguments.  */
 #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
-            op->args[pi++] = temp_arg(args[i] + 1);
-            op->args[pi++] = temp_arg(args[i]);
+      op->args[pi++] = temp_arg(args[i] + 1);
+      op->args[pi++] = temp_arg(args[i]);
 #else
-            op->args[pi++] = temp_arg(args[i]);
-            op->args[pi++] = temp_arg(args[i] + 1);
+      op->args[pi++] = temp_arg(args[i]);
+      op->args[pi++] = temp_arg(args[i] + 1);
 #endif
-            real_args += 2;
-            continue;
-        }
+      real_args += 2;
+      continue;
 
-        op->args[pi++] = temp_arg(args[i]);
-        real_args++;
-    }
-    op->args[pi++] = (uintptr_t)func;
-    op->args[pi++] = flags;
-    TCGOP_CALLI(op) = real_args;
-
-    /* Make sure the fields didn't overflow.  */
-    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
-    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
-
-#if defined(__sparc__) && !defined(__arch64__) \
-    && !defined(CONFIG_TCG_INTERPRETER)
-    /* Free all of the parts we allocated above.  */
-    for (i = real_args = 0; i < orig_nargs; ++i) {
-        int is_64bit = orig_sizemask & (1 << (i+1)*2);
-        if (is_64bit) {
-            tcg_temp_free_internal(args[real_args++]);
-            tcg_temp_free_internal(args[real_args++]);
-        } else {
-            real_args++;
-        }
     }
-    if (orig_sizemask & 1) {
-        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
-           Note that describing these as TCGv_i64 eliminates an unnecessary
-           zero-extension that tcg_gen_concat_i32_i64 would create.  */
-        tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
-        tcg_temp_free_i64(retl);
-        tcg_temp_free_i64(reth);
+
+    op->args[pi++] = temp_arg(args[i]);
+    real_args++;
+
+  }
+
+  op->args[pi++] = (uintptr_t)func;
+  op->args[pi++] = flags;
+  TCGOP_CALLI(op) = real_args;
+
+  /* Make sure the fields didn't overflow.  */
+  tcg_debug_assert(TCGOP_CALLI(op) == real_args);
+  tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
+
+#if defined(__sparc__) && !defined(__arch64__) && \
+    !defined(CONFIG_TCG_INTERPRETER)
+  /* Free all of the parts we allocated above.  */
+  for (i = real_args = 0; i < orig_nargs; ++i) {
+
+    int is_64bit = orig_sizemask & (1 << (i + 1) * 2);
+    if (is_64bit) {
+
+      tcg_temp_free_internal(args[real_args++]);
+      tcg_temp_free_internal(args[real_args++]);
+
+    } else {
+
+      real_args++;
+
     }
+
+  }
+
+  if (orig_sizemask & 1) {
+
+    /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
+       Note that describing these as TCGv_i64 eliminates an unnecessary
+       zero-extension that tcg_gen_concat_i32_i64 would create.  */
+    tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
+    tcg_temp_free_i64(retl);
+    tcg_temp_free_i64(reth);
+
+  }
+
 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
-    for (i = 0; i < nargs; ++i) {
-        int is_64bit = sizemask & (1 << (i+1)*2);
-        if (!is_64bit) {
-            tcg_temp_free_internal(args[i]);
-        }
-    }
+  for (i = 0; i < nargs; ++i) {
+
+    int is_64bit = sizemask & (1 << (i + 1) * 2);
+    if (!is_64bit) { tcg_temp_free_internal(args[i]); }
+
+  }
+
 #endif /* TCG_TARGET_EXTEND_ARGS */
+
 }
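
Beyond the formatting changes, both helpers keep the upstream tcg_gen_callN() handling for 32-bit hosts: when TCG_TARGET_REG_BITS < 64, a 64-bit argument is emitted as two 32-bit call slots, optionally padded to an even slot when TCG_TARGET_CALL_ALIGN_ARGS is defined, and the two halves are emitted in reversed order when HOST_WORDS_BIGENDIAN and TCG_TARGET_STACK_GROWSUP disagree. A standalone C sketch of that decision follows, for illustration only; the slot array and function are hypothetical, and only the macro names are taken from the diff.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: when TCG_TARGET_REG_BITS < 64, the helpers above
   turn one 64-bit argument into two 32-bit call slots.  Whether a dummy
   slot is inserted first (TCG_TARGET_CALL_ALIGN_ARGS) and which half
   goes first (HOST_WORDS_BIGENDIAN vs TCG_TARGET_STACK_GROWSUP) is
   host-dependent; both knobs are plain parameters here.               */
static int emit_i64_arg(uint32_t *slots, int n_slots, uint64_t v,
                        int align_args, int reverse_order) {
  uint32_t lo = (uint32_t)v, hi = (uint32_t)(v >> 32);
  if (align_args && (n_slots & 1))
    slots[n_slots++] = 0;  /* padding slot, like TCG_CALL_DUMMY_ARG */
  slots[n_slots++] = reverse_order ? hi : lo;
  slots[n_slots++] = reverse_order ? lo : hi;
  return n_slots;
}

int main(void) {
  uint32_t slots[8] = {0};
  /* One prior 32-bit argument already occupies slot 0, so an aligned
     64-bit argument gets a padding slot before its two halves.        */
  int n = 1;
  n = emit_i64_arg(slots, n, 0x1122334455667788ULL, /*align*/ 1, /*rev*/ 1);
  for (int i = 0; i < n; i++)
    printf("slot %d: 0x%08x\n", i, slots[i]);
  return 0;
}
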