Diffstat (limited to 'qemu_mode/patches')
-rw-r--r--  qemu_mode/patches/afl-qemu-common.h              20
-rw-r--r--  qemu_mode/patches/afl-qemu-cpu-inl.h            164
-rw-r--r--  qemu_mode/patches/afl-qemu-cpu-translate-inl.h   84
-rw-r--r--  qemu_mode/patches/afl-qemu-tcg-inl.h            522
-rw-r--r--  qemu_mode/patches/afl-qemu-translate-inl.h       13
5 files changed, 450 insertions(+), 353 deletions(-)
diff --git a/qemu_mode/patches/afl-qemu-common.h b/qemu_mode/patches/afl-qemu-common.h
index c475cb58..c87bacb6 100644
--- a/qemu_mode/patches/afl-qemu-common.h
+++ b/qemu_mode/patches/afl-qemu-common.h
@@ -33,19 +33,17 @@
 
 #include "../../config.h"
 
-/* NeverZero */ 
+/* NeverZero */
 
 #if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO)
-#  define INC_AFL_AREA(loc) \
-    asm volatile ( \
-      "incb (%0, %1, 1)\n" \
-      "adcb $0, (%0, %1, 1)\n" \
-      : /* no out */ \
-      : "r" (afl_area_ptr), "r" (loc) \
-      : "memory", "eax" \
-    )
+#  define INC_AFL_AREA(loc)         \
+  asm volatile(                     \
+      "incb (%0, %1, 1)\n"          \
+      "adcb $0, (%0, %1, 1)\n"      \
+      : /* no out */                \
+      : "r"(afl_area_ptr), "r"(loc) \
+      : "memory", "eax")
 #else
-#  define INC_AFL_AREA(loc) \
-  afl_area_ptr[loc]++
+#  define INC_AFL_AREA(loc) afl_area_ptr[loc]++
 #endif
 
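The incb/adcb pair above is the "NeverZero" trick: the map byte is
incremented, and the carry of an 0xff -> 0x00 overflow is immediately
added back in, so a counter that has ever been hit can never read as
zero again. A minimal C sketch of the same semantics (illustrative
only; the patch uses inline asm for speed, and the real MAP_SIZE comes
from config.h):

  #include <stdint.h>
  #include <stddef.h>

  #define MAP_SIZE 65536                /* illustrative value */

  static uint8_t afl_area[MAP_SIZE];

  static inline void inc_afl_area(size_t loc) {

    uint16_t v = (uint16_t)afl_area[loc] + 1;
    afl_area[loc] = (uint8_t)(v + (v >> 8));  /* v >> 8 is the carry */

  }
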
diff --git a/qemu_mode/patches/afl-qemu-cpu-inl.h b/qemu_mode/patches/afl-qemu-cpu-inl.h
index 4ad31b60..2a1331cb 100644
--- a/qemu_mode/patches/afl-qemu-cpu-inl.h
+++ b/qemu_mode/patches/afl-qemu-cpu-inl.h
@@ -42,11 +42,16 @@
    _start and does the usual forkserver stuff, not very different from
    regular instrumentation injected via afl-as.h. */
 
-#define AFL_QEMU_CPU_SNIPPET2 do { \
-    if(itb->pc == afl_entry_point) { \
-      afl_setup(); \
-      afl_forkserver(cpu); \
-    } \
+#define AFL_QEMU_CPU_SNIPPET2         \
+  do {                                \
+                                      \
+    if (itb->pc == afl_entry_point) { \
+                                      \
+      afl_setup();                    \
+      afl_forkserver(cpu);            \
+                                      \
+    }                                 \
+                                      \
   } while (0)
 
 /* We use one additional file descriptor to relay "needs translation"
@@ -56,60 +61,71 @@
 
 /* This is equivalent to afl-as.h: */
 
-static unsigned char dummy[MAP_SIZE]; /* costs MAP_SIZE but saves a few instructions */
-unsigned char *afl_area_ptr = dummy; /* Exported for afl_gen_trace */
+static unsigned char
+               dummy[MAP_SIZE]; /* costs MAP_SIZE but saves a few instructions */
+unsigned char *afl_area_ptr = dummy;          /* Exported for afl_gen_trace */
 
 /* Exported variables populated by the code patched into elfload.c: */
 
-abi_ulong afl_entry_point, /* ELF entry point (_start) */
-          afl_start_code,  /* .text start pointer      */
-          afl_end_code;    /* .text end pointer        */
+abi_ulong afl_entry_point,                      /* ELF entry point (_start) */
+    afl_start_code,                             /* .text start pointer      */
+    afl_end_code;                               /* .text end pointer        */
 
 u8 afl_compcov_level;
 
 /* Set in the child process in forkserver mode: */
 
-static int forkserver_installed = 0;
+static int           forkserver_installed = 0;
 static unsigned char afl_fork_child;
-unsigned int afl_forksrv_pid;
+unsigned int         afl_forksrv_pid;
 
 /* Instrumentation ratio: */
 
-unsigned int afl_inst_rms = MAP_SIZE; /* Exported for afl_gen_trace */
+unsigned int afl_inst_rms = MAP_SIZE;         /* Exported for afl_gen_trace */
 
 /* Function declarations. */
 
 static void afl_setup(void);
-static void afl_forkserver(CPUState*);
+static void afl_forkserver(CPUState *);
 
-static void afl_wait_tsl(CPUState*, int);
-static void afl_request_tsl(target_ulong, target_ulong, uint32_t, uint32_t, TranslationBlock*, int);
+static void afl_wait_tsl(CPUState *, int);
+static void afl_request_tsl(target_ulong, target_ulong, uint32_t, uint32_t,
+                            TranslationBlock *, int);
 
 /* Data structures passed around by the translate handlers: */
 
 struct afl_tb {
+
   target_ulong pc;
   target_ulong cs_base;
-  uint32_t flags;
-  uint32_t cf_mask;
+  uint32_t     flags;
+  uint32_t     cf_mask;
+
 };
 
 struct afl_tsl {
+
   struct afl_tb tb;
-  char is_chain;
+  char          is_chain;
+
 };
 
 struct afl_chain {
+
   struct afl_tb last_tb;
-  uint32_t cf_mask;
-  int tb_exit;
+  uint32_t      cf_mask;
+  int           tb_exit;
+
 };
 
 /* Some forward decls: */
 
-TranslationBlock *tb_htable_lookup(CPUState*, target_ulong, target_ulong, uint32_t, uint32_t);
-static inline TranslationBlock *tb_find(CPUState*, TranslationBlock*, int, uint32_t);
-static inline void tb_add_jump(TranslationBlock *tb, int n, TranslationBlock *tb_next);
+TranslationBlock *tb_htable_lookup(CPUState *, target_ulong, target_ulong,
+                                   uint32_t, uint32_t);
+static inline TranslationBlock *tb_find(CPUState *, TranslationBlock *, int,
+                                        uint32_t);
+static inline void              tb_add_jump(TranslationBlock *tb, int n,
+                                            TranslationBlock *tb_next);
 
 /*************************
  * ACTUAL IMPLEMENTATION *
@@ -119,8 +135,7 @@ static inline void tb_add_jump(TranslationBlock *tb, int n, TranslationBlock *tb
 
 static void afl_setup(void) {
 
-  char *id_str = getenv(SHM_ENV_VAR),
-       *inst_r = getenv("AFL_INST_RATIO");
+  char *id_str = getenv(SHM_ENV_VAR), *inst_r = getenv("AFL_INST_RATIO");
 
   int shm_id;
 
@@ -142,7 +157,7 @@ static void afl_setup(void) {
     shm_id = atoi(id_str);
     afl_area_ptr = shmat(shm_id, NULL, 0);
 
-    if (afl_area_ptr == (void*)-1) exit(1);
+    if (afl_area_ptr == (void *)-1) exit(1);
 
     /* With AFL_INST_RATIO set to a low value, we want to touch the bitmap
        so that the parent doesn't give up on us. */
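(The touch mentioned in this comment matters because afl-fuzz treats an
all-zero bitmap after the first run as a sign that the target is not
instrumented.) A condensed sketch of the setup flow around this hunk;
the final touch line reflects how afl_setup() handles low ratios and is
an assumption here, since it lies outside the displayed context:

  char *id_str = getenv(SHM_ENV_VAR);       /* set by afl-fuzz */
  char *inst_r = getenv("AFL_INST_RATIO");

  if (id_str) {

    int shm_id = atoi(id_str);
    afl_area_ptr = shmat(shm_id, NULL, 0);  /* attach coverage bitmap */
    if (afl_area_ptr == (void *)-1) exit(1);

    /* Assumed from the surrounding file: touch the bitmap so the
       parent doesn't give up on us at low AFL_INST_RATIO values. */
    if (inst_r) afl_area_ptr[0] = 1;

  }
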
@@ -154,18 +169,16 @@ static void afl_setup(void) {
   if (getenv("AFL_INST_LIBS")) {
 
     afl_start_code = 0;
-    afl_end_code   = (abi_ulong)-1;
+    afl_end_code = (abi_ulong)-1;
 
   }
-  
-  /* Maintain for compatibility */
-  if (getenv("AFL_QEMU_COMPCOV")) {
 
-    afl_compcov_level = 1;
-  }
+  /* Maintain for compatibility */
+  if (getenv("AFL_QEMU_COMPCOV")) { afl_compcov_level = 1; }
   if (getenv("AFL_COMPCOV_LEVEL")) {
 
     afl_compcov_level = atoi(getenv("AFL_COMPCOV_LEVEL"));
+
   }
 
   /* pthread_atfork() seems somewhat broken in util/rcu.c, and I'm
@@ -176,17 +189,15 @@ static void afl_setup(void) {
 
 }
 
-
 /* Fork server logic, invoked once we hit _start. */
 
 static void afl_forkserver(CPUState *cpu) {
 
   static unsigned char tmp[4];
 
-  if (forkserver_installed == 1)
-    return;
+  if (forkserver_installed == 1) return;
   forkserver_installed = 1;
-  //if (!afl_area_ptr) return; // not necessary because of fixed dummy buffer
+  // if (!afl_area_ptr) return; // not necessary because of fixed dummy buffer
 
   /* Tell the parent that we're alive. If the parent doesn't want
      to talk, assume that we're not running in forkserver mode. */
@@ -200,7 +211,7 @@ static void afl_forkserver(CPUState *cpu) {
   while (1) {
 
     pid_t child_pid;
-    int status, t_fd[2];
+    int   status, t_fd[2];
 
     /* Whoops, parent dead? */
 
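For reference, the loop whose declarations are touched here (its body
lies mostly outside the displayed context) speaks the standard AFL
forkserver protocol over two inherited descriptors: FORKSRV_FD carries
"go" commands from afl-fuzz, and FORKSRV_FD + 1 carries liveness, child
pids and exit statuses back. A compressed sketch of the protocol, with
the TSL pipe and error handling elided (exit codes illustrative):

  static unsigned char tmp[4];
  int status;

  if (write(FORKSRV_FD + 1, tmp, 4) != 4) return;   /* "I'm alive" */

  while (1) {

    pid_t child_pid;

    if (read(FORKSRV_FD, tmp, 4) != 4) exit(2);     /* wait for "go" */

    child_pid = fork();
    if (!child_pid) break;                          /* child runs target */

    if (write(FORKSRV_FD + 1, &child_pid, 4) != 4) exit(5);
    if (waitpid(child_pid, &status, 0) < 0) exit(6);
    if (write(FORKSRV_FD + 1, &status, 4) != 4) exit(7);

  }
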
@@ -246,59 +257,60 @@ static void afl_forkserver(CPUState *cpu) {
 
 }
 
-
 /* This code is invoked whenever QEMU decides that it doesn't have a
    translation of a particular block and needs to compute it, or when it
    decides to chain two TBs together. When this happens, we tell the parent to
    mirror the operation, so that the next fork() has a cached copy. */
 
-static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags, uint32_t cf_mask,
-                            TranslationBlock *last_tb, int tb_exit) {
+static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags,
+                            uint32_t cf_mask, TranslationBlock *last_tb,
+                            int tb_exit) {
 
-  struct afl_tsl t;
+  struct afl_tsl   t;
   struct afl_chain c;
 
   if (!afl_fork_child) return;
 
-  t.tb.pc      = pc;
+  t.tb.pc = pc;
   t.tb.cs_base = cb;
-  t.tb.flags   = flags;
+  t.tb.flags = flags;
   t.tb.cf_mask = cf_mask;
-  t.is_chain   = (last_tb != NULL);
+  t.is_chain = (last_tb != NULL);
 
   if (write(TSL_FD, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl))
     return;
 
   if (t.is_chain) {
-    c.last_tb.pc      = last_tb->pc;
+
+    c.last_tb.pc = last_tb->pc;
     c.last_tb.cs_base = last_tb->cs_base;
-    c.last_tb.flags   = last_tb->flags;
-    c.cf_mask         = cf_mask;
-    c.tb_exit         = tb_exit;
+    c.last_tb.flags = last_tb->flags;
+    c.cf_mask = cf_mask;
+    c.tb_exit = tb_exit;
 
     if (write(TSL_FD, &c, sizeof(struct afl_chain)) != sizeof(struct afl_chain))
       return;
+
   }
 
 }
 
-
 /* Check if an address is valid in the current mapping */
 
 static inline int is_valid_addr(target_ulong addr) {
 
-    int l, flags;
-    target_ulong page;
-    void * p;
-    
-    page = addr & TARGET_PAGE_MASK;
-    l = (page + TARGET_PAGE_SIZE) - addr;
-    
-    flags = page_get_flags(page);
-    if (!(flags & PAGE_VALID) || !(flags & PAGE_READ))
-        return 0;
-    
-    return 1;
+  int          l, flags;
+  target_ulong page;
+  void *       p;
+
+  page = addr & TARGET_PAGE_MASK;
+  l = (page + TARGET_PAGE_SIZE) - addr;
+
+  flags = page_get_flags(page);
+  if (!(flags & PAGE_VALID) || !(flags & PAGE_READ)) return 0;
+
+  return 1;
+
 }
 
 /* This is the other side of the same channel. Since timeouts are handled by
@@ -306,8 +318,8 @@ static inline int is_valid_addr(target_ulong addr) {
 
 static void afl_wait_tsl(CPUState *cpu, int fd) {
 
-  struct afl_tsl t;
-  struct afl_chain c;
+  struct afl_tsl    t;
+  struct afl_chain  c;
   TranslationBlock *tb, *last_tb;
 
   while (1) {
@@ -316,30 +328,33 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
 
     /* Broken pipe means it's time to return to the fork server routine. */
 
-    if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl))
-      break;
+    if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) break;
 
     tb = tb_htable_lookup(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask);
 
-    if(!tb) {
-      
+    if (!tb) {
+
       /* The child may request to translate a block of memory that is not
          mapped in the parent (e.g. jitted code or dlopened code).
          This causes a SIGSEGV in gen_intermediate_code() and associated
          subroutines. We simply avoid caching of such blocks. */
 
       if (is_valid_addr(t.tb.pc)) {
-    
+
         mmap_lock();
         tb = tb_gen_code(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask);
         mmap_unlock();
+
       } else {
-      
-        invalid_pc = 1; 
+
+        invalid_pc = 1;
+
       }
+
     }
 
     if (t.is_chain) {
+
       if (read(fd, &c, sizeof(struct afl_chain)) != sizeof(struct afl_chain))
         break;
 
@@ -347,10 +362,10 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
 
         last_tb = tb_htable_lookup(cpu, c.last_tb.pc, c.last_tb.cs_base,
                                    c.last_tb.flags, c.cf_mask);
-        if (last_tb) {
-          tb_add_jump(last_tb, c.tb_exit, tb);
-        }
+        if (last_tb) { tb_add_jump(last_tb, c.tb_exit, tb); }
+
       }
+
     }
 
   }
@@ -358,3 +373,4 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
   close(fd);
 
 }
+
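Taken together, afl_request_tsl() and afl_wait_tsl() implement a small
framed protocol on the TSL pipe: the fuzzed child writes one fixed-size
struct afl_tsl per translated or chained block, followed by a struct
afl_chain only when is_chain is set, and the fork server replays the
translation so the next fork() inherits a warm translation cache. A
hypothetical reader helper showing the framing (read_tsl_record is not
part of the patch):

  /* Returns the number of records read, or -1 when the pipe closes. */
  static int read_tsl_record(int fd, struct afl_tsl *t,
                             struct afl_chain *c) {

    if (read(fd, t, sizeof *t) != sizeof *t) return -1;
    if (t->is_chain && read(fd, c, sizeof *c) != sizeof *c) return -1;
    return t->is_chain ? 2 : 1;

  }
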
diff --git a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
index 09ecb9d2..3d3c1b6b 100644
--- a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
+++ b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
@@ -37,9 +37,9 @@
 
 /* Declared in afl-qemu-cpu-inl.h */
 extern unsigned char *afl_area_ptr;
-extern unsigned int afl_inst_rms;
-extern abi_ulong afl_start_code, afl_end_code;
-extern u8 afl_compcov_level;
+extern unsigned int   afl_inst_rms;
+extern abi_ulong      afl_start_code, afl_end_code;
+extern u8             afl_compcov_level;
 
 void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
                                   TCGv_i64 arg1, TCGv_i64 arg2);
@@ -47,81 +47,93 @@ void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
 static void afl_compcov_log_16(target_ulong cur_loc, target_ulong arg1,
                                target_ulong arg2) {
 
-  if ((arg1 & 0xff) == (arg2 & 0xff)) {
-    INC_AFL_AREA(cur_loc);
-  }
+  if ((arg1 & 0xff) == (arg2 & 0xff)) { INC_AFL_AREA(cur_loc); }
+
 }
 
 static void afl_compcov_log_32(target_ulong cur_loc, target_ulong arg1,
                                target_ulong arg2) {
 
   if ((arg1 & 0xff) == (arg2 & 0xff)) {
+
     INC_AFL_AREA(cur_loc);
     if ((arg1 & 0xffff) == (arg2 & 0xffff)) {
-      INC_AFL_AREA(cur_loc +1);
-      if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) {
-        INC_AFL_AREA(cur_loc +2);
-      }
+
+      INC_AFL_AREA(cur_loc + 1);
+      if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { INC_AFL_AREA(cur_loc + 2); }
+
     }
+
   }
+
 }
 
 static void afl_compcov_log_64(target_ulong cur_loc, target_ulong arg1,
                                target_ulong arg2) {
 
   if ((arg1 & 0xff) == (arg2 & 0xff)) {
+
     INC_AFL_AREA(cur_loc);
     if ((arg1 & 0xffff) == (arg2 & 0xffff)) {
-      INC_AFL_AREA(cur_loc +1);
+
+      INC_AFL_AREA(cur_loc + 1);
       if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) {
-        INC_AFL_AREA(cur_loc +2);
+
+        INC_AFL_AREA(cur_loc + 2);
         if ((arg1 & 0xffffffff) == (arg2 & 0xffffffff)) {
-          INC_AFL_AREA(cur_loc +3);
+
+          INC_AFL_AREA(cur_loc + 3);
           if ((arg1 & 0xffffffffff) == (arg2 & 0xffffffffff)) {
-            INC_AFL_AREA(cur_loc +4);
+
+            INC_AFL_AREA(cur_loc + 4);
             if ((arg1 & 0xffffffffffff) == (arg2 & 0xffffffffffff)) {
-              INC_AFL_AREA(cur_loc +5);
+
+              INC_AFL_AREA(cur_loc + 5);
               if ((arg1 & 0xffffffffffffff) == (arg2 & 0xffffffffffffff)) {
-                INC_AFL_AREA(cur_loc +6);
+
+                INC_AFL_AREA(cur_loc + 6);
+
               }
+
             }
+
           }
+
         }
+
       }
+
     }
+
   }
-}
 
+}
 
 static void afl_gen_compcov(target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2,
                             TCGMemOp ot, int is_imm) {
 
   void *func;
-  
+
   if (!afl_compcov_level || cur_loc > afl_end_code || cur_loc < afl_start_code)
     return;
-  
-  if (!is_imm && afl_compcov_level < 2)
-    return;
+
+  if (!is_imm && afl_compcov_level < 2) return;
 
   switch (ot) {
-    case MO_64:
-      func = &afl_compcov_log_64;
-      break;
-    case MO_32: 
-      func = &afl_compcov_log_32;
-      break;
-    case MO_16:
-      func = &afl_compcov_log_16;
-      break;
-    default:
-      return;
+
+    case MO_64: func = &afl_compcov_log_64; break;
+    case MO_32: func = &afl_compcov_log_32; break;
+    case MO_16: func = &afl_compcov_log_16; break;
+    default: return;
+
   }
-  
-  cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+
+  cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
   cur_loc &= MAP_SIZE - 7;
-  
+
   if (cur_loc >= afl_inst_rms) return;
-  
+
   tcg_gen_afl_compcov_log_call(func, cur_loc, arg1, arg2);
+
 }
+
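The afl_compcov_log_* helpers above reward partial progress on
multi-byte comparisons: each additional matching low-order byte of the
two operands bumps one more bitmap slot, so the fuzzer gets feedback
every time it guesses one more byte of a magic value. (This is also why
afl_gen_compcov() clamps cur_loc with MAP_SIZE - 7: the last of up to
seven consecutive slots must stay in bounds.) The unrolled 64-bit
ladder can be read as a loop; a hypothetical loop-shaped rewrite for
clarity, not the patch's code:

  static void compcov_log_sketch(target_ulong cur_loc, uint64_t arg1,
                                 uint64_t arg2) {

    uint64_t mask = 0xff;

    /* Bytes 1..7: one bitmap hit per matching low-order byte. */
    for (int i = 0; i < 7 && (arg1 & mask) == (arg2 & mask); i++) {

      INC_AFL_AREA(cur_loc + i);
      mask = (mask << 8) | 0xff;

    }

  }
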
diff --git a/qemu_mode/patches/afl-qemu-tcg-inl.h b/qemu_mode/patches/afl-qemu-tcg-inl.h
index a9c53b8c..d53a1ccf 100644
--- a/qemu_mode/patches/afl-qemu-tcg-inl.h
+++ b/qemu_mode/patches/afl-qemu-tcg-inl.h
@@ -31,275 +31,343 @@
 
  */
 
-void afl_maybe_log(void* cur_loc);
+void afl_maybe_log(void *cur_loc);
 
 /* Note: we convert the 64 bit args to 32 bit and do some alignment
    and endian swap. Maybe it would be better to do the alignment
    and endian swap in tcg_reg_alloc_call(). */
-void tcg_gen_afl_maybe_log_call(target_ulong cur_loc)
-{
-    int real_args, pi;
-    unsigned sizemask, flags;
-    TCGOp *op;
-
-    TCGTemp *arg = tcgv_i64_temp( tcg_const_tl(cur_loc) );
-
-    flags = 0;
-    sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1);
-
-#if defined(__sparc__) && !defined(__arch64__) \
-    && !defined(CONFIG_TCG_INTERPRETER)
-    /* We have 64-bit values in one register, but need to pass as two
-       separate parameters.  Split them.  */
-    int orig_sizemask = sizemask;
-    TCGv_i64 retl, reth;
-    TCGTemp *split_args[MAX_OPC_PARAM];
-
-    retl = NULL;
-    reth = NULL;
-    if (sizemask != 0) {
-        real_args = 0;
-        int is_64bit = sizemask & (1 << 2);
-        if (is_64bit) {
-            TCGv_i64 orig = temp_tcgv_i64(arg);
-            TCGv_i32 h = tcg_temp_new_i32();
-            TCGv_i32 l = tcg_temp_new_i32();
-            tcg_gen_extr_i64_i32(l, h, orig);
-            split_args[real_args++] = tcgv_i32_temp(h);
-            split_args[real_args++] = tcgv_i32_temp(l);
-        } else {
-            split_args[real_args++] = arg;
-        }
-        nargs = real_args;
-        args = split_args;
-        sizemask = 0;
+void tcg_gen_afl_maybe_log_call(target_ulong cur_loc) {
+
+  int      real_args, pi;
+  unsigned sizemask, flags;
+  TCGOp *  op;
+
+  TCGTemp *arg = tcgv_i64_temp(tcg_const_tl(cur_loc));
+
+  flags = 0;
+  sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1);
+
+#if defined(__sparc__) && !defined(__arch64__) && \
+    !defined(CONFIG_TCG_INTERPRETER)
+  /* We have 64-bit values in one register, but need to pass as two
+     separate parameters.  Split them.  */
+  int      orig_sizemask = sizemask;
+  TCGv_i64 retl, reth;
+  TCGTemp *split_args[MAX_OPC_PARAM];
+
+  retl = NULL;
+  reth = NULL;
+  if (sizemask != 0) {
+
+    real_args = 0;
+    int is_64bit = sizemask & (1 << 2);
+    if (is_64bit) {
+
+      TCGv_i64 orig = temp_tcgv_i64(arg);
+      TCGv_i32 h = tcg_temp_new_i32();
+      TCGv_i32 l = tcg_temp_new_i32();
+      tcg_gen_extr_i64_i32(l, h, orig);
+      split_args[real_args++] = tcgv_i32_temp(h);
+      split_args[real_args++] = tcgv_i32_temp(l);
+
+    } else {
+
+      split_args[real_args++] = arg;
+
     }
+
+    nargs = real_args;
+    args = split_args;
+    sizemask = 0;
+
+  }
+
 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
-    int is_64bit = sizemask & (1 << 2);
-    int is_signed = sizemask & (2 << 2);
-    if (!is_64bit) {
-        TCGv_i64 temp = tcg_temp_new_i64();
-        TCGv_i64 orig = temp_tcgv_i64(arg);
-        if (is_signed) {
-            tcg_gen_ext32s_i64(temp, orig);
-        } else {
-            tcg_gen_ext32u_i64(temp, orig);
-        }
-        arg = tcgv_i64_temp(temp);
+  int is_64bit = sizemask & (1 << 2);
+  int is_signed = sizemask & (2 << 2);
+  if (!is_64bit) {
+
+    TCGv_i64 temp = tcg_temp_new_i64();
+    TCGv_i64 orig = temp_tcgv_i64(arg);
+    if (is_signed) {
+
+      tcg_gen_ext32s_i64(temp, orig);
+
+    } else {
+
+      tcg_gen_ext32u_i64(temp, orig);
+
     }
+
+    arg = tcgv_i64_temp(temp);
+
+  }
+
 #endif /* TCG_TARGET_EXTEND_ARGS */
 
-    op = tcg_emit_op(INDEX_op_call);
+  op = tcg_emit_op(INDEX_op_call);
 
-    pi = 0;
+  pi = 0;
 
-    TCGOP_CALLO(op) = 0;
+  TCGOP_CALLO(op) = 0;
+
+  real_args = 0;
+  int is_64bit = sizemask & (1 << 2);
+  if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
 
-    real_args = 0;
-    int is_64bit = sizemask & (1 << 2);
-    if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
-        /* some targets want aligned 64 bit args */
-        if (real_args & 1) {
-            op->args[pi++] = TCG_CALL_DUMMY_ARG;
-            real_args++;
-        }
+    /* some targets want aligned 64 bit args */
+    if (real_args & 1) {
+
+      op->args[pi++] = TCG_CALL_DUMMY_ARG;
+      real_args++;
+
+    }
+
 #endif
-       /* If stack grows up, then we will be placing successive
-          arguments at lower addresses, which means we need to
-          reverse the order compared to how we would normally
-          treat either big or little-endian.  For those arguments
-          that will wind up in registers, this still works for
-          HPPA (the only current STACK_GROWSUP target) since the
-          argument registers are *also* allocated in decreasing
-          order.  If another such target is added, this logic may
-          have to get more complicated to differentiate between
-          stack arguments and register arguments.  */
+    /* If stack grows up, then we will be placing successive
+       arguments at lower addresses, which means we need to
+       reverse the order compared to how we would normally
+       treat either big or little-endian.  For those arguments
+       that will wind up in registers, this still works for
+       HPPA (the only current STACK_GROWSUP target) since the
+       argument registers are *also* allocated in decreasing
+       order.  If another such target is added, this logic may
+       have to get more complicated to differentiate between
+       stack arguments and register arguments.  */
 #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
-        op->args[pi++] = temp_arg(arg + 1);
-        op->args[pi++] = temp_arg(arg);
+    op->args[pi++] = temp_arg(arg + 1);
+    op->args[pi++] = temp_arg(arg);
 #else
-        op->args[pi++] = temp_arg(arg);
-        op->args[pi++] = temp_arg(arg + 1);
+    op->args[pi++] = temp_arg(arg);
+    op->args[pi++] = temp_arg(arg + 1);
 #endif
-        real_args += 2;
-    }
+    real_args += 2;
+
+  }
+
+  op->args[pi++] = temp_arg(arg);
+  real_args++;
+
+  op->args[pi++] = (uintptr_t)&afl_maybe_log;
+  op->args[pi++] = flags;
+  TCGOP_CALLI(op) = real_args;
+
+  /* Make sure the fields didn't overflow.  */
+  tcg_debug_assert(TCGOP_CALLI(op) == real_args);
+  tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
+
+#if defined(__sparc__) && !defined(__arch64__) && \
+    !defined(CONFIG_TCG_INTERPRETER)
+  /* Free all of the parts we allocated above.  */
+  real_args = 0;
+  int is_64bit = orig_sizemask & (1 << 2);
+  if (is_64bit) {
+
+    tcg_temp_free_internal(args[real_args++]);
+    tcg_temp_free_internal(args[real_args++]);
+
+  } else {
 
-    op->args[pi++] = temp_arg(arg);
     real_args++;
 
-    op->args[pi++] = (uintptr_t)&afl_maybe_log;
-    op->args[pi++] = flags;
-    TCGOP_CALLI(op) = real_args;
+  }
 
-    /* Make sure the fields didn't overflow.  */
-    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
-    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
+  if (orig_sizemask & 1) {
+
+    /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
+       Note that describing these as TCGv_i64 eliminates an unnecessary
+       zero-extension that tcg_gen_concat_i32_i64 would create.  */
+    tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
+    tcg_temp_free_i64(retl);
+    tcg_temp_free_i64(reth);
+
+  }
 
-#if defined(__sparc__) && !defined(__arch64__) \
-    && !defined(CONFIG_TCG_INTERPRETER)
-    /* Free all of the parts we allocated above.  */
-    real_args = 0;
-    int is_64bit = orig_sizemask & (1 << 2);
-    if (is_64bit) {
-        tcg_temp_free_internal(args[real_args++]);
-        tcg_temp_free_internal(args[real_args++]);
-    } else {
-        real_args++;
-    }
-    if (orig_sizemask & 1) {
-        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
-           Note that describing these as TCGv_i64 eliminates an unnecessary
-           zero-extension that tcg_gen_concat_i32_i64 would create.  */
-        tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
-        tcg_temp_free_i64(retl);
-        tcg_temp_free_i64(reth);
-    }
 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
-    int is_64bit = sizemask & (1 << 2);
-    if (!is_64bit) {
-        tcg_temp_free_internal(arg);
-    }
+  int is_64bit = sizemask & (1 << 2);
+  if (!is_64bit) { tcg_temp_free_internal(arg); }
 #endif /* TCG_TARGET_EXTEND_ARGS */
+
 }
 
-void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2)
-{
-    int i, real_args, nb_rets, pi;
-    unsigned sizemask, flags;
-    TCGOp *op;
-
-    const int nargs = 3;
-    TCGTemp *args[3] = { tcgv_i64_temp( tcg_const_tl(cur_loc) ),
-                         tcgv_i64_temp(arg1),
-                         tcgv_i64_temp(arg2) };
-
-    flags = 0;
-    sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1) |
-               dh_sizemask(i64, 2) | dh_sizemask(i64, 3);
-
-#if defined(__sparc__) && !defined(__arch64__) \
-    && !defined(CONFIG_TCG_INTERPRETER)
-    /* We have 64-bit values in one register, but need to pass as two
-       separate parameters.  Split them.  */
-    int orig_sizemask = sizemask;
-    int orig_nargs = nargs;
-    TCGv_i64 retl, reth;
-    TCGTemp *split_args[MAX_OPC_PARAM];
-
-    retl = NULL;
-    reth = NULL;
-    if (sizemask != 0) {
-        for (i = real_args = 0; i < nargs; ++i) {
-            int is_64bit = sizemask & (1 << (i+1)*2);
-            if (is_64bit) {
-                TCGv_i64 orig = temp_tcgv_i64(args[i]);
-                TCGv_i32 h = tcg_temp_new_i32();
-                TCGv_i32 l = tcg_temp_new_i32();
-                tcg_gen_extr_i64_i32(l, h, orig);
-                split_args[real_args++] = tcgv_i32_temp(h);
-                split_args[real_args++] = tcgv_i32_temp(l);
-            } else {
-                split_args[real_args++] = args[i];
-            }
-        }
-        nargs = real_args;
-        args = split_args;
-        sizemask = 0;
+void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
+                                  TCGv_i64 arg1, TCGv_i64 arg2) {
+
+  int      i, real_args, nb_rets, pi;
+  unsigned sizemask, flags;
+  TCGOp *  op;
+
+  const int nargs = 3;
+  TCGTemp *args[3] = {tcgv_i64_temp(tcg_const_tl(cur_loc)), tcgv_i64_temp(arg1),
+                      tcgv_i64_temp(arg2)};
+
+  flags = 0;
+  sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1) | dh_sizemask(i64, 2) |
+             dh_sizemask(i64, 3);
+
+#if defined(__sparc__) && !defined(__arch64__) && \
+    !defined(CONFIG_TCG_INTERPRETER)
+  /* We have 64-bit values in one register, but need to pass as two
+     separate parameters.  Split them.  */
+  int      orig_sizemask = sizemask;
+  int      orig_nargs = nargs;
+  TCGv_i64 retl, reth;
+  TCGTemp *split_args[MAX_OPC_PARAM];
+
+  retl = NULL;
+  reth = NULL;
+  if (sizemask != 0) {
+
+    for (i = real_args = 0; i < nargs; ++i) {
+
+      int is_64bit = sizemask & (1 << (i + 1) * 2);
+      if (is_64bit) {
+
+        TCGv_i64 orig = temp_tcgv_i64(args[i]);
+        TCGv_i32 h = tcg_temp_new_i32();
+        TCGv_i32 l = tcg_temp_new_i32();
+        tcg_gen_extr_i64_i32(l, h, orig);
+        split_args[real_args++] = tcgv_i32_temp(h);
+        split_args[real_args++] = tcgv_i32_temp(l);
+
+      } else {
+
+        split_args[real_args++] = args[i];
+
+      }
+
     }
+
+    nargs = real_args;
+    args = split_args;
+    sizemask = 0;
+
+  }
+
 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
-    for (i = 0; i < nargs; ++i) {
-        int is_64bit = sizemask & (1 << (i+1)*2);
-        int is_signed = sizemask & (2 << (i+1)*2);
-        if (!is_64bit) {
-            TCGv_i64 temp = tcg_temp_new_i64();
-            TCGv_i64 orig = temp_tcgv_i64(args[i]);
-            if (is_signed) {
-                tcg_gen_ext32s_i64(temp, orig);
-            } else {
-                tcg_gen_ext32u_i64(temp, orig);
-            }
-            args[i] = tcgv_i64_temp(temp);
-        }
+  for (i = 0; i < nargs; ++i) {
+
+    int is_64bit = sizemask & (1 << (i + 1) * 2);
+    int is_signed = sizemask & (2 << (i + 1) * 2);
+    if (!is_64bit) {
+
+      TCGv_i64 temp = tcg_temp_new_i64();
+      TCGv_i64 orig = temp_tcgv_i64(args[i]);
+      if (is_signed) {
+
+        tcg_gen_ext32s_i64(temp, orig);
+
+      } else {
+
+        tcg_gen_ext32u_i64(temp, orig);
+
+      }
+
+      args[i] = tcgv_i64_temp(temp);
+
     }
+
+  }
+
 #endif /* TCG_TARGET_EXTEND_ARGS */
 
-    op = tcg_emit_op(INDEX_op_call);
+  op = tcg_emit_op(INDEX_op_call);
 
-    pi = 0;
-    nb_rets = 0;
-    TCGOP_CALLO(op) = nb_rets;
+  pi = 0;
+  nb_rets = 0;
+  TCGOP_CALLO(op) = nb_rets;
+
+  real_args = 0;
+  for (i = 0; i < nargs; i++) {
+
+    int is_64bit = sizemask & (1 << (i + 1) * 2);
+    if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
 
-    real_args = 0;
-    for (i = 0; i < nargs; i++) {
-        int is_64bit = sizemask & (1 << (i+1)*2);
-        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
-            /* some targets want aligned 64 bit args */
-            if (real_args & 1) {
-                op->args[pi++] = TCG_CALL_DUMMY_ARG;
-                real_args++;
-            }
+      /* some targets want aligned 64 bit args */
+      if (real_args & 1) {
+
+        op->args[pi++] = TCG_CALL_DUMMY_ARG;
+        real_args++;
+
+      }
+
 #endif
-           /* If stack grows up, then we will be placing successive
-              arguments at lower addresses, which means we need to
-              reverse the order compared to how we would normally
-              treat either big or little-endian.  For those arguments
-              that will wind up in registers, this still works for
-              HPPA (the only current STACK_GROWSUP target) since the
-              argument registers are *also* allocated in decreasing
-              order.  If another such target is added, this logic may
-              have to get more complicated to differentiate between
-              stack arguments and register arguments.  */
+      /* If stack grows up, then we will be placing successive
+         arguments at lower addresses, which means we need to
+         reverse the order compared to how we would normally
+         treat either big or little-endian.  For those arguments
+         that will wind up in registers, this still works for
+         HPPA (the only current STACK_GROWSUP target) since the
+         argument registers are *also* allocated in decreasing
+         order.  If another such target is added, this logic may
+         have to get more complicated to differentiate between
+         stack arguments and register arguments.  */
 #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
-            op->args[pi++] = temp_arg(args[i] + 1);
-            op->args[pi++] = temp_arg(args[i]);
+      op->args[pi++] = temp_arg(args[i] + 1);
+      op->args[pi++] = temp_arg(args[i]);
 #else
-            op->args[pi++] = temp_arg(args[i]);
-            op->args[pi++] = temp_arg(args[i] + 1);
+      op->args[pi++] = temp_arg(args[i]);
+      op->args[pi++] = temp_arg(args[i] + 1);
 #endif
-            real_args += 2;
-            continue;
-        }
+      real_args += 2;
+      continue;
 
-        op->args[pi++] = temp_arg(args[i]);
-        real_args++;
-    }
-    op->args[pi++] = (uintptr_t)func;
-    op->args[pi++] = flags;
-    TCGOP_CALLI(op) = real_args;
-
-    /* Make sure the fields didn't overflow.  */
-    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
-    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
-
-#if defined(__sparc__) && !defined(__arch64__) \
-    && !defined(CONFIG_TCG_INTERPRETER)
-    /* Free all of the parts we allocated above.  */
-    for (i = real_args = 0; i < orig_nargs; ++i) {
-        int is_64bit = orig_sizemask & (1 << (i+1)*2);
-        if (is_64bit) {
-            tcg_temp_free_internal(args[real_args++]);
-            tcg_temp_free_internal(args[real_args++]);
-        } else {
-            real_args++;
-        }
     }
-    if (orig_sizemask & 1) {
-        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
-           Note that describing these as TCGv_i64 eliminates an unnecessary
-           zero-extension that tcg_gen_concat_i32_i64 would create.  */
-        tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
-        tcg_temp_free_i64(retl);
-        tcg_temp_free_i64(reth);
+
+    op->args[pi++] = temp_arg(args[i]);
+    real_args++;
+
+  }
+
+  op->args[pi++] = (uintptr_t)func;
+  op->args[pi++] = flags;
+  TCGOP_CALLI(op) = real_args;
+
+  /* Make sure the fields didn't overflow.  */
+  tcg_debug_assert(TCGOP_CALLI(op) == real_args);
+  tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
+
+#if defined(__sparc__) && !defined(__arch64__) && \
+    !defined(CONFIG_TCG_INTERPRETER)
+  /* Free all of the parts we allocated above.  */
+  for (i = real_args = 0; i < orig_nargs; ++i) {
+
+    int is_64bit = orig_sizemask & (1 << (i + 1) * 2);
+    if (is_64bit) {
+
+      tcg_temp_free_internal(args[real_args++]);
+      tcg_temp_free_internal(args[real_args++]);
+
+    } else {
+
+      real_args++;
+
     }
+
+  }
+
+  if (orig_sizemask & 1) {
+
+    /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
+       Note that describing these as TCGv_i64 eliminates an unnecessary
+       zero-extension that tcg_gen_concat_i32_i64 would create.  */
+    tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
+    tcg_temp_free_i64(retl);
+    tcg_temp_free_i64(reth);
+
+  }
+
 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
-    for (i = 0; i < nargs; ++i) {
-        int is_64bit = sizemask & (1 << (i+1)*2);
-        if (!is_64bit) {
-            tcg_temp_free_internal(args[i]);
-        }
-    }
+  for (i = 0; i < nargs; ++i) {
+
+    int is_64bit = sizemask & (1 << (i + 1) * 2);
+    if (!is_64bit) { tcg_temp_free_internal(args[i]); }
+
+  }
+
 #endif /* TCG_TARGET_EXTEND_ARGS */
+
 }
 
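Both call emitters above hand-roll what QEMU's tcg_gen_callN() does,
driven by a sizemask in which each value occupies a pair of bits: pair
0 describes the return value, pair i + 1 describes argument i, the low
bit of a pair flags a 64-bit value and the high bit a signed one. A
short sketch decoding that layout (assumed from the `1 << (i + 1) * 2`
and `2 << (i + 1) * 2` tests in the code above):

  static int arg_is_64bit(unsigned sizemask, int i) {

    return (sizemask >> ((i + 1) * 2)) & 1;

  }

  static int arg_is_signed(unsigned sizemask, int i) {

    return (sizemask >> ((i + 1) * 2)) & 2;

  }

For tcg_gen_afl_maybe_log_call() the mask is dh_sizemask(void, 0) |
dh_sizemask(i64, 1): a void return and a single 64-bit argument, which
is why only one is_64bit test appears in its body.
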
diff --git a/qemu_mode/patches/afl-qemu-translate-inl.h b/qemu_mode/patches/afl-qemu-translate-inl.h
index ffe43dba..9abaa961 100644
--- a/qemu_mode/patches/afl-qemu-translate-inl.h
+++ b/qemu_mode/patches/afl-qemu-translate-inl.h
@@ -36,8 +36,8 @@
 
 /* Declared in afl-qemu-cpu-inl.h */
 extern unsigned char *afl_area_ptr;
-extern unsigned int afl_inst_rms;
-extern abi_ulong afl_start_code, afl_end_code;
+extern unsigned int   afl_inst_rms;
+extern abi_ulong      afl_start_code, afl_end_code;
 
 void tcg_gen_afl_maybe_log_call(target_ulong cur_loc);
 
@@ -59,14 +59,16 @@ static void afl_gen_trace(target_ulong cur_loc) {
   /* Optimize for cur_loc > afl_end_code, which is the most likely case on
      Linux systems. */
 
-  if (cur_loc > afl_end_code || cur_loc < afl_start_code /*|| !afl_area_ptr*/) // not needed because of static dummy buffer
+  if (cur_loc > afl_end_code ||
+      cur_loc < afl_start_code /*|| !afl_area_ptr*/)  // not needed because of
+                                                      // static dummy buffer
     return;
 
   /* Looks like QEMU always maps to fixed locations, so ASLR is not a
      concern. Phew. But instruction addresses may be aligned. Let's mangle
      the value to get something quasi-uniform. */
 
-  cur_loc  = (cur_loc >> 4) ^ (cur_loc << 8);
+  cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
   cur_loc &= MAP_SIZE - 1;
 
   /* Implement probabilistic instrumentation by looking at scrambled block
@@ -75,5 +77,6 @@ static void afl_gen_trace(target_ulong cur_loc) {
   if (cur_loc >= afl_inst_rms) return;
 
   tcg_gen_afl_maybe_log_call(cur_loc);
-  
+
 }
+
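afl_gen_trace() turns a block address into a quasi-uniform bitmap index
and then implements AFL_INST_RATIO probabilistically: afl_inst_rms is
scaled to MAP_SIZE * ratio / 100 in afl_setup(), so discarding every
block whose mangled index lands at or above afl_inst_rms skips roughly
the right fraction of blocks. A self-contained sketch of the index
computation (MAP_SIZE value illustrative):

  #include <stdint.h>
  #include <stdio.h>

  #define MAP_SIZE 65536                 /* illustrative value */

  static uint64_t afl_hash_loc(uint64_t pc) {

    uint64_t cur_loc = (pc >> 4) ^ (pc << 8);  /* spread aligned PCs */
    return cur_loc & (MAP_SIZE - 1);           /* clamp into the map */

  }

  int main(void) {

    /* Nearby 16-byte-aligned addresses land on distant indices: */
    for (uint64_t pc = 0x400000; pc < 0x400040; pc += 16)
      printf("%#llx -> %llu\n", (unsigned long long)pc,
             (unsigned long long)afl_hash_loc(pc));
    return 0;

  }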