author     Andrea Fioraldi <andreafioraldi@gmail.com>  2019-09-13 11:37:26 +0200
committer  GitHub <noreply@github.com>                 2019-09-13 11:37:26 +0200
commit     8ee11fecc475dd6bcaab7f1e5a38c1cfac4c7e56 (patch)
tree       f728ee952e94bb299bd5fc603009fbcd51dd85cb
parent     a67d86c6e2ca58db81f2ddf6d0a4c837be88271d (diff)
parent     36020c41df88ae863fbc2a148765f9c61c7f8bf8 (diff)
download   afl++-8ee11fecc475dd6bcaab7f1e5a38c1cfac4c7e56.tar.gz
Merge pull request #57 from vanhauser-thc/persistent_qemu
Persistent mode in QEMU
-rw-r--r--   .gitignore                                        2
-rw-r--r--   README.md                                        18
-rw-r--r--   TODO                                             15
-rw-r--r--   docs/ChangeLog                                    1
-rw-r--r--   include/alloc-inl.h                              29
-rw-r--r--   include/config.h                                  6
-rw-r--r--   qemu_mode/README.md                              19
-rwxr-xr-x   qemu_mode/libcompcov/compcovtest                bin 0 -> 8624 bytes
-rw-r--r--   qemu_mode/patches/afl-qemu-common.h              22
-rw-r--r--   qemu_mode/patches/afl-qemu-cpu-inl.h            169
-rw-r--r--   qemu_mode/patches/afl-qemu-cpu-translate-inl.h   36
-rw-r--r--   qemu_mode/patches/afl-qemu-tcg-inl.h            180
-rw-r--r--   qemu_mode/patches/afl-qemu-translate-inl.h       13
-rw-r--r--   qemu_mode/patches/i386-translate.diff            11
-rw-r--r--   src/afl-analyze.c                                 2
-rw-r--r--   src/afl-forkserver.c                              2
-rw-r--r--   src/afl-fuzz-one.c                                2
-rw-r--r--   src/afl-fuzz-run.c                                6
-rw-r--r--   src/afl-fuzz.c                                    2
-rw-r--r--   src/afl-gcc.c                                     8
-rw-r--r--   src/afl-gotcpu.c                                  2
-rw-r--r--   src/afl-showmap.c                                 2
-rw-r--r--   src/afl-tmin.c                                    2
23 files changed, 460 insertions, 89 deletions
diff --git a/.gitignore b/.gitignore
index e4d2346e..b2975a7e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
+.test
+.test2
*.o
*.so
afl-analyze
diff --git a/README.md b/README.md
index 597ed8f0..c697da4f 100644
--- a/README.md
+++ b/README.md
@@ -30,9 +30,9 @@
* AFLfast's power schedules by Marcel Böhme: [https://github.com/mboehme/aflfast](https://github.com/mboehme/aflfast)
- * the new excellent MOpt mutator: [https://github.com/puppet-meteor/MOpt-AFL](https://github.com/puppet-meteor/MOpt-AFL)
+ * The new excellent MOpt mutator: [https://github.com/puppet-meteor/MOpt-AFL](https://github.com/puppet-meteor/MOpt-AFL)
- * instrim, a very effective CFG llvm_mode instrumentation implementation for large targets: [https://github.com/csienslab/instrim](https://github.com/csienslab/instrim)
+ * InsTrim, a very effective CFG llvm_mode instrumentation implementation for large targets: [https://github.com/csienslab/instrim](https://github.com/csienslab/instrim)
* C. Holler's afl-fuzz Python mutator module and llvm_mode whitelist support: [https://github.com/choller/afl](https://github.com/choller/afl)
@@ -40,12 +40,22 @@
* unicorn_mode which allows fuzzing of binaries from completely different platforms (integration provided by domenukk)
- * laf-intel (compcov) support for llvm_mode, qemu_mode and unicorn_mode
+ * laf-intel or CompCov support for llvm_mode, qemu_mode and unicorn_mode
- * neverZero patch for afl-gcc, llvm_mode, qemu_mode and unicorn_mode which prevents a wrapping map value to zero, increases coverage (by Andrea Fioraldi)
+ * NeverZero patch for afl-gcc, llvm_mode, qemu_mode and unicorn_mode which prevents the map value from wrapping to zero and increases coverage
+
+ * Persistent mode and deferred forkserver for qemu_mode
A more thorough list is available in the PATCHES file.
+ | Feature/Instrumentation | LLVM | GCC | QEMU | Unicorn |
+ | ----------------------- |:----:|:---:|:----:| -------:|
+ | laf-intel / CompCov | x | | x | x |
+ | NeverZero | x | x | x | x |
+ | Persistent mode | x | | x | |
+ | Whitelist | x | | | |
+ | InsTrim | x | | | |
+
So all in all this is the best-of AFL that is currently out there :-)
For new versions and additional information, check out:
diff --git a/TODO b/TODO
index 26311713..87d1488c 100644
--- a/TODO
+++ b/TODO
@@ -20,6 +20,14 @@ gcc_plugin:
qemu_mode:
- update to 4.x (probably this will be skipped :( )
+ - deferred mode with AFL_DEFERRED_QEMU=0xaddress
+ (AFL_ENTRYPOINT lets you specify only a basic block address as starting
+ point. This will be implemented together with the logic for persistent
+ mode.)
+ - instrim for QEMU mode via static analysis (with r2pipe? or angr?)
+ Idea: The static analyzer outputs a map in which each edge that must be
+ skipped is marked with 1. QEMU loads it at startup in the parent process.
+
unit testing / or large testcase campaign
@@ -52,10 +60,3 @@ Problem: Average targets (tiff, jpeg, unrar) go through 1500 edges.
Bad: completely changes how afl uses the map and the scheduling.
Overall another very good solution, Marc Heuse/vanHauser follows this up
-qemu_mode:
- - persistent mode patching the return address (WinAFL style)
- - deferred mode with AFL_DEFERRED_QEMU=0xaddress
- (AFL_ENTRYPOINT let you to specify only a basic block address as starting
- point. This will be implemented togheter with the logic for persistent
- mode.)
-
diff --git a/docs/ChangeLog b/docs/ChangeLog
index a407e253..fa05d1b8 100644
--- a/docs/ChangeLog
+++ b/docs/ChangeLog
@@ -20,6 +20,7 @@ Version ++2.54d (dev):
- no more unlinking the input file, this way the input file can also be a
FIFO or disk partition
- reducing duplicate code in afl-fuzz
+ - persistent mode for QEMU
--------------------------
diff --git a/include/alloc-inl.h b/include/alloc-inl.h
index d851fd61..b0815ab1 100644
--- a/include/alloc-inl.h
+++ b/include/alloc-inl.h
@@ -106,39 +106,20 @@
\
} while (0)
-/*
-#define CHECK_PTR(_p) do { \
- \
- \
- \
- \
+/* #define CHECK_PTR(_p) do { \
if (_p) { \
- \
- \
- \
- \
if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) {\
- \
- \
- \
- \
if (ALLOC_C1(_p) == ALLOC_MAGIC_F) \
ABORT("Use after free."); \
else ABORT("Corrupted head alloc canary."); \
- \
+
} \
- \
- \
- \
+
if (ALLOC_C2(_p) ^ ALLOC_MAGIC_C2) \
ABORT("Corrupted tail alloc canary."); \
- \
+
} \
- \
- \
- \
- \
- \
+
} while (0)
*/
diff --git a/include/config.h b/include/config.h
index 98eb0a38..f2732ad4 100644
--- a/include/config.h
+++ b/include/config.h
@@ -373,9 +373,9 @@
/* for *BSD: use ARC4RANDOM and save a file descriptor */
#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__)
- #ifndef HAVE_ARC4RANDOM
- #define HAVE_ARC4RANDOM 1
- #endif
+#ifndef HAVE_ARC4RANDOM
+#define HAVE_ARC4RANDOM 1
+#endif
#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */
#endif /* ! _HAVE_CONFIG_H */
diff --git a/qemu_mode/README.md b/qemu_mode/README.md
index 610f6860..54fbf874 100644
--- a/qemu_mode/README.md
+++ b/qemu_mode/README.md
@@ -63,6 +63,25 @@ opened (e.g. way after command line parsing and config file loading, etc)
which can be a huge speed improvement. Note that the specified address
must be an address of a basic block.
+QEMU mode also supports persistent mode for x86 and x86_64 targets.
+The environment variable to enable it is AFL_QEMU_PERSISTENT_ADDR=`start addr`.
+In this variable you must specify the address of the function that
+serves as the body of the persistent loop.
+The code in this function must be stateless, as in LLVM persistent mode.
+The return address on the stack is patched, as in WinAFL, in order to repeat
+the execution of this function.
+Another way to run the persistent loop is to also specify the
+AFL_QEMU_PERSISTENT_RET=`end addr` environment variable.
+With this variable set, instead of patching the return address, the
+instruction at `end addr` is transformed into a jump back to `start addr`.
+Note that the addresses in these variables are given in hex.
+
+Note that the base address of PIE binaries in QEMU user mode is 0x4000000000.
+
+Warning: on x86_64, parameters are passed via registers, so the target
+function of persistent mode cannot make use of arguments. An option to restore
+the state of each GPR on each iteration of the loop is planned.
+
## 4) Notes on linking
The feature is supported only on Linux. Supporting BSD may amount to porting
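As an illustration of the qemu_mode/README.md hunk above (not part of this patch): a minimal sketch of a target that the new QEMU persistent mode could drive. The file name persistent_demo.c, the function name fuzz_one and the build command are hypothetical; only AFL_QEMU_PERSISTENT_ADDR / AFL_QEMU_PERSISTENT_RET / AFL_QEMU_PERSISTENT_CNT, their hex format and the PIE base address come from the patch itself.

```c
/* persistent_demo.c -- hypothetical stateless target for QEMU persistent mode.
   Build uninstrumented, e.g.:  gcc -no-pie -o persistent_demo persistent_demo.c */

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Body of the persistent loop. It takes no arguments (see the x86_64 warning
   above) and re-reads its input on every call, keeping no state between runs. */
void fuzz_one(void) {

  unsigned char buf[4096];

  memset(buf, 0, sizeof(buf));
  if (read(0, buf, sizeof(buf) - 1) < 4) return;

  if (!memcmp(buf, "FUZZ", 4)) abort();   /* example crash condition */

}

int main(void) {

  fuzz_one();
  return 0;

}
```

For a non-PIE build the start address would simply be the address of fuzz_one as printed by `nm persistent_demo`; for a PIE binary the QEMU user base 0x4000000000 mentioned above has to be added. A run could then look roughly like `AFL_QEMU_PERSISTENT_ADDR=0x<addr of fuzz_one> afl-fuzz -Q -i in -o out -- ./persistent_demo`, optionally with AFL_QEMU_PERSISTENT_RET and AFL_QEMU_PERSISTENT_CNT (both hex; the latter is introduced in afl-qemu-cpu-inl.h below with a default of 1000 iterations).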
diff --git a/qemu_mode/libcompcov/compcovtest b/qemu_mode/libcompcov/compcovtest
new file mode 100755
index 00000000..0bb68d60
--- /dev/null
+++ b/qemu_mode/libcompcov/compcovtest
Binary files differ
diff --git a/qemu_mode/patches/afl-qemu-common.h b/qemu_mode/patches/afl-qemu-common.h
index 88c110b4..053585a7 100644
--- a/qemu_mode/patches/afl-qemu-common.h
+++ b/qemu_mode/patches/afl-qemu-common.h
@@ -47,3 +47,25 @@
#define INC_AFL_AREA(loc) afl_area_ptr[loc]++
#endif
+/* Declared in afl-qemu-cpu-inl.h */
+
+extern unsigned char *afl_area_ptr;
+extern unsigned int afl_inst_rms;
+extern abi_ulong afl_start_code, afl_end_code;
+extern abi_ulong afl_persistent_addr;
+extern abi_ulong afl_persistent_ret_addr;
+extern u8 afl_compcov_level;
+extern unsigned char afl_fork_child;
+extern unsigned char is_persistent;
+extern target_long persistent_stack_offset;
+
+extern __thread abi_ulong afl_prev_loc;
+
+void afl_persistent_loop();
+
+void tcg_gen_afl_call0(void *func);
+void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
+ TCGv_i64 arg1, TCGv_i64 arg2);
+
+void tcg_gen_afl_maybe_log_call(target_ulong cur_loc);
+
diff --git a/qemu_mode/patches/afl-qemu-cpu-inl.h b/qemu_mode/patches/afl-qemu-cpu-inl.h
index 2a1331cb..2e685d8d 100644
--- a/qemu_mode/patches/afl-qemu-cpu-inl.h
+++ b/qemu_mode/patches/afl-qemu-cpu-inl.h
@@ -34,6 +34,8 @@
#include <sys/shm.h>
#include "../../config.h"
+#define PERSISTENT_DEFAULT_MAX_CNT 1000
+
/***************************
* VARIOUS AUXILIARY STUFF *
***************************/
@@ -71,13 +73,20 @@ abi_ulong afl_entry_point, /* ELF entry point (_start) */
afl_start_code, /* .text start pointer */
afl_end_code; /* .text end pointer */
+abi_ulong afl_persistent_addr, afl_persistent_ret_addr;
+unsigned int afl_persistent_cnt;
+
u8 afl_compcov_level;
+__thread abi_ulong afl_prev_loc;
+
/* Set in the child process in forkserver mode: */
-static int forkserver_installed = 0;
-static unsigned char afl_fork_child;
-unsigned int afl_forksrv_pid;
+static int forkserver_installed = 0;
+unsigned char afl_fork_child;
+unsigned int afl_forksrv_pid;
+unsigned char is_persistent;
+target_long persistent_stack_offset;
/* Instrumentation ratio: */
@@ -187,6 +196,23 @@ static void afl_setup(void) {
rcu_disable_atfork();
+ is_persistent = getenv("AFL_QEMU_PERSISTENT_ADDR") != NULL;
+
+ if (is_persistent) {
+
+ afl_persistent_addr = strtoll(getenv("AFL_QEMU_PERSISTENT_ADDR"), NULL, 16);
+ if (getenv("AFL_QEMU_PERSISTENT_RET"))
+ afl_persistent_ret_addr =
+ strtoll(getenv("AFL_QEMU_PERSISTENT_RET"), NULL, 16);
+ /* If AFL_QEMU_PERSISTENT_RET is not specified patch the return addr */
+
+ }
+
+ if (getenv("AFL_QEMU_PERSISTENT_CNT"))
+ afl_persistent_cnt = strtoll(getenv("AFL_QEMU_PERSISTENT_CNT"), NULL, 16);
+ else
+ afl_persistent_cnt = PERSISTENT_DEFAULT_MAX_CNT;
+
}
/* Fork server logic, invoked once we hit _start. */
@@ -197,8 +223,13 @@ static void afl_forkserver(CPUState *cpu) {
if (forkserver_installed == 1) return;
forkserver_installed = 1;
+
// if (!afl_area_ptr) return; // not necessary because of fixed dummy buffer
+ pid_t child_pid;
+ int t_fd[2];
+ u8 child_stopped = 0;
+
/* Tell the parent that we're alive. If the parent doesn't want
to talk, assume that we're not running in forkserver mode. */
@@ -210,38 +241,63 @@ static void afl_forkserver(CPUState *cpu) {
while (1) {
- pid_t child_pid;
- int status, t_fd[2];
+ int status;
+ u32 was_killed;
/* Whoops, parent dead? */
- if (read(FORKSRV_FD, tmp, 4) != 4) exit(2);
+ if (read(FORKSRV_FD, &was_killed, 4) != 4) exit(2);
+
+ /* If we stopped the child in persistent mode, but there was a race
+ condition and afl-fuzz already issued SIGKILL, write off the old
+ process. */
+
+ if (child_stopped && was_killed) {
+
+ child_stopped = 0;
+ if (waitpid(child_pid, &status, 0) < 0) exit(8);
+
+ }
+
+ if (!child_stopped) {
- /* Establish a channel with child to grab translation commands. We'll
+ /* Establish a channel with child to grab translation commands. We'll
read from t_fd[0], child will write to TSL_FD. */
- if (pipe(t_fd) || dup2(t_fd[1], TSL_FD) < 0) exit(3);
- close(t_fd[1]);
+ if (pipe(t_fd) || dup2(t_fd[1], TSL_FD) < 0) exit(3);
+ close(t_fd[1]);
- child_pid = fork();
- if (child_pid < 0) exit(4);
+ child_pid = fork();
+ if (child_pid < 0) exit(4);
- if (!child_pid) {
+ if (!child_pid) {
- /* Child process. Close descriptors and run free. */
+ /* Child process. Close descriptors and run free. */
- afl_fork_child = 1;
- close(FORKSRV_FD);
- close(FORKSRV_FD + 1);
- close(t_fd[0]);
- return;
+ afl_fork_child = 1;
+ close(FORKSRV_FD);
+ close(FORKSRV_FD + 1);
+ close(t_fd[0]);
+ return;
+
+ }
+
+ /* Parent. */
+
+ close(TSL_FD);
+
+ } else {
+
+ /* Special handling for persistent mode: if the child is alive but
+ currently stopped, simply restart it with SIGCONT. */
+
+ kill(child_pid, SIGCONT);
+ child_stopped = 0;
}
/* Parent. */
- close(TSL_FD);
-
if (write(FORKSRV_FD + 1, &child_pid, 4) != 4) exit(5);
/* Collect translation requests until child dies and closes the pipe. */
@@ -250,13 +306,80 @@ static void afl_forkserver(CPUState *cpu) {
/* Get and relay exit status to parent. */
- if (waitpid(child_pid, &status, 0) < 0) exit(6);
+ if (waitpid(child_pid, &status, is_persistent ? WUNTRACED : 0) < 0) exit(6);
+
+ /* In persistent mode, the child stops itself with SIGSTOP to indicate
+ a successful run. In this case, we want to wake it up without forking
+ again. */
+
+ if (WIFSTOPPED(status)) child_stopped = 1;
+
if (write(FORKSRV_FD + 1, &status, 4) != 4) exit(7);
}
}
+/* A simplified persistent mode handler, used as explained in README.llvm. */
+
+void afl_persistent_loop() {
+
+ static u8 first_pass = 1;
+ static u32 cycle_cnt;
+ static struct afl_tsl exit_cmd_tsl = {{-1, 0, 0, 0}, NULL};
+
+ if (!afl_fork_child) return;
+
+ if (first_pass) {
+
+ /* Make sure that every iteration of __AFL_LOOP() starts with a clean slate.
+ On subsequent calls, the parent will take care of that, but on the first
+ iteration, it's our job to erase any trace of whatever happened
+ before the loop. */
+
+ if (is_persistent) {
+
+ memset(afl_area_ptr, 0, MAP_SIZE);
+ afl_area_ptr[0] = 1;
+ afl_prev_loc = 0;
+
+ }
+
+ cycle_cnt = afl_persistent_cnt;
+ first_pass = 0;
+ persistent_stack_offset = TARGET_LONG_BITS / 8;
+
+ return;
+
+ }
+
+ if (is_persistent) {
+
+ if (--cycle_cnt) {
+
+ if (write(TSL_FD, &exit_cmd_tsl, sizeof(struct afl_tsl)) !=
+ sizeof(struct afl_tsl)) {
+
+ /* Exit the persistent loop on pipe error */
+ exit(0);
+
+ }
+
+ raise(SIGSTOP);
+
+ afl_area_ptr[0] = 1;
+ afl_prev_loc = 0;
+
+ } else {
+
+ exit(0);
+
+ }
+
+ }
+
+}
+
/* This code is invoked whenever QEMU decides that it doesn't have a
translation of a particular block and needs to compute it, or when it
decides to chain two TBs together. When this happens, we tell the parent to
@@ -330,6 +453,10 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) break;
+ /* Exit command for persistent */
+
+ if (t.tb.pc == (target_ulong)(-1)) return;
+
tb = tb_htable_lookup(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask);
if (!tb) {
diff --git a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
index 3d3c1b6b..cd5c21aa 100644
--- a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
+++ b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
@@ -35,15 +35,6 @@
#include "tcg.h"
#include "tcg-op.h"
-/* Declared in afl-qemu-cpu-inl.h */
-extern unsigned char *afl_area_ptr;
-extern unsigned int afl_inst_rms;
-extern abi_ulong afl_start_code, afl_end_code;
-extern u8 afl_compcov_level;
-
-void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
- TCGv_i64 arg1, TCGv_i64 arg2);
-
static void afl_compcov_log_16(target_ulong cur_loc, target_ulong arg1,
target_ulong arg2) {
@@ -137,3 +128,30 @@ static void afl_gen_compcov(target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2,
}
+#define AFL_QEMU_TARGET_i386_SNIPPET \
+ if (is_persistent) { \
+ \
+ if (s->pc == afl_persistent_addr) { \
+ \
+ if (afl_persistent_ret_addr == 0) { \
+ \
+ TCGv_ptr stack_off_ptr = tcg_const_ptr(&persistent_stack_offset); \
+ TCGv stack_off = tcg_temp_new(); \
+ tcg_gen_ld_tl(stack_off, stack_off_ptr, 0); \
+ tcg_gen_sub_tl(cpu_regs[R_ESP], cpu_regs[R_ESP], stack_off); \
+ tcg_temp_free(stack_off); \
+ \
+ } \
+ TCGv_ptr paddr = tcg_const_ptr(afl_persistent_addr); \
+ tcg_gen_st_tl(paddr, cpu_regs[R_ESP], 0); \
+ tcg_gen_afl_call0(&afl_persistent_loop); \
+ \
+ } else if (afl_persistent_ret_addr && s->pc == afl_persistent_ret_addr) { \
+ \
+ gen_jmp_im(s, afl_persistent_addr); \
+ gen_eob(s); \
+ \
+ } \
+ \
+ }
+
diff --git a/qemu_mode/patches/afl-qemu-tcg-inl.h b/qemu_mode/patches/afl-qemu-tcg-inl.h
index d45ffac9..2a0ddee1 100644
--- a/qemu_mode/patches/afl-qemu-tcg-inl.h
+++ b/qemu_mode/patches/afl-qemu-tcg-inl.h
@@ -191,6 +191,186 @@ void tcg_gen_afl_maybe_log_call(target_ulong cur_loc) {
}
+/* Note: we convert the 64 bit args to 32 bit and do some alignment
+ and endian swap. Maybe it would be better to do the alignment
+ and endian swap in tcg_reg_alloc_call(). */
+void tcg_gen_afl_call0(void *func) {
+
+ int i, real_args, nb_rets, pi;
+ unsigned sizemask, flags;
+ TCGOp * op;
+
+ const int nargs = 0;
+ TCGTemp **args;
+
+ flags = 0;
+ sizemask = dh_sizemask(void, 0);
+
+#if defined(__sparc__) && !defined(__arch64__) && \
+ !defined(CONFIG_TCG_INTERPRETER)
+ /* We have 64-bit values in one register, but need to pass as two
+ separate parameters. Split them. */
+ int orig_sizemask = sizemask;
+ int orig_nargs = nargs;
+ TCGv_i64 retl, reth;
+ TCGTemp *split_args[MAX_OPC_PARAM];
+
+ retl = NULL;
+ reth = NULL;
+ if (sizemask != 0) {
+
+ for (i = real_args = 0; i < nargs; ++i) {
+
+ int is_64bit = sizemask & (1 << (i + 1) * 2);
+ if (is_64bit) {
+
+ TCGv_i64 orig = temp_tcgv_i64(args[i]);
+ TCGv_i32 h = tcg_temp_new_i32();
+ TCGv_i32 l = tcg_temp_new_i32();
+ tcg_gen_extr_i64_i32(l, h, orig);
+ split_args[real_args++] = tcgv_i32_temp(h);
+ split_args[real_args++] = tcgv_i32_temp(l);
+
+ } else {
+
+ split_args[real_args++] = args[i];
+
+ }
+
+ }
+
+ nargs = real_args;
+ args = split_args;
+ sizemask = 0;
+
+ }
+
+#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
+ for (i = 0; i < nargs; ++i) {
+
+ int is_64bit = sizemask & (1 << (i + 1) * 2);
+ int is_signed = sizemask & (2 << (i + 1) * 2);
+ if (!is_64bit) {
+
+ TCGv_i64 temp = tcg_temp_new_i64();
+ TCGv_i64 orig = temp_tcgv_i64(args[i]);
+ if (is_signed) {
+
+ tcg_gen_ext32s_i64(temp, orig);
+
+ } else {
+
+ tcg_gen_ext32u_i64(temp, orig);
+
+ }
+
+ args[i] = tcgv_i64_temp(temp);
+
+ }
+
+ }
+
+#endif /* TCG_TARGET_EXTEND_ARGS */
+
+ op = tcg_emit_op(INDEX_op_call);
+
+ pi = 0;
+ nb_rets = 0;
+ TCGOP_CALLO(op) = nb_rets;
+
+ real_args = 0;
+ for (i = 0; i < nargs; i++) {
+
+ int is_64bit = sizemask & (1 << (i + 1) * 2);
+ if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
+
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
+ /* some targets want aligned 64 bit args */
+ if (real_args & 1) {
+
+ op->args[pi++] = TCG_CALL_DUMMY_ARG;
+ real_args++;
+
+ }
+
+#endif
+ /* If stack grows up, then we will be placing successive
+ arguments at lower addresses, which means we need to
+ reverse the order compared to how we would normally
+ treat either big or little-endian. For those arguments
+ that will wind up in registers, this still works for
+ HPPA (the only current STACK_GROWSUP target) since the
+ argument registers are *also* allocated in decreasing
+ order. If another such target is added, this logic may
+ have to get more complicated to differentiate between
+ stack arguments and register arguments. */
+#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
+ op->args[pi++] = temp_arg(args[i] + 1);
+ op->args[pi++] = temp_arg(args[i]);
+#else
+ op->args[pi++] = temp_arg(args[i]);
+ op->args[pi++] = temp_arg(args[i] + 1);
+#endif
+ real_args += 2;
+ continue;
+
+ }
+
+ op->args[pi++] = temp_arg(args[i]);
+ real_args++;
+
+ }
+
+ op->args[pi++] = (uintptr_t)func;
+ op->args[pi++] = flags;
+ TCGOP_CALLI(op) = real_args;
+
+ /* Make sure the fields didn't overflow. */
+ tcg_debug_assert(TCGOP_CALLI(op) == real_args);
+ tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
+
+#if defined(__sparc__) && !defined(__arch64__) && \
+ !defined(CONFIG_TCG_INTERPRETER)
+ /* Free all of the parts we allocated above. */
+ for (i = real_args = 0; i < orig_nargs; ++i) {
+
+ int is_64bit = orig_sizemask & (1 << (i + 1) * 2);
+ if (is_64bit) {
+
+ tcg_temp_free_internal(args[real_args++]);
+ tcg_temp_free_internal(args[real_args++]);
+
+ } else {
+
+ real_args++;
+
+ }
+
+ }
+
+ if (orig_sizemask & 1) {
+
+ /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them.
+ Note that describing these as TCGv_i64 eliminates an unnecessary
+ zero-extension that tcg_gen_concat_i32_i64 would create. */
+ tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
+ tcg_temp_free_i64(retl);
+ tcg_temp_free_i64(reth);
+
+ }
+
+#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
+ for (i = 0; i < nargs; ++i) {
+
+ int is_64bit = sizemask & (1 << (i + 1) * 2);
+ if (!is_64bit) { tcg_temp_free_internal(args[i]); }
+
+ }
+
+#endif /* TCG_TARGET_EXTEND_ARGS */
+
+}
+
void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
TCGv_i64 arg1, TCGv_i64 arg2) {
diff --git a/qemu_mode/patches/afl-qemu-translate-inl.h b/qemu_mode/patches/afl-qemu-translate-inl.h
index 9abaa961..530afeaa 100644
--- a/qemu_mode/patches/afl-qemu-translate-inl.h
+++ b/qemu_mode/patches/afl-qemu-translate-inl.h
@@ -34,22 +34,13 @@
#include "afl-qemu-common.h"
#include "tcg-op.h"
-/* Declared in afl-qemu-cpu-inl.h */
-extern unsigned char *afl_area_ptr;
-extern unsigned int afl_inst_rms;
-extern abi_ulong afl_start_code, afl_end_code;
-
-void tcg_gen_afl_maybe_log_call(target_ulong cur_loc);
-
void afl_maybe_log(target_ulong cur_loc) {
- static __thread abi_ulong prev_loc;
-
- register uintptr_t afl_idx = cur_loc ^ prev_loc;
+ register uintptr_t afl_idx = cur_loc ^ afl_prev_loc;
INC_AFL_AREA(afl_idx);
- prev_loc = cur_loc >> 1;
+ afl_prev_loc = cur_loc >> 1;
}
diff --git a/qemu_mode/patches/i386-translate.diff b/qemu_mode/patches/i386-translate.diff
index 239b2404..00337e2c 100644
--- a/qemu_mode/patches/i386-translate.diff
+++ b/qemu_mode/patches/i386-translate.diff
@@ -1,5 +1,5 @@
diff --git a/target/i386/translate.c b/target/i386/translate.c
-index 0dd5fbe4..b95d341e 100644
+index 0dd5fbe4..a23da128 100644
--- a/target/i386/translate.c
+++ b/target/i386/translate.c
@@ -32,6 +32,8 @@
@@ -31,3 +31,12 @@ index 0dd5fbe4..b95d341e 100644
set_cc_op(s1, CC_OP_SUBB + ot);
break;
}
+@@ -4508,6 +4513,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
+ rex_w = -1;
+ rex_r = 0;
+
++ AFL_QEMU_TARGET_i386_SNIPPET
++
+ next_byte:
+ b = x86_ldub_code(env, s);
+ /* Collect prefixes. */
diff --git a/src/afl-analyze.c b/src/afl-analyze.c
index e30f53b8..4b157973 100644
--- a/src/afl-analyze.c
+++ b/src/afl-analyze.c
@@ -988,7 +988,7 @@ int main(int argc, char** argv) {
unicorn_mode = 1;
break;
-
+
case 'h':
usage(argv[0]);
return -1;
diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c
index 6c27d0f9..d9f67da5 100644
--- a/src/afl-forkserver.c
+++ b/src/afl-forkserver.c
@@ -44,7 +44,7 @@
extern u8 uses_asan;
extern u8 *trace_bits;
extern s32 forksrv_pid, child_pid, fsrv_ctl_fd, fsrv_st_fd;
-extern s32 out_fd, out_dir_fd, dev_null_fd; /* initialize these with -1 */
+extern s32 out_fd, out_dir_fd, dev_null_fd; /* initialize these with -1 */
#ifndef HAVE_ARC4RANDOM
extern s32 dev_urandom_fd;
#endif
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index 2a437ddd..3928a88f 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -2288,6 +2288,7 @@ abandon_entry:
}
struct MOpt_globals_t {
+
u64 *finds;
u64 *finds_v2;
u64 *cycles;
@@ -2300,6 +2301,7 @@ struct MOpt_globals_t {
char *splice_stageformat;
char *havoc_stagenameshort;
char *splice_stagenameshort;
+
} MOpt_globals_pilot = {
stage_finds_puppet[0],
stage_finds_puppet_v2[0],
diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c
index 37a04e44..f2f663dc 100644
--- a/src/afl-fuzz-run.c
+++ b/src/afl-fuzz-run.c
@@ -253,7 +253,8 @@ void write_to_testcase(void* mem, u32 len) {
if (out_file) {
- //unlink(out_file); /* Ignore errors. */
+ // unlink(out_file); /* Ignore errors.
+ // */
fd = open(out_file, O_WRONLY | O_CREAT | O_TRUNC, 0600);
@@ -295,7 +296,8 @@ void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
if (out_file) {
- //unlink(out_file); /* Ignore errors. */
+ // unlink(out_file); /* Ignore errors.
+ // */
fd = open(out_file, O_WRONLY | O_CREAT | O_TRUNC, 0600);
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index eb0060a4..2d16345a 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -493,7 +493,7 @@ int main(int argc, char** argv) {
case 'h':
usage(argv[0]);
return -1;
- break; // not needed
+ break; // not needed
default: usage(argv[0]);
diff --git a/src/afl-gcc.c b/src/afl-gcc.c
index 2f72ef34..8982ca97 100644
--- a/src/afl-gcc.c
+++ b/src/afl-gcc.c
@@ -334,11 +334,15 @@ static void edit_params(u32 argc, char** argv) {
int main(int argc, char** argv) {
if (argc == 2 && strcmp(argv[1], "-h") == 0) {
- printf("afl-cc" VERSION" by <lcamtuf@google.com>\n\n");
+
+ printf("afl-cc" VERSION " by <lcamtuf@google.com>\n\n");
printf("%s \n\n", argv[0]);
printf("afl-gcc has no command line options\n");
- printf("NOTE: afl-gcc is deprecated, llvm_mode is much faster and has more options\n");
+ printf(
+ "NOTE: afl-gcc is deprecated, llvm_mode is much faster and has more "
+ "options\n");
return -1;
+
}
if (isatty(2) && !getenv("AFL_QUIET")) {
diff --git a/src/afl-gotcpu.c b/src/afl-gotcpu.c
index 85864c6f..de41177c 100644
--- a/src/afl-gotcpu.c
+++ b/src/afl-gotcpu.c
@@ -128,11 +128,13 @@ repeat_loop:
int main(int argc, char** argv) {
if (argc > 1) {
+
printf("afl-gotcpu" VERSION " by <lcamtuf@google.com>\n");
printf("\n%s \n\n", argv[0]);
printf("afl-gotcpu does not have command line options\n");
printf("afl-gotcpu prints out which CPUs are available\n");
return -1;
+
}
#ifdef HAVE_AFFINITY
diff --git a/src/afl-showmap.c b/src/afl-showmap.c
index 6aa72746..bf9306d5 100644
--- a/src/afl-showmap.c
+++ b/src/afl-showmap.c
@@ -691,7 +691,7 @@ int main(int argc, char** argv) {
if (edges_only) FATAL("-e and -r are mutually exclusive");
raw_instr_output = 1;
break;
-
+
case 'h':
usage(argv[0]);
return -1;
diff --git a/src/afl-tmin.c b/src/afl-tmin.c
index baf22557..8308d98d 100644
--- a/src/afl-tmin.c
+++ b/src/afl-tmin.c
@@ -1211,7 +1211,7 @@ int main(int argc, char** argv) {
mask_bitmap = ck_alloc(MAP_SIZE);
read_bitmap(optarg);
break;
-
+
case 'h':
usage(argv[0]);
return -1;