Diffstat (limited to 'qemu_mode/patches')
-rw-r--r--   qemu_mode/patches/afl-qemu-common.h               22
-rw-r--r--   qemu_mode/patches/afl-qemu-cpu-inl.h             169
-rw-r--r--   qemu_mode/patches/afl-qemu-cpu-translate-inl.h    36
-rw-r--r--   qemu_mode/patches/afl-qemu-tcg-inl.h             180
-rw-r--r--   qemu_mode/patches/afl-qemu-translate-inl.h        13
-rw-r--r--   qemu_mode/patches/i386-translate.diff             11
6 files changed, 389 insertions, 42 deletions
diff --git a/qemu_mode/patches/afl-qemu-common.h b/qemu_mode/patches/afl-qemu-common.h index 88c110b4..053585a7 100644 --- a/qemu_mode/patches/afl-qemu-common.h +++ b/qemu_mode/patches/afl-qemu-common.h @@ -47,3 +47,25 @@ #define INC_AFL_AREA(loc) afl_area_ptr[loc]++ #endif +/* Declared in afl-qemu-cpu-inl.h */ + +extern unsigned char *afl_area_ptr; +extern unsigned int afl_inst_rms; +extern abi_ulong afl_start_code, afl_end_code; +extern abi_ulong afl_persistent_addr; +extern abi_ulong afl_persistent_ret_addr; +extern u8 afl_compcov_level; +extern unsigned char afl_fork_child; +extern unsigned char is_persistent; +extern target_long persistent_stack_offset; + +extern __thread abi_ulong afl_prev_loc; + +void afl_persistent_loop(); + +void tcg_gen_afl_call0(void *func); +void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, + TCGv_i64 arg1, TCGv_i64 arg2); + +void tcg_gen_afl_maybe_log_call(target_ulong cur_loc); + diff --git a/qemu_mode/patches/afl-qemu-cpu-inl.h b/qemu_mode/patches/afl-qemu-cpu-inl.h index 2a1331cb..2e685d8d 100644 --- a/qemu_mode/patches/afl-qemu-cpu-inl.h +++ b/qemu_mode/patches/afl-qemu-cpu-inl.h @@ -34,6 +34,8 @@ #include <sys/shm.h> #include "../../config.h" +#define PERSISTENT_DEFAULT_MAX_CNT 1000 + /*************************** * VARIOUS AUXILIARY STUFF * ***************************/ @@ -71,13 +73,20 @@ abi_ulong afl_entry_point, /* ELF entry point (_start) */ afl_start_code, /* .text start pointer */ afl_end_code; /* .text end pointer */ +abi_ulong afl_persistent_addr, afl_persistent_ret_addr; +unsigned int afl_persistent_cnt; + u8 afl_compcov_level; +__thread abi_ulong afl_prev_loc; + /* Set in the child process in forkserver mode: */ -static int forkserver_installed = 0; -static unsigned char afl_fork_child; -unsigned int afl_forksrv_pid; +static int forkserver_installed = 0; +unsigned char afl_fork_child; +unsigned int afl_forksrv_pid; +unsigned char is_persistent; +target_long persistent_stack_offset; /* Instrumentation ratio: */ @@ -187,6 +196,23 @@ static void afl_setup(void) { rcu_disable_atfork(); + is_persistent = getenv("AFL_QEMU_PERSISTENT_ADDR") != NULL; + + if (is_persistent) { + + afl_persistent_addr = strtoll(getenv("AFL_QEMU_PERSISTENT_ADDR"), NULL, 16); + if (getenv("AFL_QEMU_PERSISTENT_RET")) + afl_persistent_ret_addr = + strtoll(getenv("AFL_QEMU_PERSISTENT_RET"), NULL, 16); + /* If AFL_QEMU_PERSISTENT_RET is not specified patch the return addr */ + + } + + if (getenv("AFL_QEMU_PERSISTENT_CNT")) + afl_persistent_cnt = strtoll(getenv("AFL_QEMU_PERSISTENT_CNT"), NULL, 16); + else + afl_persistent_cnt = PERSISTENT_DEFAULT_MAX_CNT; + } /* Fork server logic, invoked once we hit _start. */ @@ -197,8 +223,13 @@ static void afl_forkserver(CPUState *cpu) { if (forkserver_installed == 1) return; forkserver_installed = 1; + // if (!afl_area_ptr) return; // not necessary because of fixed dummy buffer + pid_t child_pid; + int t_fd[2]; + u8 child_stopped = 0; + /* Tell the parent that we're alive. If the parent doesn't want to talk, assume that we're not running in forkserver mode. */ @@ -210,38 +241,63 @@ static void afl_forkserver(CPUState *cpu) { while (1) { - pid_t child_pid; - int status, t_fd[2]; + int status; + u32 was_killed; /* Whoops, parent dead? */ - if (read(FORKSRV_FD, tmp, 4) != 4) exit(2); + if (read(FORKSRV_FD, &was_killed, 4) != 4) exit(2); + + /* If we stopped the child in persistent mode, but there was a race + condition and afl-fuzz already issued SIGKILL, write off the old + process. 
*/ + + if (child_stopped && was_killed) { + + child_stopped = 0; + if (waitpid(child_pid, &status, 0) < 0) exit(8); + + } + + if (!child_stopped) { - /* Establish a channel with child to grab translation commands. We'll + /* Establish a channel with child to grab translation commands. We'll read from t_fd[0], child will write to TSL_FD. */ - if (pipe(t_fd) || dup2(t_fd[1], TSL_FD) < 0) exit(3); - close(t_fd[1]); + if (pipe(t_fd) || dup2(t_fd[1], TSL_FD) < 0) exit(3); + close(t_fd[1]); - child_pid = fork(); - if (child_pid < 0) exit(4); + child_pid = fork(); + if (child_pid < 0) exit(4); - if (!child_pid) { + if (!child_pid) { - /* Child process. Close descriptors and run free. */ + /* Child process. Close descriptors and run free. */ - afl_fork_child = 1; - close(FORKSRV_FD); - close(FORKSRV_FD + 1); - close(t_fd[0]); - return; + afl_fork_child = 1; + close(FORKSRV_FD); + close(FORKSRV_FD + 1); + close(t_fd[0]); + return; + + } + + /* Parent. */ + + close(TSL_FD); + + } else { + + /* Special handling for persistent mode: if the child is alive but + currently stopped, simply restart it with SIGCONT. */ + + kill(child_pid, SIGCONT); + child_stopped = 0; } /* Parent. */ - close(TSL_FD); - if (write(FORKSRV_FD + 1, &child_pid, 4) != 4) exit(5); /* Collect translation requests until child dies and closes the pipe. */ @@ -250,13 +306,80 @@ static void afl_forkserver(CPUState *cpu) { /* Get and relay exit status to parent. */ - if (waitpid(child_pid, &status, 0) < 0) exit(6); + if (waitpid(child_pid, &status, is_persistent ? WUNTRACED : 0) < 0) exit(6); + + /* In persistent mode, the child stops itself with SIGSTOP to indicate + a successful run. In this case, we want to wake it up without forking + again. */ + + if (WIFSTOPPED(status)) child_stopped = 1; + if (write(FORKSRV_FD + 1, &status, 4) != 4) exit(7); } } +/* A simplified persistent mode handler, used as explained in README.llvm. */ + +void afl_persistent_loop() { + + static u8 first_pass = 1; + static u32 cycle_cnt; + static struct afl_tsl exit_cmd_tsl = {{-1, 0, 0, 0}, NULL}; + + if (!afl_fork_child) return; + + if (first_pass) { + + /* Make sure that every iteration of __AFL_LOOP() starts with a clean slate. + On subsequent calls, the parent will take care of that, but on the first + iteration, it's our job to erase any trace of whatever happened + before the loop. */ + + if (is_persistent) { + + memset(afl_area_ptr, 0, MAP_SIZE); + afl_area_ptr[0] = 1; + afl_prev_loc = 0; + + } + + cycle_cnt = afl_persistent_cnt; + first_pass = 0; + persistent_stack_offset = TARGET_LONG_BITS / 8; + + return; + + } + + if (is_persistent) { + + if (--cycle_cnt) { + + if (write(TSL_FD, &exit_cmd_tsl, sizeof(struct afl_tsl)) != + sizeof(struct afl_tsl)) { + + /* Exit the persistent loop on pipe error */ + exit(0); + + } + + raise(SIGSTOP); + + afl_area_ptr[0] = 1; + afl_prev_loc = 0; + + } else { + + exit(0); + + } + + } + +} + /* This code is invoked whenever QEMU decides that it doesn't have a translation of a particular block and needs to compute it, or when it decides to chain two TBs together. 
When this happens, we tell the parent to @@ -330,6 +453,10 @@ static void afl_wait_tsl(CPUState *cpu, int fd) { if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) break; + /* Exit command for persistent */ + + if (t.tb.pc == (target_ulong)(-1)) return; + tb = tb_htable_lookup(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask); if (!tb) { diff --git a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h index 3d3c1b6b..cd5c21aa 100644 --- a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h +++ b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h @@ -35,15 +35,6 @@ #include "tcg.h" #include "tcg-op.h" -/* Declared in afl-qemu-cpu-inl.h */ -extern unsigned char *afl_area_ptr; -extern unsigned int afl_inst_rms; -extern abi_ulong afl_start_code, afl_end_code; -extern u8 afl_compcov_level; - -void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, - TCGv_i64 arg1, TCGv_i64 arg2); - static void afl_compcov_log_16(target_ulong cur_loc, target_ulong arg1, target_ulong arg2) { @@ -137,3 +128,30 @@ static void afl_gen_compcov(target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2, } +#define AFL_QEMU_TARGET_i386_SNIPPET \ + if (is_persistent) { \ + \ + if (s->pc == afl_persistent_addr) { \ + \ + if (afl_persistent_ret_addr == 0) { \ + \ + TCGv_ptr stack_off_ptr = tcg_const_ptr(&persistent_stack_offset); \ + TCGv stack_off = tcg_temp_new(); \ + tcg_gen_ld_tl(stack_off, stack_off_ptr, 0); \ + tcg_gen_sub_tl(cpu_regs[R_ESP], cpu_regs[R_ESP], stack_off); \ + tcg_temp_free(stack_off); \ + \ + } \ + TCGv_ptr paddr = tcg_const_ptr(afl_persistent_addr); \ + tcg_gen_st_tl(paddr, cpu_regs[R_ESP], 0); \ + tcg_gen_afl_call0(&afl_persistent_loop); \ + \ + } else if (afl_persistent_ret_addr && s->pc == afl_persistent_ret_addr) { \ + \ + gen_jmp_im(s, afl_persistent_addr); \ + gen_eob(s); \ + \ + } \ + \ + } + diff --git a/qemu_mode/patches/afl-qemu-tcg-inl.h b/qemu_mode/patches/afl-qemu-tcg-inl.h index d45ffac9..2a0ddee1 100644 --- a/qemu_mode/patches/afl-qemu-tcg-inl.h +++ b/qemu_mode/patches/afl-qemu-tcg-inl.h @@ -191,6 +191,186 @@ void tcg_gen_afl_maybe_log_call(target_ulong cur_loc) { } +/* Note: we convert the 64 bit args to 32 bit and do some alignment + and endian swap. Maybe it would be better to do the alignment + and endian swap in tcg_reg_alloc_call(). */ +void tcg_gen_afl_call0(void *func) { + + int i, real_args, nb_rets, pi; + unsigned sizemask, flags; + TCGOp * op; + + const int nargs = 0; + TCGTemp **args; + + flags = 0; + sizemask = dh_sizemask(void, 0); + +#if defined(__sparc__) && !defined(__arch64__) && \ + !defined(CONFIG_TCG_INTERPRETER) + /* We have 64-bit values in one register, but need to pass as two + separate parameters. Split them. 
*/ + int orig_sizemask = sizemask; + int orig_nargs = nargs; + TCGv_i64 retl, reth; + TCGTemp *split_args[MAX_OPC_PARAM]; + + retl = NULL; + reth = NULL; + if (sizemask != 0) { + + for (i = real_args = 0; i < nargs; ++i) { + + int is_64bit = sizemask & (1 << (i + 1) * 2); + if (is_64bit) { + + TCGv_i64 orig = temp_tcgv_i64(args[i]); + TCGv_i32 h = tcg_temp_new_i32(); + TCGv_i32 l = tcg_temp_new_i32(); + tcg_gen_extr_i64_i32(l, h, orig); + split_args[real_args++] = tcgv_i32_temp(h); + split_args[real_args++] = tcgv_i32_temp(l); + + } else { + + split_args[real_args++] = args[i]; + + } + + } + + nargs = real_args; + args = split_args; + sizemask = 0; + + } + +#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 + for (i = 0; i < nargs; ++i) { + + int is_64bit = sizemask & (1 << (i + 1) * 2); + int is_signed = sizemask & (2 << (i + 1) * 2); + if (!is_64bit) { + + TCGv_i64 temp = tcg_temp_new_i64(); + TCGv_i64 orig = temp_tcgv_i64(args[i]); + if (is_signed) { + + tcg_gen_ext32s_i64(temp, orig); + + } else { + + tcg_gen_ext32u_i64(temp, orig); + + } + + args[i] = tcgv_i64_temp(temp); + + } + + } + +#endif /* TCG_TARGET_EXTEND_ARGS */ + + op = tcg_emit_op(INDEX_op_call); + + pi = 0; + nb_rets = 0; + TCGOP_CALLO(op) = nb_rets; + + real_args = 0; + for (i = 0; i < nargs; i++) { + + int is_64bit = sizemask & (1 << (i + 1) * 2); + if (TCG_TARGET_REG_BITS < 64 && is_64bit) { + +#ifdef TCG_TARGET_CALL_ALIGN_ARGS + /* some targets want aligned 64 bit args */ + if (real_args & 1) { + + op->args[pi++] = TCG_CALL_DUMMY_ARG; + real_args++; + + } + +#endif + /* If stack grows up, then we will be placing successive + arguments at lower addresses, which means we need to + reverse the order compared to how we would normally + treat either big or little-endian. For those arguments + that will wind up in registers, this still works for + HPPA (the only current STACK_GROWSUP target) since the + argument registers are *also* allocated in decreasing + order. If another such target is added, this logic may + have to get more complicated to differentiate between + stack arguments and register arguments. */ +#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP) + op->args[pi++] = temp_arg(args[i] + 1); + op->args[pi++] = temp_arg(args[i]); +#else + op->args[pi++] = temp_arg(args[i]); + op->args[pi++] = temp_arg(args[i] + 1); +#endif + real_args += 2; + continue; + + } + + op->args[pi++] = temp_arg(args[i]); + real_args++; + + } + + op->args[pi++] = (uintptr_t)func; + op->args[pi++] = flags; + TCGOP_CALLI(op) = real_args; + + /* Make sure the fields didn't overflow. */ + tcg_debug_assert(TCGOP_CALLI(op) == real_args); + tcg_debug_assert(pi <= ARRAY_SIZE(op->args)); + +#if defined(__sparc__) && !defined(__arch64__) && \ + !defined(CONFIG_TCG_INTERPRETER) + /* Free all of the parts we allocated above. */ + for (i = real_args = 0; i < orig_nargs; ++i) { + + int is_64bit = orig_sizemask & (1 << (i + 1) * 2); + if (is_64bit) { + + tcg_temp_free_internal(args[real_args++]); + tcg_temp_free_internal(args[real_args++]); + + } else { + + real_args++; + + } + + } + + if (orig_sizemask & 1) { + + /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them. + Note that describing these as TCGv_i64 eliminates an unnecessary + zero-extension that tcg_gen_concat_i32_i64 would create. 
*/ + tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth); + tcg_temp_free_i64(retl); + tcg_temp_free_i64(reth); + + } + +#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 + for (i = 0; i < nargs; ++i) { + + int is_64bit = sizemask & (1 << (i + 1) * 2); + if (!is_64bit) { tcg_temp_free_internal(args[i]); } + + } + +#endif /* TCG_TARGET_EXTEND_ARGS */ + +} + void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2) { diff --git a/qemu_mode/patches/afl-qemu-translate-inl.h b/qemu_mode/patches/afl-qemu-translate-inl.h index 9abaa961..530afeaa 100644 --- a/qemu_mode/patches/afl-qemu-translate-inl.h +++ b/qemu_mode/patches/afl-qemu-translate-inl.h @@ -34,22 +34,13 @@ #include "afl-qemu-common.h" #include "tcg-op.h" -/* Declared in afl-qemu-cpu-inl.h */ -extern unsigned char *afl_area_ptr; -extern unsigned int afl_inst_rms; -extern abi_ulong afl_start_code, afl_end_code; - -void tcg_gen_afl_maybe_log_call(target_ulong cur_loc); - void afl_maybe_log(target_ulong cur_loc) { - static __thread abi_ulong prev_loc; - - register uintptr_t afl_idx = cur_loc ^ prev_loc; + register uintptr_t afl_idx = cur_loc ^ afl_prev_loc; INC_AFL_AREA(afl_idx); - prev_loc = cur_loc >> 1; + afl_prev_loc = cur_loc >> 1; } diff --git a/qemu_mode/patches/i386-translate.diff b/qemu_mode/patches/i386-translate.diff index 239b2404..00337e2c 100644 --- a/qemu_mode/patches/i386-translate.diff +++ b/qemu_mode/patches/i386-translate.diff @@ -1,5 +1,5 @@ diff --git a/target/i386/translate.c b/target/i386/translate.c -index 0dd5fbe4..b95d341e 100644 +index 0dd5fbe4..a23da128 100644 --- a/target/i386/translate.c +++ b/target/i386/translate.c @@ -32,6 +32,8 @@ @@ -31,3 +31,12 @@ index 0dd5fbe4..b95d341e 100644 set_cc_op(s1, CC_OP_SUBB + ot); break; } +@@ -4508,6 +4513,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) + rex_w = -1; + rex_r = 0; + ++ AFL_QEMU_TARGET_i386_SNIPPET ++ + next_byte: + b = x86_ldub_code(env, s); + /* Collect prefixes. */ |
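
The diff itself ships no usage example, so below is a minimal, hypothetical sketch of an x86 target that the new persistent mode could be pointed at. The process() function, its reset logic and the way its address would be obtained are illustrative assumptions, not part of the patch; only the environment variables AFL_QEMU_PERSISTENT_ADDR, AFL_QEMU_PERSISTENT_RET and AFL_QEMU_PERSISTENT_CNT (all parsed as hex in afl_setup(), the iteration count defaulting to PERSISTENT_DEFAULT_MAX_CNT = 1000) come from the changes above.

/* target.c - hypothetical persistent-mode target (NOT part of this patch).

   Idea: export the entry address of process() via AFL_QEMU_PERSISTENT_ADDR.
   The patched QEMU then calls afl_persistent_loop() whenever translation
   reaches that address and, because AFL_QEMU_PERSISTENT_RET is left unset,
   overwrites the saved return address on the stack so that returning from
   process() re-enters it, for up to AFL_QEMU_PERSISTENT_CNT iterations per
   forked child. */

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Candidate for AFL_QEMU_PERSISTENT_ADDR. For a non-PIE build, the address
   printed by nm for this symbol typically matches the runtime address under
   QEMU user mode. The patch only clears the coverage map and loops the
   program counter; globals, heap state and file offsets are not restored,
   so anything that must be fresh per iteration is reset here. */
__attribute__((noinline)) int process(void) {

  char    buf[64];
  ssize_t len;

  lseek(0, 0, SEEK_SET);                  /* re-read the test case from offset 0 */
  len = read(0, buf, sizeof(buf) - 1);
  if (len <= 0) return 0;
  buf[len] = 0;

  if (!strcmp(buf, "crashme")) abort();   /* something for the fuzzer to find */

  return 0;

}

int main(void) {

  return process();

}

A run could then look roughly like AFL_QEMU_PERSISTENT_ADDR=0x4005d0 afl-fuzz -Q -i in -o out -- ./target, with the address being whatever nm reports for process(); this invocation follows standard QEMU-mode usage and is an assumption rather than something this diff specifies.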