author    Andrea Fioraldi <andreafioraldi@gmail.com>  2019-07-20 13:09:45 +0200
committer Andrea Fioraldi <andreafioraldi@gmail.com>  2019-07-20 13:09:45 +0200
commit    47525f0dd624df8a2b412dfb75558a06c01f1f36 (patch)
tree      445fc3ab263c5fe30b1afbb8a4661b465bc3bf71
parent    322b5a736b2c84957c985cfffcb6bfc9470c0045 (diff)
download  afl++-47525f0dd624df8a2b412dfb75558a06c01f1f36.tar.gz
fix #24: check the validity of the requested block address
-rw-r--r--  qemu_mode/patches/afl-qemu-cpu-inl.h | 50
1 file changed, 43 insertions(+), 7 deletions(-)
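
In short: afl_wait_tsl() runs in the fork server parent and caches translated blocks on request from the child, but the child may execute code at addresses that were never mapped in the parent. The sketch below distills the guard this commit adds; it is illustrative only. page_get_flags(), PAGE_VALID, PAGE_READ, TARGET_PAGE_MASK, tb_gen_code() and mmap_lock()/mmap_unlock() are QEMU linux-user internals, while handle_tsl_request() is a hypothetical wrapper, not a function in the patch.

    /* Hedged sketch: translate a requested block only if its PC is
       mapped and readable in this (parent) process. All identifiers
       except handle_tsl_request() are QEMU linux-user internals. */
    static TranslationBlock *handle_tsl_request(CPUState *cpu,
                                                struct afl_tsl *t) {

      target_ulong page = t->tb.pc & TARGET_PAGE_MASK;
      int flags = page_get_flags(page);

      /* Without this check, tb_gen_code() faults (SIGSEGV) while
         reading guest code that only exists in the child. */
      if (!(flags & PAGE_VALID) || !(flags & PAGE_READ))
        return NULL;

      mmap_lock();
      TranslationBlock *tb = tb_gen_code(cpu, t->tb.pc, t->tb.cs_base,
                                         t->tb.flags, 0);
      mmap_unlock();
      return tb;
    }
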
diff --git a/qemu_mode/patches/afl-qemu-cpu-inl.h b/qemu_mode/patches/afl-qemu-cpu-inl.h
index 32030408..851478a0 100644
--- a/qemu_mode/patches/afl-qemu-cpu-inl.h
+++ b/qemu_mode/patches/afl-qemu-cpu-inl.h
@@ -271,6 +271,25 @@ static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags, ui
}
+
+/* Check if an address is valid in the current mapping */
+
+static inline int is_valid_addr(target_ulong addr) {
+
+ int l, flags;
+ target_ulong page;
+ void * p;
+
+ page = addr & TARGET_PAGE_MASK;
+ l = (page + TARGET_PAGE_SIZE) - addr;
+
+ flags = page_get_flags(page);
+ if (!(flags & PAGE_VALID) || !(flags & PAGE_READ))
+ return 0;
+
+ return 1;
+}
+
/* This is the other side of the same channel. Since timeouts are handled by
afl-fuzz simply killing the child, we can just wait until the pipe breaks. */
@@ -282,6 +301,8 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
while (1) {
+ u8 invalid_pc = 0;
+
/* Broken pipe means it's time to return to the fork server routine. */
if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl))
@@ -290,19 +311,34 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
tb = tb_htable_lookup(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask);
if(!tb) {
- mmap_lock();
- tb = tb_gen_code(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, 0);
- mmap_unlock();
+
+ /* The child may request to translate a block of memory that is not
+    mapped in the parent (e.g. JIT-compiled or dlopen()ed code).
+    This causes a SIGSEGV in gen_intermediate_code() and associated
+    subroutines. We simply avoid caching such blocks. */
+
+ if (is_valid_addr(t.tb.pc)) {
+
+ mmap_lock();
+ tb = tb_gen_code(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, 0);
+ mmap_unlock();
+ } else {
+
+ invalid_pc = 1;
+ }
}
if (t.is_chain) {
if (read(fd, &c, sizeof(struct afl_chain)) != sizeof(struct afl_chain))
break;
- last_tb = tb_htable_lookup(cpu, c.last_tb.pc, c.last_tb.cs_base,
- c.last_tb.flags, c.cf_mask);
- if (last_tb) {
- tb_add_jump(last_tb, c.tb_exit, tb);
+ if (!invalid_pc) {
+
+ last_tb = tb_htable_lookup(cpu, c.last_tb.pc, c.last_tb.cs_base,
+ c.last_tb.flags, c.cf_mask);
+ if (last_tb) {
+ tb_add_jump(last_tb, c.tb_exit, tb);
+ }
}
}
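
For context, the shape of the records read from the pipe can be inferred from the field accesses above (t.tb.pc, t.is_chain, c.last_tb, c.cf_mask, c.tb_exit). The following is a hedged reconstruction; field order and exact types are assumptions, and the full afl-qemu-cpu-inl.h holds the authoritative definitions.

    /* Reconstructed from the accesses in afl_wait_tsl(); layout is
       an assumption, see afl-qemu-cpu-inl.h for the real structs. */
    struct afl_tb {
      target_ulong pc;
      target_ulong cs_base;
      uint32_t     flags;
      uint32_t     cf_mask;
    };

    struct afl_tsl {
      struct afl_tb tb;
      char          is_chain;  /* if set, an afl_chain record follows */
    };

    struct afl_chain {
      struct afl_tb last_tb;
      uint32_t      cf_mask;
      int           tb_exit;
    };

Note the protocol consequence visible in the patch: even when the PC is invalid, the afl_chain record that follows an is_chain request must still be read so the pipe stream stays in sync, which is why the code sets an invalid_pc flag and skips only tb_add_jump() rather than skipping the read.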