From 55e9297202d646cfe7da8d6c5eb6937952812569 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Fri, 17 Jan 2020 16:39:05 +0100 Subject: first experiment cmplog --- src/afl-fuzz-cmplog.c | 950 ++++++++++++++++++++++++++++++++++++++++++++++++++ src/afl-fuzz-one.c | 7 + src/afl-fuzz-run.c | 2 + src/afl-fuzz-stats.c | 6 +- src/afl-fuzz.c | 12 +- src/afl-sharedmem.c | 28 ++ 6 files changed, 1002 insertions(+), 3 deletions(-) create mode 100644 src/afl-fuzz-cmplog.c (limited to 'src') diff --git a/src/afl-fuzz-cmplog.c b/src/afl-fuzz-cmplog.c new file mode 100644 index 00000000..350cb105 --- /dev/null +++ b/src/afl-fuzz-cmplog.c @@ -0,0 +1,950 @@ +#include "afl-fuzz.h" +#include "cmplog.h" + +#define SWAP64(_x) \ + ({ \ + \ + u64 _ret = (_x); \ + _ret = \ + (_ret & 0x00000000FFFFFFFF) << 32 | (_ret & 0xFFFFFFFF00000000) >> 32; \ + _ret = \ + (_ret & 0x0000FFFF0000FFFF) << 16 | (_ret & 0xFFFF0000FFFF0000) >> 16; \ + _ret = \ + (_ret & 0x00FF00FF00FF00FF) << 8 | (_ret & 0xFF00FF00FF00FF00) >> 8; \ + _ret; \ + \ + }) + +u8 common_fuzz_cmplog_stuff(char** argv, u8* out_buf, u32 len); + +extern struct cmp_map* cmp_map; // defined in afl-sharedmem.c + +u8* cmplog_binary; +char** its_argv; + +///// Colorization + +struct range { + u32 start; + u32 end; + struct range * next; +}; + +struct range* add_range(struct range* ranges, u32 start, u32 end) { + + struct range* r = ck_alloc_nozero(sizeof(struct range)); + r->start = start; + r->end = end; + r->next = ranges; + return r; + +} + +struct range* pop_biggest_range(struct range** ranges) { + + struct range* r = *ranges; + struct range* prev = NULL; + struct range* rmax = NULL; + struct range* prev_rmax = NULL; + u32 max_size = 0; + + while (r) { + u32 s = r->end - r->start; + if (s >= max_size) { + max_size = s; + prev_rmax = prev; + rmax = r; + } + prev = r; + r = r->next; + } + + if (rmax) { + if (prev_rmax) + prev_rmax->next = rmax->next; + else + *ranges = rmax->next; + } + + return rmax; + +} + +u8 get_exec_checksum(u8* buf, u32 len, u32* cksum) { + + if (unlikely(common_fuzz_stuff(its_argv, buf, len))) + return 1; + + *cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + return 0; + +} + +static void rand_replace(u8* buf, u32 len) { + + u32 i; + for (i = 0; i < len; ++i) + buf[i] = UR(256); + +} + +u8 colorization(u8* buf, u32 len, u32 exec_cksum) { + + struct range* ranges = add_range(NULL, 0, len); + u8* backup = ck_alloc_nozero(len); + + u64 orig_hit_cnt, new_hit_cnt; + orig_hit_cnt = queued_paths + unique_crashes; + + stage_name = "colorization"; + stage_short = "colorization"; + stage_max = 1000; + + struct range* rng; + stage_cur = stage_max; + while ((rng = pop_biggest_range(&ranges)) != NULL && stage_cur) { + + u32 s = rng->end - rng->start; + memcpy(backup, buf + rng->start, s); + rand_replace(buf + rng->start, s); + + u32 cksum; + if (unlikely(get_exec_checksum(buf, len, &cksum))) + return 1; + + if (cksum != exec_cksum) { + + ranges = add_range(ranges, rng->start, rng->start + s/2); + ranges = add_range(ranges, rng->start + s/2 +1, rng->end); + memcpy(buf + rng->start, backup, s); + + } + + ck_free(rng); + --stage_cur; + + } + + new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_COLORIZATION] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_COLORIZATION] += stage_max - stage_cur; + + while (ranges) { + rng = ranges; + ranges = ranges->next; + ck_free(rng); + } + + return 0; + +} + +///// Input to State replacement + +u8 its_fuzz(u32 idx, u32 size, u8* buf, u32 len, u8* status) { + + u64 orig_hit_cnt, new_hit_cnt; + + 
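+  /* Run the candidate buffer once. *status is set to 1 when the execution
+     yields new paths or crashes, and to 2 otherwise; in the no-find case the
+     replaced pattern is also offered to the auto-extras dictionary below. */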
orig_hit_cnt = queued_paths + unique_crashes; + + if (unlikely(common_fuzz_stuff(its_argv, buf, len))) + return 1; + + new_hit_cnt = queued_paths + unique_crashes; + + if (unlikely(new_hit_cnt != orig_hit_cnt)) { + + *status = 1; + + } else { + + if (size >= MIN_AUTO_EXTRA && size <= MAX_AUTO_EXTRA) + maybe_add_auto(&buf[idx], size); + *status = 2; + + } + + return 0; + +} + +u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx, + u8* orig_buf, u8* buf, u32 len, u8 do_reverse, u8* status) { + + u64* buf_64 = (u64*)&buf[idx]; + u32* buf_32 = (u32*)&buf[idx]; + u16* buf_16 = (u16*)&buf[idx]; + // u8* buf_8 = &buf[idx]; + u64* o_buf_64 = (u64*)&orig_buf[idx]; + u32* o_buf_32 = (u32*)&orig_buf[idx]; + u16* o_buf_16 = (u16*)&orig_buf[idx]; + // u8* o_buf_8 = &orig_buf[idx]; + + u32 its_len = len - idx; + *status = 0; + + if (SHAPE_BYTES(h->shape) == 8) { + if (its_len >= 8 && *buf_64 == pattern && *o_buf_64 == pattern) { + *buf_64 = repl; + if (unlikely(its_fuzz(idx, 8, buf, len, status))) + return 1; + *buf_64 = pattern; + } + // reverse encoding + if (do_reverse) + if (unlikely(cmp_extend_encoding(h, SWAP64(pattern), SWAP64(repl), idx, + orig_buf, buf, len, 0, status))) + return 1; + } + + if (SHAPE_BYTES(h->shape) == 4 || *status == 2) { + if (its_len >= 4 && *buf_32 == (u32)pattern && *o_buf_32 == (u32)pattern) { + *buf_32 = (u32)repl; + if (unlikely(its_fuzz(idx, 4, buf, len, status))) + return 1; + *buf_32 = pattern; + } + // reverse encoding + if (do_reverse) + if (unlikely(cmp_extend_encoding(h, SWAP32(pattern), SWAP32(repl), idx, + orig_buf, buf, len, 0, status))) + return 1; + } + + if (SHAPE_BYTES(h->shape) == 2 || *status == 2) { + if (its_len >= 2 && *buf_16 == (u16)pattern && *o_buf_16 == (u16)pattern) { + *buf_16 = (u16)repl; + if (unlikely(its_fuzz(idx, 2, buf, len, status))) + return 1; + *buf_16 = (u16)pattern; + } + // reverse encoding + if (do_reverse) + if (unlikely(cmp_extend_encoding(h, SWAP16(pattern), SWAP16(repl), idx, + orig_buf, buf, len, 0, status))) + return 1; + } + + /*if (SHAPE_BYTES(h->shape) == 1 || *status == 2) { + if (its_len >= 2 && *buf_8 == (u8)pattern && *o_buf_8 == (u8)pattern) { + *buf_8 = (u8)repl; + if (unlikely(its_fuzz(idx, 1, buf, len, status))) + return 1; + *buf_16 = (u16)pattern; + } + }*/ + + return 0; + +} + +u8 cmp_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) { + + struct cmp_header* h = &cmp_map->headers[key]; + u32 i, j, idx; + + u32 loggeds = h->hits; + if (h->hits > CMP_MAP_H) + loggeds = CMP_MAP_H; + + u8 status; + // opt not in the paper + u32 fails = 0; + + for (i = 0; i < loggeds; ++i) { + + struct cmp_operands* o = &cmp_map->log[key][i]; + + // opt not in the paper + for (j = 0; j < i; ++j) + if (cmp_map->log[key][j].v0 == o->v0 && cmp_map->log[key][i].v1 == o->v1) + goto cmp_fuzz_next_iter; + + for (idx = 0; idx < len && fails < 8; ++idx) { + + if (unlikely(cmp_extend_encoding(h, o->v0, o->v1, idx, orig_buf, buf, len, 1, &status))) + return 1; + if (status == 2) ++fails; + else if (status == 1) break; + + if (unlikely(cmp_extend_encoding(h, o->v1, o->v0, idx, orig_buf, buf, len, 1, &status))) + return 1; + if (status == 2) ++fails; + else if (status == 1) break; + + } + +cmp_fuzz_next_iter: + stage_cur++; + + } + + return 0; + +} + +///// Input to State stage + +// queue_cur->exec_cksum +u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len, u32 exec_cksum) { + + its_argv = argv; + + if (unlikely(colorization(buf, len, exec_cksum))) + return 1; + + // do it manually, forkserver clear only 
trace_bits + memset(cmp_map->headers, 0, sizeof(cmp_map->headers)); + + if (unlikely(common_fuzz_cmplog_stuff(argv, buf, len))) + return 1; + + u64 orig_hit_cnt, new_hit_cnt; + u64 orig_execs = total_execs; + orig_hit_cnt = queued_paths + unique_crashes; + + stage_name = "input-to-state"; + stage_short = "its"; + stage_max = 0; + stage_cur = 0; + + u32 k; + for (k = 0; k < CMP_MAP_W; ++k) { + + if (!cmp_map->headers[k].hits) + continue; + if (cmp_map->headers[k].hits > CMP_MAP_H) + stage_max += CMP_MAP_H; + else + stage_max += cmp_map->headers[k].hits; + + } + + for (k = 0; k < CMP_MAP_W; ++k) { + + if (!cmp_map->headers[k].hits) + continue; + cmp_fuzz(k, orig_buf, buf, len); + + } + + memcpy(buf, orig_buf, len); + + new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_ITS] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ITS] += total_execs - orig_execs; + + return 0; + +} + + +//// CmpLog forkserver + +s32 cmplog_forksrv_pid, + cmplog_child_pid, + cmplog_fsrv_ctl_fd, + cmplog_fsrv_st_fd; + +void init_cmplog_forkserver(char **argv) { + + static struct itimerval it; + int st_pipe[2], ctl_pipe[2]; + int status; + s32 rlen; + + ACTF("Spinning up the cmplog fork server..."); + + if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed"); + + child_timed_out = 0; + cmplog_forksrv_pid = fork(); + + if (cmplog_forksrv_pid < 0) PFATAL("fork() failed"); + + if (!cmplog_forksrv_pid) { + + /* CHILD PROCESS */ + + struct rlimit r; + + /* Umpf. On OpenBSD, the default fd limit for root users is set to + soft 128. Let's try to fix that... */ + + if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) { + + r.rlim_cur = FORKSRV_FD + 2; + setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */ + + } + + if (mem_limit) { + + r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20; + +#ifdef RLIMIT_AS + setrlimit(RLIMIT_AS, &r); /* Ignore errors */ +#else + /* This takes care of OpenBSD, which doesn't have RLIMIT_AS, but + according to reliable sources, RLIMIT_DATA covers anonymous + maps - so we should be getting good protection against OOM bugs. */ + + setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ +#endif /* ^RLIMIT_AS */ + + } + + /* Dumping cores is slow and can lead to anomalies if SIGKILL is delivered + before the dump is complete. */ + + // r.rlim_max = r.rlim_cur = 0; + // setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ + + /* Isolate the process and configure standard descriptors. If out_file is + specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */ + + setsid(); + + if (!getenv("AFL_DEBUG_CHILD_OUTPUT")) { + + dup2(dev_null_fd, 1); + dup2(dev_null_fd, 2); + + } + + if (!use_stdin) { + + dup2(dev_null_fd, 0); + + } else { + + dup2(out_fd, 0); + close(out_fd); + + } + + /* Set up control and status pipes, close the unneeded original fds. */ + + if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) PFATAL("dup2() failed"); + if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) PFATAL("dup2() failed"); + + close(ctl_pipe[0]); + close(ctl_pipe[1]); + close(st_pipe[0]); + close(st_pipe[1]); + + close(out_dir_fd); + close(dev_null_fd); +#ifndef HAVE_ARC4RANDOM + close(dev_urandom_fd); +#endif + close(plot_file == NULL ? -1 : fileno(plot_file)); + + /* This should improve performance a bit, since it stops the linker from + doing extra work post-fork(). */ + + if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0); + + /* Set sane defaults for ASAN if nothing else specified. 
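+     abort_on_error is required so that ASAN failures surface as an abort()
+     signal the fork server can observe, rather than as a plain exit().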
*/ + + setenv("ASAN_OPTIONS", + "abort_on_error=1:" + "detect_leaks=0:" + "malloc_context_size=0:" + "symbolize=0:" + "allocator_may_return_null=1", + 0); + + /* MSAN is tricky, because it doesn't support abort_on_error=1 at this + point. So, we do this in a very hacky way. */ + + setenv("MSAN_OPTIONS", + "exit_code=" STRINGIFY(MSAN_ERROR) ":" + "symbolize=0:" + "abort_on_error=1:" + "malloc_context_size=0:" + "allocator_may_return_null=1:" + "msan_track_origins=0", + 0); + + setenv("__AFL_CMPLOG_MODE__", "1", 1); + + argv[0] = cmplog_binary; + execv(cmplog_binary, argv); + + /* Use a distinctive bitmap signature to tell the parent about execv() + falling through. */ + + *(u32 *)trace_bits = EXEC_FAIL_SIG; + exit(0); + + } + + /* PARENT PROCESS */ + + /* Close the unneeded endpoints. */ + + close(ctl_pipe[0]); + close(st_pipe[1]); + + cmplog_fsrv_ctl_fd = ctl_pipe[1]; + cmplog_fsrv_st_fd = st_pipe[0]; + + /* Wait for the fork server to come up, but don't wait too long. */ + + if (exec_tmout) { + + it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000); + it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000; + + } + + setitimer(ITIMER_REAL, &it, NULL); + + rlen = read(cmplog_fsrv_st_fd, &status, 4); + + it.it_value.tv_sec = 0; + it.it_value.tv_usec = 0; + + setitimer(ITIMER_REAL, &it, NULL); + + /* If we have a four-byte "hello" message from the server, we're all set. + Otherwise, try to figure out what went wrong. */ + + if (rlen == 4) { + + OKF("All right - fork server is up."); + return; + + } + + if (child_timed_out) + FATAL("Timeout while initializing cmplog fork server (adjusting -t may help)"); + + if (waitpid(cmplog_forksrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed"); + + if (WIFSIGNALED(status)) { + + if (mem_limit && mem_limit < 500 && uses_asan) { + + SAYF("\n" cLRD "[-] " cRST + "Whoops, the target binary crashed suddenly, " + "before receiving any input\n" + " from the fuzzer! Since it seems to be built with ASAN and you " + "have a\n" + " restrictive memory limit configured, this is expected; please " + "read\n" + " %s/notes_for_asan.txt for help.\n", + doc_path); + + } else if (!mem_limit) { + + SAYF("\n" cLRD "[-] " cRST + "Whoops, the target binary crashed suddenly, " + "before receiving any input\n" + " from the fuzzer! There are several probable explanations:\n\n" + + " - The binary is just buggy and explodes entirely on its own. " + "If so, you\n" + " need to fix the underlying problem or find a better " + "replacement.\n\n" + + MSG_FORK_ON_APPLE + + " - Less likely, there is a horrible bug in the fuzzer. If other " + "options\n" + " fail, poke for troubleshooting " + "tips.\n"); + + } else { + + SAYF("\n" cLRD "[-] " cRST + "Whoops, the target binary crashed suddenly, " + "before receiving any input\n" + " from the fuzzer! There are several probable explanations:\n\n" + + " - The current memory limit (%s) is too restrictive, causing " + "the\n" + " target to hit an OOM condition in the dynamic linker. Try " + "bumping up\n" + " the limit with the -m setting in the command line. A simple " + "way confirm\n" + " this diagnosis would be:\n\n" + + MSG_ULIMIT_USAGE + " /path/to/fuzzed_app )\n\n" + + " Tip: you can use http://jwilk.net/software/recidivm to " + "quickly\n" + " estimate the required amount of virtual memory for the " + "binary.\n\n" + + " - The binary is just buggy and explodes entirely on its own. 
" + "If so, you\n" + " need to fix the underlying problem or find a better " + "replacement.\n\n" + + MSG_FORK_ON_APPLE + + " - Less likely, there is a horrible bug in the fuzzer. If other " + "options\n" + " fail, poke for troubleshooting " + "tips.\n", + DMS(mem_limit << 20), mem_limit - 1); + + } + + FATAL("Cmplog fork server crashed with signal %d", WTERMSIG(status)); + + } + + if (*(u32 *)trace_bits == EXEC_FAIL_SIG) + FATAL("Unable to execute target application ('%s')", argv[0]); + + if (mem_limit && mem_limit < 500 && uses_asan) { + + SAYF("\n" cLRD "[-] " cRST + "Hmm, looks like the target binary terminated " + "before we could complete a\n" + " handshake with the injected code. Since it seems to be built " + "with ASAN and\n" + " you have a restrictive memory limit configured, this is " + "expected; please\n" + " read %s/notes_for_asan.txt for help.\n", + doc_path); + + } else if (!mem_limit) { + + SAYF("\n" cLRD "[-] " cRST + "Hmm, looks like the target binary terminated " + "before we could complete a\n" + " handshake with the injected code. Perhaps there is a horrible " + "bug in the\n" + " fuzzer. Poke for troubleshooting " + "tips.\n"); + + } else { + + SAYF( + "\n" cLRD "[-] " cRST + "Hmm, looks like the target binary terminated " + "before we could complete a\n" + " handshake with the injected code. There are %s probable " + "explanations:\n\n" + + "%s" + " - The current memory limit (%s) is too restrictive, causing an " + "OOM\n" + " fault in the dynamic linker. This can be fixed with the -m " + "option. A\n" + " simple way to confirm the diagnosis may be:\n\n" + + MSG_ULIMIT_USAGE + " /path/to/fuzzed_app )\n\n" + + " Tip: you can use http://jwilk.net/software/recidivm to quickly\n" + " estimate the required amount of virtual memory for the " + "binary.\n\n" + + " - Less likely, there is a horrible bug in the fuzzer. If other " + "options\n" + " fail, poke for troubleshooting " + "tips.\n", + getenv(DEFER_ENV_VAR) ? "three" : "two", + getenv(DEFER_ENV_VAR) + ? " - You are using deferred forkserver, but __AFL_INIT() is " + "never\n" + " reached before the program terminates.\n\n" + : "", + DMS(mem_limit << 20), mem_limit - 1); + + } + + FATAL("Cmplog fork server handshake failed"); + +} + +u8 run_cmplog_target(char** argv, u32 timeout) { + + static struct itimerval it; + static u32 prev_timed_out = 0; + static u64 exec_ms = 0; + + int status = 0; + u32 tb4; + + child_timed_out = 0; + + /* After this memset, trace_bits[] are effectively volatile, so we + must prevent any earlier operations from venturing into that + territory. */ + + memset(trace_bits, 0, MAP_SIZE); + MEM_BARRIER(); + + /* If we're running in "dumb" mode, we can't rely on the fork server + logic compiled into the target program, so we will just keep calling + execve(). There is a bit of code duplication between here and + init_forkserver(), but c'est la vie. */ + + if (dumb_mode == 1 || no_forkserver) { + + cmplog_child_pid = fork(); + + if (cmplog_child_pid < 0) PFATAL("fork() failed"); + + if (!cmplog_child_pid) { + + struct rlimit r; + + if (mem_limit) { + + r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20; + +#ifdef RLIMIT_AS + + setrlimit(RLIMIT_AS, &r); /* Ignore errors */ + +#else + + setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ + +#endif /* ^RLIMIT_AS */ + + } + + r.rlim_max = r.rlim_cur = 0; + + setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ + + /* Isolate the process and configure standard descriptors. If out_file is + specified, stdin is /dev/null; otherwise, out_fd is cloned instead. 
*/ + + setsid(); + + dup2(dev_null_fd, 1); + dup2(dev_null_fd, 2); + + if (out_file) { + + dup2(dev_null_fd, 0); + + } else { + + dup2(out_fd, 0); + close(out_fd); + + } + + /* On Linux, would be faster to use O_CLOEXEC. Maybe TODO. */ + + close(dev_null_fd); + close(out_dir_fd); +#ifndef HAVE_ARC4RANDOM + close(dev_urandom_fd); +#endif + close(fileno(plot_file)); + + /* Set sane defaults for ASAN if nothing else specified. */ + + setenv("ASAN_OPTIONS", + "abort_on_error=1:" + "detect_leaks=0:" + "symbolize=0:" + "allocator_may_return_null=1", + 0); + + setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":" + "symbolize=0:" + "msan_track_origins=0", 0); + + setenv("__AFL_CMPLOG_MODE__", "1", 1); + + argv[0] = cmplog_binary; + execv(cmplog_binary, argv); + + /* Use a distinctive bitmap value to tell the parent about execv() + falling through. */ + + *(u32*)trace_bits = EXEC_FAIL_SIG; + exit(0); + + } + + } else { + + s32 res; + + /* In non-dumb mode, we have the fork server up and running, so simply + tell it to have at it, and then read back PID. */ + + if ((res = write(cmplog_fsrv_ctl_fd, &prev_timed_out, 4)) != 4) { + + if (stop_soon) return 0; + RPFATAL(res, "Unable to request new process from cmplog fork server (OOM?)"); + + } + + if ((res = read(cmplog_fsrv_st_fd, &cmplog_child_pid, 4)) != 4) { + + if (stop_soon) return 0; + RPFATAL(res, "Unable to request new process from cmplog fork server (OOM?)"); + + } + + if (cmplog_child_pid <= 0) FATAL("Cmplog fork server is misbehaving (OOM?)"); + + } + + /* Configure timeout, as requested by user, then wait for child to terminate. + */ + + it.it_value.tv_sec = (timeout / 1000); + it.it_value.tv_usec = (timeout % 1000) * 1000; + + setitimer(ITIMER_REAL, &it, NULL); + + /* The SIGALRM handler simply kills the cmplog_child_pid and sets child_timed_out. */ + + if (dumb_mode == 1 || no_forkserver) { + + if (waitpid(cmplog_child_pid, &status, 0) <= 0) PFATAL("waitpid() failed"); + + } else { + + s32 res; + + if ((res = read(cmplog_fsrv_st_fd, &status, 4)) != 4) { + + if (stop_soon) return 0; + SAYF( + "\n" cLRD "[-] " cRST + "Unable to communicate with fork server. Some possible reasons:\n\n" + " - You've run out of memory. Use -m to increase the the memory " + "limit\n" + " to something higher than %lld.\n" + " - The binary or one of the libraries it uses manages to create\n" + " threads before the forkserver initializes.\n" + " - The binary, at least in some circumstances, exits in a way " + "that\n" + " also kills the parent process - raise() could be the " + "culprit.\n\n" + "If all else fails you can disable the fork server via " + "AFL_NO_FORKSRV=1.\n", + mem_limit); + RPFATAL(res, "Unable to communicate with fork server"); + + } + + } + + if (!WIFSTOPPED(status)) cmplog_child_pid = 0; + + getitimer(ITIMER_REAL, &it); + exec_ms = + (u64)timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000); + if (slowest_exec_ms < exec_ms) slowest_exec_ms = exec_ms; + + it.it_value.tv_sec = 0; + it.it_value.tv_usec = 0; + + setitimer(ITIMER_REAL, &it, NULL); + + ++total_execs; + + /* Any subsequent operations on trace_bits must not be moved by the + compiler below this point. Past this location, trace_bits[] behave + very normally and do not have to be treated as volatile. */ + + MEM_BARRIER(); + + tb4 = *(u32*)trace_bits; + +#ifdef WORD_SIZE_64 + classify_counts((u64*)trace_bits); +#else + classify_counts((u32*)trace_bits); +#endif /* ^WORD_SIZE_64 */ + + prev_timed_out = child_timed_out; + + /* Report outcome to caller. 
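+     FAULT_TMOUT is returned when a timeout killed the child with SIGKILL,
+     FAULT_CRASH for other fatal signals or MSAN's special exit code,
+     FAULT_ERROR when execv() fell through in dumb/no-forkserver mode, and
+     FAULT_NONE otherwise.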
*/ + + if (WIFSIGNALED(status) && !stop_soon) { + + kill_signal = WTERMSIG(status); + + if (child_timed_out && kill_signal == SIGKILL) return FAULT_TMOUT; + + return FAULT_CRASH; + + } + + /* A somewhat nasty hack for MSAN, which doesn't support abort_on_error and + must use a special exit code. */ + + if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) { + + kill_signal = 0; + return FAULT_CRASH; + + } + + if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG) + return FAULT_ERROR; + + return FAULT_NONE; + +} + +u8 common_fuzz_cmplog_stuff(char** argv, u8* out_buf, u32 len) { + + u8 fault; + + if (post_handler) { + + out_buf = post_handler(out_buf, &len); + if (!out_buf || !len) return 0; + + } + + write_to_testcase(out_buf, len); + + fault = run_cmplog_target(argv, exec_tmout); + + if (stop_soon) return 1; + + if (fault == FAULT_TMOUT) { + + if (subseq_tmouts++ > TMOUT_LIMIT) { + + ++cur_skipped_paths; + return 1; + + } + + } else + + subseq_tmouts = 0; + + /* Users can hit us with SIGUSR1 to request the current input + to be abandoned. */ + + if (skip_requested) { + + skip_requested = 0; + ++cur_skipped_paths; + return 1; + + } + + /* This handles FAULT_ERROR for us: */ + + /* queued_discovered += save_if_interesting(argv, out_buf, len, fault); + + if (!(stage_cur % stats_update_freq) || stage_cur + 1 == stage_max) + show_stats(); */ + + return 0; + +} diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 74123300..94c6694a 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -531,6 +531,13 @@ u8 fuzz_one_original(char** argv) { } + if (cmplog_mode) { + + if(input_to_state_stage(argv, in_buf, out_buf, len, queue_cur->exec_cksum)) + goto abandon_entry; + + } + /* Skip right away if -d is given, if it has not been chosen sufficiently often to warrant the expensive deterministic stage (fuzz_level), or if it has gone through deterministic testing in earlier, resumed runs diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index fa7a872a..78708402 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -405,6 +405,8 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 handicap, count its spin-up time toward binary calibration. 
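     For the same reason, the cmplog fork server is brought up here as well
     when a -c binary is in use.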
*/ if (dumb_mode != 1 && !no_forkserver && !forksrv_pid) init_forkserver(argv); + if (dumb_mode != 1 && !no_forkserver && !cmplog_forksrv_pid && cmplog_mode) + init_cmplog_forkserver(argv); if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE); diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index 7679403b..54d6fb52 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -596,9 +596,11 @@ void show_stats(void) { : cRST), tmp); - sprintf(tmp, "%s/%s, %s/%s", DI(stage_finds[STAGE_PYTHON]), + sprintf(tmp, "%s/%s, %s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_PYTHON]), DI(stage_cycles[STAGE_PYTHON]), DI(stage_finds[STAGE_CUSTOM_MUTATOR]), - DI(stage_cycles[STAGE_CUSTOM_MUTATOR])); + DI(stage_cycles[STAGE_CUSTOM_MUTATOR]), DI(stage_finds[STAGE_COLORIZATION]), + DI(stage_cycles[STAGE_COLORIZATION]), DI(stage_finds[STAGE_ITS]), + DI(stage_cycles[STAGE_ITS])); SAYF(bV bSTOP " py/custom : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n", tmp); diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 0af8b35f..436e71a5 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -100,6 +100,7 @@ static void usage(u8* argv0) { " -f file - location read by the fuzzed program (stdin)\n" " -t msec - timeout for each run (auto-scaled, 50-%d ms)\n" " -m megs - memory limit for child process (%d MB)\n" + " -c program - enable CmpLog specifying a binary compiled for it\n" " -Q - use binary-only instrumentation (QEMU mode)\n" " -U - use unicorn-based instrumentation (Unicorn mode)\n" " -W - use qemu-based instrumentation with Wine (Wine " @@ -193,12 +194,21 @@ int main(int argc, char** argv) { init_seed = tv.tv_sec ^ tv.tv_usec ^ getpid(); while ((opt = getopt(argc, argv, - "+i:I:o:f:m:t:T:dnCB:S:M:x:QNUWe:p:s:V:E:L:hR")) > 0) + "+i:I:o:f:m:t:T:dnCB:S:M:x:QNUWe:p:s:V:E:L:hRP:")) > 0) switch (opt) { case 'I': infoexec = optarg; break; + case 'c': { + + cmplog_mode = 1; + cmplog_binary = ck_strdup(optarg); + // TODO check cmplog_binary validity + break; + + } + case 's': { init_seed = strtoul(optarg, 0L, 10); diff --git a/src/afl-sharedmem.c b/src/afl-sharedmem.c index 16eb14a7..bad41f88 100644 --- a/src/afl-sharedmem.c +++ b/src/afl-sharedmem.c @@ -35,6 +35,7 @@ #include "alloc-inl.h" #include "hash.h" #include "sharedmem.h" +#include "cmplog.h" #include #include @@ -68,8 +69,12 @@ char g_shm_file_path[L_tmpnam]; /* ========================================= */ #else static s32 shm_id; /* ID of the SHM region */ +static s32 cmplog_shm_id; #endif +int cmplog_mode; +struct cmp_map* cmp_map; + /* Get rid of shared memory (atexit handler). 
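   When CmpLog is enabled, the additional cmp_map segment is removed here as
   well.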
*/ void remove_shm(void) { @@ -91,6 +96,8 @@ void remove_shm(void) { #else shmctl(shm_id, IPC_RMID, NULL); + if (cmplog_mode) + shmctl(cmplog_shm_id, IPC_RMID, NULL); #endif } @@ -148,7 +155,15 @@ void setup_shm(unsigned char dumb_mode) { shm_id = shmget(IPC_PRIVATE, MAP_SIZE, IPC_CREAT | IPC_EXCL | 0600); if (shm_id < 0) PFATAL("shmget() failed"); + + if (cmplog_mode) { + + cmplog_shm_id = shmget(IPC_PRIVATE, sizeof(struct cmp_map), IPC_CREAT | IPC_EXCL | 0600); + + if (cmplog_shm_id < 0) PFATAL("shmget() failed"); + } + atexit(remove_shm); shm_str = alloc_printf("%d", shm_id); @@ -161,8 +176,21 @@ void setup_shm(unsigned char dumb_mode) { if (!dumb_mode) setenv(SHM_ENV_VAR, shm_str, 1); ck_free(shm_str); + + if (cmplog_mode) { + + shm_str = alloc_printf("%d", cmplog_shm_id); + + if (!dumb_mode) setenv(CMPLOG_SHM_ENV_VAR, shm_str, 1); + + ck_free(shm_str); + + } trace_bits = shmat(shm_id, NULL, 0); + + if (cmplog_mode) + cmp_map = shmat(cmplog_shm_id, NULL, 0); if (!trace_bits) PFATAL("shmat() failed"); -- cgit 1.4.1 From b6c5974b3781449996f2791b80e22c9fa6c9ba18 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Fri, 17 Jan 2020 16:41:30 +0100 Subject: format --- include/afl-fuzz.h | 5 +- include/cmplog.h | 9 +- include/config.h | 12 +-- llvm_mode/afl-clang-fast.c | 11 +- llvm_mode/afl-llvm-rt.o.c | 89 ++++++++-------- src/afl-fuzz-bitmap.c | 7 +- src/afl-fuzz-cmplog.c | 248 +++++++++++++++++++++++++-------------------- src/afl-fuzz-one.c | 6 +- src/afl-fuzz-stats.c | 17 ++-- src/afl-sharedmem.c | 29 +++--- 10 files changed, 236 insertions(+), 197 deletions(-) (limited to 'src') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 33ba50f1..ce418931 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -652,8 +652,9 @@ void save_cmdline(u32, char**); extern u8* cmplog_binary; extern s32 cmplog_forksrv_pid; -void init_cmplog_forkserver(char **argv); -u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len, u32 exec_cksum); +void init_cmplog_forkserver(char** argv); +u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len, + u32 exec_cksum); /**** Inline routines ****/ diff --git a/include/cmplog.h b/include/cmplog.h index 26d4b692..d5947226 100644 --- a/include/cmplog.h +++ b/include/cmplog.h @@ -6,7 +6,7 @@ #define CMP_MAP_W 65536 #define CMP_MAP_H 256 -#define SHAPE_BYTES(x) (x+1) +#define SHAPE_BYTES(x) (x + 1) #define CMP_TYPE_INS 0 #define CMP_TYPE_RTN 1 @@ -18,9 +18,9 @@ struct cmp_header { unsigned cnt : 20; unsigned id : 16; - unsigned shape : 5; // from 0 to 31 + unsigned shape : 5; // from 0 to 31 unsigned type : 1; - + } __attribute__((packed)); struct cmp_operands { @@ -41,9 +41,10 @@ typedef struct cmp_operands cmp_map_list[CMP_MAP_H]; struct cmp_map { - struct cmp_header headers[CMP_MAP_W]; + struct cmp_header headers[CMP_MAP_W]; struct cmp_operands log[CMP_MAP_W][CMP_MAP_H]; }; #endif + diff --git a/include/config.h b/include/config.h index 429c57d2..c5a48df0 100644 --- a/include/config.h +++ b/include/config.h @@ -62,13 +62,13 @@ /* Default memory limit for child process (MB): */ #ifndef __NetBSD__ -# ifndef WORD_SIZE_64 -# define MEM_LIMIT 25 -# else -# define MEM_LIMIT 50 -# endif /* ^!WORD_SIZE_64 */ +#ifndef WORD_SIZE_64 +#define MEM_LIMIT 25 #else -# define MEM_LIMIT 200 +#define MEM_LIMIT 50 +#endif /* ^!WORD_SIZE_64 */ +#else +#define MEM_LIMIT 200 #endif /* Default memory limit when running in QEMU mode (MB): */ diff --git a/llvm_mode/afl-clang-fast.c b/llvm_mode/afl-clang-fast.c index d6e96558..939546d7 100644 --- 
a/llvm_mode/afl-clang-fast.c +++ b/llvm_mode/afl-clang-fast.c @@ -200,11 +200,12 @@ static void edit_params(u32 argc, char** argv) { if (getenv("AFL_CMPLOG")) cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-pc-guard,trace-cmp"; else - cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-pc-guard"; // edge coverage by default - // cc_params[cc_par_cnt++] = "-mllvm"; - // cc_params[cc_par_cnt++] = - // "-fsanitize-coverage=trace-cmp,trace-div,trace-gep"; - // cc_params[cc_par_cnt++] = "-sanitizer-coverage-block-threshold=0"; + cc_params[cc_par_cnt++] = + "-fsanitize-coverage=trace-pc-guard"; // edge coverage by default + // cc_params[cc_par_cnt++] = "-mllvm"; + // cc_params[cc_par_cnt++] = + // "-fsanitize-coverage=trace-cmp,trace-div,trace-gep"; + // cc_params[cc_par_cnt++] = "-sanitizer-coverage-block-threshold=0"; #else cc_params[cc_par_cnt++] = "-Xclang"; cc_params[cc_par_cnt++] = "-load"; diff --git a/llvm_mode/afl-llvm-rt.o.c b/llvm_mode/afl-llvm-rt.o.c index a8ed44fa..c3d1ba7d 100644 --- a/llvm_mode/afl-llvm-rt.o.c +++ b/llvm_mode/afl-llvm-rt.o.c @@ -67,7 +67,7 @@ __thread u32 __afl_prev_loc; #endif struct cmp_map* __afl_cmp_map; -__thread u32 __afl_cmp_counter; +__thread u32 __afl_cmp_counter; /* Running in persistent mode? */ @@ -128,26 +128,26 @@ static void __afl_map_shm(void) { __afl_area_ptr[0] = 1; } - + if (getenv("__AFL_CMPLOG_MODE__")) { - + id_str = getenv(CMPLOG_SHM_ENV_VAR); - + if (id_str) { - + u32 shm_id = atoi(id_str); - + __afl_cmp_map = shmat(shm_id, NULL, 0); - + if (__afl_cmp_map == (void*)-1) _exit(1); - + } - + } else if (getenv("AFL_CMPLOG")) { - + // during compilation, do this to avoid segfault __afl_cmp_map = calloc(sizeof(struct cmp_map), 1); - + } } @@ -161,7 +161,7 @@ static void __afl_start_forkserver(void) { u8 child_stopped = 0; - void (*old_sigchld_handler)(int)=0;// = signal(SIGCHLD, SIG_DFL); + void (*old_sigchld_handler)(int) = 0; // = signal(SIGCHLD, SIG_DFL); /* Phone home and tell the parent that we're OK. If parent isn't there, assume we're not running in forkserver mode and just execute program. 
*/ @@ -325,61 +325,63 @@ __attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) { ///// CmpLog instrumentation void __sanitizer_cov_trace_cmp1(uint8_t Arg1, uint8_t Arg2) { - return; + + return; + } void __sanitizer_cov_trace_cmp2(uint16_t Arg1, uint16_t Arg2) { - + uintptr_t k = (uintptr_t)__builtin_return_address(0); k = (k >> 4) ^ (k << 8); k &= CMP_MAP_W - 1; - + u32 hits = __afl_cmp_map->headers[k].hits; - __afl_cmp_map->headers[k].hits = hits+1; + __afl_cmp_map->headers[k].hits = hits + 1; // if (!__afl_cmp_map->headers[k].cnt) // __afl_cmp_map->headers[k].cnt = __afl_cmp_counter++; - + __afl_cmp_map->headers[k].shape = 1; //__afl_cmp_map->headers[k].type = CMP_TYPE_INS; - - hits &= CMP_MAP_H -1; + + hits &= CMP_MAP_H - 1; __afl_cmp_map->log[k][hits].v0 = Arg1; __afl_cmp_map->log[k][hits].v1 = Arg2; - + } void __sanitizer_cov_trace_cmp4(uint32_t Arg1, uint32_t Arg2) { - + uintptr_t k = (uintptr_t)__builtin_return_address(0); k = (k >> 4) ^ (k << 8); k &= CMP_MAP_W - 1; - + u32 hits = __afl_cmp_map->headers[k].hits; - __afl_cmp_map->headers[k].hits = hits+1; - + __afl_cmp_map->headers[k].hits = hits + 1; + __afl_cmp_map->headers[k].shape = 3; - - hits &= CMP_MAP_H -1; + + hits &= CMP_MAP_H - 1; __afl_cmp_map->log[k][hits].v0 = Arg1; __afl_cmp_map->log[k][hits].v1 = Arg2; - + } void __sanitizer_cov_trace_cmp8(uint64_t Arg1, uint64_t Arg2) { - + uintptr_t k = (uintptr_t)__builtin_return_address(0); k = (k >> 4) ^ (k << 8); k &= CMP_MAP_W - 1; - + u32 hits = __afl_cmp_map->headers[k].hits; - __afl_cmp_map->headers[k].hits = hits+1; - + __afl_cmp_map->headers[k].hits = hits + 1; + __afl_cmp_map->headers[k].shape = 7; - - hits &= CMP_MAP_H -1; + + hits &= CMP_MAP_H - 1; __afl_cmp_map->log[k][hits].v0 = Arg1; __afl_cmp_map->log[k][hits].v1 = Arg2; - + } #if defined(__APPLE__) @@ -396,30 +398,29 @@ void __sanitizer_cov_trace_const_cmp4(uint32_t Arg1, uint32_t Arg2) __attribute__((alias("__sanitizer_cov_trace_cmp4"))); void __sanitizer_cov_trace_const_cmp8(uint64_t Arg1, uint64_t Arg2) __attribute__((alias("__sanitizer_cov_trace_cmp8"))); -#endif /* defined(__APPLE__) */ +#endif /* defined(__APPLE__) */ void __sanitizer_cov_trace_switch(uint64_t Val, uint64_t* Cases) { for (uint64_t i = 0; i < Cases[0]; i++) { - - uintptr_t k = (uintptr_t)__builtin_return_address(0) +i; + + uintptr_t k = (uintptr_t)__builtin_return_address(0) + i; k = (k >> 4) ^ (k << 8); k &= CMP_MAP_W - 1; - + u32 hits = __afl_cmp_map->headers[k].hits; - __afl_cmp_map->headers[k].hits = hits+1; - + __afl_cmp_map->headers[k].hits = hits + 1; + __afl_cmp_map->headers[k].shape = 7; - - hits &= CMP_MAP_H -1; + + hits &= CMP_MAP_H - 1; __afl_cmp_map->log[k][hits].v0 = Val; __afl_cmp_map->log[k][hits].v1 = Cases[i + 2]; - + } } - /* The following stuff deals with supporting -fsanitize-coverage=trace-pc-guard. It remains non-operational in the traditional, plugin-backed LLVM mode. For more info about 'trace-pc-guard', see README.llvm. 
diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c index 515a7a79..3f8256b4 100644 --- a/src/afl-fuzz-bitmap.c +++ b/src/afl-fuzz-bitmap.c @@ -524,7 +524,12 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { struct queue_entry* q = queue; while (q) { - if (q->exec_cksum == cksum) { q->n_fuzz = q->n_fuzz + 1; break ; } + if (q->exec_cksum == cksum) { + + q->n_fuzz = q->n_fuzz + 1; + break; + + } q = q->next; diff --git a/src/afl-fuzz-cmplog.c b/src/afl-fuzz-cmplog.c index 350cb105..924784cc 100644 --- a/src/afl-fuzz-cmplog.c +++ b/src/afl-fuzz-cmplog.c @@ -17,17 +17,19 @@ u8 common_fuzz_cmplog_stuff(char** argv, u8* out_buf, u32 len); -extern struct cmp_map* cmp_map; // defined in afl-sharedmem.c +extern struct cmp_map* cmp_map; // defined in afl-sharedmem.c -u8* cmplog_binary; +u8* cmplog_binary; char** its_argv; ///// Colorization struct range { - u32 start; - u32 end; - struct range * next; + + u32 start; + u32 end; + struct range* next; + }; struct range* add_range(struct range* ranges, u32 start, u32 end) { @@ -46,36 +48,42 @@ struct range* pop_biggest_range(struct range** ranges) { struct range* prev = NULL; struct range* rmax = NULL; struct range* prev_rmax = NULL; - u32 max_size = 0; - + u32 max_size = 0; + while (r) { + u32 s = r->end - r->start; if (s >= max_size) { + max_size = s; prev_rmax = prev; rmax = r; + } + prev = r; r = r->next; + } - + if (rmax) { + if (prev_rmax) prev_rmax->next = rmax->next; else *ranges = rmax->next; + } - + return rmax; } u8 get_exec_checksum(u8* buf, u32 len, u32* cksum) { - if (unlikely(common_fuzz_stuff(its_argv, buf, len))) - return 1; - - *cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + if (unlikely(common_fuzz_stuff(its_argv, buf, len))) return 1; + + *cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); return 0; } @@ -91,50 +99,51 @@ static void rand_replace(u8* buf, u32 len) { u8 colorization(u8* buf, u32 len, u32 exec_cksum) { struct range* ranges = add_range(NULL, 0, len); - u8* backup = ck_alloc_nozero(len); - + u8* backup = ck_alloc_nozero(len); + u64 orig_hit_cnt, new_hit_cnt; orig_hit_cnt = queued_paths + unique_crashes; - + stage_name = "colorization"; stage_short = "colorization"; stage_max = 1000; - + struct range* rng; stage_cur = stage_max; while ((rng = pop_biggest_range(&ranges)) != NULL && stage_cur) { - + u32 s = rng->end - rng->start; memcpy(backup, buf + rng->start, s); rand_replace(buf + rng->start, s); - + u32 cksum; - if (unlikely(get_exec_checksum(buf, len, &cksum))) - return 1; - + if (unlikely(get_exec_checksum(buf, len, &cksum))) return 1; + if (cksum != exec_cksum) { - - ranges = add_range(ranges, rng->start, rng->start + s/2); - ranges = add_range(ranges, rng->start + s/2 +1, rng->end); + + ranges = add_range(ranges, rng->start, rng->start + s / 2); + ranges = add_range(ranges, rng->start + s / 2 + 1, rng->end); memcpy(buf + rng->start, backup, s); - + } - + ck_free(rng); --stage_cur; - + } - + new_hit_cnt = queued_paths + unique_crashes; stage_finds[STAGE_COLORIZATION] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_COLORIZATION] += stage_max - stage_cur; - + while (ranges) { + rng = ranges; ranges = ranges->next; ck_free(rng); + } - + return 0; } @@ -147,21 +156,20 @@ u8 its_fuzz(u32 idx, u32 size, u8* buf, u32 len, u8* status) { orig_hit_cnt = queued_paths + unique_crashes; - if (unlikely(common_fuzz_stuff(its_argv, buf, len))) - return 1; + if (unlikely(common_fuzz_stuff(its_argv, buf, len))) return 1; new_hit_cnt = queued_paths + unique_crashes; if (unlikely(new_hit_cnt != 
orig_hit_cnt)) { - + *status = 1; - + } else { - + if (size >= MIN_AUTO_EXTRA && size <= MAX_AUTO_EXTRA) maybe_add_auto(&buf[idx], size); *status = 2; - + } return 0; @@ -169,8 +177,9 @@ u8 its_fuzz(u32 idx, u32 size, u8* buf, u32 len, u8* status) { } u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx, - u8* orig_buf, u8* buf, u32 len, u8 do_reverse, u8* status) { - + u8* orig_buf, u8* buf, u32 len, u8 do_reverse, + u8* status) { + u64* buf_64 = (u64*)&buf[idx]; u32* buf_32 = (u32*)&buf[idx]; u16* buf_16 = (u16*)&buf[idx]; @@ -179,59 +188,75 @@ u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx, u32* o_buf_32 = (u32*)&orig_buf[idx]; u16* o_buf_16 = (u16*)&orig_buf[idx]; // u8* o_buf_8 = &orig_buf[idx]; - + u32 its_len = len - idx; *status = 0; if (SHAPE_BYTES(h->shape) == 8) { + if (its_len >= 8 && *buf_64 == pattern && *o_buf_64 == pattern) { + *buf_64 = repl; - if (unlikely(its_fuzz(idx, 8, buf, len, status))) - return 1; + if (unlikely(its_fuzz(idx, 8, buf, len, status))) return 1; *buf_64 = pattern; + } + // reverse encoding if (do_reverse) if (unlikely(cmp_extend_encoding(h, SWAP64(pattern), SWAP64(repl), idx, - orig_buf, buf, len, 0, status))) - return 1; + orig_buf, buf, len, 0, status))) + return 1; + } - + if (SHAPE_BYTES(h->shape) == 4 || *status == 2) { + if (its_len >= 4 && *buf_32 == (u32)pattern && *o_buf_32 == (u32)pattern) { + *buf_32 = (u32)repl; - if (unlikely(its_fuzz(idx, 4, buf, len, status))) - return 1; + if (unlikely(its_fuzz(idx, 4, buf, len, status))) return 1; *buf_32 = pattern; + } + // reverse encoding if (do_reverse) if (unlikely(cmp_extend_encoding(h, SWAP32(pattern), SWAP32(repl), idx, - orig_buf, buf, len, 0, status))) - return 1; + orig_buf, buf, len, 0, status))) + return 1; + } if (SHAPE_BYTES(h->shape) == 2 || *status == 2) { + if (its_len >= 2 && *buf_16 == (u16)pattern && *o_buf_16 == (u16)pattern) { + *buf_16 = (u16)repl; - if (unlikely(its_fuzz(idx, 2, buf, len, status))) - return 1; + if (unlikely(its_fuzz(idx, 2, buf, len, status))) return 1; *buf_16 = (u16)pattern; + } + // reverse encoding if (do_reverse) if (unlikely(cmp_extend_encoding(h, SWAP16(pattern), SWAP16(repl), idx, - orig_buf, buf, len, 0, status))) - return 1; + orig_buf, buf, len, 0, status))) + return 1; + } - + /*if (SHAPE_BYTES(h->shape) == 1 || *status == 2) { + if (its_len >= 2 && *buf_8 == (u8)pattern && *o_buf_8 == (u8)pattern) { + *buf_8 = (u8)repl; if (unlikely(its_fuzz(idx, 1, buf, len, status))) return 1; *buf_16 = (u16)pattern; + } + }*/ return 0; @@ -241,44 +266,49 @@ u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx, u8 cmp_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) { struct cmp_header* h = &cmp_map->headers[key]; - u32 i, j, idx; - + u32 i, j, idx; + u32 loggeds = h->hits; - if (h->hits > CMP_MAP_H) - loggeds = CMP_MAP_H; - + if (h->hits > CMP_MAP_H) loggeds = CMP_MAP_H; + u8 status; // opt not in the paper u32 fails = 0; - + for (i = 0; i < loggeds; ++i) { - + struct cmp_operands* o = &cmp_map->log[key][i]; - + // opt not in the paper for (j = 0; j < i; ++j) if (cmp_map->log[key][j].v0 == o->v0 && cmp_map->log[key][i].v1 == o->v1) goto cmp_fuzz_next_iter; - + for (idx = 0; idx < len && fails < 8; ++idx) { - - if (unlikely(cmp_extend_encoding(h, o->v0, o->v1, idx, orig_buf, buf, len, 1, &status))) + + if (unlikely(cmp_extend_encoding(h, o->v0, o->v1, idx, orig_buf, buf, len, + 1, &status))) return 1; - if (status == 2) ++fails; - else if (status == 1) break; - - if 
(unlikely(cmp_extend_encoding(h, o->v1, o->v0, idx, orig_buf, buf, len, 1, &status))) + if (status == 2) + ++fails; + else if (status == 1) + break; + + if (unlikely(cmp_extend_encoding(h, o->v1, o->v0, idx, orig_buf, buf, len, + 1, &status))) return 1; - if (status == 2) ++fails; - else if (status == 1) break; - + if (status == 2) + ++fails; + else if (status == 1) + break; + } -cmp_fuzz_next_iter: + cmp_fuzz_next_iter: stage_cur++; - + } - + return 0; } @@ -286,67 +316,60 @@ cmp_fuzz_next_iter: ///// Input to State stage // queue_cur->exec_cksum -u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len, u32 exec_cksum) { +u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len, + u32 exec_cksum) { its_argv = argv; - if (unlikely(colorization(buf, len, exec_cksum))) - return 1; - + if (unlikely(colorization(buf, len, exec_cksum))) return 1; + // do it manually, forkserver clear only trace_bits memset(cmp_map->headers, 0, sizeof(cmp_map->headers)); - - if (unlikely(common_fuzz_cmplog_stuff(argv, buf, len))) - return 1; - + + if (unlikely(common_fuzz_cmplog_stuff(argv, buf, len))) return 1; + u64 orig_hit_cnt, new_hit_cnt; u64 orig_execs = total_execs; orig_hit_cnt = queued_paths + unique_crashes; - + stage_name = "input-to-state"; stage_short = "its"; stage_max = 0; stage_cur = 0; - + u32 k; for (k = 0; k < CMP_MAP_W; ++k) { - if (!cmp_map->headers[k].hits) - continue; + if (!cmp_map->headers[k].hits) continue; if (cmp_map->headers[k].hits > CMP_MAP_H) stage_max += CMP_MAP_H; else stage_max += cmp_map->headers[k].hits; } - + for (k = 0; k < CMP_MAP_W; ++k) { - if (!cmp_map->headers[k].hits) - continue; + if (!cmp_map->headers[k].hits) continue; cmp_fuzz(k, orig_buf, buf, len); - + } - + memcpy(buf, orig_buf, len); - + new_hit_cnt = queued_paths + unique_crashes; stage_finds[STAGE_ITS] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_ITS] += total_execs - orig_execs; - + return 0; } - //// CmpLog forkserver -s32 cmplog_forksrv_pid, - cmplog_child_pid, - cmplog_fsrv_ctl_fd, - cmplog_fsrv_st_fd; +s32 cmplog_forksrv_pid, cmplog_child_pid, cmplog_fsrv_ctl_fd, cmplog_fsrv_st_fd; -void init_cmplog_forkserver(char **argv) { +void init_cmplog_forkserver(char** argv) { static struct itimerval it; int st_pipe[2], ctl_pipe[2]; @@ -475,7 +498,7 @@ void init_cmplog_forkserver(char **argv) { /* Use a distinctive bitmap signature to tell the parent about execv() falling through. 
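     (the parent checks trace_bits for EXEC_FAIL_SIG and aborts with a
     diagnostic when the target could not be executed).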
*/ - *(u32 *)trace_bits = EXEC_FAIL_SIG; + *(u32*)trace_bits = EXEC_FAIL_SIG; exit(0); } @@ -519,7 +542,9 @@ void init_cmplog_forkserver(char **argv) { } if (child_timed_out) - FATAL("Timeout while initializing cmplog fork server (adjusting -t may help)"); + FATAL( + "Timeout while initializing cmplog fork server (adjusting -t may " + "help)"); if (waitpid(cmplog_forksrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed"); @@ -598,7 +623,7 @@ void init_cmplog_forkserver(char **argv) { } - if (*(u32 *)trace_bits == EXEC_FAIL_SIG) + if (*(u32*)trace_bits == EXEC_FAIL_SIG) FATAL("Unable to execute target application ('%s')", argv[0]); if (mem_limit && mem_limit < 500 && uses_asan) { @@ -757,7 +782,7 @@ u8 run_cmplog_target(char** argv, u32 timeout) { setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":" "symbolize=0:" "msan_track_origins=0", 0); - + setenv("__AFL_CMPLOG_MODE__", "1", 1); argv[0] = cmplog_binary; @@ -781,18 +806,21 @@ u8 run_cmplog_target(char** argv, u32 timeout) { if ((res = write(cmplog_fsrv_ctl_fd, &prev_timed_out, 4)) != 4) { if (stop_soon) return 0; - RPFATAL(res, "Unable to request new process from cmplog fork server (OOM?)"); + RPFATAL(res, + "Unable to request new process from cmplog fork server (OOM?)"); } if ((res = read(cmplog_fsrv_st_fd, &cmplog_child_pid, 4)) != 4) { if (stop_soon) return 0; - RPFATAL(res, "Unable to request new process from cmplog fork server (OOM?)"); + RPFATAL(res, + "Unable to request new process from cmplog fork server (OOM?)"); } - if (cmplog_child_pid <= 0) FATAL("Cmplog fork server is misbehaving (OOM?)"); + if (cmplog_child_pid <= 0) + FATAL("Cmplog fork server is misbehaving (OOM?)"); } @@ -804,7 +832,8 @@ u8 run_cmplog_target(char** argv, u32 timeout) { setitimer(ITIMER_REAL, &it, NULL); - /* The SIGALRM handler simply kills the cmplog_child_pid and sets child_timed_out. */ + /* The SIGALRM handler simply kills the cmplog_child_pid and sets + * child_timed_out. */ if (dumb_mode == 1 || no_forkserver) { @@ -900,7 +929,7 @@ u8 run_cmplog_target(char** argv, u32 timeout) { u8 common_fuzz_cmplog_stuff(char** argv, u8* out_buf, u32 len) { u8 fault; - + if (post_handler) { out_buf = post_handler(out_buf, &len); @@ -948,3 +977,4 @@ u8 common_fuzz_cmplog_stuff(char** argv, u8* out_buf, u32 len) { return 0; } + diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 94c6694a..4c3a5b95 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -532,10 +532,10 @@ u8 fuzz_one_original(char** argv) { } if (cmplog_mode) { - - if(input_to_state_stage(argv, in_buf, out_buf, len, queue_cur->exec_cksum)) + + if (input_to_state_stage(argv, in_buf, out_buf, len, queue_cur->exec_cksum)) goto abandon_entry; - + } /* Skip right away if -d is given, if it has not been chosen sufficiently diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index 54d6fb52..ecadac9f 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -334,9 +334,9 @@ void show_stats(void) { /* Lord, forgive me this. 
*/ - SAYF(SET_G1 bSTG bLT bH bSTOP cCYA + SAYF(SET_G1 bSTG bLT bH bSTOP cCYA " process timing " bSTG bH30 bH5 bH bHB bH bSTOP cCYA - " overall results " bSTG bH2 bH2 bRT "\n"); + " overall results " bSTG bH2 bH2 bRT "\n"); if (dumb_mode) { @@ -413,9 +413,9 @@ void show_stats(void) { " uniq hangs : " cRST "%-6s" bSTG bV "\n", DTD(cur_ms, last_hang_time), tmp); - SAYF(bVR bH bSTOP cCYA + SAYF(bVR bH bSTOP cCYA " cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA - " map coverage " bSTG bH bHT bH20 bH2 bVL "\n"); + " map coverage " bSTG bH bHT bH20 bH2 bVL "\n"); /* This gets funny because we want to print several variable-length variables together, but then cram them into a fixed-width field - so we need to @@ -443,9 +443,9 @@ void show_stats(void) { SAYF(bSTOP " count coverage : " cRST "%-21s" bSTG bV "\n", tmp); - SAYF(bVR bH bSTOP cCYA + SAYF(bVR bH bSTOP cCYA " stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA - " findings in depth " bSTG bH10 bH5 bH2 bH2 bVL "\n"); + " findings in depth " bSTG bH10 bH5 bH2 bH2 bVL "\n"); sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored), ((double)queued_favored) * 100 / queued_paths); @@ -514,7 +514,7 @@ void show_stats(void) { /* Aaaalmost there... hold on! */ - SAYF(bVR bH cCYA bSTOP + SAYF(bVR bH cCYA bSTOP " fuzzing strategy yields " bSTG bH10 bHT bH10 bH5 bHB bH bSTOP cCYA " path geometry " bSTG bH5 bH2 bVL "\n"); @@ -598,7 +598,8 @@ void show_stats(void) { sprintf(tmp, "%s/%s, %s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_PYTHON]), DI(stage_cycles[STAGE_PYTHON]), DI(stage_finds[STAGE_CUSTOM_MUTATOR]), - DI(stage_cycles[STAGE_CUSTOM_MUTATOR]), DI(stage_finds[STAGE_COLORIZATION]), + DI(stage_cycles[STAGE_CUSTOM_MUTATOR]), + DI(stage_finds[STAGE_COLORIZATION]), DI(stage_cycles[STAGE_COLORIZATION]), DI(stage_finds[STAGE_ITS]), DI(stage_cycles[STAGE_ITS])); diff --git a/src/afl-sharedmem.c b/src/afl-sharedmem.c index bad41f88..f8ed4e51 100644 --- a/src/afl-sharedmem.c +++ b/src/afl-sharedmem.c @@ -72,8 +72,8 @@ static s32 shm_id; /* ID of the SHM region */ static s32 cmplog_shm_id; #endif -int cmplog_mode; -struct cmp_map* cmp_map; +int cmplog_mode; +struct cmp_map *cmp_map; /* Get rid of shared memory (atexit handler). 
*/ @@ -96,8 +96,7 @@ void remove_shm(void) { #else shmctl(shm_id, IPC_RMID, NULL); - if (cmplog_mode) - shmctl(cmplog_shm_id, IPC_RMID, NULL); + if (cmplog_mode) shmctl(cmplog_shm_id, IPC_RMID, NULL); #endif } @@ -155,15 +154,16 @@ void setup_shm(unsigned char dumb_mode) { shm_id = shmget(IPC_PRIVATE, MAP_SIZE, IPC_CREAT | IPC_EXCL | 0600); if (shm_id < 0) PFATAL("shmget() failed"); - + if (cmplog_mode) { - - cmplog_shm_id = shmget(IPC_PRIVATE, sizeof(struct cmp_map), IPC_CREAT | IPC_EXCL | 0600); - + + cmplog_shm_id = shmget(IPC_PRIVATE, sizeof(struct cmp_map), + IPC_CREAT | IPC_EXCL | 0600); + if (cmplog_shm_id < 0) PFATAL("shmget() failed"); } - + atexit(remove_shm); shm_str = alloc_printf("%d", shm_id); @@ -176,21 +176,20 @@ void setup_shm(unsigned char dumb_mode) { if (!dumb_mode) setenv(SHM_ENV_VAR, shm_str, 1); ck_free(shm_str); - + if (cmplog_mode) { - + shm_str = alloc_printf("%d", cmplog_shm_id); if (!dumb_mode) setenv(CMPLOG_SHM_ENV_VAR, shm_str, 1); ck_free(shm_str); - + } trace_bits = shmat(shm_id, NULL, 0); - - if (cmplog_mode) - cmp_map = shmat(cmplog_shm_id, NULL, 0); + + if (cmplog_mode) cmp_map = shmat(cmplog_shm_id, NULL, 0); if (!trace_bits) PFATAL("shmat() failed"); -- cgit 1.4.1 From b8bad5a2273b0cddd0244a7f37c20150a08af475 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Sun, 19 Jan 2020 22:29:40 +0100 Subject: fix for getopt --- llvm_mode/Makefile | 2 ++ src/afl-fuzz.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile index ebe6b9de..a455dc8a 100644 --- a/llvm_mode/Makefile +++ b/llvm_mode/Makefile @@ -19,6 +19,8 @@ # For Heiko: #TEST_MMAP=1 +AFL_TRACE_PC=1 + PREFIX ?= /usr/local HELPER_PATH = $(PREFIX)/lib/afl BIN_PATH = $(PREFIX)/bin diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 436e71a5..e0542648 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -194,7 +194,7 @@ int main(int argc, char** argv) { init_seed = tv.tv_sec ^ tv.tv_usec ^ getpid(); while ((opt = getopt(argc, argv, - "+i:I:o:f:m:t:T:dnCB:S:M:x:QNUWe:p:s:V:E:L:hRP:")) > 0) + "+c:i:I:o:f:m:t:T:dnCB:S:M:x:QNUWe:p:s:V:E:L:hRP:")) > 0) switch (opt) { -- cgit 1.4.1 From 95a98fb3e805d94a3db82d5da21e9a00a79dbdf2 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Thu, 30 Jan 2020 22:40:09 +0100 Subject: cmplog runtime for LLVM --- include/afl-fuzz.h | 14 +- include/cmplog.h | 30 ++- include/sharedmem.h | 1 + include/types.h | 14 ++ llvm_mode/Makefile | 17 +- llvm_mode/README.cmplog.md | 42 ++++ llvm_mode/afl-clang-fast.c | 44 ++-- llvm_mode/afl-llvm-cmplog-rt.o.c | 412 ++++++++++++++++++++++++++++++++++++++ llvm_mode/afl-llvm-cmplog.rt.o.c | 422 +++++++++++++++++++++++++++++++++++++++ llvm_mode/afl-llvm-rt.o.c | 123 ------------ src/afl-fuzz-cmplog.c | 385 ++--------------------------------- src/afl-fuzz-globals.c | 3 + src/afl-fuzz-redqueen.c | 373 ++++++++++++++++++++++++++++++++++ 13 files changed, 1370 insertions(+), 510 deletions(-) create mode 100644 llvm_mode/README.cmplog.md create mode 100644 llvm_mode/afl-llvm-cmplog-rt.o.c create mode 100644 llvm_mode/afl-llvm-cmplog.rt.o.c create mode 100644 src/afl-fuzz-redqueen.c (limited to 'src') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index ce418931..cd53c703 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -452,6 +452,11 @@ extern u32 a_extras_cnt; /* Total number of tokens available */ u8* (*post_handler)(u8* buf, u32* len); +/* CmpLog */ + +extern u8* cmplog_binary; +extern s32 cmplog_forksrv_pid; + /* hooks for the custom mutator function */ /** * 
Perform custom mutations on a given input @@ -647,12 +652,13 @@ char** get_qemu_argv(u8*, char**, int); char** get_wine_argv(u8*, char**, int); void save_cmdline(u32, char**); -/* RedQueen */ - -extern u8* cmplog_binary; -extern s32 cmplog_forksrv_pid; +/* CmpLog */ void init_cmplog_forkserver(char** argv); +u8 common_fuzz_cmplog_stuff(char** argv, u8* out_buf, u32 len); + +/* RedQueen */ + u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len, u32 exec_cksum); diff --git a/include/cmplog.h b/include/cmplog.h index d5947226..c02650ee 100644 --- a/include/cmplog.h +++ b/include/cmplog.h @@ -1,5 +1,31 @@ -#ifndef _AFL_REDQUEEN_H -#define _AFL_REDQUEEN_H +/* + american fuzzy lop++ - cmplog header + ------------------------------------ + + Originally written by Michal Zalewski + + Forkserver design by Jann Horn + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi + + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019-2020 AFLplusplus Project. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + Shared code to handle the shared memory. This is used by the fuzzer + as well the other components like afl-tmin, afl-showmap, etc... + + */ + +#ifndef _AFL_CMPLOG_H +#define _AFL_CMPLOG_H #include "config.h" diff --git a/include/sharedmem.h b/include/sharedmem.h index 25c7336d..3540386d 100644 --- a/include/sharedmem.h +++ b/include/sharedmem.h @@ -31,6 +31,7 @@ void setup_shm(unsigned char dumb_mode); void remove_shm(void); extern int cmplog_mode; +extern struct cmp_map* cmp_map; #endif diff --git a/include/types.h b/include/types.h index eba47be7..d5be5920 100644 --- a/include/types.h +++ b/include/types.h @@ -78,6 +78,20 @@ typedef int64_t s64; \ }) +#define SWAP64(_x) \ + ({ \ + \ + u64 _ret = (_x); \ + _ret = \ + (_ret & 0x00000000FFFFFFFF) << 32 | (_ret & 0xFFFFFFFF00000000) >> 32; \ + _ret = \ + (_ret & 0x0000FFFF0000FFFF) << 16 | (_ret & 0xFFFF0000FFFF0000) >> 16; \ + _ret = \ + (_ret & 0x00FF00FF00FF00FF) << 8 | (_ret & 0xFF00FF00FF00FF00) >> 8; \ + _ret; \ + \ + }) + #ifdef AFL_LLVM_PASS #if defined(__linux__) #define AFL_SR(s) (srandom(s)) diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile index a455dc8a..9ee6fc8b 100644 --- a/llvm_mode/Makefile +++ b/llvm_mode/Makefile @@ -19,8 +19,6 @@ # For Heiko: #TEST_MMAP=1 -AFL_TRACE_PC=1 - PREFIX ?= /usr/local HELPER_PATH = $(PREFIX)/lib/afl BIN_PATH = $(PREFIX)/bin @@ -133,9 +131,9 @@ ifeq "$(TEST_MMAP)" "1" endif ifndef AFL_TRACE_PC - PROGS = ../afl-clang-fast ../afl-llvm-pass.so ../libLLVMInsTrim.so ../afl-llvm-rt.o ../afl-llvm-rt-32.o ../afl-llvm-rt-64.o ../compare-transform-pass.so ../split-compares-pass.so ../split-switches-pass.so + PROGS = ../afl-clang-fast ../afl-llvm-cmplog-rt.o ../afl-llvm-cmplog-rt-32.o ../afl-llvm-cmplog-rt-64.o ../afl-llvm-pass.so ../libLLVMInsTrim.so ../afl-llvm-rt.o ../afl-llvm-rt-32.o ../afl-llvm-rt-64.o ../compare-transform-pass.so ../split-compares-pass.so ../split-switches-pass.so else - PROGS = ../afl-clang-fast ../afl-llvm-rt.o ../afl-llvm-rt-32.o ../afl-llvm-rt-64.o ../compare-transform-pass.so ../split-compares-pass.so ../split-switches-pass.so + PROGS = ../afl-clang-fast ../afl-llvm-cmplog-rt.o ../afl-llvm-cmplog-rt-32.o ../afl-llvm-cmplog-rt-64.o ../afl-llvm-rt.o ../afl-llvm-rt-32.o ../afl-llvm-rt-64.o ../compare-transform-pass.so ../split-compares-pass.so 
../split-switches-pass.so endif ifneq "$(CLANGVER)" "$(LLVMVER)" @@ -228,6 +226,17 @@ endif @printf "[*] Building 64-bit variant of the runtime (-m64)... " @$(CC) $(CFLAGS) -m64 -fPIC -c $< -o $@ 2>/dev/null; if [ "$$?" = "0" ]; then echo "success!"; else echo "failed (that's fine)"; fi +../afl-llvm-cmplog-rt.o: afl-llvm-cmplog-rt.o.c | test_deps + $(CC) $(CFLAGS) -fPIC -c $< -o $@ + +../afl-llvm-cmplog-rt-32.o: afl-llvm-cmplog-rt.o.c | test_deps + @printf "[*] Building 32-bit variant of the CmpLog runtime (-m32)... " + @$(CC) $(CFLAGS) -m32 -fPIC -c $< -o $@ 2>/dev/null; if [ "$$?" = "0" ]; then echo "success!"; else echo "failed (that's fine)"; fi + +../afl-llvm-cmplog-rt-64.o: afl-llvm-cmplog-rt.o.c | test_deps + @printf "[*] Building 64-bit variant of the CmpLog runtime (-m64)... " + @$(CC) $(CFLAGS) -m64 -fPIC -c $< -o $@ 2>/dev/null; if [ "$$?" = "0" ]; then echo "success!"; else echo "failed (that's fine)"; fi + test_build: $(PROGS) @echo "[*] Testing the CC wrapper and instrumentation output..." unset AFL_USE_ASAN AFL_USE_MSAN AFL_INST_RATIO; AFL_QUIET=1 AFL_PATH=. AFL_CC=$(CC) AFL_LLVM_LAF_SPLIT_SWITCHES=1 AFL_LLVM_LAF_TRANSFORM_COMPARES=1 AFL_LLVM_LAF_SPLIT_COMPARES=1 ../afl-clang-fast $(CFLAGS) ../test-instr.c -o test-instr $(LDFLAGS) diff --git a/llvm_mode/README.cmplog.md b/llvm_mode/README.cmplog.md new file mode 100644 index 00000000..a0e838ad --- /dev/null +++ b/llvm_mode/README.cmplog.md @@ -0,0 +1,42 @@ +# CmpLog instrumentation + +The CmpLog instrumentation enables the logging of the comparisons operands in a +shared memory. + +These values can be used by variuous mutator built on top of it. +At the moment we support the RedQueen mutator (input-2-state instructions only). + +## Build + +Tou use CmpLog, you have to build two versions of the instrumented target +program. + +The first, using the regular AFL++ instrumentation. + +The second, the CmpLog binary, setting AFL_LLVM_CMPLOG during the compilation. + +For example: + +``` +./configure --cc=~/path/to/afl-clang-fast +make +cp ./program ./program.afl +make clean +export AFL_LLVM_CMPLOG=1 +./configure --cc=~/path/to/afl-clang-fast +make +cp ./program ./program.cmplog +``` + +## Use + +AFL++ have the -c option that can be used to specify a CmpLog binary (the second +built). + +For example: + +``` +afl-fuzz -i input -o output -c ./program.cmplog -m none -- ./program.afl @@ +``` + +Be careful to use -m none because CmpLog maps a lot of pages. diff --git a/llvm_mode/afl-clang-fast.c b/llvm_mode/afl-clang-fast.c index 939546d7..ddaa2388 100644 --- a/llvm_mode/afl-clang-fast.c +++ b/llvm_mode/afl-clang-fast.c @@ -40,6 +40,7 @@ static u8* obj_path; /* Path to runtime libraries */ static u8** cc_params; /* Parameters passed to the real CC */ static u32 cc_par_cnt = 1; /* Param count, including argv0 */ static u8 llvm_fullpath[PATH_MAX]; +static u8 cmplog_mode; /* Try to find the runtime libraries. If that fails, abort. 
*/ @@ -196,16 +197,20 @@ static void edit_params(u32 argc, char** argv) { // /laf -#ifdef USE_TRACE_PC - if (getenv("AFL_CMPLOG")) + if (cmplog_mode) { + cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-pc-guard,trace-cmp"; - else - cc_params[cc_par_cnt++] = - "-fsanitize-coverage=trace-pc-guard"; // edge coverage by default - // cc_params[cc_par_cnt++] = "-mllvm"; - // cc_params[cc_par_cnt++] = - // "-fsanitize-coverage=trace-cmp,trace-div,trace-gep"; - // cc_params[cc_par_cnt++] = "-sanitizer-coverage-block-threshold=0"; + + } else { + +#ifdef USE_TRACE_PC + + cc_params[cc_par_cnt++] = + "-fsanitize-coverage=trace-pc-guard"; // edge coverage by default + // cc_params[cc_par_cnt++] = "-mllvm"; + // cc_params[cc_par_cnt++] = + // "-fsanitize-coverage=trace-cmp,trace-div,trace-gep"; + // cc_params[cc_par_cnt++] = "-sanitizer-coverage-block-threshold=0"; #else cc_params[cc_par_cnt++] = "-Xclang"; cc_params[cc_par_cnt++] = "-load"; @@ -216,6 +221,8 @@ static void edit_params(u32 argc, char** argv) { cc_params[cc_par_cnt++] = alloc_printf("%s/afl-llvm-pass.so", obj_path); #endif /* ^USE_TRACE_PC */ + } + cc_params[cc_par_cnt++] = "-Qunused-arguments"; /* Detect stray -v calls from ./configure scripts. */ @@ -380,11 +387,17 @@ static void edit_params(u32 argc, char** argv) { switch (bit_mode) { case 0: - cc_params[cc_par_cnt++] = alloc_printf("%s/afl-llvm-rt.o", obj_path); + if (cmplog_mode) + cc_params[cc_par_cnt++] = alloc_printf("%s/afl-llvm-cmplog-rt.o", obj_path); + else + cc_params[cc_par_cnt++] = alloc_printf("%s/afl-llvm-rt.o", obj_path); break; case 32: - cc_params[cc_par_cnt++] = alloc_printf("%s/afl-llvm-rt-32.o", obj_path); + if (cmplog_mode) + cc_params[cc_par_cnt++] = alloc_printf("%s/afl-llvm-cmplog-rt-32.o", obj_path); + else + cc_params[cc_par_cnt++] = alloc_printf("%s/afl-llvm-rt-32.o", obj_path); if (access(cc_params[cc_par_cnt - 1], R_OK)) FATAL("-m32 is not supported by your compiler"); @@ -392,7 +405,10 @@ static void edit_params(u32 argc, char** argv) { break; case 64: - cc_params[cc_par_cnt++] = alloc_printf("%s/afl-llvm-rt-64.o", obj_path); + if (cmplog_mode) + cc_params[cc_par_cnt++] = alloc_printf("%s/afl-llvm-cmplog-rt-64.o", obj_path); + else + cc_params[cc_par_cnt++] = alloc_printf("%s/afl-llvm-rt-64.o", obj_path); if (access(cc_params[cc_par_cnt - 1], R_OK)) FATAL("-m64 is not supported by your compiler"); @@ -463,6 +479,10 @@ int main(int argc, char** argv) { #endif /* ^USE_TRACE_PC */ } + + cmplog_mode = getenv("AFL_CMPLOG") || getenv("AFL_LLVM_CMPLOG"); + if (cmplog_mode) + printf("CmpLog mode by \n"); #ifndef __ANDROID__ find_obj(argv[0]); diff --git a/llvm_mode/afl-llvm-cmplog-rt.o.c b/llvm_mode/afl-llvm-cmplog-rt.o.c new file mode 100644 index 00000000..7a513c0d --- /dev/null +++ b/llvm_mode/afl-llvm-cmplog-rt.o.c @@ -0,0 +1,412 @@ +/* + american fuzzy lop++ - LLVM instrumentation bootstrap + --------------------------------------------------- + + Written by Laszlo Szekeres and + Michal Zalewski + + LLVM integration design comes from Laszlo Szekeres. + + Copyright 2015, 2016 Google Inc. All rights reserved. + Copyright 2019-2020 AFLplusplus Project. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This code is the rewrite of afl-as.h's main_payload. 
+ +*/ + +#ifdef __ANDROID__ +#include "android-ashmem.h" +#endif +#include "config.h" +#include "types.h" +#include "cmplog.h" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* This is a somewhat ugly hack for the experimental 'trace-pc-guard' mode. + Basically, we need to make sure that the forkserver is initialized after + the LLVM-generated runtime initialization pass, not before. */ + +#ifdef USE_TRACE_PC +#define CONST_PRIO 5 +#else +#define CONST_PRIO 0 +#endif /* ^USE_TRACE_PC */ + +#include +#include + +/* Globals needed by the injected instrumentation. The __afl_area_initial region + is used for instrumentation output before __afl_map_shm() has a chance to + run. It will end up as .comm, so it shouldn't be too wasteful. */ + +// In CmpLog, the only usage of __afl_area_ptr is to report errors +u8* __afl_area_ptr; + +struct cmp_map* __afl_cmp_map; +__thread u32 __afl_cmp_counter; + +/* Running in persistent mode? */ + +static u8 is_persistent; + +/* SHM setup. */ + +static void __afl_map_shm(void) { + + u8* id_str = getenv(SHM_ENV_VAR); + + /* If we're running under AFL, attach to the appropriate region, replacing the + early-stage __afl_area_initial region that is needed to allow some really + hacky .init code to work correctly in projects such as OpenSSL. */ + + if (id_str) { + +#ifdef USEMMAP + const char* shm_file_path = id_str; + int shm_fd = -1; + unsigned char* shm_base = NULL; + + /* create the shared memory segment as if it was a file */ + shm_fd = shm_open(shm_file_path, O_RDWR, 0600); + if (shm_fd == -1) { + + printf("shm_open() failed\n"); + exit(1); + + } + + /* map the shared memory segment to the address space of the process */ + shm_base = mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0); + if (shm_base == MAP_FAILED) { + + close(shm_fd); + shm_fd = -1; + + printf("mmap() failed\n"); + exit(2); + + } + + __afl_area_ptr = shm_base; +#else + u32 shm_id = atoi(id_str); + + __afl_area_ptr = shmat(shm_id, NULL, 0); +#endif + + /* Whooooops. */ + + if (__afl_area_ptr == (void*)-1) _exit(1); + + /* Write something into the bitmap so that even with low AFL_INST_RATIO, + our parent doesn't give up on us. */ + + __afl_area_ptr[0] = 1; + + } + + id_str = getenv(CMPLOG_SHM_ENV_VAR); + + if (id_str) { + + u32 shm_id = atoi(id_str); + + __afl_cmp_map = shmat(shm_id, NULL, 0); + + if (__afl_cmp_map == (void*)-1) _exit(1); + + } + +} + +/* Fork server logic. */ + +static void __afl_start_forkserver(void) { + + static u8 tmp[4]; + s32 child_pid; + + u8 child_stopped = 0; + + void (*old_sigchld_handler)(int) = 0; // = signal(SIGCHLD, SIG_DFL); + + /* Phone home and tell the parent that we're OK. If parent isn't there, + assume we're not running in forkserver mode and just execute program. */ + + if (write(FORKSRV_FD + 1, tmp, 4) != 4) return; + + while (1) { + + u32 was_killed; + int status; + + /* Wait for parent by reading from the pipe. Abort if read fails. */ + + if (read(FORKSRV_FD, &was_killed, 4) != 4) _exit(1); + + /* If we stopped the child in persistent mode, but there was a race + condition and afl-fuzz already issued SIGKILL, write off the old + process. */ + + if (child_stopped && was_killed) { + + child_stopped = 0; + if (waitpid(child_pid, &status, 0) < 0) _exit(1); + + } + + if (!child_stopped) { + + /* Once woken up, create a clone of our process. */ + + child_pid = fork(); + if (child_pid < 0) _exit(1); + + /* In child process: close fds, resume execution. 
*/ + + if (!child_pid) { + + signal(SIGCHLD, old_sigchld_handler); + + close(FORKSRV_FD); + close(FORKSRV_FD + 1); + return; + + } + + } else { + + /* Special handling for persistent mode: if the child is alive but + currently stopped, simply restart it with SIGCONT. */ + + kill(child_pid, SIGCONT); + child_stopped = 0; + + } + + /* In parent process: write PID to pipe, then wait for child. */ + + if (write(FORKSRV_FD + 1, &child_pid, 4) != 4) _exit(1); + + if (waitpid(child_pid, &status, is_persistent ? WUNTRACED : 0) < 0) + _exit(1); + + /* In persistent mode, the child stops itself with SIGSTOP to indicate + a successful run. In this case, we want to wake it up without forking + again. */ + + if (WIFSTOPPED(status)) child_stopped = 1; + + /* Relay wait status to pipe, then loop back. */ + + if (write(FORKSRV_FD + 1, &status, 4) != 4) _exit(1); + + } + +} + +/* A simplified persistent mode handler, used as explained in README.llvm. */ + +int __afl_persistent_loop(unsigned int max_cnt) { + + static u8 first_pass = 1; + static u32 cycle_cnt; + + if (first_pass) { + + /* Make sure that every iteration of __AFL_LOOP() starts with a clean slate. + On subsequent calls, the parent will take care of that, but on the first + iteration, it's our job to erase any trace of whatever happened + before the loop. */ + + if (is_persistent) { + + // memset(__afl_area_ptr, 0, MAP_SIZE); + __afl_area_ptr[0] = 1; + + } + + cycle_cnt = max_cnt; + first_pass = 0; + return 1; + + } + + if (is_persistent) { + + if (--cycle_cnt) { + + raise(SIGSTOP); + + __afl_area_ptr[0] = 1; + + return 1; + + } else { + + /* When exiting __AFL_LOOP(), make sure that the subsequent code that + follows the loop is not traced. We do that by pivoting back to the + dummy output region. */ + + // __afl_area_ptr = __afl_area_initial; + + } + + } + + return 0; + +} + +/* This one can be called from user code when deferred forkserver mode + is enabled. */ + +void __afl_manual_init(void) { + + static u8 init_done; + + if (!init_done) { + + __afl_map_shm(); + __afl_start_forkserver(); + init_done = 1; + + } + +} + +/* Proper initialization routine. 
*/ + +__attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) { + + is_persistent = !!getenv(PERSIST_ENV_VAR); + + if (getenv(DEFER_ENV_VAR)) return; + + __afl_manual_init(); + +} + +///// CmpLog instrumentation + +void __sanitizer_cov_trace_cmp1(uint8_t Arg1, uint8_t Arg2) { + + return; + +} + +void __sanitizer_cov_trace_cmp2(uint16_t Arg1, uint16_t Arg2) { + + if (!__afl_cmp_map) return; + + uintptr_t k = (uintptr_t)__builtin_return_address(0); + k = (k >> 4) ^ (k << 8); + k &= CMP_MAP_W - 1; + + u32 hits = __afl_cmp_map->headers[k].hits; + __afl_cmp_map->headers[k].hits = hits + 1; + // if (!__afl_cmp_map->headers[k].cnt) + // __afl_cmp_map->headers[k].cnt = __afl_cmp_counter++; + + __afl_cmp_map->headers[k].shape = 1; + //__afl_cmp_map->headers[k].type = CMP_TYPE_INS; + + hits &= CMP_MAP_H - 1; + __afl_cmp_map->log[k][hits].v0 = Arg1; + __afl_cmp_map->log[k][hits].v1 = Arg2; + +} + +void __sanitizer_cov_trace_cmp4(uint32_t Arg1, uint32_t Arg2) { + + if (!__afl_cmp_map) return; + + uintptr_t k = (uintptr_t)__builtin_return_address(0); + k = (k >> 4) ^ (k << 8); + k &= CMP_MAP_W - 1; + + u32 hits = __afl_cmp_map->headers[k].hits; + __afl_cmp_map->headers[k].hits = hits + 1; + + __afl_cmp_map->headers[k].shape = 3; + + hits &= CMP_MAP_H - 1; + __afl_cmp_map->log[k][hits].v0 = Arg1; + __afl_cmp_map->log[k][hits].v1 = Arg2; + +} + +void __sanitizer_cov_trace_cmp8(uint64_t Arg1, uint64_t Arg2) { + + if (!__afl_cmp_map) return; + + uintptr_t k = (uintptr_t)__builtin_return_address(0); + k = (k >> 4) ^ (k << 8); + k &= CMP_MAP_W - 1; + + u32 hits = __afl_cmp_map->headers[k].hits; + __afl_cmp_map->headers[k].hits = hits + 1; + + __afl_cmp_map->headers[k].shape = 7; + + hits &= CMP_MAP_H - 1; + __afl_cmp_map->log[k][hits].v0 = Arg1; + __afl_cmp_map->log[k][hits].v1 = Arg2; + +} + +#if defined(__APPLE__) +#pragma weak __sanitizer_cov_trace_const_cmp1 = __sanitizer_cov_trace_cmp1 +#pragma weak __sanitizer_cov_trace_const_cmp2 = __sanitizer_cov_trace_cmp2 +#pragma weak __sanitizer_cov_trace_const_cmp4 = __sanitizer_cov_trace_cmp4 +#pragma weak __sanitizer_cov_trace_const_cmp8 = __sanitizer_cov_trace_cmp8 +#else +void __sanitizer_cov_trace_const_cmp1(uint8_t Arg1, uint8_t Arg2) + __attribute__((alias("__sanitizer_cov_trace_cmp1"))); +void __sanitizer_cov_trace_const_cmp2(uint16_t Arg1, uint16_t Arg2) + __attribute__((alias("__sanitizer_cov_trace_cmp2"))); +void __sanitizer_cov_trace_const_cmp4(uint32_t Arg1, uint32_t Arg2) + __attribute__((alias("__sanitizer_cov_trace_cmp4"))); +void __sanitizer_cov_trace_const_cmp8(uint64_t Arg1, uint64_t Arg2) + __attribute__((alias("__sanitizer_cov_trace_cmp8"))); +#endif /* defined(__APPLE__) */ + +void __sanitizer_cov_trace_switch(uint64_t Val, uint64_t* Cases) { + + for (uint64_t i = 0; i < Cases[0]; i++) { + + uintptr_t k = (uintptr_t)__builtin_return_address(0) + i; + k = (k >> 4) ^ (k << 8); + k &= CMP_MAP_W - 1; + + u32 hits = __afl_cmp_map->headers[k].hits; + __afl_cmp_map->headers[k].hits = hits + 1; + + __afl_cmp_map->headers[k].shape = 7; + + hits &= CMP_MAP_H - 1; + __afl_cmp_map->log[k][hits].v0 = Val; + __afl_cmp_map->log[k][hits].v1 = Cases[i + 2]; + + } + +} + diff --git a/llvm_mode/afl-llvm-cmplog.rt.o.c b/llvm_mode/afl-llvm-cmplog.rt.o.c new file mode 100644 index 00000000..ef944d4f --- /dev/null +++ b/llvm_mode/afl-llvm-cmplog.rt.o.c @@ -0,0 +1,422 @@ +/* + american fuzzy lop++ - LLVM instrumentation bootstrap + --------------------------------------------------- + + Written by Laszlo Szekeres and + Michal Zalewski + + LLVM 
integration design comes from Laszlo Szekeres. + + Copyright 2015, 2016 Google Inc. All rights reserved. + Copyright 2019-2020 AFLplusplus Project. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This code is the rewrite of afl-as.h's main_payload. + +*/ + +#ifdef __ANDROID__ +#include "android-ashmem.h" +#endif +#include "config.h" +#include "types.h" +#include "cmplog.h" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* This is a somewhat ugly hack for the experimental 'trace-pc-guard' mode. + Basically, we need to make sure that the forkserver is initialized after + the LLVM-generated runtime initialization pass, not before. */ + +#ifdef USE_TRACE_PC +#define CONST_PRIO 5 +#else +#define CONST_PRIO 0 +#endif /* ^USE_TRACE_PC */ + +#include +#include + +/* Globals needed by the injected instrumentation. The __afl_area_initial region + is used for instrumentation output before __afl_map_shm() has a chance to + run. It will end up as .comm, so it shouldn't be too wasteful. */ + +// In CmpLog, the only usage of __afl_area_ptr is to report errors +u8* __afl_area_ptr; + +struct cmp_map* __afl_cmp_map; +__thread u32 __afl_cmp_counter; + +/* Running in persistent mode? */ + +static u8 is_persistent; + +/* SHM setup. */ + +static void __afl_map_shm(void) { + + u8* id_str = getenv(SHM_ENV_VAR); + + /* If we're running under AFL, attach to the appropriate region, replacing the + early-stage __afl_area_initial region that is needed to allow some really + hacky .init code to work correctly in projects such as OpenSSL. */ + + if (id_str) { + +#ifdef USEMMAP + const char* shm_file_path = id_str; + int shm_fd = -1; + unsigned char* shm_base = NULL; + + /* create the shared memory segment as if it was a file */ + shm_fd = shm_open(shm_file_path, O_RDWR, 0600); + if (shm_fd == -1) { + + printf("shm_open() failed\n"); + exit(1); + + } + + /* map the shared memory segment to the address space of the process */ + shm_base = mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0); + if (shm_base == MAP_FAILED) { + + close(shm_fd); + shm_fd = -1; + + printf("mmap() failed\n"); + exit(2); + + } + + __afl_area_ptr = shm_base; +#else + u32 shm_id = atoi(id_str); + + __afl_area_ptr = shmat(shm_id, NULL, 0); +#endif + + /* Whooooops. */ + + if (__afl_area_ptr == (void*)-1) _exit(1); + + /* Write something into the bitmap so that even with low AFL_INST_RATIO, + our parent doesn't give up on us. */ + + __afl_area_ptr[0] = 1; + + } + + id_str = getenv(CMPLOG_SHM_ENV_VAR); + + if (id_str) { + + u32 shm_id = atoi(id_str); + + __afl_cmp_map = shmat(shm_id, NULL, 0); + + if (__afl_cmp_map == (void*)-1) _exit(1); + + } + +} + +/* Fork server logic. */ + +static void __afl_start_forkserver(void) { + + static u8 tmp[4]; + s32 child_pid; + + u8 child_stopped = 0; + + void (*old_sigchld_handler)(int) = 0; // = signal(SIGCHLD, SIG_DFL); + + /* Phone home and tell the parent that we're OK. If parent isn't there, + assume we're not running in forkserver mode and just execute program. */ + + if (write(FORKSRV_FD + 1, tmp, 4) != 4) return; + + while (1) { + + u32 was_killed; + int status; + + /* Wait for parent by reading from the pipe. Abort if read fails. 
*/ + + if (read(FORKSRV_FD, &was_killed, 4) != 4) _exit(1); + + /* If we stopped the child in persistent mode, but there was a race + condition and afl-fuzz already issued SIGKILL, write off the old + process. */ + + if (child_stopped && was_killed) { + + child_stopped = 0; + if (waitpid(child_pid, &status, 0) < 0) _exit(1); + + } + + if (!child_stopped) { + + /* Once woken up, create a clone of our process. */ + + child_pid = fork(); + if (child_pid < 0) _exit(1); + + /* In child process: close fds, resume execution. */ + + if (!child_pid) { + + signal(SIGCHLD, old_sigchld_handler); + + close(FORKSRV_FD); + close(FORKSRV_FD + 1); + return; + + } + + } else { + + /* Special handling for persistent mode: if the child is alive but + currently stopped, simply restart it with SIGCONT. */ + + kill(child_pid, SIGCONT); + child_stopped = 0; + + } + + /* In parent process: write PID to pipe, then wait for child. */ + + if (write(FORKSRV_FD + 1, &child_pid, 4) != 4) _exit(1); + + if (waitpid(child_pid, &status, is_persistent ? WUNTRACED : 0) < 0) + _exit(1); + + /* In persistent mode, the child stops itself with SIGSTOP to indicate + a successful run. In this case, we want to wake it up without forking + again. */ + + if (WIFSTOPPED(status)) child_stopped = 1; + + /* Relay wait status to pipe, then loop back. */ + + if (write(FORKSRV_FD + 1, &status, 4) != 4) _exit(1); + + } + +} + +/* A simplified persistent mode handler, used as explained in README.llvm. */ + +int __afl_persistent_loop(unsigned int max_cnt) { + + static u8 first_pass = 1; + static u32 cycle_cnt; + + if (first_pass) { + + /* Make sure that every iteration of __AFL_LOOP() starts with a clean slate. + On subsequent calls, the parent will take care of that, but on the first + iteration, it's our job to erase any trace of whatever happened + before the loop. */ + + if (is_persistent) { + + // memset(__afl_area_ptr, 0, MAP_SIZE); + __afl_area_ptr[0] = 1; + + } + + cycle_cnt = max_cnt; + first_pass = 0; + return 1; + + } + + if (is_persistent) { + + if (--cycle_cnt) { + + raise(SIGSTOP); + + __afl_area_ptr[0] = 1; + + return 1; + + } else { + + /* When exiting __AFL_LOOP(), make sure that the subsequent code that + follows the loop is not traced. We do that by pivoting back to the + dummy output region. */ + + // __afl_area_ptr = __afl_area_initial; + + } + + } + + return 0; + +} + +/* This one can be called from user code when deferred forkserver mode + is enabled. */ + +void __afl_manual_init(void) { + + static u8 init_done; + + if (!init_done) { + + __afl_map_shm(); + __afl_start_forkserver(); + init_done = 1; + + } + +} + +/* Proper initialization routine. 
*/ + +__attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) { + + is_persistent = !!getenv(PERSIST_ENV_VAR); + + if (getenv(DEFER_ENV_VAR)) return; + + __afl_manual_init(); + +} + +///// CmpLog instrumentation + +void __sanitizer_cov_trace_cmp1(uint8_t Arg1, uint8_t Arg2) { + + return; + +} + +void __sanitizer_cov_trace_cmp2(uint16_t Arg1, uint16_t Arg2) { + + if (!__afl_cmp_map) return; + + uintptr_t k = (uintptr_t)__builtin_return_address(0); + k = (k >> 4) ^ (k << 8); + k &= CMP_MAP_W - 1; + + u32 hits = __afl_cmp_map->headers[k].hits; + __afl_cmp_map->headers[k].hits = hits + 1; + // if (!__afl_cmp_map->headers[k].cnt) + // __afl_cmp_map->headers[k].cnt = __afl_cmp_counter++; + + __afl_cmp_map->headers[k].shape = 1; + //__afl_cmp_map->headers[k].type = CMP_TYPE_INS; + + hits &= CMP_MAP_H - 1; + __afl_cmp_map->log[k][hits].v0 = Arg1; + __afl_cmp_map->log[k][hits].v1 = Arg2; + +} + +void __sanitizer_cov_trace_cmp4(uint32_t Arg1, uint32_t Arg2) { + + if (!__afl_cmp_map) return; + + uintptr_t k = (uintptr_t)__builtin_return_address(0); + k = (k >> 4) ^ (k << 8); + k &= CMP_MAP_W - 1; + + u32 hits = __afl_cmp_map->headers[k].hits; + __afl_cmp_map->headers[k].hits = hits + 1; + + __afl_cmp_map->headers[k].shape = 3; + + hits &= CMP_MAP_H - 1; + __afl_cmp_map->log[k][hits].v0 = Arg1; + __afl_cmp_map->log[k][hits].v1 = Arg2; + +} + +void __sanitizer_cov_trace_cmp8(uint64_t Arg1, uint64_t Arg2) { + + if (!__afl_cmp_map) return; + + uintptr_t k = (uintptr_t)__builtin_return_address(0); + k = (k >> 4) ^ (k << 8); + k &= CMP_MAP_W - 1; + + u32 hits = __afl_cmp_map->headers[k].hits; + __afl_cmp_map->headers[k].hits = hits + 1; + + __afl_cmp_map->headers[k].shape = 7; + + hits &= CMP_MAP_H - 1; + __afl_cmp_map->log[k][hits].v0 = Arg1; + __afl_cmp_map->log[k][hits].v1 = Arg2; + +} + +#if defined(__APPLE__) +#pragma weak __sanitizer_cov_trace_const_cmp1 = __sanitizer_cov_trace_cmp1 +#pragma weak __sanitizer_cov_trace_const_cmp2 = __sanitizer_cov_trace_cmp2 +#pragma weak __sanitizer_cov_trace_const_cmp4 = __sanitizer_cov_trace_cmp4 +#pragma weak __sanitizer_cov_trace_const_cmp8 = __sanitizer_cov_trace_cmp8 +#else +void __sanitizer_cov_trace_const_cmp1(uint8_t Arg1, uint8_t Arg2) + __attribute__((alias("__sanitizer_cov_trace_cmp1"))); +void __sanitizer_cov_trace_const_cmp2(uint16_t Arg1, uint16_t Arg2) + __attribute__((alias("__sanitizer_cov_trace_cmp2"))); +void __sanitizer_cov_trace_const_cmp4(uint32_t Arg1, uint32_t Arg2) + __attribute__((alias("__sanitizer_cov_trace_cmp4"))); +void __sanitizer_cov_trace_const_cmp8(uint64_t Arg1, uint64_t Arg2) + __attribute__((alias("__sanitizer_cov_trace_cmp8"))); +#endif /* defined(__APPLE__) */ + +void __sanitizer_cov_trace_switch(uint64_t Val, uint64_t* Cases) { + + for (uint64_t i = 0; i < Cases[0]; i++) { + + uintptr_t k = (uintptr_t)__builtin_return_address(0) + i; + k = (k >> 4) ^ (k << 8); + k &= CMP_MAP_W - 1; + + u32 hits = __afl_cmp_map->headers[k].hits; + __afl_cmp_map->headers[k].hits = hits + 1; + + __afl_cmp_map->headers[k].shape = 7; + + hits &= CMP_MAP_H - 1; + __afl_cmp_map->log[k][hits].v0 = Val; + __afl_cmp_map->log[k][hits].v1 = Cases[i + 2]; + + } + +} + + +void __sanitizer_cov_trace_pc_guard(uint32_t* guard) { +} + +void __sanitizer_cov_trace_pc_guard_init(uint32_t* start, uint32_t* stop) { +} + +//// Library functions hooks + +// TODO diff --git a/llvm_mode/afl-llvm-rt.o.c b/llvm_mode/afl-llvm-rt.o.c index c3d1ba7d..9632844f 100644 --- a/llvm_mode/afl-llvm-rt.o.c +++ b/llvm_mode/afl-llvm-rt.o.c @@ -66,9 +66,6 @@ u32 
__afl_prev_loc; __thread u32 __afl_prev_loc; #endif -struct cmp_map* __afl_cmp_map; -__thread u32 __afl_cmp_counter; - /* Running in persistent mode? */ static u8 is_persistent; @@ -129,27 +126,6 @@ static void __afl_map_shm(void) { } - if (getenv("__AFL_CMPLOG_MODE__")) { - - id_str = getenv(CMPLOG_SHM_ENV_VAR); - - if (id_str) { - - u32 shm_id = atoi(id_str); - - __afl_cmp_map = shmat(shm_id, NULL, 0); - - if (__afl_cmp_map == (void*)-1) _exit(1); - - } - - } else if (getenv("AFL_CMPLOG")) { - - // during compilation, do this to avoid segfault - __afl_cmp_map = calloc(sizeof(struct cmp_map), 1); - - } - } /* Fork server logic. */ @@ -322,105 +298,6 @@ __attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) { } -///// CmpLog instrumentation - -void __sanitizer_cov_trace_cmp1(uint8_t Arg1, uint8_t Arg2) { - - return; - -} - -void __sanitizer_cov_trace_cmp2(uint16_t Arg1, uint16_t Arg2) { - - uintptr_t k = (uintptr_t)__builtin_return_address(0); - k = (k >> 4) ^ (k << 8); - k &= CMP_MAP_W - 1; - - u32 hits = __afl_cmp_map->headers[k].hits; - __afl_cmp_map->headers[k].hits = hits + 1; - // if (!__afl_cmp_map->headers[k].cnt) - // __afl_cmp_map->headers[k].cnt = __afl_cmp_counter++; - - __afl_cmp_map->headers[k].shape = 1; - //__afl_cmp_map->headers[k].type = CMP_TYPE_INS; - - hits &= CMP_MAP_H - 1; - __afl_cmp_map->log[k][hits].v0 = Arg1; - __afl_cmp_map->log[k][hits].v1 = Arg2; - -} - -void __sanitizer_cov_trace_cmp4(uint32_t Arg1, uint32_t Arg2) { - - uintptr_t k = (uintptr_t)__builtin_return_address(0); - k = (k >> 4) ^ (k << 8); - k &= CMP_MAP_W - 1; - - u32 hits = __afl_cmp_map->headers[k].hits; - __afl_cmp_map->headers[k].hits = hits + 1; - - __afl_cmp_map->headers[k].shape = 3; - - hits &= CMP_MAP_H - 1; - __afl_cmp_map->log[k][hits].v0 = Arg1; - __afl_cmp_map->log[k][hits].v1 = Arg2; - -} - -void __sanitizer_cov_trace_cmp8(uint64_t Arg1, uint64_t Arg2) { - - uintptr_t k = (uintptr_t)__builtin_return_address(0); - k = (k >> 4) ^ (k << 8); - k &= CMP_MAP_W - 1; - - u32 hits = __afl_cmp_map->headers[k].hits; - __afl_cmp_map->headers[k].hits = hits + 1; - - __afl_cmp_map->headers[k].shape = 7; - - hits &= CMP_MAP_H - 1; - __afl_cmp_map->log[k][hits].v0 = Arg1; - __afl_cmp_map->log[k][hits].v1 = Arg2; - -} - -#if defined(__APPLE__) -#pragma weak __sanitizer_cov_trace_const_cmp1 = __sanitizer_cov_trace_cmp1 -#pragma weak __sanitizer_cov_trace_const_cmp2 = __sanitizer_cov_trace_cmp2 -#pragma weak __sanitizer_cov_trace_const_cmp4 = __sanitizer_cov_trace_cmp4 -#pragma weak __sanitizer_cov_trace_const_cmp8 = __sanitizer_cov_trace_cmp8 -#else -void __sanitizer_cov_trace_const_cmp1(uint8_t Arg1, uint8_t Arg2) - __attribute__((alias("__sanitizer_cov_trace_cmp1"))); -void __sanitizer_cov_trace_const_cmp2(uint16_t Arg1, uint16_t Arg2) - __attribute__((alias("__sanitizer_cov_trace_cmp2"))); -void __sanitizer_cov_trace_const_cmp4(uint32_t Arg1, uint32_t Arg2) - __attribute__((alias("__sanitizer_cov_trace_cmp4"))); -void __sanitizer_cov_trace_const_cmp8(uint64_t Arg1, uint64_t Arg2) - __attribute__((alias("__sanitizer_cov_trace_cmp8"))); -#endif /* defined(__APPLE__) */ - -void __sanitizer_cov_trace_switch(uint64_t Val, uint64_t* Cases) { - - for (uint64_t i = 0; i < Cases[0]; i++) { - - uintptr_t k = (uintptr_t)__builtin_return_address(0) + i; - k = (k >> 4) ^ (k << 8); - k &= CMP_MAP_W - 1; - - u32 hits = __afl_cmp_map->headers[k].hits; - __afl_cmp_map->headers[k].hits = hits + 1; - - __afl_cmp_map->headers[k].shape = 7; - - hits &= CMP_MAP_H - 1; - __afl_cmp_map->log[k][hits].v0 = Val; 
- __afl_cmp_map->log[k][hits].v1 = Cases[i + 2]; - - } - -} - /* The following stuff deals with supporting -fsanitize-coverage=trace-pc-guard. It remains non-operational in the traditional, plugin-backed LLVM mode. For more info about 'trace-pc-guard', see README.llvm. diff --git a/src/afl-fuzz-cmplog.c b/src/afl-fuzz-cmplog.c index 924784cc..fcb545e1 100644 --- a/src/afl-fuzz-cmplog.c +++ b/src/afl-fuzz-cmplog.c @@ -1,373 +1,33 @@ -#include "afl-fuzz.h" -#include "cmplog.h" - -#define SWAP64(_x) \ - ({ \ - \ - u64 _ret = (_x); \ - _ret = \ - (_ret & 0x00000000FFFFFFFF) << 32 | (_ret & 0xFFFFFFFF00000000) >> 32; \ - _ret = \ - (_ret & 0x0000FFFF0000FFFF) << 16 | (_ret & 0xFFFF0000FFFF0000) >> 16; \ - _ret = \ - (_ret & 0x00FF00FF00FF00FF) << 8 | (_ret & 0xFF00FF00FF00FF00) >> 8; \ - _ret; \ - \ - }) - -u8 common_fuzz_cmplog_stuff(char** argv, u8* out_buf, u32 len); - -extern struct cmp_map* cmp_map; // defined in afl-sharedmem.c - -u8* cmplog_binary; -char** its_argv; - -///// Colorization - -struct range { - - u32 start; - u32 end; - struct range* next; - -}; - -struct range* add_range(struct range* ranges, u32 start, u32 end) { - - struct range* r = ck_alloc_nozero(sizeof(struct range)); - r->start = start; - r->end = end; - r->next = ranges; - return r; - -} - -struct range* pop_biggest_range(struct range** ranges) { - - struct range* r = *ranges; - struct range* prev = NULL; - struct range* rmax = NULL; - struct range* prev_rmax = NULL; - u32 max_size = 0; - - while (r) { - - u32 s = r->end - r->start; - if (s >= max_size) { - - max_size = s; - prev_rmax = prev; - rmax = r; - - } - - prev = r; - r = r->next; - - } - - if (rmax) { - - if (prev_rmax) - prev_rmax->next = rmax->next; - else - *ranges = rmax->next; - - } - - return rmax; - -} - -u8 get_exec_checksum(u8* buf, u32 len, u32* cksum) { - - if (unlikely(common_fuzz_stuff(its_argv, buf, len))) return 1; - - *cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - return 0; - -} - -static void rand_replace(u8* buf, u32 len) { - - u32 i; - for (i = 0; i < len; ++i) - buf[i] = UR(256); - -} - -u8 colorization(u8* buf, u32 len, u32 exec_cksum) { - - struct range* ranges = add_range(NULL, 0, len); - u8* backup = ck_alloc_nozero(len); - - u64 orig_hit_cnt, new_hit_cnt; - orig_hit_cnt = queued_paths + unique_crashes; - - stage_name = "colorization"; - stage_short = "colorization"; - stage_max = 1000; - - struct range* rng; - stage_cur = stage_max; - while ((rng = pop_biggest_range(&ranges)) != NULL && stage_cur) { - - u32 s = rng->end - rng->start; - memcpy(backup, buf + rng->start, s); - rand_replace(buf + rng->start, s); - - u32 cksum; - if (unlikely(get_exec_checksum(buf, len, &cksum))) return 1; - - if (cksum != exec_cksum) { - - ranges = add_range(ranges, rng->start, rng->start + s / 2); - ranges = add_range(ranges, rng->start + s / 2 + 1, rng->end); - memcpy(buf + rng->start, backup, s); - - } - - ck_free(rng); - --stage_cur; - - } - - new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_COLORIZATION] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_COLORIZATION] += stage_max - stage_cur; - - while (ranges) { - - rng = ranges; - ranges = ranges->next; - ck_free(rng); - - } - - return 0; - -} - -///// Input to State replacement - -u8 its_fuzz(u32 idx, u32 size, u8* buf, u32 len, u8* status) { - - u64 orig_hit_cnt, new_hit_cnt; - - orig_hit_cnt = queued_paths + unique_crashes; - - if (unlikely(common_fuzz_stuff(its_argv, buf, len))) return 1; - - new_hit_cnt = queued_paths + unique_crashes; - - if (unlikely(new_hit_cnt != 
orig_hit_cnt)) { - - *status = 1; - - } else { - - if (size >= MIN_AUTO_EXTRA && size <= MAX_AUTO_EXTRA) - maybe_add_auto(&buf[idx], size); - *status = 2; - - } - - return 0; - -} - -u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx, - u8* orig_buf, u8* buf, u32 len, u8 do_reverse, - u8* status) { - - u64* buf_64 = (u64*)&buf[idx]; - u32* buf_32 = (u32*)&buf[idx]; - u16* buf_16 = (u16*)&buf[idx]; - // u8* buf_8 = &buf[idx]; - u64* o_buf_64 = (u64*)&orig_buf[idx]; - u32* o_buf_32 = (u32*)&orig_buf[idx]; - u16* o_buf_16 = (u16*)&orig_buf[idx]; - // u8* o_buf_8 = &orig_buf[idx]; - - u32 its_len = len - idx; - *status = 0; - - if (SHAPE_BYTES(h->shape) == 8) { - - if (its_len >= 8 && *buf_64 == pattern && *o_buf_64 == pattern) { - - *buf_64 = repl; - if (unlikely(its_fuzz(idx, 8, buf, len, status))) return 1; - *buf_64 = pattern; - - } - - // reverse encoding - if (do_reverse) - if (unlikely(cmp_extend_encoding(h, SWAP64(pattern), SWAP64(repl), idx, - orig_buf, buf, len, 0, status))) - return 1; - - } - - if (SHAPE_BYTES(h->shape) == 4 || *status == 2) { - - if (its_len >= 4 && *buf_32 == (u32)pattern && *o_buf_32 == (u32)pattern) { - - *buf_32 = (u32)repl; - if (unlikely(its_fuzz(idx, 4, buf, len, status))) return 1; - *buf_32 = pattern; - - } - - // reverse encoding - if (do_reverse) - if (unlikely(cmp_extend_encoding(h, SWAP32(pattern), SWAP32(repl), idx, - orig_buf, buf, len, 0, status))) - return 1; - - } - - if (SHAPE_BYTES(h->shape) == 2 || *status == 2) { - - if (its_len >= 2 && *buf_16 == (u16)pattern && *o_buf_16 == (u16)pattern) { - - *buf_16 = (u16)repl; - if (unlikely(its_fuzz(idx, 2, buf, len, status))) return 1; - *buf_16 = (u16)pattern; - - } - - // reverse encoding - if (do_reverse) - if (unlikely(cmp_extend_encoding(h, SWAP16(pattern), SWAP16(repl), idx, - orig_buf, buf, len, 0, status))) - return 1; - - } - - /*if (SHAPE_BYTES(h->shape) == 1 || *status == 2) { - - if (its_len >= 2 && *buf_8 == (u8)pattern && *o_buf_8 == (u8)pattern) { - - *buf_8 = (u8)repl; - if (unlikely(its_fuzz(idx, 1, buf, len, status))) - return 1; - *buf_16 = (u16)pattern; - - } - - }*/ - - return 0; - -} - -u8 cmp_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) { - - struct cmp_header* h = &cmp_map->headers[key]; - u32 i, j, idx; - - u32 loggeds = h->hits; - if (h->hits > CMP_MAP_H) loggeds = CMP_MAP_H; - - u8 status; - // opt not in the paper - u32 fails = 0; - - for (i = 0; i < loggeds; ++i) { - - struct cmp_operands* o = &cmp_map->log[key][i]; - - // opt not in the paper - for (j = 0; j < i; ++j) - if (cmp_map->log[key][j].v0 == o->v0 && cmp_map->log[key][i].v1 == o->v1) - goto cmp_fuzz_next_iter; - - for (idx = 0; idx < len && fails < 8; ++idx) { - - if (unlikely(cmp_extend_encoding(h, o->v0, o->v1, idx, orig_buf, buf, len, - 1, &status))) - return 1; - if (status == 2) - ++fails; - else if (status == 1) - break; - - if (unlikely(cmp_extend_encoding(h, o->v1, o->v0, idx, orig_buf, buf, len, - 1, &status))) - return 1; - if (status == 2) - ++fails; - else if (status == 1) - break; - - } - - cmp_fuzz_next_iter: - stage_cur++; - - } - - return 0; - -} +/* + american fuzzy lop++ - cmplog execution routines + ------------------------------------------------ -///// Input to State stage + Originally written by Michal Zalewski -// queue_cur->exec_cksum -u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len, - u32 exec_cksum) { + Forkserver design by Jann Horn - its_argv = argv; + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - if 
(unlikely(colorization(buf, len, exec_cksum))) return 1; + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019-2020 AFLplusplus Project. All rights reserved. - // do it manually, forkserver clear only trace_bits - memset(cmp_map->headers, 0, sizeof(cmp_map->headers)); + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: - if (unlikely(common_fuzz_cmplog_stuff(argv, buf, len))) return 1; + http://www.apache.org/licenses/LICENSE-2.0 - u64 orig_hit_cnt, new_hit_cnt; - u64 orig_execs = total_execs; - orig_hit_cnt = queued_paths + unique_crashes; + Shared code to handle the shared memory. This is used by the fuzzer + as well the other components like afl-tmin, afl-showmap, etc... - stage_name = "input-to-state"; - stage_short = "its"; - stage_max = 0; - stage_cur = 0; + */ - u32 k; - for (k = 0; k < CMP_MAP_W; ++k) { - - if (!cmp_map->headers[k].hits) continue; - if (cmp_map->headers[k].hits > CMP_MAP_H) - stage_max += CMP_MAP_H; - else - stage_max += cmp_map->headers[k].hits; - - } - - for (k = 0; k < CMP_MAP_W; ++k) { - - if (!cmp_map->headers[k].hits) continue; - cmp_fuzz(k, orig_buf, buf, len); - - } - - memcpy(buf, orig_buf, len); - - new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_ITS] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ITS] += total_execs - orig_execs; - - return 0; - -} - -//// CmpLog forkserver +#include "afl-fuzz.h" +#include "cmplog.h" -s32 cmplog_forksrv_pid, cmplog_child_pid, cmplog_fsrv_ctl_fd, cmplog_fsrv_st_fd; +static s32 cmplog_child_pid, cmplog_fsrv_ctl_fd, cmplog_fsrv_st_fd; void init_cmplog_forkserver(char** argv) { @@ -490,8 +150,6 @@ void init_cmplog_forkserver(char** argv) { "msan_track_origins=0", 0); - setenv("__AFL_CMPLOG_MODE__", "1", 1); - argv[0] = cmplog_binary; execv(cmplog_binary, argv); @@ -783,8 +441,6 @@ u8 run_cmplog_target(char** argv, u32 timeout) { "symbolize=0:" "msan_track_origins=0", 0); - setenv("__AFL_CMPLOG_MODE__", "1", 1); - argv[0] = cmplog_binary; execv(cmplog_binary, argv); @@ -977,4 +633,3 @@ u8 common_fuzz_cmplog_stuff(char** argv, u8* out_buf, u32 len) { return 0; } - diff --git a/src/afl-fuzz-globals.c b/src/afl-fuzz-globals.c index b3476778..863ee9ad 100644 --- a/src/afl-fuzz-globals.c +++ b/src/afl-fuzz-globals.c @@ -251,6 +251,9 @@ u32 a_extras_cnt; /* Total number of tokens available */ u8 *(*post_handler)(u8 *buf, u32 *len); +u8* cmplog_binary; +s32 cmplog_forksrv_pid; + /* hooks for the custom mutator function */ size_t (*custom_mutator)(u8 *data, size_t size, u8 *mutated_out, size_t max_size, unsigned int seed); diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c new file mode 100644 index 00000000..f6f659da --- /dev/null +++ b/src/afl-fuzz-redqueen.c @@ -0,0 +1,373 @@ +/* + american fuzzy lop++ - redqueen implementation on top of cmplog + --------------------------------------------------------------- + + Originally written by Michal Zalewski + + Forkserver design by Jann Horn + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi + + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019-2020 AFLplusplus Project. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + Shared code to handle the shared memory. 
This is used by the fuzzer + as well the other components like afl-tmin, afl-showmap, etc... + + */ + +#include "afl-fuzz.h" +#include "cmplog.h" + +static char** its_argv; + +///// Colorization + +struct range { + + u32 start; + u32 end; + struct range* next; + +}; + +struct range* add_range(struct range* ranges, u32 start, u32 end) { + + struct range* r = ck_alloc_nozero(sizeof(struct range)); + r->start = start; + r->end = end; + r->next = ranges; + return r; + +} + +struct range* pop_biggest_range(struct range** ranges) { + + struct range* r = *ranges; + struct range* prev = NULL; + struct range* rmax = NULL; + struct range* prev_rmax = NULL; + u32 max_size = 0; + + while (r) { + + u32 s = r->end - r->start; + if (s >= max_size) { + + max_size = s; + prev_rmax = prev; + rmax = r; + + } + + prev = r; + r = r->next; + + } + + if (rmax) { + + if (prev_rmax) + prev_rmax->next = rmax->next; + else + *ranges = rmax->next; + + } + + return rmax; + +} + +u8 get_exec_checksum(u8* buf, u32 len, u32* cksum) { + + if (unlikely(common_fuzz_stuff(its_argv, buf, len))) return 1; + + *cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + return 0; + +} + +static void rand_replace(u8* buf, u32 len) { + + u32 i; + for (i = 0; i < len; ++i) + buf[i] = UR(256); + +} + +u8 colorization(u8* buf, u32 len, u32 exec_cksum) { + + struct range* ranges = add_range(NULL, 0, len); + u8* backup = ck_alloc_nozero(len); + + u64 orig_hit_cnt, new_hit_cnt; + orig_hit_cnt = queued_paths + unique_crashes; + + stage_name = "colorization"; + stage_short = "colorization"; + stage_max = 1000; + + struct range* rng; + stage_cur = stage_max; + while ((rng = pop_biggest_range(&ranges)) != NULL && stage_cur) { + + u32 s = rng->end - rng->start; + memcpy(backup, buf + rng->start, s); + rand_replace(buf + rng->start, s); + + u32 cksum; + if (unlikely(get_exec_checksum(buf, len, &cksum))) return 1; + + if (cksum != exec_cksum) { + + ranges = add_range(ranges, rng->start, rng->start + s / 2); + ranges = add_range(ranges, rng->start + s / 2 + 1, rng->end); + memcpy(buf + rng->start, backup, s); + + } + + ck_free(rng); + --stage_cur; + + } + + new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_COLORIZATION] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_COLORIZATION] += stage_max - stage_cur; + + while (ranges) { + + rng = ranges; + ranges = ranges->next; + ck_free(rng); + + } + + return 0; + +} + +///// Input to State replacement + +u8 its_fuzz(u32 idx, u32 size, u8* buf, u32 len, u8* status) { + + u64 orig_hit_cnt, new_hit_cnt; + + orig_hit_cnt = queued_paths + unique_crashes; + + if (unlikely(common_fuzz_stuff(its_argv, buf, len))) return 1; + + new_hit_cnt = queued_paths + unique_crashes; + + if (unlikely(new_hit_cnt != orig_hit_cnt)) { + + *status = 1; + + } else { + + if (size >= MIN_AUTO_EXTRA && size <= MAX_AUTO_EXTRA) + maybe_add_auto(&buf[idx], size); + *status = 2; + + } + + return 0; + +} + +u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx, + u8* orig_buf, u8* buf, u32 len, u8 do_reverse, + u8* status) { + + u64* buf_64 = (u64*)&buf[idx]; + u32* buf_32 = (u32*)&buf[idx]; + u16* buf_16 = (u16*)&buf[idx]; + // u8* buf_8 = &buf[idx]; + u64* o_buf_64 = (u64*)&orig_buf[idx]; + u32* o_buf_32 = (u32*)&orig_buf[idx]; + u16* o_buf_16 = (u16*)&orig_buf[idx]; + // u8* o_buf_8 = &orig_buf[idx]; + + u32 its_len = len - idx; + *status = 0; + + if (SHAPE_BYTES(h->shape) == 8) { + + if (its_len >= 8 && *buf_64 == pattern && *o_buf_64 == pattern) { + + *buf_64 = repl; + if (unlikely(its_fuzz(idx, 
8, buf, len, status))) return 1; + *buf_64 = pattern; + + } + + // reverse encoding + if (do_reverse) + if (unlikely(cmp_extend_encoding(h, SWAP64(pattern), SWAP64(repl), idx, + orig_buf, buf, len, 0, status))) + return 1; + + } + + if (SHAPE_BYTES(h->shape) == 4 || *status == 2) { + + if (its_len >= 4 && *buf_32 == (u32)pattern && *o_buf_32 == (u32)pattern) { + + *buf_32 = (u32)repl; + if (unlikely(its_fuzz(idx, 4, buf, len, status))) return 1; + *buf_32 = pattern; + + } + + // reverse encoding + if (do_reverse) + if (unlikely(cmp_extend_encoding(h, SWAP32(pattern), SWAP32(repl), idx, + orig_buf, buf, len, 0, status))) + return 1; + + } + + if (SHAPE_BYTES(h->shape) == 2 || *status == 2) { + + if (its_len >= 2 && *buf_16 == (u16)pattern && *o_buf_16 == (u16)pattern) { + + *buf_16 = (u16)repl; + if (unlikely(its_fuzz(idx, 2, buf, len, status))) return 1; + *buf_16 = (u16)pattern; + + } + + // reverse encoding + if (do_reverse) + if (unlikely(cmp_extend_encoding(h, SWAP16(pattern), SWAP16(repl), idx, + orig_buf, buf, len, 0, status))) + return 1; + + } + + /*if (SHAPE_BYTES(h->shape) == 1 || *status == 2) { + + if (its_len >= 2 && *buf_8 == (u8)pattern && *o_buf_8 == (u8)pattern) { + + *buf_8 = (u8)repl; + if (unlikely(its_fuzz(idx, 1, buf, len, status))) + return 1; + *buf_16 = (u16)pattern; + + } + + }*/ + + return 0; + +} + +u8 cmp_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) { + + struct cmp_header* h = &cmp_map->headers[key]; + u32 i, j, idx; + + u32 loggeds = h->hits; + if (h->hits > CMP_MAP_H) loggeds = CMP_MAP_H; + + u8 status; + // opt not in the paper + u32 fails = 0; + + for (i = 0; i < loggeds; ++i) { + + struct cmp_operands* o = &cmp_map->log[key][i]; + + // opt not in the paper + for (j = 0; j < i; ++j) + if (cmp_map->log[key][j].v0 == o->v0 && cmp_map->log[key][i].v1 == o->v1) + goto cmp_fuzz_next_iter; + + for (idx = 0; idx < len && fails < 8; ++idx) { + + if (unlikely(cmp_extend_encoding(h, o->v0, o->v1, idx, orig_buf, buf, len, + 1, &status))) + return 1; + if (status == 2) + ++fails; + else if (status == 1) + break; + + if (unlikely(cmp_extend_encoding(h, o->v1, o->v0, idx, orig_buf, buf, len, + 1, &status))) + return 1; + if (status == 2) + ++fails; + else if (status == 1) + break; + + } + + cmp_fuzz_next_iter: + stage_cur++; + + } + + return 0; + +} + +///// Input to State stage + +// queue_cur->exec_cksum +u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len, + u32 exec_cksum) { + + its_argv = argv; + + if (unlikely(colorization(buf, len, exec_cksum))) return 1; + + // do it manually, forkserver clear only trace_bits + memset(cmp_map->headers, 0, sizeof(cmp_map->headers)); + + if (unlikely(common_fuzz_cmplog_stuff(argv, buf, len))) return 1; + + u64 orig_hit_cnt, new_hit_cnt; + u64 orig_execs = total_execs; + orig_hit_cnt = queued_paths + unique_crashes; + + stage_name = "input-to-state"; + stage_short = "its"; + stage_max = 0; + stage_cur = 0; + + u32 k; + for (k = 0; k < CMP_MAP_W; ++k) { + + if (!cmp_map->headers[k].hits) continue; + if (cmp_map->headers[k].hits > CMP_MAP_H) + stage_max += CMP_MAP_H; + else + stage_max += cmp_map->headers[k].hits; + + } + + for (k = 0; k < CMP_MAP_W; ++k) { + + if (!cmp_map->headers[k].hits) continue; + cmp_fuzz(k, orig_buf, buf, len); + + } + + memcpy(buf, orig_buf, len); + + new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_ITS] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ITS] += total_execs - orig_execs; + + return 0; + +} -- cgit 1.4.1 From b15cd4a82a7e89c3fe604a2bb36b810ab2e62478 Mon Sep 
17 00:00:00 2001 From: Andrea Fioraldi Date: Thu, 30 Jan 2020 22:43:04 +0100 Subject: cmplog check_binary --- src/afl-fuzz.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index e0542648..50356315 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -204,7 +204,6 @@ int main(int argc, char** argv) { cmplog_mode = 1; cmplog_binary = ck_strdup(optarg); - // TODO check cmplog_binary validity break; } @@ -868,6 +867,8 @@ int main(int argc, char** argv) { if (!out_file) setup_stdio_file(); + if (cmplog_binary) + check_binary(cmplog_binary); check_binary(argv[optind]); start_time = get_cur_time(); -- cgit 1.4.1 From c8581050ff372aa76ed22c636e62913f3225389f Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Thu, 30 Jan 2020 22:46:56 +0100 Subject: status custom/rq --- llvm_mode/afl-llvm-cmplog.rt.o.c | 422 --------------------------------------- src/afl-fuzz-stats.c | 2 +- 2 files changed, 1 insertion(+), 423 deletions(-) delete mode 100644 llvm_mode/afl-llvm-cmplog.rt.o.c (limited to 'src') diff --git a/llvm_mode/afl-llvm-cmplog.rt.o.c b/llvm_mode/afl-llvm-cmplog.rt.o.c deleted file mode 100644 index ef944d4f..00000000 --- a/llvm_mode/afl-llvm-cmplog.rt.o.c +++ /dev/null @@ -1,422 +0,0 @@ -/* - american fuzzy lop++ - LLVM instrumentation bootstrap - --------------------------------------------------- - - Written by Laszlo Szekeres and - Michal Zalewski - - LLVM integration design comes from Laszlo Szekeres. - - Copyright 2015, 2016 Google Inc. All rights reserved. - Copyright 2019-2020 AFLplusplus Project. All rights reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at: - - http://www.apache.org/licenses/LICENSE-2.0 - - This code is the rewrite of afl-as.h's main_payload. - -*/ - -#ifdef __ANDROID__ -#include "android-ashmem.h" -#endif -#include "config.h" -#include "types.h" -#include "cmplog.h" - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -/* This is a somewhat ugly hack for the experimental 'trace-pc-guard' mode. - Basically, we need to make sure that the forkserver is initialized after - the LLVM-generated runtime initialization pass, not before. */ - -#ifdef USE_TRACE_PC -#define CONST_PRIO 5 -#else -#define CONST_PRIO 0 -#endif /* ^USE_TRACE_PC */ - -#include -#include - -/* Globals needed by the injected instrumentation. The __afl_area_initial region - is used for instrumentation output before __afl_map_shm() has a chance to - run. It will end up as .comm, so it shouldn't be too wasteful. */ - -// In CmpLog, the only usage of __afl_area_ptr is to report errors -u8* __afl_area_ptr; - -struct cmp_map* __afl_cmp_map; -__thread u32 __afl_cmp_counter; - -/* Running in persistent mode? */ - -static u8 is_persistent; - -/* SHM setup. */ - -static void __afl_map_shm(void) { - - u8* id_str = getenv(SHM_ENV_VAR); - - /* If we're running under AFL, attach to the appropriate region, replacing the - early-stage __afl_area_initial region that is needed to allow some really - hacky .init code to work correctly in projects such as OpenSSL. 
*/ - - if (id_str) { - -#ifdef USEMMAP - const char* shm_file_path = id_str; - int shm_fd = -1; - unsigned char* shm_base = NULL; - - /* create the shared memory segment as if it was a file */ - shm_fd = shm_open(shm_file_path, O_RDWR, 0600); - if (shm_fd == -1) { - - printf("shm_open() failed\n"); - exit(1); - - } - - /* map the shared memory segment to the address space of the process */ - shm_base = mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0); - if (shm_base == MAP_FAILED) { - - close(shm_fd); - shm_fd = -1; - - printf("mmap() failed\n"); - exit(2); - - } - - __afl_area_ptr = shm_base; -#else - u32 shm_id = atoi(id_str); - - __afl_area_ptr = shmat(shm_id, NULL, 0); -#endif - - /* Whooooops. */ - - if (__afl_area_ptr == (void*)-1) _exit(1); - - /* Write something into the bitmap so that even with low AFL_INST_RATIO, - our parent doesn't give up on us. */ - - __afl_area_ptr[0] = 1; - - } - - id_str = getenv(CMPLOG_SHM_ENV_VAR); - - if (id_str) { - - u32 shm_id = atoi(id_str); - - __afl_cmp_map = shmat(shm_id, NULL, 0); - - if (__afl_cmp_map == (void*)-1) _exit(1); - - } - -} - -/* Fork server logic. */ - -static void __afl_start_forkserver(void) { - - static u8 tmp[4]; - s32 child_pid; - - u8 child_stopped = 0; - - void (*old_sigchld_handler)(int) = 0; // = signal(SIGCHLD, SIG_DFL); - - /* Phone home and tell the parent that we're OK. If parent isn't there, - assume we're not running in forkserver mode and just execute program. */ - - if (write(FORKSRV_FD + 1, tmp, 4) != 4) return; - - while (1) { - - u32 was_killed; - int status; - - /* Wait for parent by reading from the pipe. Abort if read fails. */ - - if (read(FORKSRV_FD, &was_killed, 4) != 4) _exit(1); - - /* If we stopped the child in persistent mode, but there was a race - condition and afl-fuzz already issued SIGKILL, write off the old - process. */ - - if (child_stopped && was_killed) { - - child_stopped = 0; - if (waitpid(child_pid, &status, 0) < 0) _exit(1); - - } - - if (!child_stopped) { - - /* Once woken up, create a clone of our process. */ - - child_pid = fork(); - if (child_pid < 0) _exit(1); - - /* In child process: close fds, resume execution. */ - - if (!child_pid) { - - signal(SIGCHLD, old_sigchld_handler); - - close(FORKSRV_FD); - close(FORKSRV_FD + 1); - return; - - } - - } else { - - /* Special handling for persistent mode: if the child is alive but - currently stopped, simply restart it with SIGCONT. */ - - kill(child_pid, SIGCONT); - child_stopped = 0; - - } - - /* In parent process: write PID to pipe, then wait for child. */ - - if (write(FORKSRV_FD + 1, &child_pid, 4) != 4) _exit(1); - - if (waitpid(child_pid, &status, is_persistent ? WUNTRACED : 0) < 0) - _exit(1); - - /* In persistent mode, the child stops itself with SIGSTOP to indicate - a successful run. In this case, we want to wake it up without forking - again. */ - - if (WIFSTOPPED(status)) child_stopped = 1; - - /* Relay wait status to pipe, then loop back. */ - - if (write(FORKSRV_FD + 1, &status, 4) != 4) _exit(1); - - } - -} - -/* A simplified persistent mode handler, used as explained in README.llvm. */ - -int __afl_persistent_loop(unsigned int max_cnt) { - - static u8 first_pass = 1; - static u32 cycle_cnt; - - if (first_pass) { - - /* Make sure that every iteration of __AFL_LOOP() starts with a clean slate. - On subsequent calls, the parent will take care of that, but on the first - iteration, it's our job to erase any trace of whatever happened - before the loop. 
*/ - - if (is_persistent) { - - // memset(__afl_area_ptr, 0, MAP_SIZE); - __afl_area_ptr[0] = 1; - - } - - cycle_cnt = max_cnt; - first_pass = 0; - return 1; - - } - - if (is_persistent) { - - if (--cycle_cnt) { - - raise(SIGSTOP); - - __afl_area_ptr[0] = 1; - - return 1; - - } else { - - /* When exiting __AFL_LOOP(), make sure that the subsequent code that - follows the loop is not traced. We do that by pivoting back to the - dummy output region. */ - - // __afl_area_ptr = __afl_area_initial; - - } - - } - - return 0; - -} - -/* This one can be called from user code when deferred forkserver mode - is enabled. */ - -void __afl_manual_init(void) { - - static u8 init_done; - - if (!init_done) { - - __afl_map_shm(); - __afl_start_forkserver(); - init_done = 1; - - } - -} - -/* Proper initialization routine. */ - -__attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) { - - is_persistent = !!getenv(PERSIST_ENV_VAR); - - if (getenv(DEFER_ENV_VAR)) return; - - __afl_manual_init(); - -} - -///// CmpLog instrumentation - -void __sanitizer_cov_trace_cmp1(uint8_t Arg1, uint8_t Arg2) { - - return; - -} - -void __sanitizer_cov_trace_cmp2(uint16_t Arg1, uint16_t Arg2) { - - if (!__afl_cmp_map) return; - - uintptr_t k = (uintptr_t)__builtin_return_address(0); - k = (k >> 4) ^ (k << 8); - k &= CMP_MAP_W - 1; - - u32 hits = __afl_cmp_map->headers[k].hits; - __afl_cmp_map->headers[k].hits = hits + 1; - // if (!__afl_cmp_map->headers[k].cnt) - // __afl_cmp_map->headers[k].cnt = __afl_cmp_counter++; - - __afl_cmp_map->headers[k].shape = 1; - //__afl_cmp_map->headers[k].type = CMP_TYPE_INS; - - hits &= CMP_MAP_H - 1; - __afl_cmp_map->log[k][hits].v0 = Arg1; - __afl_cmp_map->log[k][hits].v1 = Arg2; - -} - -void __sanitizer_cov_trace_cmp4(uint32_t Arg1, uint32_t Arg2) { - - if (!__afl_cmp_map) return; - - uintptr_t k = (uintptr_t)__builtin_return_address(0); - k = (k >> 4) ^ (k << 8); - k &= CMP_MAP_W - 1; - - u32 hits = __afl_cmp_map->headers[k].hits; - __afl_cmp_map->headers[k].hits = hits + 1; - - __afl_cmp_map->headers[k].shape = 3; - - hits &= CMP_MAP_H - 1; - __afl_cmp_map->log[k][hits].v0 = Arg1; - __afl_cmp_map->log[k][hits].v1 = Arg2; - -} - -void __sanitizer_cov_trace_cmp8(uint64_t Arg1, uint64_t Arg2) { - - if (!__afl_cmp_map) return; - - uintptr_t k = (uintptr_t)__builtin_return_address(0); - k = (k >> 4) ^ (k << 8); - k &= CMP_MAP_W - 1; - - u32 hits = __afl_cmp_map->headers[k].hits; - __afl_cmp_map->headers[k].hits = hits + 1; - - __afl_cmp_map->headers[k].shape = 7; - - hits &= CMP_MAP_H - 1; - __afl_cmp_map->log[k][hits].v0 = Arg1; - __afl_cmp_map->log[k][hits].v1 = Arg2; - -} - -#if defined(__APPLE__) -#pragma weak __sanitizer_cov_trace_const_cmp1 = __sanitizer_cov_trace_cmp1 -#pragma weak __sanitizer_cov_trace_const_cmp2 = __sanitizer_cov_trace_cmp2 -#pragma weak __sanitizer_cov_trace_const_cmp4 = __sanitizer_cov_trace_cmp4 -#pragma weak __sanitizer_cov_trace_const_cmp8 = __sanitizer_cov_trace_cmp8 -#else -void __sanitizer_cov_trace_const_cmp1(uint8_t Arg1, uint8_t Arg2) - __attribute__((alias("__sanitizer_cov_trace_cmp1"))); -void __sanitizer_cov_trace_const_cmp2(uint16_t Arg1, uint16_t Arg2) - __attribute__((alias("__sanitizer_cov_trace_cmp2"))); -void __sanitizer_cov_trace_const_cmp4(uint32_t Arg1, uint32_t Arg2) - __attribute__((alias("__sanitizer_cov_trace_cmp4"))); -void __sanitizer_cov_trace_const_cmp8(uint64_t Arg1, uint64_t Arg2) - __attribute__((alias("__sanitizer_cov_trace_cmp8"))); -#endif /* defined(__APPLE__) */ - -void __sanitizer_cov_trace_switch(uint64_t Val, 
uint64_t* Cases) { - - for (uint64_t i = 0; i < Cases[0]; i++) { - - uintptr_t k = (uintptr_t)__builtin_return_address(0) + i; - k = (k >> 4) ^ (k << 8); - k &= CMP_MAP_W - 1; - - u32 hits = __afl_cmp_map->headers[k].hits; - __afl_cmp_map->headers[k].hits = hits + 1; - - __afl_cmp_map->headers[k].shape = 7; - - hits &= CMP_MAP_H - 1; - __afl_cmp_map->log[k][hits].v0 = Val; - __afl_cmp_map->log[k][hits].v1 = Cases[i + 2]; - - } - -} - - -void __sanitizer_cov_trace_pc_guard(uint32_t* guard) { -} - -void __sanitizer_cov_trace_pc_guard_init(uint32_t* start, uint32_t* stop) { -} - -//// Library functions hooks - -// TODO diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index ecadac9f..0605fca2 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -603,7 +603,7 @@ void show_stats(void) { DI(stage_cycles[STAGE_COLORIZATION]), DI(stage_finds[STAGE_ITS]), DI(stage_cycles[STAGE_ITS])); - SAYF(bV bSTOP " py/custom : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n", + SAYF(bV bSTOP " custom/rq : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n", tmp); if (!bytes_trim_out) { -- cgit 1.4.1 From f07fc52cd061fadde21a57fd757e316d6254f588 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Thu, 30 Jan 2020 22:49:31 +0100 Subject: stats screen for cmplog only --- src/afl-fuzz-stats.c | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) (limited to 'src') diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index 0605fca2..14ffd41a 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -596,15 +596,28 @@ void show_stats(void) { : cRST), tmp); - sprintf(tmp, "%s/%s, %s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_PYTHON]), - DI(stage_cycles[STAGE_PYTHON]), DI(stage_finds[STAGE_CUSTOM_MUTATOR]), - DI(stage_cycles[STAGE_CUSTOM_MUTATOR]), - DI(stage_finds[STAGE_COLORIZATION]), - DI(stage_cycles[STAGE_COLORIZATION]), DI(stage_finds[STAGE_ITS]), - DI(stage_cycles[STAGE_ITS])); - - SAYF(bV bSTOP " custom/rq : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n", - tmp); + if (cmplog_mode) { + + sprintf(tmp, "%s/%s, %s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_PYTHON]), + DI(stage_cycles[STAGE_PYTHON]), DI(stage_finds[STAGE_CUSTOM_MUTATOR]), + DI(stage_cycles[STAGE_CUSTOM_MUTATOR]), + DI(stage_finds[STAGE_COLORIZATION]), + DI(stage_cycles[STAGE_COLORIZATION]), DI(stage_finds[STAGE_ITS]), + DI(stage_cycles[STAGE_ITS])); + + SAYF(bV bSTOP " custom/rq : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n", + tmp); + + } else { + + sprintf(tmp, "%s/%s, %s/%s", DI(stage_finds[STAGE_PYTHON]), + DI(stage_cycles[STAGE_PYTHON]), DI(stage_finds[STAGE_CUSTOM_MUTATOR]), + DI(stage_cycles[STAGE_CUSTOM_MUTATOR])); + + SAYF(bV bSTOP " py/custom : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n", + tmp); + + } if (!bytes_trim_out) { -- cgit 1.4.1
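The __sanitizer_cov_trace_cmp* hooks added in these patches all share one indexing scheme: the caller's return address is mixed and masked into one of CMP_MAP_W slots, the per-slot hit counter is masked with CMP_MAP_H - 1 so the operand log behaves as a ring buffer, and shape stores the operand width in bytes minus one. The following is a minimal standalone sketch of that arithmetic, not part of the patches; the concrete CMP_MAP_W and CMP_MAP_H values are assumptions meant to mirror include/cmplog.h and may differ.

```
/* Sketch of the CmpLog map-indexing arithmetic used by the runtime hooks.
   CMP_MAP_W / CMP_MAP_H are assumed power-of-two values mirroring
   include/cmplog.h; adjust if the real header differs. */
#include <stdint.h>
#include <stdio.h>

#define CMP_MAP_W 65536 /* assumed: number of comparison slots        */
#define CMP_MAP_H 32    /* assumed: logged operand pairs per slot     */

static uint32_t cmplog_slot(uintptr_t ret_addr) {

  uintptr_t k = (ret_addr >> 4) ^ (ret_addr << 8); /* same mixing as the hooks */
  return (uint32_t)(k & (CMP_MAP_W - 1));          /* fold into the map width  */

}

int main(void) {

  uintptr_t call_site = 0x55555555a1b4; /* hypothetical return address   */
  uint32_t  hits = 37;                  /* hypothetical header hit count */

  printf("slot      = %u\n", cmplog_slot(call_site));
  printf("log entry = %u\n", hits & (CMP_MAP_H - 1)); /* ring-buffer index */
  printf("shape for a 64-bit cmp = %u (%u bytes)\n", 7u, 7u + 1u);
  return 0;

}
```

Collisions between distinct call sites are possible with this hashing; since the map only supplies candidate replacement values to the mutator rather than coverage, a collision presumably just adds noise instead of losing paths.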
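cmp_extend_encoding in src/afl-fuzz-redqueen.c retries every pattern through SWAP16/SWAP32/SWAP64 because the operand recorded at the comparison is the value as loaded by the target, while the input file may store the same magic in the opposite byte order. A small sketch of that mismatch, assuming a little-endian host such as x86:

```
/* Sketch (little-endian host assumed): a big-endian magic in the input only
   matches the CmpLog-recorded operand after a byte swap, which is why
   cmp_extend_encoding also tries the SWAPxx()'d pattern. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t swap32(uint32_t x) {

  return (x << 24) | ((x << 8) & 0x00FF0000) | ((x >> 8) & 0x0000FF00) |
         (x >> 24);

}

int main(void) {

  uint8_t  input[4] = {0xDE, 0xAD, 0xBE, 0xEF}; /* magic as stored on disk     */
  uint32_t in_mem;
  memcpy(&in_mem, input, 4);                    /* what the target compares    */

  uint32_t logged = 0xDEADBEEF;                 /* operand seen in the cmp map */

  printf("direct match : %s\n", in_mem == logged ? "yes" : "no");
  printf("swapped match: %s\n", in_mem == swap32(logged) ? "yes" : "no");
  return 0;

}
```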