aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorvan Hauser <vh@thc.org>2020-08-03 13:39:55 +0200
committerGitHub <noreply@github.com>2020-08-03 13:39:55 +0200
commitd5d8d664d0d4b95792aaccd16264f3a3cff48cc8 (patch)
treefa82a04acca16ea3e088b0d7d3aaec4b01ddf8f9 /src
parent4a51cb71fb8785325dedac693cdea4648f6e5279 (diff)
parent409e4ae945ab5aeb31b1e3a1497ce5fc65226f07 (diff)
downloadafl++-d5d8d664d0d4b95792aaccd16264f3a3cff48cc8.tar.gz
Merge pull request #477 from AFLplusplus/dev
Push to stable
Diffstat (limited to 'src')
-rw-r--r--src/afl-analyze.c29
-rw-r--r--src/afl-common.c6
-rw-r--r--src/afl-forkserver.c29
-rw-r--r--src/afl-fuzz-init.c419
-rw-r--r--src/afl-fuzz-mutators.c49
-rw-r--r--src/afl-fuzz-one.c250
-rw-r--r--src/afl-fuzz-queue.c115
-rw-r--r--src/afl-fuzz-redqueen.c132
-rw-r--r--src/afl-fuzz-run.c92
-rw-r--r--src/afl-fuzz-state.c16
-rw-r--r--src/afl-fuzz-stats.c155
-rw-r--r--src/afl-fuzz.c164
-rw-r--r--src/afl-gcc.c5
-rw-r--r--src/afl-showmap.c29
-rw-r--r--src/afl-tmin.c29
15 files changed, 1223 insertions, 296 deletions
diff --git a/src/afl-analyze.c b/src/afl-analyze.c
index 56284f6f..e6dd0fca 100644
--- a/src/afl-analyze.c
+++ b/src/afl-analyze.c
@@ -772,15 +772,38 @@ static void set_up_environment(void) {
setenv("ASAN_OPTIONS",
"abort_on_error=1:"
"detect_leaks=0:"
+ "allocator_may_return_null=1:"
"symbolize=0:"
- "allocator_may_return_null=1",
+ "handle_segv=0:"
+ "handle_sigbus=0:"
+ "handle_abort=0:"
+ "handle_sigfpe=0:"
+ "handle_sigill=0",
+ 0);
+
+ setenv("UBSAN_OPTIONS",
+ "halt_on_error=1:"
+ "abort_on_error=1:"
+ "malloc_context_size=0:"
+ "allocator_may_return_null=1:"
+ "symbolize=0:"
+ "handle_segv=0:"
+ "handle_sigbus=0:"
+ "handle_abort=0:"
+ "handle_sigfpe=0:"
+ "handle_sigill=0",
0);
setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
- "symbolize=0:"
"abort_on_error=1:"
+ "msan_track_origins=0"
"allocator_may_return_null=1:"
- "msan_track_origins=0", 0);
+ "symbolize=0:"
+ "handle_segv=0:"
+ "handle_sigbus=0:"
+ "handle_abort=0:"
+ "handle_sigfpe=0:"
+ "handle_sigill=0", 0);
if (get_afl_env("AFL_PRELOAD")) {
diff --git a/src/afl-common.c b/src/afl-common.c
index c023789b..367dec72 100644
--- a/src/afl-common.c
+++ b/src/afl-common.c
@@ -145,7 +145,8 @@ char **get_qemu_argv(u8 *own_loc, u8 **target_path_p, int argc, char **argv) {
char **new_argv = ck_alloc(sizeof(char *) * (argc + 4));
u8 * tmp, *cp = NULL, *rsl, *own_copy;
- memcpy(new_argv + 3, argv + 1, (int)(sizeof(char *)) * argc);
+ memcpy(&new_argv[3], &argv[1], (int)(sizeof(char *)) * (argc - 1));
+ new_argv[argc - 1] = NULL;
new_argv[2] = *target_path_p;
new_argv[1] = "--";
@@ -226,7 +227,8 @@ char **get_wine_argv(u8 *own_loc, u8 **target_path_p, int argc, char **argv) {
char **new_argv = ck_alloc(sizeof(char *) * (argc + 3));
u8 * tmp, *cp = NULL, *rsl, *own_copy;
- memcpy(new_argv + 2, argv + 1, (int)(sizeof(char *)) * argc);
+ memcpy(&new_argv[2], &argv[1], (int)(sizeof(char *)) * (argc - 1));
+ new_argv[argc - 1] = NULL;
new_argv[1] = *target_path_p;
diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c
index 419ce28e..47493eba 100644
--- a/src/afl-forkserver.c
+++ b/src/afl-forkserver.c
@@ -434,7 +434,27 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
"detect_leaks=0:"
"malloc_context_size=0:"
"symbolize=0:"
- "allocator_may_return_null=1",
+ "allocator_may_return_null=1:"
+ "handle_segv=0:"
+ "handle_sigbus=0:"
+ "handle_abort=0:"
+ "handle_sigfpe=0:"
+ "handle_sigill=0",
+ 0);
+
+ /* Set sane defaults for UBSAN if nothing else specified. */
+
+ setenv("UBSAN_OPTIONS",
+ "halt_on_error=1:"
+ "abort_on_error=1:"
+ "malloc_context_size=0:"
+ "allocator_may_return_null=1:"
+ "symbolize=0:"
+ "handle_segv=0:"
+ "handle_sigbus=0:"
+ "handle_abort=0:"
+ "handle_sigfpe=0:"
+ "handle_sigill=0",
0);
/* MSAN is tricky, because it doesn't support abort_on_error=1 at this
@@ -446,7 +466,12 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
"abort_on_error=1:"
"malloc_context_size=0:"
"allocator_may_return_null=1:"
- "msan_track_origins=0",
+ "msan_track_origins=0:"
+ "handle_segv=0:"
+ "handle_sigbus=0:"
+ "handle_abort=0:"
+ "handle_sigfpe=0:"
+ "handle_sigill=0",
0);
fsrv->init_child_func(fsrv, argv);
diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c
index a2e849dc..396a20f0 100644
--- a/src/afl-fuzz-init.c
+++ b/src/afl-fuzz-init.c
@@ -28,38 +28,157 @@
#ifdef HAVE_AFFINITY
-/* Build a list of processes bound to specific cores. Returns -1 if nothing
- can be found. Assumes an upper bound of 4k CPUs. */
+/* bind process to a specific cpu. Returns 0 on failure. */
-void bind_to_free_cpu(afl_state_t *afl) {
+static u8 bind_cpu(afl_state_t *afl, s32 cpuid) {
#if defined(__linux__) || defined(__FreeBSD__) || defined(__DragonFly__)
cpu_set_t c;
#elif defined(__NetBSD__)
- cpuset_t * c;
+ cpuset_t *c;
+ #elif defined(__sun)
+ psetid_t c;
+ #endif
+
+ afl->cpu_aff = cpuid;
+
+ #if defined(__linux__) || defined(__FreeBSD__) || defined(__DragonFly__)
+
+ CPU_ZERO(&c);
+ CPU_SET(cpuid, &c);
+
+ #elif defined(__NetBSD__)
+
+ c = cpuset_create();
+ if (c == NULL) { PFATAL("cpuset_create failed"); }
+ cpuset_set(cpuid, c);
+
+ #elif defined(__sun)
+
+ pset_create(&c);
+ if (pset_assign(c, cpuid, NULL)) { PFATAL("pset_assign failed"); }
+
+ #endif
+
+ #if defined(__linux__)
+
+ return (sched_setaffinity(0, sizeof(c), &c) == 0);
+
+ #elif defined(__FreeBSD__) || defined(__DragonFly__)
+
+ return (pthread_setaffinity_np(pthread_self(), sizeof(c), &c) == 0);
+
+ #elif defined(__NetBSD__)
+
+ if (pthread_setaffinity_np(pthread_self(), cpuset_size(c), c)) {
+
+ cpuset_destroy(c);
+ return 0;
+
+ }
+
+ cpuset_destroy(c);
+ return 1;
+
#elif defined(__sun)
- psetid_t c;
+
+ if (pset_bind(c, P_PID, getpid(), NULL)) {
+
+ pset_destroy(c);
+ return 0;
+
+ }
+
+ pset_destroy(c);
+ return 1;
+
+ #else
+
+ // this will need something for other platforms
+  // TODO: Solaris/Illumos has processor_bind ... might be worth a try
+ WARNF("Cannot bind to CPU yet on this platform.");
+ return 1;
+
#endif
+}
+
+/* Build a list of processes bound to specific cores. Returns -1 if nothing
+ can be found. Assumes an upper bound of 4k CPUs. */
+
+void bind_to_free_cpu(afl_state_t *afl) {
+
u8 cpu_used[4096] = {0};
+ u8 lockfile[PATH_MAX] = "";
u32 i;
- if (afl->cpu_core_count < 2) { return; }
-
if (afl->afl_env.afl_no_affinity) {
+ if (afl->cpu_to_bind != -1) {
+
+      FATAL("-b and AFL_NO_AFFINITY are mutually exclusive.");
+
+ }
+
WARNF("Not binding to a CPU core (AFL_NO_AFFINITY set).");
return;
}
+ if (afl->cpu_to_bind != -1) {
+
+ if (!bind_cpu(afl, afl->cpu_to_bind)) {
+
+ FATAL(
+ "Could not bind to requested CPU %d! Make sure you passed a valid "
+ "-b.",
+ afl->cpu_to_bind);
+
+ }
+
+ return;
+
+ }
+
+ if (afl->cpu_core_count < 2) { return; }
+
+ if (afl->sync_id) {
+
+ s32 lockfd, first = 1;
+
+ snprintf(lockfile, sizeof(lockfile), "%s/.affinity_lock", afl->sync_dir);
+ setenv(CPU_AFFINITY_ENV_VAR, lockfile, 1);
+
+ do {
+
+ if ((lockfd = open(lockfile, O_RDWR | O_CREAT | O_EXCL, 0600)) < 0) {
+
+ if (first) {
+
+ WARNF("CPU affinity lock file present, waiting ...");
+ first = 0;
+
+ }
+
+ usleep(1000);
+
+ }
+
+ } while (lockfd < 0);
+
+ close(lockfd);
+
+ }
+
#if defined(__linux__)
+
DIR * d;
struct dirent *de;
d = opendir("/proc");
if (!d) {
+ if (lockfile[0]) unlink(lockfile);
WARNF("Unable to access /proc - can't scan for free CPU cores.");
return;
@@ -67,11 +186,6 @@ void bind_to_free_cpu(afl_state_t *afl) {
ACTF("Checking CPU core loadout...");
- /* Introduce some jitter, in case multiple AFL tasks are doing the same
- thing at the same time... */
-
- usleep(R(1000) * 250);
-
/* Scan all /proc/<pid>/status entries, checking for Cpus_allowed_list.
Flag all processes bound to a specific CPU using cpu_used[]. This will
fail for some exotic binding setups, but is likely good enough in almost
@@ -114,20 +228,29 @@ void bind_to_free_cpu(afl_state_t *afl) {
}
closedir(d);
+
#elif defined(__FreeBSD__) || defined(__DragonFly__)
+
struct kinfo_proc *procs;
size_t nprocs;
size_t proccount;
int s_name[] = {CTL_KERN, KERN_PROC, KERN_PROC_ALL};
size_t s_name_l = sizeof(s_name) / sizeof(s_name[0]);
- if (sysctl(s_name, s_name_l, NULL, &nprocs, NULL, 0) != 0) return;
+ if (sysctl(s_name, s_name_l, NULL, &nprocs, NULL, 0) != 0) {
+
+ if (lockfile[0]) unlink(lockfile);
+ return;
+
+ }
+
proccount = nprocs / sizeof(*procs);
nprocs = nprocs * 4 / 3;
procs = ck_alloc(nprocs);
if (sysctl(s_name, s_name_l, procs, &nprocs, NULL, 0) != 0) {
+ if (lockfile[0]) unlink(lockfile);
ck_free(procs);
return;
@@ -136,6 +259,7 @@ void bind_to_free_cpu(afl_state_t *afl) {
for (i = 0; i < proccount; i++) {
#if defined(__FreeBSD__)
+
if (!strcmp(procs[i].ki_comm, "idle")) continue;
// fix when ki_oncpu = -1
@@ -145,16 +269,21 @@ void bind_to_free_cpu(afl_state_t *afl) {
if (oncpu != -1 && oncpu < sizeof(cpu_used) && procs[i].ki_pctcpu > 60)
cpu_used[oncpu] = 1;
+
#elif defined(__DragonFly__)
+
if (procs[i].kp_lwp.kl_cpuid < sizeof(cpu_used) &&
procs[i].kp_lwp.kl_pctcpu > 10)
cpu_used[procs[i].kp_lwp.kl_cpuid] = 1;
+
#endif
}
ck_free(procs);
+
#elif defined(__NetBSD__)
+
struct kinfo_proc2 *procs;
size_t nprocs;
size_t proccount;
@@ -163,13 +292,20 @@ void bind_to_free_cpu(afl_state_t *afl) {
CTL_KERN, KERN_PROC2, KERN_PROC_ALL, 0, sizeof(struct kinfo_proc2), 0};
size_t s_name_l = sizeof(s_name) / sizeof(s_name[0]);
- if (sysctl(s_name, s_name_l, NULL, &nprocs, NULL, 0) != 0) return;
+ if (sysctl(s_name, s_name_l, NULL, &nprocs, NULL, 0) != 0) {
+
+ if (lockfile[0]) unlink(lockfile);
+ return;
+
+ }
+
proccount = nprocs / sizeof(struct kinfo_proc2);
procs = ck_alloc(nprocs * sizeof(struct kinfo_proc2));
s_name[5] = proccount;
if (sysctl(s_name, s_name_l, procs, &nprocs, NULL, 0) != 0) {
+ if (lockfile[0]) unlink(lockfile);
ck_free(procs);
return;
@@ -183,7 +319,9 @@ void bind_to_free_cpu(afl_state_t *afl) {
}
ck_free(procs);
+
#elif defined(__sun)
+
kstat_named_t *n;
kstat_ctl_t * m;
kstat_t * k;
@@ -198,6 +336,7 @@ void bind_to_free_cpu(afl_state_t *afl) {
if (!k) {
+ if (lockfile[0]) unlink(lockfile);
kstat_close(m);
return;
@@ -205,6 +344,7 @@ void bind_to_free_cpu(afl_state_t *afl) {
if (kstat_read(m, k, NULL)) {
+ if (lockfile[0]) unlink(lockfile);
kstat_close(m);
return;
@@ -220,6 +360,7 @@ void bind_to_free_cpu(afl_state_t *afl) {
k = kstat_lookup(m, "cpu_stat", i, NULL);
if (kstat_read(m, k, &cs)) {
+ if (lockfile[0]) unlink(lockfile);
kstat_close(m);
return;
@@ -233,6 +374,7 @@ void bind_to_free_cpu(afl_state_t *afl) {
}
kstat_close(m);
+
#else
#warning \
"For this platform we do not have free CPU binding code yet. If possible, please supply a PR to https://github.com/AFLplusplus/AFLplusplus"
@@ -240,23 +382,38 @@ void bind_to_free_cpu(afl_state_t *afl) {
size_t cpu_start = 0;
- try:
#if !defined(__ANDROID__)
- for (i = cpu_start; i < afl->cpu_core_count; i++) {
- if (!cpu_used[i]) { break; }
+ for (i = cpu_start; i < afl->cpu_core_count; i++) {
- }
+ #else
- if (i == afl->cpu_core_count) {
+ /* for some reason Android goes backwards */
- #else
- for (i = afl->cpu_core_count - cpu_start - 1; i > -1; i--)
- if (!cpu_used[i]) break;
- if (i == -1) {
+ for (i = afl->cpu_core_count - 1; i > -1; i--) {
#endif
+ if (cpu_used[i]) { continue; }
+
+ OKF("Found a free CPU core, try binding to #%u.", i);
+
+ if (bind_cpu(afl, i)) {
+
+ /* Success :) */
+ break;
+
+ }
+
+ WARNF("setaffinity failed to CPU %d, trying next CPU", i);
+ cpu_start++;
+
+ }
+
+ if (lockfile[0]) unlink(lockfile);
+
+ if (i == afl->cpu_core_count || i == -1) {
+
SAYF("\n" cLRD "[-] " cRST
"Uh-oh, looks like all %d CPU cores on your system are allocated to\n"
" other instances of afl-fuzz (or similar CPU-locked tasks). "
@@ -269,97 +426,175 @@ void bind_to_free_cpu(afl_state_t *afl) {
}
- OKF("Found a free CPU core, try binding to #%u.", i);
-
- afl->cpu_aff = i;
+}
- #if defined(__linux__) || defined(__FreeBSD__) || defined(__DragonFly__)
- CPU_ZERO(&c);
- CPU_SET(i, &c);
- #elif defined(__NetBSD__)
- c = cpuset_create();
- if (c == NULL) PFATAL("cpuset_create failed");
- cpuset_set(i, c);
- #elif defined(__sun)
-pset_create(&c);
-if (pset_assign(c, i, NULL)) PFATAL("pset_assign failed");
- #endif
+#endif /* HAVE_AFFINITY */
- #if defined(__linux__)
- if (sched_setaffinity(0, sizeof(c), &c)) {
+/* Shuffle an array of pointers. Might be slightly biased. */
- if (cpu_start == afl->cpu_core_count) {
+static void shuffle_ptrs(afl_state_t *afl, void **ptrs, u32 cnt) {
- PFATAL("sched_setaffinity failed for CPU %d, exit", i);
+ u32 i;
- }
+ for (i = 0; i < cnt - 2; ++i) {
- WARNF("sched_setaffinity failed to CPU %d, trying next CPU", i);
- cpu_start++;
- goto try
- ;
+ u32 j = i + rand_below(afl, cnt - i);
+ void *s = ptrs[i];
+ ptrs[i] = ptrs[j];
+ ptrs[j] = s;
}
- #elif defined(__FreeBSD__) || defined(__DragonFly__)
- if (pthread_setaffinity_np(pthread_self(), sizeof(c), &c)) {
+}
- if (cpu_start == afl->cpu_core_count)
- PFATAL("pthread_setaffinity failed for cpu %d, exit", i);
- WARNF("pthread_setaffinity failed to CPU %d, trying next CPU", i);
- cpu_start++;
- goto try
- ;
+/* Read all testcases from foreign input directories, then queue them for
+ testing. Called at startup and at sync intervals.
+ Does not descend into subdirectories! */
- }
+void read_foreign_testcases(afl_state_t *afl, int first) {
- #elif defined(__NetBSD__)
-if (pthread_setaffinity_np(pthread_self(), cpuset_size(c), c)) {
+ if (!afl->foreign_sync_cnt) return;
- if (cpu_start == afl->cpu_core_count)
- PFATAL("pthread_setaffinity failed for cpu %d, exit", i);
- WARNF("pthread_setaffinity failed to CPU %d, trying next CPU", i);
- cpu_start++;
- goto try
- ;
+ struct dirent **nl;
+ s32 nl_cnt;
+ u32 i, iter;
-}
+ u8 val_buf[2][STRINGIFY_VAL_SIZE_MAX];
-cpuset_destroy(c);
- #elif defined(__sun)
-if (pset_bind(c, P_PID, getpid(), NULL)) {
+ for (iter = 0; iter < afl->foreign_sync_cnt; iter++) {
- if (cpu_start == afl->cpu_core_count)
- PFATAL("pset_bind failed for cpu %d, exit", i);
- WARNF("pthread_setaffinity failed to CPU %d, trying next CPU", i);
- cpu_start++;
- goto try
- ;
+ if (afl->foreign_syncs[iter].dir != NULL &&
+ afl->foreign_syncs[iter].dir[0] != 0) {
-}
+ if (first) ACTF("Scanning '%s'...", afl->foreign_syncs[iter].dir);
+ time_t ctime_max = 0;
-pset_destroy(c);
- #else
- // this will need something for other platforms
- // TODO: Solaris/Illumos has processor_bind ... might worth a try
- #endif
+ /* We use scandir() + alphasort() rather than readdir() because otherwise,
+ the ordering of test cases would vary somewhat randomly and would be
+ difficult to control. */
-}
+ nl_cnt = scandir(afl->foreign_syncs[iter].dir, &nl, NULL, NULL);
-#endif /* HAVE_AFFINITY */
+ if (nl_cnt < 0) {
-/* Shuffle an array of pointers. Might be slightly biased. */
+ if (first) {
-static void shuffle_ptrs(afl_state_t *afl, void **ptrs, u32 cnt) {
+ WARNF("Unable to open directory '%s'", afl->foreign_syncs[iter].dir);
+ sleep(1);
- u32 i;
+ }
- for (i = 0; i < cnt - 2; ++i) {
+ continue;
- u32 j = i + rand_below(afl, cnt - i);
- void *s = ptrs[i];
- ptrs[i] = ptrs[j];
- ptrs[j] = s;
+ }
+
+ if (nl_cnt == 0) {
+
+ if (first)
+ WARNF("directory %s is currently empty",
+ afl->foreign_syncs[iter].dir);
+ continue;
+
+ }
+
+ /* Show stats */
+
+ snprintf(afl->stage_name_buf, STAGE_BUF_SIZE, "foreign sync %u", iter);
+
+ afl->stage_name = afl->stage_name_buf;
+ afl->stage_cur = 0;
+ afl->stage_max = 0;
+
+ for (i = 0; i < nl_cnt; ++i) {
+
+ struct stat st;
+
+ u8 *fn2 =
+ alloc_printf("%s/%s", afl->foreign_syncs[iter].dir, nl[i]->d_name);
+
+ free(nl[i]); /* not tracked */
+
+ if (unlikely(lstat(fn2, &st) || access(fn2, R_OK))) {
+
+ if (first) PFATAL("Unable to access '%s'", fn2);
+ continue;
+
+ }
+
+ /* we detect new files by their ctime */
+ if (likely(st.st_ctime <= afl->foreign_syncs[iter].ctime)) {
+
+ ck_free(fn2);
+ continue;
+
+ }
+
+ /* This also takes care of . and .. */
+
+ if (!S_ISREG(st.st_mode) || !st.st_size || strstr(fn2, "/README.txt")) {
+
+ ck_free(fn2);
+ continue;
+
+ }
+
+ if (st.st_size > MAX_FILE) {
+
+ if (first)
+ WARNF(
+ "Test case '%s' is too big (%s, limit is %s), skipping", fn2,
+ stringify_mem_size(val_buf[0], sizeof(val_buf[0]), st.st_size),
+ stringify_mem_size(val_buf[1], sizeof(val_buf[1]), MAX_FILE));
+ ck_free(fn2);
+ continue;
+
+ }
+
+ // lets do not use add_to_queue(afl, fn2, st.st_size, 0);
+ // as this could add duplicates of the startup input corpus
+
+ int fd = open(fn2, O_RDONLY);
+ if (fd < 0) {
+
+ ck_free(fn2);
+ continue;
+
+ }
+
+ u8 fault;
+ u8 *mem = mmap(0, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
+
+ if (mem == MAP_FAILED) {
+
+ ck_free(fn2);
+ continue;
+
+ }
+
+ write_to_testcase(afl, mem, st.st_size);
+ fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);
+ afl->syncing_party = "foreign";
+ afl->queued_imported +=
+ save_if_interesting(afl, mem, st.st_size, fault);
+ afl->syncing_party = 0;
+ munmap(mem, st.st_size);
+ close(fd);
+
+ if (st.st_ctime > ctime_max) ctime_max = st.st_ctime;
+
+ }
+
+ afl->foreign_syncs[iter].ctime = ctime_max;
+ free(nl); /* not tracked */
+
+ }
+
+ }
+
+ if (first) {
+
+ afl->last_path_time = 0;
+ afl->queued_at_start = afl->queued_paths;
}
@@ -393,7 +628,7 @@ void read_testcases(afl_state_t *afl) {
ACTF("Scanning '%s'...", afl->in_dir);
/* We use scandir() + alphasort() rather than readdir() because otherwise,
- the ordering of test cases would vary somewhat randomly and would be
+ the ordering of test cases would vary somewhat randomly and would be
difficult to control. */
nl_cnt = scandir(afl->in_dir, &nl, NULL, alphasort);
@@ -454,9 +689,11 @@ void read_testcases(afl_state_t *afl) {
if (st.st_size > MAX_FILE) {
- FATAL("Test case '%s' is too big (%s, limit is %s)", fn2,
+ WARNF("Test case '%s' is too big (%s, limit is %s), skipping", fn2,
stringify_mem_size(val_buf[0], sizeof(val_buf[0]), st.st_size),
stringify_mem_size(val_buf[1], sizeof(val_buf[1]), MAX_FILE));
+ ck_free(fn2);
+ continue;
}
diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c
index 9fc77ffe..b30106a0 100644
--- a/src/afl-fuzz-mutators.c
+++ b/src/afl-fuzz-mutators.c
@@ -40,7 +40,7 @@ void setup_custom_mutators(afl_state_t *afl) {
if (fn) {
- if (afl->limit_time_sig)
+ if (afl->limit_time_sig && afl->limit_time_sig != -1)
FATAL(
"MOpt and custom mutator are mutually exclusive. We accept pull "
"requests that integrates MOpt with the optional mutators "
@@ -168,7 +168,8 @@ struct custom_mutator *load_custom_mutator(afl_state_t *afl, const char *fn) {
/* "afl_custom_deinit", optional for backward compatibility */
mutator->afl_custom_deinit = dlsym(dh, "afl_custom_deinit");
- if (!mutator->afl_custom_deinit) FATAL("Symbol 'afl_custom_init' not found.");
+ if (!mutator->afl_custom_deinit)
+ FATAL("Symbol 'afl_custom_deinit' not found.");
/* "afl_custom_post_process", optional */
mutator->afl_custom_post_process = dlsym(dh, "afl_custom_post_process");
@@ -282,22 +283,48 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf,
} else if (unlikely(retlen > orig_len)) {
- FATAL(
- "Trimmed data returned by custom mutator is larger than original "
- "data");
+ /* Do not exit the fuzzer, even if the trimmed data returned by the custom
+ mutator is larger than the original data. For some use cases, like the
+ grammar mutator, the definition of "size" may have different meanings.
+ For example, the trimming function in a grammar mutator aims at
+ reducing the objects in a grammar structure, but does not guarantee to
+ generate a smaller binary buffer.
+
+ Thus, we allow the custom mutator to generate the trimmed data that is
+ larger than the original data. */
+
+ if (afl->not_on_tty && afl->debug) {
+
+ WARNF(
+ "Trimmed data returned by custom mutator is larger than original "
+ "data");
+
+ }
+
+ } else if (unlikely(retlen == 0)) {
+
+ /* Do not run the empty test case on the target. To keep the custom
+ trimming function running, we simply treat the empty test case as an
+ unsuccessful trimming and skip it, instead of aborting the trimming. */
+
+ ++afl->trim_execs;
}
- write_to_testcase(afl, retbuf, retlen);
+ if (likely(retlen)) {
- fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);
- ++afl->trim_execs;
+ write_to_testcase(afl, retbuf, retlen);
- if (afl->stop_soon || fault == FSRV_RUN_ERROR) { goto abort_trimming; }
+ fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);
+ ++afl->trim_execs;
- cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+ if (afl->stop_soon || fault == FSRV_RUN_ERROR) { goto abort_trimming; }
+
+ cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+
+ }
- if (cksum == q->exec_cksum) {
+ if (likely(retlen && cksum == q->exec_cksum)) {
q->len = retlen;
memcpy(in_buf, retbuf, retlen);
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index 72383727..1f0bf30e 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -24,6 +24,8 @@
*/
#include "afl-fuzz.h"
+#include <string.h>
+#include <limits.h>
/* MOpt */
@@ -362,6 +364,8 @@ static void locate_diffs(u8 *ptr1, u8 *ptr2, u32 len, s32 *first, s32 *last) {
#endif /* !IGNORE_FINDS */
+#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size
+
/* Take the current entry from the queue, fuzz it for a while. This
function is a tad too long... returns 0 if fuzzed successfully, 1 if
skipped or bailed out. */
@@ -1854,6 +1858,21 @@ havoc_stage:
/* We essentially just do several thousand runs (depending on perf_score)
where we take the input file and make random stacked tweaks. */
+ u32 r_max, r;
+
+ if (unlikely(afl->expand_havoc)) {
+
+ /* add expensive havoc cases here, they are activated after a full
+ cycle without finds happened */
+
+ r_max = 16 + ((afl->extras_cnt + afl->a_extras_cnt) ? 2 : 0);
+
+ } else {
+
+ r_max = 15 + ((afl->extras_cnt + afl->a_extras_cnt) ? 2 : 0);
+
+ }
+
for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
u32 use_stacking = 1 << (1 + rand_below(afl, HAVOC_STACK_POW2));
@@ -1896,8 +1915,7 @@ havoc_stage:
}
- switch (rand_below(
- afl, 15 + ((afl->extras_cnt + afl->a_extras_cnt) ? 2 : 0))) {
+ switch ((r = rand_below(afl, r_max))) {
case 0:
@@ -2192,85 +2210,198 @@ havoc_stage:
}
- /* Values 15 and 16 can be selected only if there are any extras
- present in the dictionaries. */
+ default:
- case 15: {
+ if (likely(r <= 16 && (afl->extras_cnt || afl->a_extras_cnt))) {
- /* Overwrite bytes with an extra. */
+ /* Values 15 and 16 can be selected only if there are any extras
+ present in the dictionaries. */
- if (!afl->extras_cnt || (afl->a_extras_cnt && rand_below(afl, 2))) {
+ if (r == 15) {
- /* No user-specified extras or odds in our favor. Let's use an
- auto-detected one. */
+ /* Overwrite bytes with an extra. */
- u32 use_extra = rand_below(afl, afl->a_extras_cnt);
- u32 extra_len = afl->a_extras[use_extra].len;
- u32 insert_at;
+ if (!afl->extras_cnt ||
+ (afl->a_extras_cnt && rand_below(afl, 2))) {
- if (extra_len > temp_len) { break; }
+ /* No user-specified extras or odds in our favor. Let's use an
+ auto-detected one. */
- insert_at = rand_below(afl, temp_len - extra_len + 1);
- memcpy(out_buf + insert_at, afl->a_extras[use_extra].data,
- extra_len);
+ u32 use_extra = rand_below(afl, afl->a_extras_cnt);
+ u32 extra_len = afl->a_extras[use_extra].len;
+ u32 insert_at;
- } else {
+ if (extra_len > temp_len) { break; }
- /* No auto extras or odds in our favor. Use the dictionary. */
+ insert_at = rand_below(afl, temp_len - extra_len + 1);
+ memcpy(out_buf + insert_at, afl->a_extras[use_extra].data,
+ extra_len);
- u32 use_extra = rand_below(afl, afl->extras_cnt);
- u32 extra_len = afl->extras[use_extra].len;
- u32 insert_at;
+ } else {
- if (extra_len > temp_len) { break; }
+ /* No auto extras or odds in our favor. Use the dictionary. */
- insert_at = rand_below(afl, temp_len - extra_len + 1);
- memcpy(out_buf + insert_at, afl->extras[use_extra].data, extra_len);
+ u32 use_extra = rand_below(afl, afl->extras_cnt);
+ u32 extra_len = afl->extras[use_extra].len;
+ u32 insert_at;
- }
+ if (extra_len > temp_len) { break; }
- break;
+ insert_at = rand_below(afl, temp_len - extra_len + 1);
+ memcpy(out_buf + insert_at, afl->extras[use_extra].data,
+ extra_len);
- }
+ }
- case 16: {
+ break;
- u32 use_extra, extra_len, insert_at = rand_below(afl, temp_len + 1);
- u8 *ptr;
+ } else { // case 16
+
+ u32 use_extra, extra_len,
+ insert_at = rand_below(afl, temp_len + 1);
+ u8 *ptr;
+
+ /* Insert an extra. Do the same dice-rolling stuff as for the
+ previous case. */
+
+ if (!afl->extras_cnt ||
+ (afl->a_extras_cnt && rand_below(afl, 2))) {
+
+ use_extra = rand_below(afl, afl->a_extras_cnt);
+ extra_len = afl->a_extras[use_extra].len;
+ ptr = afl->a_extras[use_extra].data;
+
+ } else {
+
+ use_extra = rand_below(afl, afl->extras_cnt);
+ extra_len = afl->extras[use_extra].len;
+ ptr = afl->extras[use_extra].data;
+
+ }
+
+ if (temp_len + extra_len >= MAX_FILE) { break; }
+
+ out_buf = ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len);
+
+ /* Tail */
+ memmove(out_buf + insert_at + extra_len, out_buf + insert_at,
+ temp_len - insert_at);
+
+ /* Inserted part */
+ memcpy(out_buf + insert_at, ptr, extra_len);
- /* Insert an extra. Do the same dice-rolling stuff as for the
- previous case. */
+ temp_len += extra_len;
- if (!afl->extras_cnt || (afl->a_extras_cnt && rand_below(afl, 2))) {
+ break;
- use_extra = rand_below(afl, afl->a_extras_cnt);
- extra_len = afl->a_extras[use_extra].len;
- ptr = afl->a_extras[use_extra].data;
+ }
} else {
- use_extra = rand_below(afl, afl->extras_cnt);
- extra_len = afl->extras[use_extra].len;
- ptr = afl->extras[use_extra].data;
+ /*
+ switch (r) {
- }
+ case 15: // fall through
+ case 16:
+ case 17: {*/
- if (temp_len + extra_len >= MAX_FILE) { break; }
+ /* Overwrite bytes with a randomly selected chunk from another
+ testcase or insert that chunk. */
- out_buf = ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len);
+ if (afl->queued_paths < 4) break;
- /* Tail */
- memmove(out_buf + insert_at + extra_len, out_buf + insert_at,
- temp_len - insert_at);
+ /* Pick a random queue entry and seek to it. */
- /* Inserted part */
- memcpy(out_buf + insert_at, ptr, extra_len);
+ u32 tid;
+ do
+ tid = rand_below(afl, afl->queued_paths);
+ while (tid == afl->current_entry);
- temp_len += extra_len;
+ struct queue_entry *target = afl->queue_buf[tid];
- break;
+ /* Make sure that the target has a reasonable length. */
- }
+ while (target && (target->len < 2 || target == afl->queue_cur))
+ target = target->next;
+
+ if (!target) break;
+
+ /* Read the testcase into a new buffer. */
+
+ fd = open(target->fname, O_RDONLY);
+
+ if (unlikely(fd < 0)) {
+
+ PFATAL("Unable to open '%s'", target->fname);
+
+ }
+
+ u32 new_len = target->len;
+ u8 *new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), new_len);
+
+ ck_read(fd, new_buf, new_len, target->fname);
+
+ close(fd);
+
+ u8 overwrite = 0;
+ if (temp_len >= 2 && rand_below(afl, 2))
+ overwrite = 1;
+ else if (temp_len + HAVOC_BLK_XL >= MAX_FILE) {
+
+ if (temp_len >= 2)
+ overwrite = 1;
+ else
+ break;
+
+ }
+
+ if (overwrite) {
+
+ u32 copy_from, copy_to, copy_len;
+
+ copy_len = choose_block_len(afl, new_len - 1);
+ if (copy_len > temp_len) copy_len = temp_len;
+
+ copy_from = rand_below(afl, new_len - copy_len + 1);
+ copy_to = rand_below(afl, temp_len - copy_len + 1);
+
+ memmove(out_buf + copy_to, new_buf + copy_from, copy_len);
+
+ } else {
+
+ u32 clone_from, clone_to, clone_len;
+
+ clone_len = choose_block_len(afl, new_len);
+ clone_from = rand_below(afl, new_len - clone_len + 1);
+
+ clone_to = rand_below(afl, temp_len);
+
+ u8 *temp_buf =
+ ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len);
+
+ /* Head */
+
+ memcpy(temp_buf, out_buf, clone_to);
+
+ /* Inserted part */
+
+ memcpy(temp_buf + clone_to, new_buf + clone_from, clone_len);
+
+ /* Tail */
+ memcpy(temp_buf + clone_to + clone_len, out_buf + clone_to,
+ temp_len - clone_to);
+
+ swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch));
+ out_buf = temp_buf;
+ temp_len += clone_len;
+
+ }
+
+ break;
+
+ }
+
+ // end of default:
}
@@ -2357,20 +2488,7 @@ retry_splicing:
} while (tid == afl->current_entry);
afl->splicing_with = tid;
- target = afl->queue;
-
- while (tid >= 100) {
-
- target = target->next_100;
- tid -= 100;
-
- }
-
- while (tid--) {
-
- target = target->next;
-
- }
+ target = afl->queue_buf[tid];
/* Make sure that the target has a reasonable length. */
@@ -4750,7 +4868,7 @@ u8 fuzz_one(afl_state_t *afl) {
return (key_val_lv_1 | key_val_lv_2);
-#undef BUF_PARAMS
-
}
+#undef BUF_PARAMS
+
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index 7afdd9f1..71874283 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -24,6 +24,9 @@
#include "afl-fuzz.h"
#include <limits.h>
+#include <ctype.h>
+
+#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size
/* Mark deterministic checks as done for a particular queue entry. We use the
.state file to avoid repeating deterministic fuzzing when resuming aborted
@@ -100,6 +103,111 @@ void mark_as_redundant(afl_state_t *afl, struct queue_entry *q, u8 state) {
}
+/* check if ascii or UTF-8 */
+
+static u8 check_if_text(struct queue_entry *q) {
+
+ if (q->len < AFL_TXT_MIN_LEN) return 0;
+
+ u8 buf[MAX_FILE];
+ s32 fd, len = q->len, offset = 0, ascii = 0, utf8 = 0, comp;
+
+ if ((fd = open(q->fname, O_RDONLY)) < 0) return 0;
+ if ((comp = read(fd, buf, len)) != len) return 0;
+ close(fd);
+
+ while (offset < len) {
+
+ // ASCII: <= 0x7F to allow ASCII control characters
+ if ((buf[offset + 0] == 0x09 || buf[offset + 0] == 0x0A ||
+ buf[offset + 0] == 0x0D ||
+ (0x20 <= buf[offset + 0] && buf[offset + 0] <= 0x7E))) {
+
+ offset++;
+ utf8++;
+ ascii++;
+ continue;
+
+ }
+
+ if (isascii((int)buf[offset]) || isprint((int)buf[offset])) {
+
+ ascii++;
+ // we continue though as it can also be a valid utf8
+
+ }
+
+ // non-overlong 2-byte
+ if (((0xC2 <= buf[offset + 0] && buf[offset + 0] <= 0xDF) &&
+ (0x80 <= buf[offset + 1] && buf[offset + 1] <= 0xBF)) &&
+ len - offset > 1) {
+
+ offset += 2;
+ utf8++;
+ comp--;
+ continue;
+
+ }
+
+ // excluding overlongs
+ if ((len - offset > 2) &&
+ ((buf[offset + 0] == 0xE0 &&
+ (0xA0 <= buf[offset + 1] && buf[offset + 1] <= 0xBF) &&
+ (0x80 <= buf[offset + 2] &&
+ buf[offset + 2] <= 0xBF)) || // straight 3-byte
+ (((0xE1 <= buf[offset + 0] && buf[offset + 0] <= 0xEC) ||
+ buf[offset + 0] == 0xEE || buf[offset + 0] == 0xEF) &&
+ (0x80 <= buf[offset + 1] && buf[offset + 1] <= 0xBF) &&
+ (0x80 <= buf[offset + 2] &&
+ buf[offset + 2] <= 0xBF)) || // excluding surrogates
+ (buf[offset + 0] == 0xED &&
+ (0x80 <= buf[offset + 1] && buf[offset + 1] <= 0x9F) &&
+ (0x80 <= buf[offset + 2] && buf[offset + 2] <= 0xBF)))) {
+
+ offset += 3;
+ utf8++;
+ comp -= 2;
+ continue;
+
+ }
+
+ // planes 1-3
+ if ((len - offset > 3) &&
+ ((buf[offset + 0] == 0xF0 &&
+ (0x90 <= buf[offset + 1] && buf[offset + 1] <= 0xBF) &&
+ (0x80 <= buf[offset + 2] && buf[offset + 2] <= 0xBF) &&
+ (0x80 <= buf[offset + 3] &&
+ buf[offset + 3] <= 0xBF)) || // planes 4-15
+ ((0xF1 <= buf[offset + 0] && buf[offset + 0] <= 0xF3) &&
+ (0x80 <= buf[offset + 1] && buf[offset + 1] <= 0xBF) &&
+ (0x80 <= buf[offset + 2] && buf[offset + 2] <= 0xBF) &&
+ (0x80 <= buf[offset + 3] && buf[offset + 3] <= 0xBF)) || // plane 16
+ (buf[offset + 0] == 0xF4 &&
+ (0x80 <= buf[offset + 1] && buf[offset + 1] <= 0x8F) &&
+ (0x80 <= buf[offset + 2] && buf[offset + 2] <= 0xBF) &&
+ (0x80 <= buf[offset + 3] && buf[offset + 3] <= 0xBF)))) {
+
+ offset += 4;
+ utf8++;
+ comp -= 3;
+ continue;
+
+ }
+
+ offset++;
+
+ }
+
+ u32 percent_utf8 = (utf8 * 100) / comp;
+ u32 percent_ascii = (ascii * 100) / len;
+
+ if (percent_utf8 >= percent_ascii && percent_utf8 >= AFL_TXT_MIN_PERCENT)
+ return 2;
+ if (percent_ascii >= AFL_TXT_MIN_PERCENT) return 1;
+ return 0;
+
+}
+
/* Append new test case to the queue. */
void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
@@ -138,6 +246,10 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
}
+ struct queue_entry **queue_buf = ck_maybe_grow(
+ BUF_PARAMS(queue), afl->queued_paths * sizeof(struct queue_entry *));
+ queue_buf[afl->queued_paths - 1] = q;
+
afl->last_path_time = get_cur_time();
if (afl->custom_mutators_count) {
@@ -159,6 +271,9 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
}
+ /* only redqueen currently uses is_ascii */
+ if (afl->shm.cmplog_mode) q->is_ascii = check_if_text(q);
+
}
/* Destroy the entire queue. */
diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c
index 724da407..cb4c78df 100644
--- a/src/afl-fuzz-redqueen.c
+++ b/src/afl-fuzz-redqueen.c
@@ -24,6 +24,7 @@
*/
+#include <limits.h>
#include "afl-fuzz.h"
#include "cmplog.h"
@@ -177,6 +178,9 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum) {
afl->stage_cycles[STAGE_COLORIZATION] += afl->stage_cur;
ck_free(backup);
+ ck_free(rng);
+ rng = NULL;
+
while (ranges) {
rng = ranges;
@@ -186,9 +190,6 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum) {
}
- ck_free(rng);
- rng = NULL;
-
// save the input with the high entropy
if (needs_write) {
@@ -262,11 +263,64 @@ static u8 its_fuzz(afl_state_t *afl, u8 *buf, u32 len, u8 *status) {
}
/* Bounded strtoll(): parse at most sz bytes of str (which need not be
   NUL-terminated) as a signed integer in the given base.  On return, *end
   (if non-NULL) points into the caller's str just past the last byte
   consumed, mirroring strtoll() semantics. */
static long long strntoll(const char *str, size_t sz, char **end, int base) {

  char        buf[64];
  long long   ret;
  const char *beg = str;

  /* Skip leading spaces up front so they do not eat the copy budget. */
  for (; beg && sz && *beg == ' '; beg++, sz--) {}

  if (!sz) {

    if (end) { *end = (char *)str; }
    return 0;

  }

  /* Clamp oversized windows instead of failing outright: previously any
     sz >= sizeof(buf) returned 0 with *end == str, even when the leading
     bytes held a perfectly valid number.  63 digits already exceed the
     range of long long, so clamping loses nothing representable. */
  if (sz >= sizeof(buf)) { sz = sizeof(buf) - 1; }

  memcpy(buf, beg, sz);
  buf[sz] = '\0';
  ret = strtoll(buf, end, base);

  /* Always translate the end pointer from the local copy back into the
     caller's buffer -- including the saturated LLONG_MIN/LLONG_MAX case,
     which previously left *end dangling into this stack buffer. */
  if (end) { *end = (char *)beg + (*end - buf); }
  return ret;

}
+
/* Bounded strtoull(): parse at most sz bytes of str (which need not be
   NUL-terminated) as an unsigned integer in the given base.  On return,
   *end (if non-NULL) points into the caller's str just past the last byte
   consumed, mirroring strtoull() semantics. */
static unsigned long long strntoull(const char *str, size_t sz, char **end,
                                    int base) {

  char               buf[64];
  unsigned long long ret;
  const char *       beg = str;

  /* Skip leading spaces up front so they do not eat the copy budget. */
  for (; beg && sz && *beg == ' '; beg++, sz--) {}

  if (!sz) {

    if (end) { *end = (char *)str; }
    return 0;

  }

  /* Clamp oversized windows instead of failing outright (keeps behavior
     consistent with strntoll): previously any sz >= sizeof(buf) returned
     0 with *end == str, even when the leading bytes held a valid number.
     63 digits already exceed the range of unsigned long long. */
  if (sz >= sizeof(buf)) { sz = sizeof(buf) - 1; }

  memcpy(buf, beg, sz);
  buf[sz] = '\0';
  ret = strtoull(buf, end, base);

  /* Translate the end pointer from the local copy back into the caller's
     buffer so it never points at this stack-local scratch space. */
  if (end) { *end = (char *)beg + (*end - buf); }
  return ret;

}
+
+#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size
+
static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
u64 pattern, u64 repl, u64 o_pattern, u32 idx,
u8 *orig_buf, u8 *buf, u32 len, u8 do_reverse,
u8 *status) {
+ if (!buf) { FATAL("BUG: buf was NULL. Please report this.\n"); }
+
u64 *buf_64 = (u64 *)&buf[idx];
u32 *buf_32 = (u32 *)&buf[idx];
u16 *buf_16 = (u16 *)&buf[idx];
@@ -277,9 +331,56 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
u8 * o_buf_8 = &orig_buf[idx];
u32 its_len = len - idx;
- *status = 0;
+ // *status = 0;
+
+ u8 * endptr;
+ u8 use_num = 0, use_unum = 0;
+ unsigned long long unum;
+ long long num;
+ if (afl->queue_cur->is_ascii) {
+
+ endptr = buf_8;
+ num = strntoll(buf_8, len - idx, (char **)&endptr, 0);
+ if (endptr == buf_8) {
+
+ unum = strntoull(buf_8, len - idx, (char **)&endptr, 0);
+ if (endptr == buf_8) use_unum = 1;
+
+ } else
+
+ use_num = 1;
+
+ }
+
+ if (use_num && num == pattern) {
- if (SHAPE_BYTES(h->shape) == 8) {
+ size_t old_len = endptr - buf_8;
+ size_t num_len = snprintf(NULL, 0, "%lld", num);
+
+ u8 *new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), len + num_len);
+ memcpy(new_buf, buf, idx);
+
+ snprintf(new_buf + idx, num_len, "%lld", num);
+ memcpy(new_buf + idx + num_len, buf_8 + old_len, len - idx - old_len);
+
+ if (unlikely(its_fuzz(afl, new_buf, len, status))) { return 1; }
+
+ } else if (use_unum && unum == pattern) {
+
+ size_t old_len = endptr - buf_8;
+ size_t num_len = snprintf(NULL, 0, "%llu", unum);
+
+ u8 *new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), len + num_len);
+ memcpy(new_buf, buf, idx);
+
+ snprintf(new_buf + idx, num_len, "%llu", unum);
+ memcpy(new_buf + idx + num_len, buf_8 + old_len, len - idx - old_len);
+
+ if (unlikely(its_fuzz(afl, new_buf, len, status))) { return 1; }
+
+ }
+
+ if (SHAPE_BYTES(h->shape) >= 8 && *status != 1) {
if (its_len >= 8 && *buf_64 == pattern && *o_buf_64 == o_pattern) {
@@ -290,7 +391,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
}
// reverse encoding
- if (do_reverse) {
+ if (do_reverse && *status != 1) {
if (unlikely(cmp_extend_encoding(afl, h, SWAP64(pattern), SWAP64(repl),
SWAP64(o_pattern), idx, orig_buf, buf,
@@ -304,7 +405,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
}
- if (SHAPE_BYTES(h->shape) == 4 || *status == 2) {
+ if (SHAPE_BYTES(h->shape) >= 4 && *status != 1) {
if (its_len >= 4 && *buf_32 == (u32)pattern &&
*o_buf_32 == (u32)o_pattern) {
@@ -316,7 +417,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
}
// reverse encoding
- if (do_reverse) {
+ if (do_reverse && *status != 1) {
if (unlikely(cmp_extend_encoding(afl, h, SWAP32(pattern), SWAP32(repl),
SWAP32(o_pattern), idx, orig_buf, buf,
@@ -330,7 +431,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
}
- if (SHAPE_BYTES(h->shape) == 2 || *status == 2) {
+ if (SHAPE_BYTES(h->shape) >= 2 && *status != 1) {
if (its_len >= 2 && *buf_16 == (u16)pattern &&
*o_buf_16 == (u16)o_pattern) {
@@ -342,7 +443,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
}
// reverse encoding
- if (do_reverse) {
+ if (do_reverse && *status != 1) {
if (unlikely(cmp_extend_encoding(afl, h, SWAP16(pattern), SWAP16(repl),
SWAP16(o_pattern), idx, orig_buf, buf,
@@ -356,7 +457,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
}
- if (SHAPE_BYTES(h->shape) == 1 || *status == 2) {
+ if (SHAPE_BYTES(h->shape) >= 1 && *status != 1) {
if (its_len >= 1 && *buf_8 == (u8)pattern && *o_buf_8 == (u8)o_pattern) {
@@ -482,6 +583,7 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) {
for (idx = 0; idx < len && fails < 8; ++idx) {
+ status = 0;
if (unlikely(cmp_extend_encoding(afl, h, o->v0, o->v1, orig_o->v0, idx,
orig_buf, buf, len, 1, &status))) {
@@ -499,6 +601,7 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) {
}
+ status = 0;
if (unlikely(cmp_extend_encoding(afl, h, o->v1, o->v0, orig_o->v1, idx,
orig_buf, buf, len, 1, &status))) {
@@ -570,14 +673,15 @@ static u8 rtn_extend_encoding(afl_state_t *afl, struct cmp_header *h,
for (i = 0; i < its_len; ++i) {
- if (pattern[idx + i] != buf[idx + i] ||
- o_pattern[idx + i] != orig_buf[idx + i] || *status == 1) {
+ if (pattern[i] != buf[idx + i] || o_pattern[i] != orig_buf[idx + i] ||
+ *status == 1) {
break;
}
- buf[idx + i] = repl[idx + i];
+ buf[idx + i] = repl[i];
+
if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; }
}
diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c
index 2a1664e2..44d3c522 100644
--- a/src/afl-fuzz-run.c
+++ b/src/afl-fuzz-run.c
@@ -134,6 +134,8 @@ void write_to_testcase(afl_state_t *afl, void *mem, u32 len) {
}
+#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size
+
/* The same, but with an adjustable gap. Used for trimming. */
static void write_with_gap(afl_state_t *afl, void *mem, u32 len, u32 skip_at,
@@ -142,18 +144,81 @@ static void write_with_gap(afl_state_t *afl, void *mem, u32 len, u32 skip_at,
s32 fd = afl->fsrv.out_fd;
u32 tail_len = len - skip_at - skip_len;
+  /*
+    This memory is used to carry out the post-processing (if present) after
+    copying the testcase with the gap removed. Note: this can be fragile.
+  */
+ u8 *mem_trimmed = ck_maybe_grow(BUF_PARAMS(out_scratch), len - skip_len + 1);
+
+ ssize_t new_size = len - skip_len;
+ void * new_mem = mem;
+ u8 * new_buf = NULL;
+
+ bool post_process_skipped = true;
+
+ if (unlikely(afl->custom_mutators_count)) {
+
+ new_mem = mem_trimmed;
+
+ LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {
+
+ if (el->afl_custom_post_process) {
+
+ // We copy into the mem_trimmed only if we actually have custom mutators
+ // *with* post_processing installed
+
+ if (post_process_skipped) {
+
+ if (skip_at) { memcpy(mem_trimmed, (u8 *)mem, skip_at); }
+
+ if (tail_len) {
+
+ memcpy(mem_trimmed + skip_at, (u8 *)mem + skip_at + skip_len,
+ tail_len);
+
+ }
+
+ post_process_skipped = false;
+
+ }
+
+ new_size =
+ el->afl_custom_post_process(el->data, new_mem, new_size, &new_buf);
+
+ if (unlikely(!new_buf || (new_size <= 0))) {
+
+ FATAL("Custom_post_process failed (ret: %lu)",
+ (long unsigned)new_size);
+
+ }
+
+ }
+
+ new_mem = new_buf;
+
+ });
+
+ }
+
if (afl->fsrv.shmem_fuzz) {
- if (skip_at) { memcpy(afl->fsrv.shmem_fuzz, mem, skip_at); }
+ if (!post_process_skipped) {
+
+      // If we did post-processing, copy directly from the new_buf buffer
+
+ memcpy(afl->fsrv.shmem_fuzz, new_buf, new_size);
+
+ }
+
+ else {
- if (tail_len) {
+ memcpy(afl->fsrv.shmem_fuzz, mem, skip_at);
- memcpy(afl->fsrv.shmem_fuzz + skip_at, (u8 *)mem + skip_at + skip_len,
- tail_len);
+ memcpy(afl->fsrv.shmem_fuzz, mem + skip_at + skip_len, tail_len);
}
- *afl->fsrv.shmem_fuzz_len = len - skip_len;
+ *afl->fsrv.shmem_fuzz_len = new_size;
#ifdef _DEBUG
if (afl->debug) {
@@ -197,18 +262,21 @@ static void write_with_gap(afl_state_t *afl, void *mem, u32 len, u32 skip_at,
}
- if (skip_at) { ck_write(fd, mem, skip_at, afl->fsrv.out_file); }
+ if (!post_process_skipped) {
- u8 *memu8 = mem;
- if (tail_len) {
+ ck_write(fd, new_buf, new_size, afl->fsrv.out_file);
- ck_write(fd, memu8 + skip_at + skip_len, tail_len, afl->fsrv.out_file);
+ } else {
+
+ ck_write(fd, mem, skip_at, afl->fsrv.out_file);
+
+ ck_write(fd, mem + skip_at + skip_len, tail_len, afl->fsrv.out_file);
}
if (!afl->fsrv.out_file) {
- if (ftruncate(fd, len - skip_len)) { PFATAL("ftruncate() failed"); }
+ if (ftruncate(fd, new_size)) { PFATAL("ftruncate() failed"); }
lseek(fd, 0, SEEK_SET);
} else {
@@ -219,6 +287,8 @@ static void write_with_gap(afl_state_t *afl, void *mem, u32 len, u32 skip_at,
}
+#undef BUF_PARAMS
+
/* Calibrate a new test case. This is done when processing the input directory
to warn about flaky or otherwise problematic test cases early on; and when
new paths are discovered to detect variable behavior and so on. */
@@ -612,6 +682,8 @@ void sync_fuzzers(afl_state_t *afl) {
}
+ if (afl->foreign_sync_cnt) read_foreign_testcases(afl, 0);
+
}
/* Trim all new test cases to save cycles when doing deterministic checks. The
diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c
index e0e43f54..e2d62bc6 100644
--- a/src/afl-fuzz-state.c
+++ b/src/afl-fuzz-state.c
@@ -94,6 +94,7 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) {
afl->havoc_div = 1; /* Cycle count divisor for havoc */
afl->stage_name = "init"; /* Name of the current fuzz stage */
afl->splicing_with = -1; /* Splicing with which test case? */
+ afl->cpu_to_bind = -1;
#ifdef HAVE_AFFINITY
afl->cpu_aff = -1; /* Selected CPU core */
@@ -293,6 +294,20 @@ void read_afl_environment(afl_state_t *afl, char **envp) {
afl->afl_env.afl_autoresume =
get_afl_env(afl_environment_variables[i]) ? 1 : 0;
+ } else if (!strncmp(env, "AFL_CYCLE_SCHEDULES",
+
+ afl_environment_variable_len)) {
+
+ afl->cycle_schedules = afl->afl_env.afl_cycle_schedules =
+ get_afl_env(afl_environment_variables[i]) ? 1 : 0;
+
+ } else if (!strncmp(env, "AFL_EXPAND_HAVOC_NOW",
+
+ afl_environment_variable_len)) {
+
+ afl->expand_havoc = afl->afl_env.afl_expand_havoc =
+ get_afl_env(afl_environment_variables[i]) ? 1 : 0;
+
} else if (!strncmp(env, "AFL_CAL_FAST",
afl_environment_variable_len)) {
@@ -405,6 +420,7 @@ void afl_state_deinit(afl_state_t *afl) {
if (afl->pass_stats) { ck_free(afl->pass_stats); }
if (afl->orig_cmp_map) { ck_free(afl->orig_cmp_map); }
+ if (afl->queue_buf) { free(afl->queue_buf); }
if (afl->out_buf) { free(afl->out_buf); }
if (afl->out_scratch_buf) { free(afl->out_scratch_buf); }
if (afl->eff_buf) { free(afl->eff_buf); }
diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
index fc93011b..7b30b5ea 100644
--- a/src/afl-fuzz-stats.c
+++ b/src/afl-fuzz-stats.c
@@ -39,7 +39,7 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,
u8 fn[PATH_MAX];
s32 fd;
FILE * f;
- uint32_t t_bytes = count_non_255_bytes(afl, afl->virgin_bits);
+ u32 t_bytes = count_non_255_bytes(afl, afl->virgin_bits);
snprintf(fn, PATH_MAX, "%s/fuzzer_stats", afl->out_dir);
@@ -67,89 +67,102 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,
}
+ if ((unlikely(!afl->last_avg_exec_update ||
+ cur_time - afl->last_avg_exec_update >= 60000))) {
+
+ afl->last_avg_execs_saved =
+ (float)(1000 * (afl->fsrv.total_execs - afl->last_avg_execs)) /
+ (float)(cur_time - afl->last_avg_exec_update);
+ afl->last_avg_execs = afl->fsrv.total_execs;
+ afl->last_avg_exec_update = cur_time;
+
+ }
+
#ifndef __HAIKU__
if (getrusage(RUSAGE_CHILDREN, &rus)) { rus.ru_maxrss = 0; }
#endif
- fprintf(
- f,
- "start_time : %llu\n"
- "last_update : %llu\n"
- "run_time : %llu\n"
- "fuzzer_pid : %u\n"
- "cycles_done : %llu\n"
- "cycles_wo_finds : %llu\n"
- "execs_done : %llu\n"
- "execs_per_sec : %0.02f\n"
- // "real_execs_per_sec: %0.02f\n" // damn the name is too long
- "paths_total : %u\n"
- "paths_favored : %u\n"
- "paths_found : %u\n"
- "paths_imported : %u\n"
- "max_depth : %u\n"
- "cur_path : %u\n" /* Must match find_start_position() */
- "pending_favs : %u\n"
- "pending_total : %u\n"
- "variable_paths : %u\n"
- "stability : %0.02f%%\n"
- "bitmap_cvg : %0.02f%%\n"
- "unique_crashes : %llu\n"
- "unique_hangs : %llu\n"
- "last_path : %llu\n"
- "last_crash : %llu\n"
- "last_hang : %llu\n"
- "execs_since_crash : %llu\n"
- "exec_timeout : %u\n"
- "slowest_exec_ms : %u\n"
- "peak_rss_mb : %lu\n"
- "cpu_affinity : %d\n"
- "edges_found : %u\n"
- "var_byte_count : %u\n"
- "afl_banner : %s\n"
- "afl_version : " VERSION
- "\n"
- "target_mode : %s%s%s%s%s%s%s%s%s\n"
- "command_line : %s\n",
- afl->start_time / 1000, cur_time / 1000,
- (cur_time - afl->start_time) / 1000, (u32)getpid(),
- afl->queue_cycle ? (afl->queue_cycle - 1) : 0, afl->cycles_wo_finds,
- afl->fsrv.total_execs,
- afl->fsrv.total_execs /
- ((double)(get_cur_time() - afl->start_time) / 1000),
- afl->queued_paths, afl->queued_favored, afl->queued_discovered,
- afl->queued_imported, afl->max_depth, afl->current_entry,
- afl->pending_favored, afl->pending_not_fuzzed, afl->queued_variable,
- stability, bitmap_cvg, afl->unique_crashes, afl->unique_hangs,
- afl->last_path_time / 1000, afl->last_crash_time / 1000,
- afl->last_hang_time / 1000, afl->fsrv.total_execs - afl->last_crash_execs,
- afl->fsrv.exec_tmout, afl->slowest_exec_ms,
+ fprintf(f,
+ "start_time : %llu\n"
+ "last_update : %llu\n"
+ "run_time : %llu\n"
+ "fuzzer_pid : %u\n"
+ "cycles_done : %llu\n"
+ "cycles_wo_finds : %llu\n"
+ "execs_done : %llu\n"
+ "execs_per_sec : %0.02f\n"
+ "execs_ps_last_min : %0.02f\n"
+ "paths_total : %u\n"
+ "paths_favored : %u\n"
+ "paths_found : %u\n"
+ "paths_imported : %u\n"
+ "max_depth : %u\n"
+ "cur_path : %u\n" /* Must match find_start_position() */
+ "pending_favs : %u\n"
+ "pending_total : %u\n"
+ "variable_paths : %u\n"
+ "stability : %0.02f%%\n"
+ "bitmap_cvg : %0.02f%%\n"
+ "unique_crashes : %llu\n"
+ "unique_hangs : %llu\n"
+ "last_path : %llu\n"
+ "last_crash : %llu\n"
+ "last_hang : %llu\n"
+ "execs_since_crash : %llu\n"
+ "exec_timeout : %u\n"
+ "slowest_exec_ms : %u\n"
+ "peak_rss_mb : %lu\n"
+ "cpu_affinity : %d\n"
+ "edges_found : %u\n"
+ "var_byte_count : %u\n"
+ "havoc_expansion : %u\n"
+ "afl_banner : %s\n"
+ "afl_version : " VERSION
+ "\n"
+ "target_mode : %s%s%s%s%s%s%s%s%s\n"
+ "command_line : %s\n",
+ afl->start_time / 1000, cur_time / 1000,
+ (cur_time - afl->start_time) / 1000, (u32)getpid(),
+ afl->queue_cycle ? (afl->queue_cycle - 1) : 0, afl->cycles_wo_finds,
+ afl->fsrv.total_execs,
+ afl->fsrv.total_execs /
+ ((double)(get_cur_time() - afl->start_time) / 1000),
+ afl->last_avg_execs_saved, afl->queued_paths, afl->queued_favored,
+ afl->queued_discovered, afl->queued_imported, afl->max_depth,
+ afl->current_entry, afl->pending_favored, afl->pending_not_fuzzed,
+ afl->queued_variable, stability, bitmap_cvg, afl->unique_crashes,
+ afl->unique_hangs, afl->last_path_time / 1000,
+ afl->last_crash_time / 1000, afl->last_hang_time / 1000,
+ afl->fsrv.total_execs - afl->last_crash_execs, afl->fsrv.exec_tmout,
+ afl->slowest_exec_ms,
#ifndef __HAIKU__
#ifdef __APPLE__
- (unsigned long int)(rus.ru_maxrss >> 20),
+ (unsigned long int)(rus.ru_maxrss >> 20),
#else
- (unsigned long int)(rus.ru_maxrss >> 10),
+ (unsigned long int)(rus.ru_maxrss >> 10),
#endif
#else
- -1UL,
+ -1UL,
#endif
#ifdef HAVE_AFFINITY
- afl->cpu_aff,
+ afl->cpu_aff,
#else
- -1,
+ -1,
#endif
- t_bytes, afl->var_byte_count, afl->use_banner,
- afl->unicorn_mode ? "unicorn" : "", afl->fsrv.qemu_mode ? "qemu " : "",
- afl->non_instrumented_mode ? " non_instrumented " : "",
- afl->no_forkserver ? "no_fsrv " : "", afl->crash_mode ? "crash " : "",
- afl->persistent_mode ? "persistent " : "",
- afl->shmem_testcase_mode ? "shmem_testcase " : "",
- afl->deferred_mode ? "deferred " : "",
- (afl->unicorn_mode || afl->fsrv.qemu_mode || afl->non_instrumented_mode ||
- afl->no_forkserver || afl->crash_mode || afl->persistent_mode ||
- afl->deferred_mode)
- ? ""
- : "default",
- afl->orig_cmdline);
+ t_bytes, afl->var_byte_count, afl->expand_havoc, afl->use_banner,
+ afl->unicorn_mode ? "unicorn" : "",
+ afl->fsrv.qemu_mode ? "qemu " : "",
+ afl->non_instrumented_mode ? " non_instrumented " : "",
+ afl->no_forkserver ? "no_fsrv " : "", afl->crash_mode ? "crash " : "",
+ afl->persistent_mode ? "persistent " : "",
+ afl->shmem_testcase_mode ? "shmem_testcase " : "",
+ afl->deferred_mode ? "deferred " : "",
+ (afl->unicorn_mode || afl->fsrv.qemu_mode ||
+ afl->non_instrumented_mode || afl->no_forkserver ||
+ afl->crash_mode || afl->persistent_mode || afl->deferred_mode)
+ ? ""
+ : "default",
+ afl->orig_cmdline);
/* ignore errors */
if (afl->debug) {
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index e4e2669c..da30797c 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -42,19 +42,21 @@ static void at_exit() {
int i;
char *list[4] = {SHM_ENV_VAR, SHM_FUZZ_ENV_VAR, CMPLOG_SHM_ENV_VAR, NULL};
- char *ptr = getenv("__AFL_TARGET_PID1");
+ char *ptr;
+ ptr = getenv(CPU_AFFINITY_ENV_VAR);
+ if (ptr && *ptr) unlink(ptr);
+
+ ptr = getenv("__AFL_TARGET_PID1");
if (ptr && *ptr && (i = atoi(ptr)) > 0) kill(i, SIGKILL);
ptr = getenv("__AFL_TARGET_PID2");
-
if (ptr && *ptr && (i = atoi(ptr)) > 0) kill(i, SIGKILL);
i = 0;
while (list[i] != NULL) {
ptr = getenv(list[i]);
-
if (ptr && *ptr) {
#ifdef USEMMAP
@@ -129,18 +131,23 @@ static void usage(afl_state_t *afl, u8 *argv0, int more_help) {
"executions.\n\n"
"Other stuff:\n"
- " -T text - text banner to show on the screen\n"
" -M/-S id - distributed mode (see docs/parallel_fuzzing.md)\n"
" use -D to force -S secondary to perform deterministic "
"fuzzing\n"
+ " -F path - sync to a foreign fuzzer queue directory (requires "
+ "-M, can\n"
+ " be specified up to %u times)\n"
+ " -T text - text banner to show on the screen\n"
" -I command - execute this command/script when a new crash is "
"found\n"
//" -B bitmap.txt - mutate a specific test case, use the out/fuzz_bitmap
//" "file\n"
" -C - crash exploration mode (the peruvian rabbit thing)\n"
+ " -b cpu_id - bind the fuzzing process to the specified CPU core "
+ "(0-...)\n"
" -e ext - file extension for the fuzz test input file (if "
"needed)\n\n",
- argv0, EXEC_TIMEOUT, MEM_LIMIT);
+ argv0, EXEC_TIMEOUT, MEM_LIMIT, FOREIGN_SYNCS_MAX);
if (more_help > 1) {
@@ -156,11 +163,13 @@ static void usage(afl_state_t *afl, u8 *argv0, int more_help) {
"AFL_BENCH_UNTIL_CRASH: exit soon when the first crashing input has been found\n"
"AFL_CUSTOM_MUTATOR_LIBRARY: lib with afl_custom_fuzz() to mutate inputs\n"
"AFL_CUSTOM_MUTATOR_ONLY: avoid AFL++'s internal mutators\n"
+ "AFL_CYCLE_SCHEDULES: after completing a cycle, switch to a different -p schedule\n"
"AFL_DEBUG: extra debugging output for Python mode trimming\n"
"AFL_DEBUG_CHILD_OUTPUT: do not suppress stdout/stderr from target\n"
"AFL_DISABLE_TRIM: disable the trimming of test cases\n"
"AFL_DUMB_FORKSRV: use fork server without feedback from target\n"
"AFL_EXIT_WHEN_DONE: exit when all inputs are run and no new finds are found\n"
+ "AFL_EXPAND_HAVOC_NOW: immediately enable expand havoc mode (default: after 60 minutes and a cycle without finds)\n"
"AFL_FAST_CAL: limit the calibration stage to three cycles for speedup\n"
"AFL_FORCE_UI: force showing the status screen (for virtual consoles)\n"
"AFL_HANG_TMOUT: override timeout value (in milliseconds)\n"
@@ -264,9 +273,11 @@ int main(int argc, char **argv_orig, char **envp) {
gettimeofday(&tv, &tz);
rand_set_seed(afl, tv.tv_sec ^ tv.tv_usec ^ getpid());
- while ((opt = getopt(argc, argv,
- "+c:i:I:o:f:m:t:T:dDnCB:S:M:x:QNUWe:p:s:V:E:L:hRP:")) >
- 0) {
+ afl->shmem_testcase_mode = 1; // we always try to perform shmem fuzzing
+
+ while ((opt = getopt(
+ argc, argv,
+ "+b:c:i:I:o:f:F:m:t:T:dDnCB:S:M:x:QNUWe:p:s:V:E:L:hRP:")) > 0) {
switch (opt) {
@@ -274,6 +285,17 @@ int main(int argc, char **argv_orig, char **envp) {
afl->infoexec = optarg;
break;
+ case 'b': { /* bind CPU core */
+
+ if (afl->cpu_to_bind != -1) FATAL("Multiple -b options not supported");
+
+ if (sscanf(optarg, "%u", &afl->cpu_to_bind) < 0 || optarg[0] == '-')
+ FATAL("Bad syntax used for -b");
+
+ break;
+
+ }
+
case 'c': {
afl->shm.cmplog_mode = 1;
@@ -399,6 +421,19 @@ int main(int argc, char **argv_orig, char **envp) {
afl->use_splicing = 1;
break;
+ case 'F': /* foreign sync dir */
+
+ if (!afl->is_main_node)
+ FATAL(
+ "Option -F can only be specified after the -M option for the "
+ "main fuzzer of a fuzzing campaign");
+ if (afl->foreign_sync_cnt >= FOREIGN_SYNCS_MAX)
+ FATAL("Maximum %u entried of -F option can be specified",
+ FOREIGN_SYNCS_MAX);
+ afl->foreign_syncs[afl->foreign_sync_cnt].dir = optarg;
+ afl->foreign_sync_cnt++;
+ break;
+
case 'f': /* target file */
if (afl->fsrv.out_file) { FATAL("Multiple -f options not supported"); }
@@ -561,7 +596,6 @@ int main(int argc, char **argv_orig, char **envp) {
if (afl->fsrv.qemu_mode) { FATAL("Multiple -Q options not supported"); }
afl->fsrv.qemu_mode = 1;
- afl->shmem_testcase_mode = 1;
if (!mem_limit_given) { afl->fsrv.mem_limit = MEM_LIMIT_QEMU; }
@@ -578,7 +612,6 @@ int main(int argc, char **argv_orig, char **envp) {
if (afl->unicorn_mode) { FATAL("Multiple -U options not supported"); }
afl->unicorn_mode = 1;
- afl->shmem_testcase_mode = 1;
if (!mem_limit_given) { afl->fsrv.mem_limit = MEM_LIMIT_UNICORN; }
@@ -589,7 +622,6 @@ int main(int argc, char **argv_orig, char **envp) {
if (afl->use_wine) { FATAL("Multiple -W options not supported"); }
afl->fsrv.qemu_mode = 1;
afl->use_wine = 1;
- afl->shmem_testcase_mode = 1;
if (!mem_limit_given) { afl->fsrv.mem_limit = 0; }
@@ -899,6 +931,7 @@ int main(int argc, char **argv_orig, char **envp) {
if (get_afl_env("AFL_NO_ARITH")) { afl->no_arith = 1; }
if (get_afl_env("AFL_SHUFFLE_QUEUE")) { afl->shuffle_queue = 1; }
if (get_afl_env("AFL_FAST_CAL")) { afl->fast_cal = 1; }
+ if (get_afl_env("AFL_EXPAND_HAVOC_NOW")) { afl->expand_havoc = 1; }
if (afl->afl_env.afl_autoresume) {
@@ -1011,16 +1044,23 @@ int main(int argc, char **argv_orig, char **envp) {
}
+ check_crash_handling();
+ check_cpu_governor(afl);
+
get_core_count(afl);
+ atexit(at_exit);
+
+ setup_dirs_fds(afl);
+
#ifdef HAVE_AFFINITY
bind_to_free_cpu(afl);
#endif /* HAVE_AFFINITY */
- check_crash_handling();
- check_cpu_governor(afl);
-
- atexit(at_exit);
+ #ifdef __HAIKU__
+ /* Prioritizes performance over power saving */
+ set_scheduler_mode(SCHEDULER_MODE_LOW_LATENCY);
+ #endif
afl->fsrv.trace_bits =
afl_shm_init(&afl->shm, afl->fsrv.map_size, afl->non_instrumented_mode);
@@ -1038,20 +1078,26 @@ int main(int argc, char **argv_orig, char **envp) {
}
- setup_dirs_fds(afl);
-
if (afl->is_secondary_node && check_main_node_exists(afl) == 0) {
WARNF("no -M main node found. You need to run one main instance!");
- sleep(5);
+ sleep(3);
}
+ #ifdef RAND_TEST_VALUES
+ u32 counter;
+ for (counter = 0; counter < 100000; counter++)
+ printf("DEBUG: rand %06d is %u\n", counter, rand_below(afl, 65536));
+ #endif
+
setup_custom_mutators(afl);
setup_cmdline_file(afl, argv + optind);
read_testcases(afl);
+ // read_foreign_testcases(afl, 1); for the moment dont do this
+
load_auto(afl);
pivot_inputs(afl);
@@ -1209,6 +1255,7 @@ int main(int argc, char **argv_orig, char **envp) {
}
+ // (void)nice(-20); // does not improve the speed
// real start time, we reset, so this works correctly with -V
afl->start_time = get_cur_time();
@@ -1245,11 +1292,43 @@ int main(int argc, char **argv_orig, char **envp) {
/* If we had a full queue cycle with no new finds, try
recombination strategies next. */
- if (afl->queued_paths == prev_queued) {
+ if (afl->queued_paths == prev_queued &&
+ (get_cur_time() - afl->start_time) >= 3600) {
if (afl->use_splicing) {
++afl->cycles_wo_finds;
+ switch (afl->expand_havoc) {
+
+ case 0:
+ afl->expand_havoc = 1;
+ break;
+ case 1:
+ if (afl->limit_time_sig == 0 && !afl->custom_only &&
+ !afl->python_only) {
+
+ afl->limit_time_sig = -1;
+ afl->limit_time_puppet = 0;
+
+ }
+
+ afl->expand_havoc = 2;
+ break;
+ case 2:
+ // afl->cycle_schedules = 1;
+ afl->expand_havoc = 3;
+ break;
+ case 3:
+ // nothing else currently
+ break;
+
+ }
+
+ if (afl->expand_havoc) {
+
+ } else
+
+ afl->expand_havoc = 1;
} else {
@@ -1263,6 +1342,53 @@ int main(int argc, char **argv_orig, char **envp) {
}
+ if (afl->cycle_schedules) {
+
+ /* we cannot mix non-AFLfast schedules with others */
+
+ switch (afl->schedule) {
+
+ case EXPLORE:
+ afl->schedule = EXPLOIT;
+ break;
+ case EXPLOIT:
+ afl->schedule = MMOPT;
+ break;
+ case MMOPT:
+ afl->schedule = SEEK;
+ break;
+ case SEEK:
+ afl->schedule = EXPLORE;
+ break;
+ case FAST:
+ afl->schedule = COE;
+ break;
+ case COE:
+ afl->schedule = LIN;
+ break;
+ case LIN:
+ afl->schedule = QUAD;
+ break;
+ case QUAD:
+ afl->schedule = RARE;
+ break;
+ case RARE:
+ afl->schedule = FAST;
+ break;
+
+ }
+
+ struct queue_entry *q = afl->queue;
+ // we must recalculate the scores of all queue entries
+ while (q) {
+
+ update_bitmap_score(afl, q);
+ q = q->next;
+
+ }
+
+ }
+
prev_queued = afl->queued_paths;
if (afl->sync_id && afl->queue_cycle == 1 &&
diff --git a/src/afl-gcc.c b/src/afl-gcc.c
index b8ff7e77..22e6be8e 100644
--- a/src/afl-gcc.c
+++ b/src/afl-gcc.c
@@ -132,6 +132,9 @@ static void edit_params(u32 argc, char **argv) {
name = argv[0];
+ /* This should never happen but fixes a scan-build warning */
+ if (!name) { FATAL("Empty argv set"); }
+
} else {
++name;
@@ -465,7 +468,7 @@ int main(int argc, char **argv) {
u32 map_size = atoi(ptr);
if (map_size != MAP_SIZE) {
- FATAL("AFL_MAP_SIZE is not supported by afl-gcc");
+ WARNF("AFL_MAP_SIZE is not supported by afl-gcc");
}
diff --git a/src/afl-showmap.c b/src/afl-showmap.c
index 883398ff..71e975a1 100644
--- a/src/afl-showmap.c
+++ b/src/afl-showmap.c
@@ -456,15 +456,38 @@ static void set_up_environment(afl_forkserver_t *fsrv) {
setenv("ASAN_OPTIONS",
"abort_on_error=1:"
"detect_leaks=0:"
+ "allocator_may_return_null=1:"
"symbolize=0:"
- "allocator_may_return_null=1",
+ "handle_segv=0:"
+ "handle_sigbus=0:"
+ "handle_abort=0:"
+ "handle_sigfpe=0:"
+ "handle_sigill=0",
+ 0);
+
+ setenv("UBSAN_OPTIONS",
+ "halt_on_error=1:"
+ "abort_on_error=1:"
+ "malloc_context_size=0:"
+ "allocator_may_return_null=1:"
+ "symbolize=0:"
+ "handle_segv=0:"
+ "handle_sigbus=0:"
+ "handle_abort=0:"
+ "handle_sigfpe=0:"
+ "handle_sigill=0",
0);
setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
- "symbolize=0:"
"abort_on_error=1:"
+ "msan_track_origins=0"
"allocator_may_return_null=1:"
- "msan_track_origins=0", 0);
+ "symbolize=0:"
+ "handle_segv=0:"
+ "handle_sigbus=0:"
+ "handle_abort=0:"
+ "handle_sigfpe=0:"
+ "handle_sigill=0", 0);
if (get_afl_env("AFL_PRELOAD")) {
diff --git a/src/afl-tmin.c b/src/afl-tmin.c
index 2db1eae7..68fcdd14 100644
--- a/src/afl-tmin.c
+++ b/src/afl-tmin.c
@@ -701,15 +701,38 @@ static void set_up_environment(afl_forkserver_t *fsrv) {
setenv("ASAN_OPTIONS",
"abort_on_error=1:"
"detect_leaks=0:"
+ "allocator_may_return_null=1:"
"symbolize=0:"
- "allocator_may_return_null=1",
+ "handle_segv=0:"
+ "handle_sigbus=0:"
+ "handle_abort=0:"
+ "handle_sigfpe=0:"
+ "handle_sigill=0",
+ 0);
+
+ setenv("UBSAN_OPTIONS",
+ "halt_on_error=1:"
+ "abort_on_error=1:"
+ "malloc_context_size=0:"
+ "allocator_may_return_null=1:"
+ "symbolize=0:"
+ "handle_segv=0:"
+ "handle_sigbus=0:"
+ "handle_abort=0:"
+ "handle_sigfpe=0:"
+ "handle_sigill=0",
0);
setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
- "symbolize=0:"
"abort_on_error=1:"
+ "msan_track_origins=0"
"allocator_may_return_null=1:"
- "msan_track_origins=0", 0);
+ "symbolize=0:"
+ "handle_segv=0:"
+ "handle_sigbus=0:"
+ "handle_abort=0:"
+ "handle_sigfpe=0:"
+ "handle_sigill=0", 0);
if (get_afl_env("AFL_PRELOAD")) {