author    van Hauser <vh@thc.org>    2020-09-07 17:30:28 +0200
committer van Hauser <vh@thc.org>    2020-09-07 17:30:28 +0200
commit    6404abd7d609350ffd6c6f221cbf56e60b2ef030 (patch)
tree      5e81ba7c8fc92bb26bdd6847027ef4feec6042fa
parent    7bcbfd48e54eba5a99d05b04f4f3d6bea29cde80 (diff)
download  afl++-6404abd7d609350ffd6c6f221cbf56e60b2ef030.tar.gz
bugfix for fixed seeds
-rw-r--r--  src/afl-fuzz-queue.c |  2
-rw-r--r--  src/afl-fuzz-stats.c | 12
2 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index 8c7bfc55..336b7f4f 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -502,7 +502,7 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
// Longer execution time means longer work on the input, the deeper in
// coverage, the better the fuzzing, right? -mh
- if (afl->schedule >= RARE && likely(!afl->fixed_seed)) {
+ if (likely(afl->schedule < RARE) && likely(!afl->fixed_seed)) {
if (q->exec_us * 0.1 > avg_exec_us) {
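
The hunk above only flips the schedule check: the execution-time adjustment in calculate_score() now applies to the schedules below RARE and is always skipped for fixed seeds. The fragment below is a minimal, self-contained sketch of that gate, not the real function; the typedefs, the numeric value of RARE, and the perf_score figures are placeholders chosen for illustration.

/* Sketch of the corrected gate in calculate_score(); only the condition
 * mirrors the patch above, every value here is a placeholder.          */
#include <stdint.h>

typedef uint8_t  u8;
typedef uint32_t u32;
typedef uint64_t u64;

#define RARE 7 /* placeholder: stands in for the RARE power schedule */

struct queue_entry_sketch { u64 exec_us; };                      /* per-seed exec time */
struct afl_state_sketch   { u32 schedule; u8 fixed_seed; u64 avg_exec_us; };

static u32 calculate_score_sketch(const struct afl_state_sketch *afl,
                                  const struct queue_entry_sketch *q) {

  u32 perf_score = 100;

  /* Old code gated on afl->schedule >= RARE; the fix runs the adjustment
   * only for schedules below RARE, and never with a fixed seed, so a
   * deterministic run is not skewed by execution-time noise.            */
  if (afl->schedule < RARE && !afl->fixed_seed) {

    if (q->exec_us * 0.1 > afl->avg_exec_us) perf_score = 10; /* placeholder */

  }

  return perf_score;

}
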
diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
index dfc0cd97..1d5b169d 100644
--- a/src/afl-fuzz-stats.c
+++ b/src/afl-fuzz-stats.c
@@ -1022,7 +1022,11 @@ void show_init_stats(afl_state_t *afl) {
/* Let's keep things moving with slow binaries. */
- if (avg_us > 50000) {
+ if (unlikely(afl->fixed_seed)) {
+
+ afl->havoc_div = 1;
+
+ } else if (avg_us > 50000) {
afl->havoc_div = 10; /* 0-19 execs/sec */
@@ -1093,7 +1097,11 @@ void show_init_stats(afl_state_t *afl) {
random scheduler jitter is less likely to have any impact, and because
our patience is wearing thin =) */
- if (avg_us > 50000) {
+ if (unlikely(afl->fixed_seed)) {
+
+ afl->fsrv.exec_tmout = avg_us * 5 / 1000;
+
+ } else if (avg_us > 50000) {
afl->fsrv.exec_tmout = avg_us * 2 / 1000;
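
Taken together, the two afl-fuzz-stats.c hunks special-case fixed-seed runs before the speed-based tiers are consulted. The helper below condenses both if-chains into one function to show the resulting behaviour; it is only a sketch, assuming avg_us is the average execution time in microseconds, and the faster tiers that this diff view truncates are filled with placeholder values, not AFL++'s.

/* Condensed sketch of the timing heuristics patched above.  Only the
 * fixed-seed branch and the >50000us tier come from the diff; the final
 * else branch stands in for the tiers cut off in this view.            */
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

struct timing_sketch {

  u32 havoc_div;  /* divides the havoc stage budget for slow targets */
  u32 exec_tmout; /* per-execution timeout, in milliseconds          */

};

static void tune_for_target(struct timing_sketch *t, u64 avg_us,
                            int fixed_seed) {

  if (fixed_seed) {

    /* Fixed-seed (reproducible) runs: keep the full havoc budget and use
     * a roomy 5x-average timeout, presumably so scheduler jitter cannot
     * turn a deterministic run into spurious timeouts.                  */
    t->havoc_div = 1;
    t->exec_tmout = (u32)(avg_us * 5 / 1000);

  } else if (avg_us > 50000) {

    /* 0-19 execs/sec: very slow target (values taken from the diff). */
    t->havoc_div = 10;
    t->exec_tmout = (u32)(avg_us * 2 / 1000);

  } else {

    /* Faster tiers exist in the real code but are truncated in this
     * view; these numbers are placeholders, not AFL++'s.             */
    t->havoc_div = 1;
    t->exec_tmout = (u32)(avg_us * 2 / 1000);

  }

}

The net effect of the commit is that a fixed-seed session no longer has its havoc budget divided down or its timeout tightened based on target speed, which keeps runs with the same seed comparable.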