Diffstat (limited to 'src')
-rw-r--r--  src/afl-fuzz-queue.c |  2 +-
-rw-r--r--  src/afl-fuzz-stats.c | 12 ++++++++++--
2 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index 8c7bfc55..336b7f4f 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -502,7 +502,7 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
   // Longer execution time means longer work on the input, the deeper in
   // coverage, the better the fuzzing, right? -mh

-  if (afl->schedule >= RARE && likely(!afl->fixed_seed)) {
+  if (likely(afl->schedule < RARE) && likely(!afl->fixed_seed)) {

     if (q->exec_us * 0.1 > avg_exec_us) {

diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c
index dfc0cd97..1d5b169d 100644
--- a/src/afl-fuzz-stats.c
+++ b/src/afl-fuzz-stats.c
@@ -1022,7 +1022,11 @@ void show_init_stats(afl_state_t *afl) {

   /* Let's keep things moving with slow binaries. */

-  if (avg_us > 50000) {
+  if (unlikely(afl->fixed_seed)) {
+
+    afl->havoc_div = 1;
+
+  } else if (avg_us > 50000) {

     afl->havoc_div = 10; /* 0-19 execs/sec */

@@ -1093,7 +1097,11 @@ void show_init_stats(afl_state_t *afl) {
      random scheduler jitter is less likely to have any impact, and because
      our patience is wearing thin =) */

-  if (avg_us > 50000) {
+  if (unlikely(afl->fixed_seed)) {
+
+    afl->fsrv.exec_tmout = avg_us * 5 / 1000;
+
+  } else if (avg_us > 50000) {

     afl->fsrv.exec_tmout = avg_us * 2 / 1000;
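
For readers skimming the commit, the sketch below condenses the behaviour that the two afl-fuzz-stats.c hunks introduce: when a fixed seed is requested, the havoc divisor stays at 1 and the execution timeout is derived with the more generous 5x multiplier, regardless of how slow the target is. It uses a hypothetical demo_state_t in place of afl_state_t and keeps only the thresholds visible in this diff (the real show_init_stats() has further timing tiers), so it is illustrative rather than a drop-in excerpt.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant fields of afl_state_t. */
typedef struct {

  bool     fixed_seed;  /* true when a fixed RNG seed was requested */
  uint32_t havoc_div;   /* divisor applied to havoc stage cycle counts */
  uint32_t exec_tmout;  /* per-execution timeout in milliseconds */

} demo_state_t;

/* Mirrors the two patched branches of show_init_stats(): a fixed seed now
   takes priority over the slow-binary heuristics. The additional timing
   tiers of the real function are omitted here. */
static void init_timing(demo_state_t *st, uint64_t avg_us) {

  if (st->fixed_seed) {

    st->havoc_div = 1;                               /* never shorten havoc */

  } else if (avg_us > 50000) {

    st->havoc_div = 10;                              /* 0-19 execs/sec */

  } else {

    st->havoc_div = 1;                               /* fast target, full havoc */

  }

  if (st->fixed_seed) {

    st->exec_tmout = (uint32_t)(avg_us * 5 / 1000);  /* generous 5x timeout */

  } else if (avg_us > 50000) {

    st->exec_tmout = (uint32_t)(avg_us * 2 / 1000);  /* slow binary: 2x */

  } else {

    st->exec_tmout = (uint32_t)(avg_us * 5 / 1000);  /* fallback for this demo */

  }

}

int main(void) {

  demo_state_t st = {.fixed_seed = true};
  init_timing(&st, 60000);                           /* 60 ms average exec time */
  printf("havoc_div=%u exec_tmout=%u ms\n", (unsigned)st.havoc_div,
         (unsigned)st.exec_tmout);
  return 0;

}

With a 60 ms average execution time and a fixed seed, this prints havoc_div=1 and a 300 ms timeout, whereas the pre-patch code would have cut the havoc stage by 10x and used a 120 ms timeout; the intent of the change is that fixed-seed runs stay reproducible instead of being reshaped by per-machine timing.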