Diffstat (limited to 'src/afl-fuzz-queue.c')
-rw-r--r--  src/afl-fuzz-queue.c  25
1 file changed, 9 insertions(+), 16 deletions(-)
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index 0c472845..336b7f4f 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -138,9 +138,9 @@ static u8 check_if_text(struct queue_entry *q) {
     }
 
     // non-overlong 2-byte
-    if (((0xC2 <= buf[offset + 0] && buf[offset + 0] <= 0xDF) &&
-         (0x80 <= buf[offset + 1] && buf[offset + 1] <= 0xBF)) &&
-        len - offset > 1) {
+    if (len - offset > 1 &&
+        ((0xC2 <= buf[offset + 0] && buf[offset + 0] <= 0xDF) &&
+         (0x80 <= buf[offset + 1] && buf[offset + 1] <= 0xBF))) {
 
       offset += 2;
       utf8++;
@@ -230,7 +230,7 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
 
   } else {
 
-    afl->q_prev100 = afl->queue = afl->queue_top = q;
+    afl->queue = afl->queue_top = q;
 
   }
 
@@ -239,13 +239,6 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
 
   afl->cycles_wo_finds = 0;
 
-  if (!(afl->queued_paths % 100)) {
-
-    afl->q_prev100->next_100 = q;
-    afl->q_prev100 = q;
-
-  }
-
   struct queue_entry **queue_buf = afl_realloc(
       AFL_BUF_PARAM(queue), afl->queued_paths * sizeof(struct queue_entry *));
   if (unlikely(!queue_buf)) { PFATAL("alloc"); }
@@ -281,15 +274,15 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
 
 void destroy_queue(afl_state_t *afl) {
 
-  struct queue_entry *q = afl->queue, *n;
+  struct queue_entry *q;
+  u32 i;
 
-  while (q) {
+  for (i = 0; i < afl->queued_paths; i++) {
 
-    n = q->next;
+    q = afl->queue_buf[i];
 
     ck_free(q->fname);
     ck_free(q->trace_mini);
     ck_free(q);
-    q = n;
 
   }
 
@@ -509,7 +502,7 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
   // Longer execution time means longer work on the input, the deeper in
   // coverage, the better the fuzzing, right? -mh
 
-  if (afl->schedule >= RARE && likely(!afl->fixed_seed)) {
+  if (likely(afl->schedule < RARE) && likely(!afl->fixed_seed)) {
 
     if (q->exec_us * 0.1 > avg_exec_us) {
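
Note on the first hunk: moving the length guard in front of the byte comparisons matters because of C's short-circuit `&&` evaluation; once `len - offset > 1` fails, `buf[offset + 1]` is never read, whereas the old ordering read the bytes before checking how many remained. Below is a minimal standalone sketch of the same pattern, not AFL++ code: the helper name `is_2byte_utf8` and the sample buffer are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the fixed condition from check_if_text():
   the length guard is evaluated first, so the byte reads are skipped
   (short-circuited) when fewer than two bytes remain after `offset`. */
static int is_2byte_utf8(const uint8_t *buf, size_t len, size_t offset) {

  return len - offset > 1 &&
         ((0xC2 <= buf[offset + 0] && buf[offset + 0] <= 0xDF) &&
          (0x80 <= buf[offset + 1] && buf[offset + 1] <= 0xBF));

}

int main(void) {

  const uint8_t text[] = {0xC3, 0xA9};  /* "é" encoded as UTF-8 */

  /* Full sequence present: recognized as a non-overlong 2-byte character. */
  printf("%d\n", is_2byte_utf8(text, sizeof(text), 0));  /* prints 1 */

  /* Only one byte left: the guard fails before buf[offset + 1] is read. */
  printf("%d\n", is_2byte_utf8(text, sizeof(text), 1));  /* prints 0 */

  return 0;

}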