author     vanhauser-thc <vh@thc.org>  2024-06-07 11:16:42 +0200
committer  vanhauser-thc <vh@thc.org>  2024-06-07 11:16:42 +0200
commit     fe36ceaa552569be66447aba11885259ca700d85
tree       db9a9e26b64d6002092f44d6852478c9f09d80b4 /src
parent     0618bfd4ae6a31ce44fcad13bbf6f5a41bb265d1
download   afl++-fe36ceaa552569be66447aba11885259ca700d85.tar.gz
minor testcache optimizations
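The change touches three things: new queue entries now start from a neutral weight (1.0) and perf_score (100); queue_testcase_get() and queue_testcase_store_mem() skip caching for entries whose weight is below average (< 1.0), since those are rarely scheduled and can simply be re-read from disk; and the retake paths avoid realloc calls that would not change the allocation meaningfully. Below is a minimal sketch of the weight-gated admission idea; entry_t, cache_t and cache_store are hypothetical simplifications for illustration, not AFL++'s real types or API:

/* Sketch of weight-gated cache admission, assuming a simplified
   cache_t/entry_t model (hypothetical, not AFL++'s real types). */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {

  double         weight;  /* scheduling weight; 1.0 means "average" */
  size_t         len;
  unsigned char *buf;     /* cached copy, NULL when not cached */

} entry_t;

typedef struct {

  size_t size, max_size;  /* bytes used / byte budget */

} cache_t;

/* Admit only entries that are at least average-interesting and that
   still fit in the budget; everything else is re-read from disk later. */
static int cache_store(cache_t *c, entry_t *q, const unsigned char *in) {

  if (q->weight < 1.0 || c->size + q->len >= c->max_size) { return 0; }

  q->buf = (unsigned char *)malloc(q->len);
  if (!q->buf) { return 0; }
  memcpy(q->buf, in, q->len);
  c->size += q->len;
  return 1;

}

int main(void) {

  cache_t       cache = {0, 1 << 20};
  unsigned char data[] = "ABCD";
  entry_t       hot = {1.5, sizeof(data), NULL};   /* above average */
  entry_t       cold = {0.4, sizeof(data), NULL};  /* below average */

  printf("hot cached:  %d\n", cache_store(&cache, &hot, data));   /* 1 */
  printf("cold cached: %d\n", cache_store(&cache, &cold, data));  /* 0 */
  free(hot.buf);
  return 0;

}

The trade-off: a below-average entry costs one extra file read when it is eventually fuzzed, but the cache budget is kept for the entries the scheduler actually favors.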
Diffstat (limited to 'src')
-rw-r--r--  src/afl-fuzz-queue.c | 197
1 file changed, 102 insertions(+), 95 deletions(-)
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index cbdfd5b0..f4cb930d 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -621,6 +621,8 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
   q->trace_mini = NULL;
   q->testcase_buf = NULL;
   q->mother = afl->queue_cur;
+  q->weight = 1.0;
+  q->perf_score = 100;
 
 #ifdef INTROSPECTION
   q->bitsmap_size = afl->bitsmap_size;
@@ -1226,9 +1228,11 @@ inline void queue_testcase_retake(afl_state_t *afl, struct queue_entry *q,
 
     u32 len = q->len;
 
-    if (len != old_len) {
+    // only realloc if necessary or useful
+    // (a custom trim can make the testcase larger)
+    if (unlikely(len > old_len || len < old_len + 1024)) {
 
-      afl->q_testcase_cache_size = afl->q_testcase_cache_size + len - old_len;
+      afl->q_testcase_cache_size += len - old_len;
       q->testcase_buf = (u8 *)realloc(q->testcase_buf, len);
 
       if (unlikely(!q->testcase_buf)) {
@@ -1257,41 +1261,48 @@ inline void queue_testcase_retake_mem(afl_state_t *afl, struct queue_entry *q,
 
   if (likely(q->testcase_buf)) {
 
-    u32 is_same = in == q->testcase_buf;
+    if (likely(in != q->testcase_buf)) {
 
-    if (likely(len != old_len)) {
+      // only realloc if we save memory
+      if (unlikely(len < old_len + 1024)) {
 
-      u8 *ptr = (u8 *)realloc(q->testcase_buf, len);
+        u8 *ptr = (u8 *)realloc(q->testcase_buf, len);
 
-      if (likely(ptr)) {
+        if (likely(ptr)) {
 
-        q->testcase_buf = ptr;
-        afl->q_testcase_cache_size = afl->q_testcase_cache_size + len - old_len;
+          q->testcase_buf = ptr;
+          afl->q_testcase_cache_size += len - old_len;
+
+        }
 
       }
 
-    }
+      memcpy(q->testcase_buf, in, len);
 
-    if (unlikely(!is_same)) { memcpy(q->testcase_buf, in, len); }
+    }
 
   }
 
 }
 
 /* Returns the testcase buf from the file behind this queue entry.
-   Increases the refcount. */
+    Increases the refcount. */
 
 inline u8 *queue_testcase_get(afl_state_t *afl, struct queue_entry *q) {
 
-  u32 len = q->len;
+  if (likely(q->testcase_buf)) { return q->testcase_buf; }
+
+  u32    len = q->len;
+  double weight = q->weight;
 
-  /* first handle if no testcase cache is configured */
+  // first handle if no testcase cache is configured, or if the
+  // weighting of the testcase is below average.
 
-  if (unlikely(!afl->q_testcase_max_cache_size)) {
+  if (unlikely(weight < 1.0 || !afl->q_testcase_max_cache_size)) {
 
     u8 *buf;
 
-    if (unlikely(q == afl->queue_cur)) {
+    if (likely(q == afl->queue_cur)) {
 
       buf = (u8 *)afl_realloc((void **)&afl->testcase_buf, len);
 
@@ -1317,118 +1328,113 @@ inline u8 *queue_testcase_get(afl_state_t *afl, struct queue_entry *q) {
 
   }
 
-  /* now handle the testcase cache */
+  /* now handle the testcase cache and we know it is an interesting one */
 
-  if (unlikely(!q->testcase_buf)) {
-
-    /* Buf not cached, let's load it */
-    u32        tid = afl->q_testcase_max_cache_count;
-    static u32 do_once = 0;  // because even threaded we would want this. WIP
-
-    while (unlikely(
-        (afl->q_testcase_cache_size + len >= afl->q_testcase_max_cache_size &&
-         afl->q_testcase_cache_count > 1) ||
-        afl->q_testcase_cache_count >= afl->q_testcase_max_cache_entries - 1)) {
+  /* Buf not cached, let's load it */
+  u32        tid = afl->q_testcase_max_cache_count;
+  static u32 do_once = 0;  // because even threaded we would want this. WIP
 
-      /* We want a max number of entries to the cache that we learn.
-         Very simple: once the cache is filled by size - that is the max. */
+  while (unlikely(
+      (afl->q_testcase_cache_size + len >= afl->q_testcase_max_cache_size &&
+       afl->q_testcase_cache_count > 1) ||
+      afl->q_testcase_cache_count >= afl->q_testcase_max_cache_entries - 1)) {
 
-      if (unlikely(afl->q_testcase_cache_size + len >=
-                       afl->q_testcase_max_cache_size &&
-                   (afl->q_testcase_cache_count <
-                        afl->q_testcase_max_cache_entries &&
-                    afl->q_testcase_max_cache_count <
-                        afl->q_testcase_max_cache_entries) &&
-                   !do_once)) {
+    /* We want a max number of entries to the cache that we learn.
+       Very simple: once the cache is filled by size - that is the max. */
 
-        if (afl->q_testcase_max_cache_count > afl->q_testcase_cache_count) {
+    if (unlikely(
+            afl->q_testcase_cache_size + len >=
+                afl->q_testcase_max_cache_size &&
+            (afl->q_testcase_cache_count < afl->q_testcase_max_cache_entries &&
+             afl->q_testcase_max_cache_count <
+                 afl->q_testcase_max_cache_entries) &&
+            !do_once)) {
 
-          afl->q_testcase_max_cache_entries =
-              afl->q_testcase_max_cache_count + 1;
-
-        } else {
+      if (afl->q_testcase_max_cache_count > afl->q_testcase_cache_count) {
 
-          afl->q_testcase_max_cache_entries = afl->q_testcase_cache_count + 1;
+        afl->q_testcase_max_cache_entries = afl->q_testcase_max_cache_count + 1;
 
-        }
+      } else {
 
-        do_once = 1;
-        // release unneeded memory
-        afl->q_testcase_cache = (struct queue_entry **)ck_realloc(
-            afl->q_testcase_cache,
-            (afl->q_testcase_max_cache_entries + 1) * sizeof(size_t));
+        afl->q_testcase_max_cache_entries = afl->q_testcase_cache_count + 1;
 
       }
 
-      /* Cache full. We neet to evict one or more to map one.
-         Get a random one which is not in use */
+      do_once = 1;
+      // release unneeded memory
+      afl->q_testcase_cache = (struct queue_entry **)ck_realloc(
+          afl->q_testcase_cache,
+          (afl->q_testcase_max_cache_entries + 1) * sizeof(size_t));
 
-      do {
+    }
 
-        // if the cache (MB) is not enough for the queue then this gets
-        // undesirable because q_testcase_max_cache_count grows sometimes
-        // although the number of items in the cache will not change hence
-        // more and more loops
-        tid = rand_below(afl, afl->q_testcase_max_cache_count);
+    /* Cache full. We neet to evict one or more to map one.
+       Get a random one which is not in use */
 
-      } while (afl->q_testcase_cache[tid] == NULL ||
+    do {
 
-               afl->q_testcase_cache[tid] == afl->queue_cur);
+      // if the cache (MB) is not enough for the queue then this gets
+      // undesirable because q_testcase_max_cache_count grows sometimes
+      // although the number of items in the cache will not change hence
+      // more and more loops
+      tid = rand_below(afl, afl->q_testcase_max_cache_count);
 
-      struct queue_entry *old_cached = afl->q_testcase_cache[tid];
-      free(old_cached->testcase_buf);
-      old_cached->testcase_buf = NULL;
-      afl->q_testcase_cache_size -= old_cached->len;
-      afl->q_testcase_cache[tid] = NULL;
-      --afl->q_testcase_cache_count;
-      ++afl->q_testcase_evictions;
-      if (tid < afl->q_testcase_smallest_free)
-        afl->q_testcase_smallest_free = tid;
+    } while (afl->q_testcase_cache[tid] == NULL ||
 
-    }
+             afl->q_testcase_cache[tid] == afl->queue_cur);
 
-    if (unlikely(tid >= afl->q_testcase_max_cache_entries)) {
+    struct queue_entry *old_cached = afl->q_testcase_cache[tid];
+    free(old_cached->testcase_buf);
+    old_cached->testcase_buf = NULL;
+    afl->q_testcase_cache_size -= old_cached->len;
+    afl->q_testcase_cache[tid] = NULL;
+    --afl->q_testcase_cache_count;
+    ++afl->q_testcase_evictions;
+    if (tid < afl->q_testcase_smallest_free)
+      afl->q_testcase_smallest_free = tid;
 
-      // uh we were full, so now we have to search from start
-      tid = afl->q_testcase_smallest_free;
+  }
 
-    }
+  if (unlikely(tid >= afl->q_testcase_max_cache_entries)) {
 
-    // we need this while loop in case there were ever previous evictions but
-    // not in this call.
-    while (unlikely(afl->q_testcase_cache[tid] != NULL))
-      ++tid;
+    // uh we were full, so now we have to search from start
+    tid = afl->q_testcase_smallest_free;
 
-    /* Map the test case into memory. */
+  }
 
-    int fd = open((char *)q->fname, O_RDONLY);
+  // we need this while loop in case there were ever previous evictions but
+  // not in this call.
+  while (unlikely(afl->q_testcase_cache[tid] != NULL))
+    ++tid;
 
-    if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", (char *)q->fname); }
+  /* Map the test case into memory. */
 
-    q->testcase_buf = (u8 *)malloc(len);
+  int fd = open((char *)q->fname, O_RDONLY);
 
-    if (unlikely(!q->testcase_buf)) {
+  if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", (char *)q->fname); }
 
-      PFATAL("Unable to malloc '%s' with len %u", (char *)q->fname, len);
+  q->testcase_buf = (u8 *)malloc(len);
 
-    }
+  if (unlikely(!q->testcase_buf)) {
 
-    ck_read(fd, q->testcase_buf, len, q->fname);
-    close(fd);
+    PFATAL("Unable to malloc '%s' with len %u", (char *)q->fname, len);
+
+  }
 
-    /* Register testcase as cached */
-    afl->q_testcase_cache[tid] = q;
-    afl->q_testcase_cache_size += len;
-    ++afl->q_testcase_cache_count;
-    if (likely(tid >= afl->q_testcase_max_cache_count)) {
+  ck_read(fd, q->testcase_buf, len, q->fname);
+  close(fd);
 
-      afl->q_testcase_max_cache_count = tid + 1;
+  /* Register testcase as cached */
+  afl->q_testcase_cache[tid] = q;
+  afl->q_testcase_cache_size += len;
+  ++afl->q_testcase_cache_count;
+  if (likely(tid >= afl->q_testcase_max_cache_count)) {
 
-    } else if (unlikely(tid == afl->q_testcase_smallest_free)) {
+    afl->q_testcase_max_cache_count = tid + 1;
 
-      afl->q_testcase_smallest_free = tid + 1;
+  } else if (unlikely(tid == afl->q_testcase_smallest_free)) {
 
-    }
+    afl->q_testcase_smallest_free = tid + 1;
 
   }
 
@@ -1443,12 +1449,13 @@ inline void queue_testcase_store_mem(afl_state_t *afl, struct queue_entry *q,
 
   u32 len = q->len;
 
-  if (unlikely(afl->q_testcase_cache_size + len >=
+  if (unlikely(q->weight < 1.0 ||
+               afl->q_testcase_cache_size + len >=
                    afl->q_testcase_max_cache_size ||
                afl->q_testcase_cache_count >=
                    afl->q_testcase_max_cache_entries - 1)) {
 
-    // no space? will be loaded regularly later.
+    // no space or uninteresting? will be loaded regularly later.
     return;
 
   }
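One design note on the retake paths: the new comments state the intent as reallocating only when "necessary or useful", i.e. growing on demand but not shrinking for trivial savings, because a custom trim or mutation may change the length by only a few bytes. A standalone sketch of that intent, using an assumed 1 KiB shrink threshold and a hypothetical helper name (resize_lazily is not AFL++ API):

#include <stdlib.h>

/* Grow whenever the new length demands it; shrink only when doing so
   returns at least 1 KiB to the allocator. Otherwise keep the slightly
   oversized buffer and avoid realloc/copy churn. On realloc failure
   the old buffer is returned unchanged. */
static unsigned char *resize_lazily(unsigned char *buf, size_t old_len,
                                    size_t new_len) {

  if (new_len > old_len || old_len - new_len >= 1024) {

    unsigned char *ptr = (unsigned char *)realloc(buf, new_len);
    if (ptr) { return ptr; }

  }

  return buf;  /* small shrink: not worth a realloc */

}

Since testcases are retaken after every trim, skipping the no-win realloc calls removes a steady stream of allocator traffic from the fuzzing hot loop.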