author     van Hauser <vh@thc.org>   2020-09-29 15:02:57 +0200
committer  van Hauser <vh@thc.org>   2020-09-29 15:02:57 +0200
commit     383cd487a2c28012c80341f8517e473120af4d19 (patch)
tree       64e2367e898abe6855796303e95689cf2f3cd57c /src
parent     fe08482c1b2269289bfedea9f0ef2b6721d18221 (diff)
download   afl++-383cd487a2c28012c80341f8517e473120af4d19.tar.gz
small improvements to Marcel's patch, fix laf-intel + redqueen crashes
Diffstat (limited to 'src')
-rw-r--r--  src/afl-fuzz-bitmap.c  13
-rw-r--r--  src/afl-fuzz-init.c     4
-rw-r--r--  src/afl-fuzz-queue.c   30
-rw-r--r--  src/afl-fuzz.c          2
4 files changed, 27 insertions(+), 22 deletions(-)
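
The recurring change in the diffs below is an indexing refactor: instead of recomputing cksum % n_fuzz_size at every n_fuzz lookup, each queue entry now caches its table index once in a new n_fuzz_entry field, and the table size becomes the compile-time constant N_FUZZ_SIZE. A minimal sketch of the pattern, with the struct abbreviated and the constant's value assumed (check AFL++'s config.h for the real definition):

#include <stdint.h>

#define N_FUZZ_SIZE (1U << 21)           /* assumed value; see config.h */

struct queue_entry {

  uint64_t exec_cksum;                   /* hash of the coverage bitmap */
  uint32_t n_fuzz_entry;                 /* cached exec_cksum % N_FUZZ_SIZE */
  /* ... remaining fields elided ... */

};

/* Compute the index once, when the entry is created ... */
static void set_n_fuzz_index(struct queue_entry *q, uint64_t cksum) {

  q->exec_cksum = cksum;
  q->n_fuzz_entry = cksum % N_FUZZ_SIZE;

}

/* ... so every later lookup is a plain array access, no re-hashing. */
static uint32_t hits(const uint32_t *n_fuzz, const struct queue_entry *q) {

  return n_fuzz[q->n_fuzz_entry];

}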
diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c
index 64de86a2..a22223b9 100644
--- a/src/afl-fuzz-bitmap.c
+++ b/src/afl-fuzz-bitmap.c
@@ -556,8 +556,8 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
/* Saturated increment */
- if (afl->n_fuzz[cksum % n_fuzz_size] < 0xFFFFFFFF)
- afl->n_fuzz[cksum % n_fuzz_size]++;
+ if (afl->n_fuzz[cksum % N_FUZZ_SIZE] < 0xFFFFFFFF)
+ afl->n_fuzz[cksum % N_FUZZ_SIZE]++;
}
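
The guard in the hunk above is a saturating counter: hit counts cap at 0xFFFFFFFF instead of wrapping back to zero for very hot paths. A standalone sketch of the same idiom:

#include <stdint.h>

/* Increment a 32-bit hit counter, saturating at UINT32_MAX so a
   heavily exercised path can never overflow and look "rare" again. */
static inline void saturated_inc(uint32_t *counter) {

  if (*counter < UINT32_MAX) { (*counter)++; }

}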
@@ -597,10 +597,15 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
if (cksum)
afl->queue_top->exec_cksum = cksum;
else
- afl->queue_top->exec_cksum =
+ cksum = afl->queue_top->exec_cksum =
hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
- afl->n_fuzz[cksum % n_fuzz_size] = 1;
+ if (afl->schedule >= FAST && afl->schedule <= RARE) {
+
+ afl->queue_top->n_fuzz_entry = cksum % N_FUZZ_SIZE;
+ afl->n_fuzz[afl->queue_top->n_fuzz_entry] = 1;
+
+ }
/* Try to calibrate inline; this also calls update_bitmap_score() when
successful. */
diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c
index b825837f..65478a78 100644
--- a/src/afl-fuzz-init.c
+++ b/src/afl-fuzz-init.c
@@ -732,8 +732,8 @@ void read_testcases(afl_state_t *afl, u8 *directory) {
if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE)) {
u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
-
- afl->n_fuzz[cksum % n_fuzz_size] = 1;
+ afl->queue_top->n_fuzz_entry = cksum % N_FUZZ_SIZE;
+ afl->n_fuzz[afl->queue_top->n_fuzz_entry] = 1;
}
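
New entries are seeded with 1 rather than 0 because calculate_score() in afl-fuzz-queue.c (below) feeds the counter straight into log2(), and log2(0) is negative infinity; starting at 1 makes the baseline term a clean 0. A tiny illustration:

#include <math.h>
#include <stdio.h>

int main(void) {

  printf("log2(0) = %f\n", log2(0));   /* -inf: would poison the mean  */
  printf("log2(1) = %f\n", log2(1));   /* 0.0: neutral starting energy */
  return 0;

}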
diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c
index dfabba7b..0d7d0314 100644
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -308,9 +308,9 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
u64 fuzz_p2;
if (unlikely(afl->schedule >= FAST && afl->schedule < RARE))
- fuzz_p2 = 0; // Skip the fuzz_p2 comparison
+ fuzz_p2 = 0; // Skip the fuzz_p2 comparison
else if (unlikely(afl->schedule == RARE))
- fuzz_p2 = next_pow2(afl->n_fuzz[q->exec_cksum % n_fuzz_size]);
+ fuzz_p2 = next_pow2(afl->n_fuzz[q->n_fuzz_entry]);
else
fuzz_p2 = q->fuzz_level;
@@ -336,7 +336,8 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
u64 top_rated_fav_factor;
u64 top_rated_fuzz_p2;
if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE))
- top_rated_fuzz_p2 = next_pow2(afl->n_fuzz[afl->top_rated[i]->exec_cksum % n_fuzz_size]);
+ top_rated_fuzz_p2 =
+ next_pow2(afl->n_fuzz[afl->top_rated[i]->n_fuzz_entry]);
else
top_rated_fuzz_p2 = afl->top_rated[i]->fuzz_level;
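
next_pow2() coarsens the raw hit count into power-of-two buckets, so two entries whose counts land in the same bucket tie on fuzz_p2 and the comparison falls through to speed and size. A stand-in for the helper (AFL++ ships its own; this is illustrative only):

#include <stdint.h>

/* Round v up to the next power of two, e.g. 5 -> 8, 8 -> 8. */
static uint64_t next_pow2_sketch(uint64_t v) {

  uint64_t r = 1;
  while (r < v) r <<= 1;
  return r;

}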
@@ -607,11 +608,10 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
}
- u32 n_paths;
- double factor = 1.0;
+ u32 n_paths;
+ double factor = 1.0;
long double fuzz_mu;
-
switch (afl->schedule) {
case EXPLORE:
@@ -634,7 +634,7 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
struct queue_entry *queue_it = afl->queue;
while (queue_it) {
- fuzz_mu += log2(afl->n_fuzz[q->exec_cksum % n_fuzz_size]);
+ fuzz_mu += log2(afl->n_fuzz[q->n_fuzz_entry]);
n_paths++;
queue_it = queue_it->next;
@@ -645,7 +645,7 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
fuzz_mu = fuzz_mu / n_paths;
- if (log2(afl->n_fuzz[q->exec_cksum % n_fuzz_size]) > fuzz_mu) {
+ if (log2(afl->n_fuzz[q->n_fuzz_entry]) > fuzz_mu) {
/* Never skip favourites */
if (!q->favored) factor = 0;
@@ -660,7 +660,7 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
// Don't modify unfuzzed seeds
if (q->fuzz_level == 0) break;
- switch ((u32)log2(afl->n_fuzz[q->exec_cksum % n_fuzz_size])) {
+ switch ((u32)log2(afl->n_fuzz[q->n_fuzz_entry])) {
case 0 ... 1:
factor = 4;
@@ -691,17 +691,17 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
}
- if (q->favored)
- factor *= 1.15;
+ if (q->favored) factor *= 1.15;
break;
case LIN:
- factor = q->fuzz_level / (afl->n_fuzz[q->exec_cksum % n_fuzz_size] + 1);
+ factor = q->fuzz_level / (afl->n_fuzz[q->n_fuzz_entry] + 1);
break;
case QUAD:
- factor = q->fuzz_level * q->fuzz_level / (afl->n_fuzz[q->exec_cksum % n_fuzz_size] + 1);
+ factor =
+ q->fuzz_level * q->fuzz_level / (afl->n_fuzz[q->n_fuzz_entry] + 1);
break;
case MMOPT:
@@ -726,8 +726,8 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
perf_score += (q->tc_ref * 10);
// the more often fuzz result paths are equal to this queue entry,
// reduce its value
- perf_score *=
- (1 - (double)((double)afl->n_fuzz[q->exec_cksum % n_fuzz_size] / (double)afl->fsrv.total_execs));
+ perf_score *= (1 - (double)((double)afl->n_fuzz[q->n_fuzz_entry] /
+ (double)afl->fsrv.total_execs));
break;
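
In the COE branch of calculate_score above, the intent is to average log2(hit count) over every queue entry and zero the energy of any non-favored seed whose own count sits above that mean. A simplified sketch of the decision, with a plain index array standing in for the linked queue (names and signature are hypothetical):

#include <math.h>
#include <stddef.h>
#include <stdint.h>

/* COE-style skip decision (sketch, assumes n > 0): entries hit more
   often than the queue-wide log2 mean get factor 0 -- unless favored. */
static double coe_factor(const uint32_t *n_fuzz, const uint32_t *idx,
                         size_t n, size_t self, int favored) {

  long double fuzz_mu = 0.0;

  for (size_t i = 0; i < n; i++)
    fuzz_mu += log2l(n_fuzz[idx[i]]);

  fuzz_mu /= n;

  /* Above-average paths are skipped, but never the favorites. */
  if (log2l(n_fuzz[idx[self]]) > fuzz_mu && !favored) return 0.0;

  return 1.0;

}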
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index 889f753d..273d1c14 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -939,7 +939,7 @@ int main(int argc, char **argv_orig, char **envp) {
/* Dynamically allocate memory for AFLFast schedules */
if (afl->schedule >= FAST && afl->schedule <= RARE) {
- afl->n_fuzz = ck_alloc(n_fuzz_size * sizeof(u32));
+ afl->n_fuzz = ck_alloc(N_FUZZ_SIZE * sizeof(u32));
}
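
With the size now a compile-time constant, the table is a single zeroed allocation of N_FUZZ_SIZE u32 counters; ck_alloc is AFL++'s checked, zero-initializing allocator. A plain-C equivalent, with the constant's value assumed:

#include <stdint.h>
#include <stdlib.h>

#define N_FUZZ_SIZE (1U << 21)  /* assumed value; see AFL++'s config.h */

/* calloc matches ck_alloc's zero-initialization guarantee. */
static uint32_t *alloc_n_fuzz(void) {

  return calloc(N_FUZZ_SIZE, sizeof(uint32_t));

}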