From eda1ee0807fdc17e52b44202e58da70ada92c4d2 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sat, 27 Mar 2021 12:24:18 +0100 Subject: restructure havoc --- src/afl-fuzz-one.c | 662 +++++++++++++++++++++++++++++------------------------ 1 file changed, 367 insertions(+), 295 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 4e8154cd..e0d961ba 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1997,16 +1997,19 @@ havoc_stage: /* We essentially just do several thousand runs (depending on perf_score) where we take the input file and make random stacked tweaks. */ +#define MAX_HAVOC_ENTRY 59 /* 55 to 60 */ + u32 r_max, r; - r_max = 15 + ((afl->extras_cnt + afl->a_extras_cnt) ? 2 : 0); + r_max = (MAX_HAVOC_ENTRY + 1) + (afl->extras_cnt ? 4 : 0) + + (afl->a_extras_cnt ? 2 : 0); if (unlikely(afl->expand_havoc && afl->ready_for_splicing_count > 1)) { /* add expensive havoc cases here, they are activated after a full cycle without finds happened */ - r_max++; + r_max += 4; } @@ -2015,7 +2018,7 @@ havoc_stage: /* add expensive havoc cases here if there is no findings in the last 5s */ - r_max++; + r_max += 4; } @@ -2069,7 +2072,7 @@ havoc_stage: switch ((r = rand_below(afl, r_max))) { - case 0: + case 0 ... 3: { /* Flip a single bit somewhere. Spooky! */ @@ -2080,7 +2083,9 @@ havoc_stage: FLIP_BIT(out_buf, rand_below(afl, temp_len << 3)); break; - case 1: + } + + case 4 ... 7: { /* Set byte to interesting value. */ @@ -2092,63 +2097,77 @@ havoc_stage: interesting_8[rand_below(afl, sizeof(interesting_8))]; break; - case 2: + } + + case 8 ... 9: { /* Set word to interesting value, randomly choosing endian. */ if (temp_len < 2) { break; } - if (rand_below(afl, 2)) { - #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING16"); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING16"); + strcat(afl->mutation, afl->m_tmp); #endif - *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) = - interesting_16[rand_below(afl, sizeof(interesting_16) >> 1)]; + *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) = + interesting_16[rand_below(afl, sizeof(interesting_16) >> 1)]; - } else { + break; + + } + + case 10 ... 11: { + + /* Set word to interesting value, randomly choosing endian. */ + + if (temp_len < 2) { break; } #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING16BE"); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING16BE"); + strcat(afl->mutation, afl->m_tmp); #endif - *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) = SWAP16( - interesting_16[rand_below(afl, sizeof(interesting_16) >> 1)]); - - } + *(u16 *)(out_buf + rand_below(afl, temp_len - 1)) = SWAP16( + interesting_16[rand_below(afl, sizeof(interesting_16) >> 1)]); break; - case 3: + } + + case 12 ... 13: { /* Set dword to interesting value, randomly choosing endian. */ if (temp_len < 4) { break; } - if (rand_below(afl, 2)) { - #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING32"); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING32"); + strcat(afl->mutation, afl->m_tmp); #endif - *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) = - interesting_32[rand_below(afl, sizeof(interesting_32) >> 2)]; + *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) = + interesting_32[rand_below(afl, sizeof(interesting_32) >> 2)]; - } else { + break; + + } + + case 14 ... 
15: { + + /* Set dword to interesting value, randomly choosing endian. */ + + if (temp_len < 4) { break; } #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING32BE"); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " INTERESTING32BE"); + strcat(afl->mutation, afl->m_tmp); #endif - *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) = SWAP32( - interesting_32[rand_below(afl, sizeof(interesting_32) >> 2)]); - - } + *(u32 *)(out_buf + rand_below(afl, temp_len - 3)) = SWAP32( + interesting_32[rand_below(afl, sizeof(interesting_32) >> 2)]); break; - case 4: + } + + case 16 ... 19: { /* Randomly subtract from byte. */ @@ -2159,7 +2178,9 @@ havoc_stage: out_buf[rand_below(afl, temp_len)] -= 1 + rand_below(afl, ARITH_MAX); break; - case 5: + } + + case 20 ... 23: { /* Randomly add to byte. */ @@ -2170,139 +2191,165 @@ havoc_stage: out_buf[rand_below(afl, temp_len)] += 1 + rand_below(afl, ARITH_MAX); break; - case 6: + } + + case 24 ... 25: { - /* Randomly subtract from word, random endian. */ + /* Randomly subtract from word, little endian. */ if (temp_len < 2) { break; } - if (rand_below(afl, 2)) { - - u32 pos = rand_below(afl, temp_len - 1); + u32 pos = rand_below(afl, temp_len - 1); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16_-%u", pos); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16_-%u", pos); + strcat(afl->mutation, afl->m_tmp); #endif - *(u16 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); + *(u16 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); - } else { + break; + + } + + case 26 ... 27: { + + /* Randomly subtract from word, big endian. */ - u32 pos = rand_below(afl, temp_len - 1); - u16 num = 1 + rand_below(afl, ARITH_MAX); + if (temp_len < 2) { break; } + + u32 pos = rand_below(afl, temp_len - 1); + u16 num = 1 + rand_below(afl, ARITH_MAX); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16_BE-%u_%u", pos, - num); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16_BE-%u_%u", pos, + num); + strcat(afl->mutation, afl->m_tmp); #endif - *(u16 *)(out_buf + pos) = - SWAP16(SWAP16(*(u16 *)(out_buf + pos)) - num); - - } + *(u16 *)(out_buf + pos) = + SWAP16(SWAP16(*(u16 *)(out_buf + pos)) - num); break; - case 7: + } - /* Randomly add to word, random endian. */ + case 28 ... 29: { - if (temp_len < 2) { break; } + /* Randomly add to word, little endian. */ - if (rand_below(afl, 2)) { + if (temp_len < 2) { break; } - u32 pos = rand_below(afl, temp_len - 1); + u32 pos = rand_below(afl, temp_len - 1); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16+-%u", pos); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16+-%u", pos); + strcat(afl->mutation, afl->m_tmp); #endif - *(u16 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); + *(u16 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); - } else { + break; + + } - u32 pos = rand_below(afl, temp_len - 1); - u16 num = 1 + rand_below(afl, ARITH_MAX); + case 30 ... 31: { + + /* Randomly add to word, big endian. 
*/ + + if (temp_len < 2) { break; } + + u32 pos = rand_below(afl, temp_len - 1); + u16 num = 1 + rand_below(afl, ARITH_MAX); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16+BE-%u_%u", pos, - num); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH16+BE-%u_%u", pos, + num); + strcat(afl->mutation, afl->m_tmp); #endif - *(u16 *)(out_buf + pos) = - SWAP16(SWAP16(*(u16 *)(out_buf + pos)) + num); - - } + *(u16 *)(out_buf + pos) = + SWAP16(SWAP16(*(u16 *)(out_buf + pos)) + num); break; - case 8: + } + + case 32 ... 33: { - /* Randomly subtract from dword, random endian. */ + /* Randomly subtract from dword, little endian. */ if (temp_len < 4) { break; } - if (rand_below(afl, 2)) { - - u32 pos = rand_below(afl, temp_len - 3); + u32 pos = rand_below(afl, temp_len - 3); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32_-%u", pos); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32_-%u", pos); + strcat(afl->mutation, afl->m_tmp); #endif - *(u32 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); + *(u32 *)(out_buf + pos) -= 1 + rand_below(afl, ARITH_MAX); - } else { + break; + + } + + case 34 ... 35: { + + /* Randomly subtract from dword, big endian. */ - u32 pos = rand_below(afl, temp_len - 3); - u32 num = 1 + rand_below(afl, ARITH_MAX); + if (temp_len < 4) { break; } + + u32 pos = rand_below(afl, temp_len - 3); + u32 num = 1 + rand_below(afl, ARITH_MAX); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32_BE-%u-%u", pos, - num); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32_BE-%u-%u", pos, + num); + strcat(afl->mutation, afl->m_tmp); #endif - *(u32 *)(out_buf + pos) = - SWAP32(SWAP32(*(u32 *)(out_buf + pos)) - num); - - } + *(u32 *)(out_buf + pos) = + SWAP32(SWAP32(*(u32 *)(out_buf + pos)) - num); break; - case 9: + } - /* Randomly add to dword, random endian. */ + case 36 ... 37: { - if (temp_len < 4) { break; } + /* Randomly add to dword, little endian. */ - if (rand_below(afl, 2)) { + if (temp_len < 4) { break; } - u32 pos = rand_below(afl, temp_len - 3); + u32 pos = rand_below(afl, temp_len - 3); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32+-%u", pos); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32+-%u", pos); + strcat(afl->mutation, afl->m_tmp); #endif - *(u32 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); + *(u32 *)(out_buf + pos) += 1 + rand_below(afl, ARITH_MAX); - } else { + break; + + } - u32 pos = rand_below(afl, temp_len - 3); - u32 num = 1 + rand_below(afl, ARITH_MAX); + case 38 ... 39: { + + /* Randomly add to dword, big endian. */ + + if (temp_len < 4) { break; } + + u32 pos = rand_below(afl, temp_len - 3); + u32 num = 1 + rand_below(afl, ARITH_MAX); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32+BE-%u-%u", pos, - num); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " ARITH32+BE-%u-%u", pos, + num); + strcat(afl->mutation, afl->m_tmp); #endif - *(u32 *)(out_buf + pos) = - SWAP32(SWAP32(*(u32 *)(out_buf + pos)) + num); - - } + *(u32 *)(out_buf + pos) = + SWAP32(SWAP32(*(u32 *)(out_buf + pos)) + num); break; - case 10: + } + + case 40 ... 43: { /* Just set a random byte to a random value. Because, why not. 
We use XOR with 1-255 to eliminate the @@ -2315,59 +2362,58 @@ havoc_stage: out_buf[rand_below(afl, temp_len)] ^= 1 + rand_below(afl, 255); break; - case 11 ... 12: { - - /* Delete bytes. We're making this a bit more likely - than insertion (the next option) in hopes of keeping - files reasonably small. */ - - u32 del_from, del_len; + } - if (temp_len < 2) { break; } + case 44 ... 46: { - /* Don't delete too much. */ + if (temp_len + HAVOC_BLK_XL < MAX_FILE) { - del_len = choose_block_len(afl, temp_len - 1); + /* Clone bytes. */ - del_from = rand_below(afl, temp_len - del_len + 1); + u32 clone_len = choose_block_len(afl, temp_len); + u32 clone_from = rand_below(afl, temp_len - clone_len + 1); + u32 clone_to = rand_below(afl, temp_len); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " DEL-%u-%u", del_from, - del_len); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " CLONE-%s-%u-%u-%u", + actually_clone ? "clone" : "insert", clone_from, clone_to, + clone_len); + strcat(afl->mutation, afl->m_tmp); #endif - memmove(out_buf + del_from, out_buf + del_from + del_len, - temp_len - del_from - del_len); + u8 *new_buf = + afl_realloc(AFL_BUF_PARAM(out_scratch), temp_len + clone_len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } - temp_len -= del_len; + /* Head */ - break; + memcpy(new_buf, out_buf, clone_to); - } + /* Inserted part */ - case 13: + memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); - if (temp_len + HAVOC_BLK_XL < MAX_FILE) { + /* Tail */ + memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, + temp_len - clone_to); - /* Clone bytes (75%) or insert a block of constant bytes (25%). */ + out_buf = new_buf; + afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); + temp_len += clone_len; - u8 actually_clone = rand_below(afl, 4); - u32 clone_from, clone_to, clone_len; - u8 *new_buf; + } - if (likely(actually_clone)) { + break; - clone_len = choose_block_len(afl, temp_len); - clone_from = rand_below(afl, temp_len - clone_len + 1); + } - } else { + case 47: { - clone_len = choose_block_len(afl, HAVOC_BLK_XL); - clone_from = 0; + if (temp_len + HAVOC_BLK_XL < MAX_FILE) { - } + /* Insert a block of constant bytes (25%). */ - clone_to = rand_below(afl, temp_len); + u32 clone_len = choose_block_len(afl, HAVOC_BLK_XL); + u32 clone_to = rand_below(afl, temp_len); #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), " CLONE-%s-%u-%u-%u", @@ -2375,7 +2421,7 @@ havoc_stage: clone_len); strcat(afl->mutation, afl->m_tmp); #endif - new_buf = + u8 *new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), temp_len + clone_len); if (unlikely(!new_buf)) { PFATAL("alloc"); } @@ -2385,18 +2431,10 @@ havoc_stage: /* Inserted part */ - if (likely(actually_clone)) { - - memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); - - } else { - - memset(new_buf + clone_to, - rand_below(afl, 2) ? rand_below(afl, 256) - : out_buf[rand_below(afl, temp_len)], - clone_len); - - } + memset(new_buf + clone_to, + rand_below(afl, 2) ? rand_below(afl, 256) + : out_buf[rand_below(afl, temp_len)], + clone_len); /* Tail */ memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, @@ -2410,47 +2448,79 @@ havoc_stage: break; - case 14: { + } - /* Overwrite bytes with a randomly selected chunk (75%) or fixed - bytes (25%). */ + case 48 ... 50: { - u32 copy_from, copy_to, copy_len; + /* Overwrite bytes with a randomly selected chunk bytes. 
*/ if (temp_len < 2) { break; } - copy_len = choose_block_len(afl, temp_len - 1); + u32 copy_len = choose_block_len(afl, temp_len - 1); + u32 copy_from = rand_below(afl, temp_len - copy_len + 1); + u32 copy_to = rand_below(afl, temp_len - copy_len + 1); + + if (likely(copy_from != copy_to)) { + +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " OVERWRITE_COPY-%u-%u-%u", + copy_from, copy_to, copy_len); + strcat(afl->mutation, afl->m_tmp); +#endif + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + + } + + break; + + } - copy_from = rand_below(afl, temp_len - copy_len + 1); - copy_to = rand_below(afl, temp_len - copy_len + 1); + case 51: { - if (likely(rand_below(afl, 4))) { + /* Overwrite bytes with fixed bytes. */ + + if (temp_len < 2) { break; } - if (likely(copy_from != copy_to)) { + u32 copy_len = choose_block_len(afl, temp_len - 1); + u32 copy_to = rand_below(afl, temp_len - copy_len + 1); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " OVERWRITE_COPY-%u-%u-%u", copy_from, copy_to, - copy_len); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " OVERWRITE_FIXED-%u-%u-%u", + copy_from, copy_to, copy_len); + strcat(afl->mutation, afl->m_tmp); #endif - memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + memset(out_buf + copy_to, + rand_below(afl, 2) ? rand_below(afl, 256) + : out_buf[rand_below(afl, temp_len)], + copy_len); - } + break; - } else { + } + + // increase from 4 up to 8? + case 52 ... MAX_HAVOC_ENTRY: { + + /* Delete bytes. We're making this a bit more likely + than insertion (the next option) in hopes of keeping + files reasonably small. */ + + if (temp_len < 2) { break; } + + /* Don't delete too much. */ + + u32 del_len = choose_block_len(afl, temp_len - 1); + u32 del_from = rand_below(afl, temp_len - del_len + 1); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " OVERWRITE_FIXED-%u-%u-%u", copy_from, copy_to, copy_len); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " DEL-%u-%u", del_from, + del_len); + strcat(afl->mutation, afl->m_tmp); #endif - memset(out_buf + copy_to, - rand_below(afl, 2) ? rand_below(afl, 256) - : out_buf[rand_below(afl, temp_len)], - copy_len); + memmove(out_buf + del_from, out_buf + del_from + del_len, + temp_len - del_from - del_len); - } + temp_len -= del_len; break; @@ -2458,93 +2528,101 @@ havoc_stage: default: - if (likely(r <= 16 && (afl->extras_cnt || afl->a_extras_cnt))) { - - /* Values 15 and 16 can be selected only if there are any extras - present in the dictionaries. */ + r -= (MAX_HAVOC_ENTRY + 1); - if (r == 15) { + if (afl->extras_cnt) { - /* Overwrite bytes with an extra. */ + if (r < 2) { - if (!afl->extras_cnt || - (afl->a_extras_cnt && rand_below(afl, 2))) { - - /* No user-specified extras or odds in our favor. Let's use an - auto-detected one. */ + /* Use the dictionary. 
*/ - u32 use_extra = rand_below(afl, afl->a_extras_cnt); - u32 extra_len = afl->a_extras[use_extra].len; + u32 use_extra = rand_below(afl, afl->extras_cnt); + u32 extra_len = afl->extras[use_extra].len; - if (extra_len > temp_len) { break; } + if (extra_len > temp_len) { break; } - u32 insert_at = rand_below(afl, temp_len - extra_len + 1); + u32 insert_at = rand_below(afl, temp_len - extra_len + 1); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " AUTO_EXTRA_OVERWRITE-%u-%u", insert_at, extra_len); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " EXTRA_OVERWRITE-%u-%u", + insert_at, extra_len); + strcat(afl->mutation, afl->m_tmp); #endif - memcpy(out_buf + insert_at, afl->a_extras[use_extra].data, - extra_len); - - } else { + memcpy(out_buf + insert_at, afl->extras[use_extra].data, + extra_len); - /* No auto extras or odds in our favor. Use the dictionary. */ + break; - u32 use_extra = rand_below(afl, afl->extras_cnt); - u32 extra_len = afl->extras[use_extra].len; + } else if (r < 4) { - if (extra_len > temp_len) { break; } + u32 use_extra = rand_below(afl, afl->extras_cnt); + u32 extra_len = afl->extras[use_extra].len; + if (temp_len + extra_len >= MAX_FILE) { break; } - u32 insert_at = rand_below(afl, temp_len - extra_len + 1); + u8 *ptr = afl->extras[use_extra].data; + u32 insert_at = rand_below(afl, temp_len + 1); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " EXTRA_OVERWRITE-%u-%u", insert_at, extra_len); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " EXTRA_INSERT-%u-%u", + insert_at, extra_len); + strcat(afl->mutation, afl->m_tmp); #endif - memcpy(out_buf + insert_at, afl->extras[use_extra].data, - extra_len); - } + out_buf = afl_realloc(AFL_BUF_PARAM(out), temp_len + extra_len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } + + /* Tail */ + memmove(out_buf + insert_at + extra_len, out_buf + insert_at, + temp_len - insert_at); + + /* Inserted part */ + memcpy(out_buf + insert_at, ptr, extra_len); + temp_len += extra_len; break; - } else { // case 16 + } else { - u32 use_extra, extra_len, - insert_at = rand_below(afl, temp_len + 1); - u8 *ptr; + r -= 4; - /* Insert an extra. Do the same dice-rolling stuff as for the - previous case. */ + } - if (!afl->extras_cnt || - (afl->a_extras_cnt && rand_below(afl, 2))) { + } - use_extra = rand_below(afl, afl->a_extras_cnt); - extra_len = afl->a_extras[use_extra].len; - ptr = afl->a_extras[use_extra].data; -#ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " AUTO_EXTRA_INSERT-%u-%u", insert_at, extra_len); - strcat(afl->mutation, afl->m_tmp); -#endif + if (afl->a_extras_cnt) { - } else { + if (r == 0) { - use_extra = rand_below(afl, afl->extras_cnt); - extra_len = afl->extras[use_extra].len; - ptr = afl->extras[use_extra].data; + /* Use the dictionary. 
*/ + + u32 use_extra = rand_below(afl, afl->a_extras_cnt); + u32 extra_len = afl->a_extras[use_extra].len; + + if (extra_len > temp_len) { break; } + + u32 insert_at = rand_below(afl, temp_len - extra_len + 1); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " EXTRA_INSERT-%u-%u", - insert_at, extra_len); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " AUTO_EXTRA_OVERWRITE-%u-%u", + insert_at, extra_len); + strcat(afl->mutation, afl->m_tmp); #endif + memcpy(out_buf + insert_at, afl->a_extras[use_extra].data, + extra_len); - } + break; + + } else if (r == 1) { + u32 use_extra = rand_below(afl, afl->a_extras_cnt); + u32 extra_len = afl->a_extras[use_extra].len; if (temp_len + extra_len >= MAX_FILE) { break; } + u8 *ptr = afl->a_extras[use_extra].data; + u32 insert_at = rand_below(afl, temp_len + 1); +#ifdef INTROSPECTION + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " AUTO_EXTRA_INSERT-%u-%u", + insert_at, extra_len); + strcat(afl->mutation, afl->m_tmp); +#endif + out_buf = afl_realloc(AFL_BUF_PARAM(out), temp_len + extra_len); if (unlikely(!out_buf)) { PFATAL("alloc"); } @@ -2554,103 +2632,97 @@ havoc_stage: /* Inserted part */ memcpy(out_buf + insert_at, ptr, extra_len); - temp_len += extra_len; break; - } + } else { - } else { + r -= 2; - /* - switch (r) { + } - case 15: // fall through - case 16: - case 17: {*/ + } - /* Overwrite bytes with a randomly selected chunk from another - testcase or insert that chunk. */ + /* Splicing otherwise if we are still here. + Overwrite bytes with a randomly selected chunk from another + testcase or insert that chunk. */ - /* Pick a random queue entry and seek to it. */ + /* Pick a random queue entry and seek to it. */ - u32 tid; - do { + u32 tid; + do { - tid = rand_below(afl, afl->queued_paths); + tid = rand_below(afl, afl->queued_paths); - } while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4); + } while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4); - /* Get the testcase for splicing. */ - struct queue_entry *target = afl->queue_buf[tid]; - u32 new_len = target->len; - u8 * new_buf = queue_testcase_get(afl, target); + /* Get the testcase for splicing. 
*/ + struct queue_entry *target = afl->queue_buf[tid]; + u32 new_len = target->len; + u8 * new_buf = queue_testcase_get(afl, target); - if ((temp_len >= 2 && rand_below(afl, 2)) || - temp_len + HAVOC_BLK_XL >= MAX_FILE) { + if ((temp_len >= 2 && r % 2) || temp_len + HAVOC_BLK_XL >= MAX_FILE) { - /* overwrite mode */ + /* overwrite mode */ - u32 copy_from, copy_to, copy_len; + u32 copy_from, copy_to, copy_len; - copy_len = choose_block_len(afl, new_len - 1); - if (copy_len > temp_len) copy_len = temp_len; + copy_len = choose_block_len(afl, new_len - 1); + if (copy_len > temp_len) copy_len = temp_len; - copy_from = rand_below(afl, new_len - copy_len + 1); - copy_to = rand_below(afl, temp_len - copy_len + 1); + copy_from = rand_below(afl, new_len - copy_len + 1); + copy_to = rand_below(afl, temp_len - copy_len + 1); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " SPLICE_OVERWRITE-%u-%u-%u-%s", copy_from, copy_to, - copy_len, target->fname); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " SPLICE_OVERWRITE-%u-%u-%u-%s", copy_from, copy_to, + copy_len, target->fname); + strcat(afl->mutation, afl->m_tmp); #endif - memmove(out_buf + copy_to, new_buf + copy_from, copy_len); + memmove(out_buf + copy_to, new_buf + copy_from, copy_len); - } else { + } else { - /* insert mode */ + /* insert mode */ - u32 clone_from, clone_to, clone_len; + u32 clone_from, clone_to, clone_len; - clone_len = choose_block_len(afl, new_len); - clone_from = rand_below(afl, new_len - clone_len + 1); - clone_to = rand_below(afl, temp_len + 1); + clone_len = choose_block_len(afl, new_len); + clone_from = rand_below(afl, new_len - clone_len + 1); + clone_to = rand_below(afl, temp_len + 1); - u8 *temp_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), - temp_len + clone_len + 1); - if (unlikely(!temp_buf)) { PFATAL("alloc"); } + u8 *temp_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), + temp_len + clone_len + 1); + if (unlikely(!temp_buf)) { PFATAL("alloc"); } #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), - " SPLICE_INSERT-%u-%u-%u-%s", clone_from, clone_to, - clone_len, target->fname); - strcat(afl->mutation, afl->m_tmp); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " SPLICE_INSERT-%u-%u-%u-%s", clone_from, clone_to, + clone_len, target->fname); + strcat(afl->mutation, afl->m_tmp); #endif - /* Head */ - - memcpy(temp_buf, out_buf, clone_to); - - /* Inserted part */ + /* Head */ - memcpy(temp_buf + clone_to, new_buf + clone_from, clone_len); + memcpy(temp_buf, out_buf, clone_to); - /* Tail */ - memcpy(temp_buf + clone_to + clone_len, out_buf + clone_to, - temp_len - clone_to); + /* Inserted part */ - out_buf = temp_buf; - afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); - temp_len += clone_len; + memcpy(temp_buf + clone_to, new_buf + clone_from, clone_len); - } + /* Tail */ + memcpy(temp_buf + clone_to + clone_len, out_buf + clone_to, + temp_len - clone_to); - break; + out_buf = temp_buf; + afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); + temp_len += clone_len; } - // end of default: + break; + + // end of default } -- cgit 1.4.1 From 1edc3ece6172be28802f1856bee758ff5acfd91c Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sat, 27 Mar 2021 12:50:57 +0100 Subject: add introspection --- src/afl-fuzz-one.c | 20 +++++++++----------- src/afl-fuzz.c | 7 +++++++ 2 files changed, 16 insertions(+), 11 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index e0d961ba..28ec0c46 100644 --- 
a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -2376,8 +2376,7 @@ havoc_stage: #ifdef INTROSPECTION snprintf(afl->m_tmp, sizeof(afl->m_tmp), " CLONE-%s-%u-%u-%u", - actually_clone ? "clone" : "insert", clone_from, clone_to, - clone_len); + "clone", clone_from, clone_to, clone_len); strcat(afl->mutation, afl->m_tmp); #endif u8 *new_buf = @@ -2416,9 +2415,8 @@ havoc_stage: u32 clone_to = rand_below(afl, temp_len); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " CLONE-%s-%u-%u-%u", - actually_clone ? "clone" : "insert", clone_from, clone_to, - clone_len); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " CLONE-%s-%u-%u", + "insert", clone_to, clone_len); strcat(afl->mutation, afl->m_tmp); #endif u8 *new_buf = @@ -2485,8 +2483,8 @@ havoc_stage: u32 copy_to = rand_below(afl, temp_len - copy_len + 1); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " OVERWRITE_FIXED-%u-%u-%u", - copy_from, copy_to, copy_len); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), " OVERWRITE_FIXED-%u-%u", + copy_to, copy_len); strcat(afl->mutation, afl->m_tmp); #endif memset(out_buf + copy_to, @@ -2600,8 +2598,8 @@ havoc_stage: u32 insert_at = rand_below(afl, temp_len - extra_len + 1); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " AUTO_EXTRA_OVERWRITE-%u-%u", - insert_at, extra_len); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " AUTO_EXTRA_OVERWRITE-%u-%u", insert_at, extra_len); strcat(afl->mutation, afl->m_tmp); #endif memcpy(out_buf + insert_at, afl->a_extras[use_extra].data, @@ -2618,8 +2616,8 @@ havoc_stage: u8 *ptr = afl->a_extras[use_extra].data; u32 insert_at = rand_below(afl, temp_len + 1); #ifdef INTROSPECTION - snprintf(afl->m_tmp, sizeof(afl->m_tmp), " AUTO_EXTRA_INSERT-%u-%u", - insert_at, extra_len); + snprintf(afl->m_tmp, sizeof(afl->m_tmp), + " AUTO_EXTRA_INSERT-%u-%u", insert_at, extra_len); strcat(afl->mutation, afl->m_tmp); #endif diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index a7edb924..9bd7fca0 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -1980,6 +1980,13 @@ int main(int argc, char **argv_orig, char **envp) { } + #ifdef INTROSPECTION + fprintf(afl->introspection_file, + "CYCLE cycle=%llu cycle_wo_finds=%llu expand_havoc=%u queue=%u\n", + afl->queue_cycle, afl->cycles_wo_finds, afl->expand_havoc, + afl->queued_paths); + #endif + if (afl->cycle_schedules) { /* we cannot mix non-AFLfast schedules with others */ -- cgit 1.4.1 From ae9087b3909a1d6dc631e59df9f200b11c60e0a2 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 14 Apr 2021 17:30:08 +0200 Subject: update havoc --- src/afl-fuzz-one.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 28ec0c46..d72d4145 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -2002,7 +2002,7 @@ havoc_stage: u32 r_max, r; r_max = (MAX_HAVOC_ENTRY + 1) + (afl->extras_cnt ? 4 : 0) + - (afl->a_extras_cnt ? 2 : 0); + (afl->a_extras_cnt ? 4 : 0); if (unlikely(afl->expand_havoc && afl->ready_for_splicing_count > 1)) { @@ -2587,7 +2587,7 @@ havoc_stage: if (afl->a_extras_cnt) { - if (r == 0) { + if (r < 2) { /* Use the dictionary. 
*/ @@ -2607,7 +2607,7 @@ havoc_stage: break; - } else if (r == 1) { + } else if (r < 4) { u32 use_extra = rand_below(afl, afl->a_extras_cnt); u32 extra_len = afl->a_extras[use_extra].len; @@ -2636,7 +2636,7 @@ havoc_stage: } else { - r -= 2; + r -= 4; } -- cgit 1.4.1 From e9d2f72382cab75832721d859c3e731da071435d Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Fri, 30 Apr 2021 13:35:24 +0200 Subject: fixed potential double free in custom trim (#881) --- include/afl-fuzz.h | 4 ++-- src/afl-fuzz-mutators.c | 23 +++++++++++++++++------ src/afl-fuzz-one.c | 8 ++++---- src/afl-fuzz-run.c | 8 ++++++-- 4 files changed, 29 insertions(+), 14 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index f201782a..040d7ae9 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -1003,7 +1003,7 @@ void read_afl_environment(afl_state_t *, char **); /* Custom mutators */ void setup_custom_mutators(afl_state_t *); void destroy_custom_mutators(afl_state_t *); -u8 trim_case_custom(afl_state_t *, struct queue_entry *q, u8 *in_buf, +u8 trim_case_custom(afl_state_t *, struct queue_entry *q, u8 **in_buf, struct custom_mutator *mutator); /* Python */ @@ -1093,7 +1093,7 @@ fsrv_run_result_t fuzz_run_target(afl_state_t *, afl_forkserver_t *fsrv, u32); void write_to_testcase(afl_state_t *, void *, u32); u8 calibrate_case(afl_state_t *, struct queue_entry *, u8 *, u32, u8); void sync_fuzzers(afl_state_t *); -u8 trim_case(afl_state_t *, struct queue_entry *, u8 *); +u8 trim_case(afl_state_t *, struct queue_entry *, u8 **); u8 common_fuzz_stuff(afl_state_t *, u8 *, u32); /* Fuzz one */ diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c index c99d9a4d..d8db8676 100644 --- a/src/afl-fuzz-mutators.c +++ b/src/afl-fuzz-mutators.c @@ -305,9 +305,13 @@ struct custom_mutator *load_custom_mutator(afl_state_t *afl, const char *fn) { } -u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf, +// Custom testcase trimming. +u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 **in_buf_p, struct custom_mutator *mutator) { + // We need to pass pointers around, as growing testcases may need to realloc. + u8 *in_buf = *in_buf_p; + u8 needs_write = 0, fault = 0; u32 trim_exec = 0; u32 orig_len = q->len; @@ -397,14 +401,21 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf, if (likely(retlen && cksum == q->exec_cksum)) { - if (afl_realloc((void **)&in_buf, retlen) == NULL) { + // Check if we got a new retbuf and to memcpy our buf. + if (in_buf != retbuf) { - FATAL("can not allocate memory for trim"); + if (afl_realloc((void **)in_buf_p, retlen) == NULL) { - } + FATAL("can not allocate memory for trim"); + + } - memcpy(in_buf, retbuf, retlen); - q->len = retlen; + in_buf = *in_buf_p; + + memcpy(in_buf, retbuf, retlen); + q->len = retlen; + + } /* Let's save a clean trace, which will be needed by update_bitmap_score once we're done with the trimming stuff. 
*/ diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index d72d4145..ed815cb4 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -508,7 +508,7 @@ u8 fuzz_one_original(afl_state_t *afl) { u32 old_len = afl->queue_cur->len; - u8 res = trim_case(afl, afl->queue_cur, in_buf); + u8 res = trim_case(afl, afl->queue_cur, &in_buf); orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur); if (unlikely(res == FSRV_RUN_ERROR)) { @@ -3007,16 +3007,16 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { u32 old_len = afl->queue_cur->len; - u8 res = trim_case(afl, afl->queue_cur, in_buf); + u8 res = trim_case(afl, afl->queue_cur, &in_buf); orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur); - if (res == FSRV_RUN_ERROR) { + if (unlikely(res == FSRV_RUN_ERROR)) { FATAL("Unable to execute target application"); } - if (afl->stop_soon) { + if (unlikely(afl->stop_soon)) { ++afl->cur_skipped_paths; goto abandon_entry; diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index 832f17bb..a7b071a5 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -720,7 +720,10 @@ void sync_fuzzers(afl_state_t *afl) { trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of file size, to keep the stage short and sweet. */ -u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) { +u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 **in_buf_p) { + + // We need to pass pointers around, as growing testcases may need to realloc. + u8 *in_buf = *in_buf_p; u32 orig_len = q->len; @@ -734,7 +737,8 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) { if (el->afl_custom_trim) { - trimmed_case = trim_case_custom(afl, q, in_buf, el); + trimmed_case = trim_case_custom(afl, q, in_buf_p, el); + in_buf = *in_buf_p; custom_trimmed = true; } -- cgit 1.4.1 From 1d9a3d955cb4b1350ecad1e008b7c24c5ea3af57 Mon Sep 17 00:00:00 2001 From: realmadsci <71108352+realmadsci@users.noreply.github.com> Date: Thu, 6 May 2021 18:14:16 -0400 Subject: Fix memory errors when trim causes testcase growth (#881) (#903) * Revert "fixed potential double free in custom trim (#881)" This reverts commit e9d2f72382cab75832721d859c3e731da071435d. * Revert "fix custom trim for increasing data" This reverts commit 86a8ef168dda766d2f25f15c15c4d3ecf21d0667. * Fix memory errors when trim causes testcase growth Modify trim_case_custom to avoid writing into in_buf because some custom mutators can cause the testcase to grow rather than shrink. Instead of modifying in_buf directly, we write the update out to the disk when trimming is complete, and then the caller is responsible for refreshing the in-memory buffer from the file. This is still a bit sketchy because it does need to modify q->len in order to notify the upper layers that something changed, and it could end up telling upper layer code that the q->len is *bigger* than the buffer (q->testcase_buf) that contains it, which is asking for trouble down the line somewhere... * Fix an unlikely situation Put back some `unlikely()` calls that were in the e9d2f72382cab75832721d859c3e731da071435d commit that was reverted. 
--- include/afl-fuzz.h | 4 +-- src/afl-fuzz-mutators.c | 65 +++++++++++++++++++++++-------------------------- src/afl-fuzz-one.c | 4 +-- src/afl-fuzz-run.c | 8 ++---- 4 files changed, 37 insertions(+), 44 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 040d7ae9..f201782a 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -1003,7 +1003,7 @@ void read_afl_environment(afl_state_t *, char **); /* Custom mutators */ void setup_custom_mutators(afl_state_t *); void destroy_custom_mutators(afl_state_t *); -u8 trim_case_custom(afl_state_t *, struct queue_entry *q, u8 **in_buf, +u8 trim_case_custom(afl_state_t *, struct queue_entry *q, u8 *in_buf, struct custom_mutator *mutator); /* Python */ @@ -1093,7 +1093,7 @@ fsrv_run_result_t fuzz_run_target(afl_state_t *, afl_forkserver_t *fsrv, u32); void write_to_testcase(afl_state_t *, void *, u32); u8 calibrate_case(afl_state_t *, struct queue_entry *, u8 *, u32, u8); void sync_fuzzers(afl_state_t *); -u8 trim_case(afl_state_t *, struct queue_entry *, u8 **); +u8 trim_case(afl_state_t *, struct queue_entry *, u8 *); u8 common_fuzz_stuff(afl_state_t *, u8 *, u32); /* Fuzz one */ diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c index d8db8676..3bb37a89 100644 --- a/src/afl-fuzz-mutators.c +++ b/src/afl-fuzz-mutators.c @@ -305,16 +305,14 @@ struct custom_mutator *load_custom_mutator(afl_state_t *afl, const char *fn) { } -// Custom testcase trimming. -u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 **in_buf_p, +u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf, struct custom_mutator *mutator) { - // We need to pass pointers around, as growing testcases may need to realloc. - u8 *in_buf = *in_buf_p; - - u8 needs_write = 0, fault = 0; + u8 fault = 0; u32 trim_exec = 0; u32 orig_len = q->len; + u32 out_len = 0; + u8* out_buf = NULL; u8 val_buf[STRINGIFY_VAL_SIZE_MAX]; @@ -401,40 +399,33 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 **in_buf_p, if (likely(retlen && cksum == q->exec_cksum)) { - // Check if we got a new retbuf and to memcpy our buf. - if (in_buf != retbuf) { - - if (afl_realloc((void **)in_buf_p, retlen) == NULL) { - - FATAL("can not allocate memory for trim"); - - } + /* Let's save a clean trace, which will be needed by + update_bitmap_score once we're done with the trimming stuff. + Use out_buf NULL check to make this only happen once per trim. */ - in_buf = *in_buf_p; + if (!out_buf) { - memcpy(in_buf, retbuf, retlen); - q->len = retlen; + memcpy(afl->clean_trace_custom, afl->fsrv.trace_bits, + afl->fsrv.map_size); } - /* Let's save a clean trace, which will be needed by - update_bitmap_score once we're done with the trimming stuff. 
*/ - - if (!needs_write) { + if (afl_realloc((void **)&out_buf, retlen) == NULL) { - needs_write = 1; - memcpy(afl->clean_trace_custom, afl->fsrv.trace_bits, - afl->fsrv.map_size); + FATAL("can not allocate memory for trim"); } + out_len = retlen; + memcpy(out_buf, retbuf, retlen); + /* Tell the custom mutator that the trimming was successful */ afl->stage_cur = mutator->afl_custom_post_trim(mutator->data, 1); if (afl->not_on_tty && afl->debug) { SAYF("[Custom Trimming] SUCCESS: %u/%u iterations (now at %u bytes)", - afl->stage_cur, afl->stage_max, q->len); + afl->stage_cur, afl->stage_max, out_len); } @@ -467,16 +458,10 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 **in_buf_p, } - if (afl->not_on_tty && afl->debug) { - - SAYF("[Custom Trimming] DONE: %u bytes -> %u bytes", orig_len, q->len); - - } - - /* If we have made changes to in_buf, we also need to update the on-disk + /* If we have made changes, we also need to update the on-disk version of the test case. */ - if (needs_write) { + if (out_buf) { s32 fd; @@ -486,16 +471,28 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 **in_buf_p, if (fd < 0) { PFATAL("Unable to create '%s'", q->fname); } - ck_write(fd, in_buf, q->len, q->fname); + ck_write(fd, out_buf, out_len, q->fname); close(fd); + /* Update the queue's knowledge of length as soon as we write the file. + We do this here so that exit/error cases that *don't* update the file also + don't update q->len. */ + q->len = out_len; + memcpy(afl->fsrv.trace_bits, afl->clean_trace_custom, afl->fsrv.map_size); update_bitmap_score(afl, q); } + if (afl->not_on_tty && afl->debug) { + + SAYF("[Custom Trimming] DONE: %u bytes -> %u bytes", orig_len, q->len); + + } + abort_trimming: + if (out_buf) afl_free(out_buf); afl->bytes_trim_out += q->len; return fault; diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index ed815cb4..4eeb93de 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -508,7 +508,7 @@ u8 fuzz_one_original(afl_state_t *afl) { u32 old_len = afl->queue_cur->len; - u8 res = trim_case(afl, afl->queue_cur, &in_buf); + u8 res = trim_case(afl, afl->queue_cur, in_buf); orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur); if (unlikely(res == FSRV_RUN_ERROR)) { @@ -3007,7 +3007,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { u32 old_len = afl->queue_cur->len; - u8 res = trim_case(afl, afl->queue_cur, &in_buf); + u8 res = trim_case(afl, afl->queue_cur, in_buf); orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur); if (unlikely(res == FSRV_RUN_ERROR)) { diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index 397d62bf..6e5210b8 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -718,10 +718,7 @@ void sync_fuzzers(afl_state_t *afl) { trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of file size, to keep the stage short and sweet. */ -u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 **in_buf_p) { - - // We need to pass pointers around, as growing testcases may need to realloc. 
- u8 *in_buf = *in_buf_p; +u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) { u32 orig_len = q->len; @@ -735,8 +732,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 **in_buf_p) { if (el->afl_custom_trim) { - trimmed_case = trim_case_custom(afl, q, in_buf_p, el); - in_buf = *in_buf_p; + trimmed_case = trim_case_custom(afl, q, in_buf, el); custom_trimmed = true; } -- cgit 1.4.1 From 72ca9b4684981ce2b807e4efd218bd1924f3e6b1 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Tue, 11 May 2021 22:06:37 +0200 Subject: fix a few cur_time uses --- docs/Changelog.md | 1 + src/afl-cc.c | 16 +++++++++------- src/afl-fuzz-one.c | 6 +++--- src/afl-fuzz-stats.c | 5 +++-- src/afl-fuzz.c | 6 ++++-- 5 files changed, 20 insertions(+), 14 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/docs/Changelog.md b/docs/Changelog.md index ceb02bb9..e4c02921 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -9,6 +9,7 @@ Want to stay in the loop on major new features? Join our mailing list by sending a mail to . ### Version ++3.13a (development) + - Note: plot_data switched to relative time from unix time in 3.10 - frida_mode - new mode that uses frida to fuzz binary-only targets, it currently supports persistent mode and cmplog. thanks to @WorksButNotTested! diff --git a/src/afl-cc.c b/src/afl-cc.c index c1050355..ff7b5219 100644 --- a/src/afl-cc.c +++ b/src/afl-cc.c @@ -1574,12 +1574,12 @@ int main(int argc, char **argv, char **envp) { else if (have_gcc_plugin) compiler_mode = GCC_PLUGIN; else if (have_gcc) - #ifdef __APPLE__ - // on OSX clang masquerades as GCC - compiler_mode = CLANG; - #else - compiler_mode = GCC; - #endif +#ifdef __APPLE__ + // on OSX clang masquerades as GCC + compiler_mode = CLANG; +#else + compiler_mode = GCC; +#endif else if (have_lto) compiler_mode = LTO; else @@ -1602,8 +1602,10 @@ int main(int argc, char **argv, char **envp) { } if (compiler_mode == CLANG) { + instrument_mode = INSTRUMENT_CLANG; - setenv(CLANG_ENV_VAR, "1", 1); // used by afl-as + setenv(CLANG_ENV_VAR, "1", 1); // used by afl-as + } if (argc < 2 || strncmp(argv[1], "-h", 2) == 0) { diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 4eeb93de..4a3e7f33 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -562,7 +562,7 @@ u8 fuzz_one_original(afl_state_t *afl) { if (afl->cmplog_lvl == 3 || (afl->cmplog_lvl == 2 && afl->queue_cur->tc_ref) || !(afl->fsrv.total_execs % afl->queued_paths) || - get_cur_time() - afl->last_path_time > 300000) { + get_cur_time() - afl->last_path_time > 300000) { // 300 seconds if (input_to_state_stage(afl, in_buf, out_buf, len)) { @@ -2013,7 +2013,7 @@ havoc_stage: } - if (unlikely(get_cur_time() - afl->last_path_time > 5000 && + if (unlikely(get_cur_time() - afl->last_path_time > 5000 /* 5 seconds */ && afl->ready_for_splicing_count > 1)) { /* add expensive havoc cases here if there is no findings in the last 5s */ @@ -3060,7 +3060,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { if (afl->cmplog_lvl == 3 || (afl->cmplog_lvl == 2 && afl->queue_cur->tc_ref) || !(afl->fsrv.total_execs % afl->queued_paths) || - get_cur_time() - afl->last_path_time > 300000) { + get_cur_time() - afl->last_path_time > 300000) { // 300 seconds if (input_to_state_stage(afl, in_buf, out_buf, len)) { diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index 313263f9..4884b942 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -368,7 +368,8 @@ void maybe_update_plot_file(afl_state_t *afl, u32 t_bytes, double 
bitmap_cvg, afl->plot_prev_uh == afl->unique_hangs && afl->plot_prev_md == afl->max_depth && afl->plot_prev_ed == afl->fsrv.total_execs) || - !afl->queue_cycle || get_cur_time() - afl->start_time <= 60))) { + !afl->queue_cycle || + get_cur_time() - afl->start_time <= 60000))) { return; @@ -393,7 +394,7 @@ void maybe_update_plot_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg, fprintf(afl->fsrv.plot_file, "%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f, %llu, " "%u\n", - (afl->prev_run_time + get_cur_time() - afl->start_time), + ((afl->prev_run_time + get_cur_time() - afl->start_time) / 1000), afl->queue_cycle - 1, afl->current_entry, afl->queued_paths, afl->pending_not_fuzzed, afl->pending_favored, bitmap_cvg, afl->unique_crashes, afl->unique_hangs, afl->max_depth, eps, diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 8de3ed6b..094fd161 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -1940,8 +1940,10 @@ int main(int argc, char **argv_orig, char **envp) { /* If we had a full queue cycle with no new finds, try recombination strategies next. */ - if (unlikely(afl->queued_paths == prev_queued && - (get_cur_time() - afl->start_time) >= 3600)) { + if (unlikely(afl->queued_paths == prev_queued + /* FIXME TODO BUG: && (get_cur_time() - afl->start_time) >= + 3600 */ + )) { if (afl->use_splicing) { -- cgit 1.4.1 From 76653544056ce2334b6523252e91a8f8a6ac9dcb Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Tue, 1 Jun 2021 10:13:16 +0200 Subject: threadsafe doc fixes, code format --- README.md | 3 +- docs/Changelog.md | 3 +- docs/env_variables.md | 9 +- frida_mode/src/instrument/instrument_debug.c | 2 +- frida_mode/src/stats/stats.c | 4 +- instrumentation/README.llvm.md | 7 +- instrumentation/SanitizerCoverageLTO.so.cc | 7 +- instrumentation/SanitizerCoveragePCGUARD.so.cc | 6 +- instrumentation/afl-llvm-lto-instrumentation.so.cc | 11 +- instrumentation/afl-llvm-pass.so.cc | 116 +++++++++++++-------- qemu_mode/libqasan/libqasan.c | 5 +- src/afl-cc.c | 3 +- src/afl-fuzz-one.c | 1 + src/afl-fuzz.c | 7 +- 14 files changed, 106 insertions(+), 78 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/README.md b/README.md index 69e2d14a..c04dba98 100644 --- a/README.md +++ b/README.md @@ -90,6 +90,7 @@ behaviours and defaults: | Feature/Instrumentation | afl-gcc | llvm | gcc_plugin | frida_mode | qemu_mode |unicorn_mode | | -------------------------|:-------:|:---------:|:----------:|:----------:|:----------------:|:------------:| + | Threadsafe counters | | x(3) | | | | | | NeverZero | x86[_64]| x(1) | x | x | x | x | | Persistent Mode | | x | x | x86[_64] | x86[_64]/arm[64] | x | | LAF-Intel / CompCov | | x | | | x86[_64]/arm[64] | x86[_64]/arm | @@ -104,7 +105,7 @@ behaviours and defaults: 1. default for LLVM >= 9.0, env var for older version due an efficiency bug in previous llvm versions 2. GCC creates non-performant code, hence it is disabled in gcc_plugin - 3. (currently unassigned) + 3. with `AFL_LLVM_THREADSAFE_INST`, disables NeverZero 4. with pcguard mode and LTO mode for LLVM 11 and newer 5. upcoming, development in the branch 6. not compatible with LTO instrumentation and needs at least LLVM v4.1 diff --git a/docs/Changelog.md b/docs/Changelog.md index d8ffe498..29ea918b 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -41,6 +41,8 @@ sending a mail to . it fails - afl-cc: - We do not support llvm versions prior 6.0 anymore + - added thread safe counters to all modes (`AFL_LLVM_THREADSAFE_INST`), + note that this disables never zero counters. 
- Fix for -pie compiled binaries with default afl-clang-fast PCGUARD - Leak Sanitizer (AFL_USE_LSAN) added by Joshua Rogers, thanks! - Removed InsTrim instrumentation as it is not as good as PCGUARD @@ -58,7 +60,6 @@ sending a mail to . MacOS shared memory - updated the grammar custom mutator to the newest version - add -d (add dead fuzzer stats) to afl-whatsup - - add thread safe counters for LLVM CLASSIC (set AFL_LLVM_THREADSAFE_INST) - added AFL_PRINT_FILENAMES to afl-showmap/cmin to print the current filename - afl-showmap/cmin will now process queue items in alphabetical order diff --git a/docs/env_variables.md b/docs/env_variables.md index b4b866ab..38a67bc7 100644 --- a/docs/env_variables.md +++ b/docs/env_variables.md @@ -231,10 +231,11 @@ Then there are a few specific features that are only available in instrumentatio See [instrumentation/README.instrument_list.md](../instrumentation/README.instrument_list.md) for more information. -### Thread safe instrumentation counters (in mode LLVM CLASSIC) - - Setting `AFL_LLVM_THREADSAFE_INST` will inject code that implements thread safe counters. - The overhead is a bit higher compared to the older non-thread safe case. - `AFL_LLVM_NOT_ZERO` and `AFL_LLVM_SKIP_NEVERZERO` are supported (see below). +### Thread safe instrumentation counters (in all modes) + + - Setting `AFL_LLVM_THREADSAFE_INST` will inject code that implements thread + safe counters. The overhead is a little bit higher compared to the older + non-thread safe case. Note that this disables neverzero (see below). ### NOT_ZERO diff --git a/frida_mode/src/instrument/instrument_debug.c b/frida_mode/src/instrument/instrument_debug.c index be72ef89..f8c1df77 100644 --- a/frida_mode/src/instrument/instrument_debug.c +++ b/frida_mode/src/instrument/instrument_debug.c @@ -17,7 +17,7 @@ static void instrument_debug(char *format, ...) { va_list ap; char buffer[4096] = {0}; int ret; - int len; + int len; va_start(ap, format); ret = vsnprintf(buffer, sizeof(buffer) - 1, format, ap); diff --git a/frida_mode/src/stats/stats.c b/frida_mode/src/stats/stats.c index 890a8d6b..662fb6d5 100644 --- a/frida_mode/src/stats/stats.c +++ b/frida_mode/src/stats/stats.c @@ -96,10 +96,10 @@ void stats_init(void) { void stats_vprint(int fd, char *format, va_list ap) { char buffer[4096] = {0}; - int ret; + int ret; int len; - if(vsnprintf(buffer, sizeof(buffer) - 1, format, ap) < 0) { return; } + if (vsnprintf(buffer, sizeof(buffer) - 1, format, ap) < 0) { return; } len = strnlen(buffer, sizeof(buffer)); IGNORED_RETURN(write(fd, buffer, len)); diff --git a/instrumentation/README.llvm.md b/instrumentation/README.llvm.md index 02722588..8ce5afb9 100644 --- a/instrumentation/README.llvm.md +++ b/instrumentation/README.llvm.md @@ -144,9 +144,10 @@ is not optimal and was only fixed in llvm 9. You can set this with AFL_LLVM_NOT_ZERO=1 See [README.neverzero.md](README.neverzero.md) -Support for thread safe counters has been added for mode LLVM CLASSIC. -Activate it with `AFL_LLVM_THREADSAFE_INST=1`. The tradeoff is better precision in -multi threaded apps for a slightly higher instrumentation overhead. +Support for thread safe counters has been added for all modes. +Activate it with `AFL_LLVM_THREADSAFE_INST=1`. The tradeoff is better precision +in multi threaded apps for a slightly higher instrumentation overhead. +This also disables the nozero counter default for performance reasons. 
## 4) Snapshot feature diff --git a/instrumentation/SanitizerCoverageLTO.so.cc b/instrumentation/SanitizerCoverageLTO.so.cc index 58969e18..20f1856e 100644 --- a/instrumentation/SanitizerCoverageLTO.so.cc +++ b/instrumentation/SanitizerCoverageLTO.so.cc @@ -1497,14 +1497,12 @@ void ModuleSanitizerCoverage::InjectCoverageAtBlock(Function &F, BasicBlock &BB, } /* Update bitmap */ - if (use_threadsafe_counters) { /* Atomic */ + if (use_threadsafe_counters) { /* Atomic */ IRB.CreateAtomicRMW(llvm::AtomicRMWInst::BinOp::Add, MapPtrIdx, One, llvm::AtomicOrdering::Monotonic); - } - else - { + } else { LoadInst *Counter = IRB.CreateLoad(MapPtrIdx); Counter->setMetadata(Mo->getMDKindID("nosanitize"), @@ -1524,6 +1522,7 @@ void ModuleSanitizerCoverage::InjectCoverageAtBlock(Function &F, BasicBlock &BB, ->setMetadata(Mo->getMDKindID("nosanitize"), MDNode::get(*Ct, None)); } + // done :) inst++; diff --git a/instrumentation/SanitizerCoveragePCGUARD.so.cc b/instrumentation/SanitizerCoveragePCGUARD.so.cc index dbddad0a..4a8c9e28 100644 --- a/instrumentation/SanitizerCoveragePCGUARD.so.cc +++ b/instrumentation/SanitizerCoveragePCGUARD.so.cc @@ -1069,16 +1069,14 @@ void ModuleSanitizerCoverage::InjectCoverageAtBlock(Function &F, BasicBlock &BB, /* Load counter for CurLoc */ - Value * MapPtrIdx = IRB.CreateGEP(MapPtr, CurLoc); + Value *MapPtrIdx = IRB.CreateGEP(MapPtr, CurLoc); if (use_threadsafe_counters) { IRB.CreateAtomicRMW(llvm::AtomicRMWInst::BinOp::Add, MapPtrIdx, One, llvm::AtomicOrdering::Monotonic); - } - else - { + } else { LoadInst *Counter = IRB.CreateLoad(MapPtrIdx); /* Update bitmap */ diff --git a/instrumentation/afl-llvm-lto-instrumentation.so.cc b/instrumentation/afl-llvm-lto-instrumentation.so.cc index b5fdb3d6..fe43fbe5 100644 --- a/instrumentation/afl-llvm-lto-instrumentation.so.cc +++ b/instrumentation/afl-llvm-lto-instrumentation.so.cc @@ -93,8 +93,8 @@ class AFLLTOPass : public ModulePass { uint32_t function_minimum_size = 1; uint32_t inst_blocks = 0, inst_funcs = 0, total_instr = 0; unsigned long long int map_addr = 0x10000; - const char *skip_nozero = NULL; - const char *use_threadsafe_counters = nullptr; + const char * skip_nozero = NULL; + const char * use_threadsafe_counters = nullptr; }; @@ -843,9 +843,12 @@ bool AFLLTOPass::runOnModule(Module &M) { /* Update bitmap */ if (use_threadsafe_counters) { + IRB.CreateAtomicRMW(llvm::AtomicRMWInst::BinOp::Add, MapPtrIdx, One, llvm::AtomicOrdering::Monotonic); + } else { + LoadInst *Counter = IRB.CreateLoad(MapPtrIdx); Counter->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); @@ -861,7 +864,9 @@ bool AFLLTOPass::runOnModule(Module &M) { } IRB.CreateStore(Incr, MapPtrIdx) - ->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); + ->setMetadata(M.getMDKindID("nosanitize"), + MDNode::get(C, None)); + } // done :) diff --git a/instrumentation/afl-llvm-pass.so.cc b/instrumentation/afl-llvm-pass.so.cc index fe9e2e40..62f8b2ed 100644 --- a/instrumentation/afl-llvm-pass.so.cc +++ b/instrumentation/afl-llvm-pass.so.cc @@ -81,12 +81,12 @@ class AFLCoverage : public ModulePass { bool runOnModule(Module &M) override; protected: - uint32_t ngram_size = 0; - uint32_t ctx_k = 0; - uint32_t map_size = MAP_SIZE; - uint32_t function_minimum_size = 1; - const char * ctx_str = NULL, *caller_str = NULL, *skip_nozero = NULL; - const char * use_threadsafe_counters = nullptr; + uint32_t ngram_size = 0; + uint32_t ctx_k = 0; + uint32_t map_size = MAP_SIZE; + uint32_t function_minimum_size = 1; + const char *ctx_str = NULL, 
*caller_str = NULL, *skip_nozero = NULL; + const char *use_threadsafe_counters = nullptr; }; @@ -188,18 +188,30 @@ bool AFLCoverage::runOnModule(Module &M) { if ((isatty(2) && !getenv("AFL_QUIET")) || !!getenv("AFL_DEBUG")) { if (use_threadsafe_counters) { - if (!getenv("AFL_LLVM_NOT_ZERO")) { - skip_nozero = "1"; - SAYF(cCYA "afl-llvm-pass" VERSION cRST " using thread safe counters\n"); - } - else { - SAYF(cCYA "afl-llvm-pass" VERSION cRST - " using thread safe not-zero-counters\n"); - } - } - else - { - SAYF(cCYA "afl-llvm-pass" VERSION cRST " using non-thread safe instrumentation\n"); + + // disabled unless there is support for other modules as well + // (increases documentation complexity) + /* if (!getenv("AFL_LLVM_NOT_ZERO")) { */ + + skip_nozero = "1"; + SAYF(cCYA "afl-llvm-pass" VERSION cRST " using thread safe counters\n"); + + /* + + } else { + + SAYF(cCYA "afl-llvm-pass" VERSION cRST + " using thread safe not-zero-counters\n"); + + } + + */ + + } else { + + SAYF(cCYA "afl-llvm-pass" VERSION cRST + " using non-thread safe instrumentation\n"); + } } @@ -649,44 +661,44 @@ bool AFLCoverage::runOnModule(Module &M) { /* Update bitmap */ + if (use_threadsafe_counters) { /* Atomic */ - if (use_threadsafe_counters) {/* Atomic */ - - #if LLVM_VERSION_MAJOR < 9 +#if LLVM_VERSION_MAJOR < 9 if (neverZero_counters_str != - NULL) { // with llvm 9 we make this the default as the bug in llvm is then fixed - #else + NULL) { // with llvm 9 we make this the default as the bug in llvm + // is then fixed +#else if (!skip_nozero) { - #endif +#endif // register MapPtrIdx in a todo list todo.push_back(MapPtrIdx); - } - else - { + } else { + IRB.CreateAtomicRMW(llvm::AtomicRMWInst::BinOp::Add, MapPtrIdx, One, llvm::AtomicOrdering::Monotonic); + } - } - else - { + + } else { LoadInst *Counter = IRB.CreateLoad(MapPtrIdx); Counter->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); Value *Incr = IRB.CreateAdd(Counter, One); - #if LLVM_VERSION_MAJOR < 9 +#if LLVM_VERSION_MAJOR < 9 if (neverZero_counters_str != - NULL) { // with llvm 9 we make this the default as the bug in llvm is - // then fixed - #else + NULL) { // with llvm 9 we make this the default as the bug in llvm + // is then fixed +#else if (!skip_nozero) { - #endif +#endif /* hexcoder: Realize a counter that skips zero during overflow. - * Once this counter reaches its maximum value, it next increments to 1 + * Once this counter reaches its maximum value, it next increments to + * 1 * * Instead of * Counter + 1 -> Counter @@ -705,7 +717,7 @@ bool AFLCoverage::runOnModule(Module &M) { IRB.CreateStore(Incr, MapPtrIdx) ->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); - } /* non atomic case */ + } /* non atomic case */ /* Update prev_loc history vector (by placing cur_loc at the head of the vector and shuffle the other elements back by one) */ @@ -762,16 +774,19 @@ bool AFLCoverage::runOnModule(Module &M) { } - if (use_threadsafe_counters) { /*Atomic NeverZero */ + if (use_threadsafe_counters) { /*Atomic NeverZero */ // handle the list of registered blocks to instrument for (auto val : todo) { - /* hexcoder: Realize a thread-safe counter that skips zero during overflow. Once this counter reaches its maximum value, it next increments to 1 - * - * Instead of - * Counter + 1 -> Counter - * we inject now this - * Counter + 1 -> {Counter, OverflowFlag} - * Counter + OverflowFlag -> Counter + + /* hexcoder: Realize a thread-safe counter that skips zero during + * overflow. 
Once this counter reaches its maximum value, it next + * increments to 1 + * + * Instead of + * Counter + 1 -> Counter + * we inject now this + * Counter + 1 -> {Counter, OverflowFlag} + * Counter + OverflowFlag -> Counter */ /* equivalent c code looks like this @@ -781,12 +796,19 @@ bool AFLCoverage::runOnModule(Module &M) { int old = atomic_load_explicit(&Counter, memory_order_relaxed); int new; do { + if (old == 255) { + new = 1; + } else { + new = old + 1; + } + } while (!atomic_compare_exchange_weak_explicit(&Counter, &old, new, + memory_order_relaxed, memory_order_relaxed)); */ @@ -805,7 +827,8 @@ bool AFLCoverage::runOnModule(Module &M) { BasicBlock *BB = IRB.GetInsertBlock(); // insert a basic block with the corpus of a do while loop - // the calculation may need to repeat, if atomic compare_exchange is not successful + // the calculation may need to repeat, if atomic compare_exchange is not + // successful BasicBlock::iterator it(*Counter); it++; // split after load counter @@ -857,6 +880,7 @@ bool AFLCoverage::runOnModule(Module &M) { // if the cmpXchg was not successful, retry IRB.CreateCondBr(Success, end_bb, do_while_bb); + } } diff --git a/qemu_mode/libqasan/libqasan.c b/qemu_mode/libqasan/libqasan.c index d4742e3e..6ea24f08 100644 --- a/qemu_mode/libqasan/libqasan.c +++ b/qemu_mode/libqasan/libqasan.c @@ -69,9 +69,8 @@ __attribute__((constructor)) void __libqasan_init() { __libqasan_is_initialized = 1; __libqasan_init_hooks(); - - if (getenv("AFL_INST_LIBS") || getenv("QASAN_HOTPACH")) - __libqasan_hotpatch(); + + if (getenv("AFL_INST_LIBS") || getenv("QASAN_HOTPACH")) __libqasan_hotpatch(); if (getenv("AFL_INST_LIBS") || getenv("QASAN_HOTPACH")) __libqasan_hotpatch(); diff --git a/src/afl-cc.c b/src/afl-cc.c index 6be6e165..486f7468 100644 --- a/src/afl-cc.c +++ b/src/afl-cc.c @@ -1777,7 +1777,8 @@ int main(int argc, char **argv, char **envp) { SAYF( "\nLLVM/LTO/afl-clang-fast/afl-clang-lto specific environment " "variables:\n" - " AFL_LLVM_THREADSAFE_INST: instrument with thread safe counters\n" + " AFL_LLVM_THREADSAFE_INST: instrument with thread safe counters, " + "disables neverzero\n" COUNTER_BEHAVIOUR diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 4a3e7f33..c3ce2edd 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -561,6 +561,7 @@ u8 fuzz_one_original(afl_state_t *afl) { if (afl->cmplog_lvl == 3 || (afl->cmplog_lvl == 2 && afl->queue_cur->tc_ref) || + afl->queue_cur->favored || !(afl->fsrv.total_execs % afl->queued_paths) || get_cur_time() - afl->last_path_time > 300000) { // 300 seconds diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index a3a623d9..5bdb4c8d 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -2066,13 +2066,10 @@ int main(int argc, char **argv_orig, char **envp) { break; case 4: afl->expand_havoc = 5; - if (afl->cmplog_lvl && afl->cmplog_lvl < 3) afl->cmplog_lvl = 3; + // if (afl->cmplog_lvl && afl->cmplog_lvl < 3) afl->cmplog_lvl = + // 3; break; case 5: - // if not in sync mode, enable deterministic mode? 
- // if (!afl->sync_id) afl->skip_deterministic = 0; - afl->expand_havoc = 6; - case 6: // nothing else currently break; -- cgit 1.4.1 From 74fcb365e99ce86e405e52b586baa9d0f825f70c Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Mon, 14 Jun 2021 12:36:41 +0200 Subject: little inline --- src/afl-fuzz-one.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/afl-fuzz-one.c') diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index c3ce2edd..11adebf4 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -73,7 +73,7 @@ static int select_algorithm(afl_state_t *afl, u32 max_algorithm) { /* Helper to choose random block len for block operations in fuzz_one(). Doesn't return zero, provided that max_len is > 0. */ -static u32 choose_block_len(afl_state_t *afl, u32 limit) { +static inline u32 choose_block_len(afl_state_t *afl, u32 limit) { u32 min_value, max_value; u32 rlim = MIN(afl->queue_cycle, (u32)3); -- cgit 1.4.1 From 7038e56da3952c89a51596180578153918ce6eee Mon Sep 17 00:00:00 2001 From: van Hauser Date: Sun, 27 Jun 2021 10:22:18 +0200 Subject: Select (#995) * favor unfuzzed * fix * reinit table after a new fuzz --- include/afl-fuzz.h | 3 ++- src/afl-fuzz-one.c | 1 + src/afl-fuzz-queue.c | 5 ++++- src/afl-fuzz.c | 3 ++- 4 files changed, 9 insertions(+), 3 deletions(-) (limited to 'src/afl-fuzz-one.c') diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 2920f905..2e2c78ef 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -519,7 +519,8 @@ typedef struct afl_state { shmem_testcase_mode, /* If sharedmem testcases are used */ expand_havoc, /* perform expensive havoc after no find */ cycle_schedules, /* cycle power schedules? */ - old_seed_selection; /* use vanilla afl seed selection */ + old_seed_selection, /* use vanilla afl seed selection */ + reinit_table; /* reinit the queue weight table */ u8 *virgin_bits, /* Regions yet untouched by fuzzing */ *virgin_tmout, /* Bits we haven't seen in tmouts */ diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 11adebf4..f03249e9 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -2862,6 +2862,7 @@ abandon_entry: --afl->pending_not_fuzzed; afl->queue_cur->was_fuzzed = 1; + afl->reinit_table = 1; if (afl->queue_cur->favored) { --afl->pending_favored; } } diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 811e805c..d2689c94 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -58,7 +58,8 @@ double compute_weight(afl_state_t *afl, struct queue_entry *q, if (likely(afl->schedule < RARE)) { weight *= (avg_exec_us / q->exec_us); } weight *= (log(q->bitmap_size) / avg_bitmap_size); weight *= (1 + (q->tc_ref / avg_top_size)); - if (unlikely(q->favored)) weight *= 5; + if (unlikely(q->favored)) { weight *= 5; } + if (unlikely(!q->was_fuzzed)) { weight *= 2; } return weight; @@ -198,6 +199,8 @@ void create_alias_table(afl_state_t *afl) { while (nS) afl->alias_probability[S[--nS]] = 1; + afl->reinit_table = 0; + /* #ifdef INTROSPECTION u8 fn[PATH_MAX]; diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 5f25f728..bd9b6691 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -2154,7 +2154,8 @@ int main(int argc, char **argv_orig, char **envp) { if (likely(!afl->old_seed_selection)) { - if (unlikely(prev_queued_paths < afl->queued_paths)) { + if (unlikely(prev_queued_paths < afl->queued_paths || + afl->reinit_table)) { // we have new queue entries since the last run, recreate alias table prev_queued_paths = afl->queued_paths; -- cgit 1.4.1 From 7cec158b0eb9b09160e58b289093cf615e2ca429 
Mon Sep 17 00:00:00 2001
From: yuan
Date: Wed, 14 Jul 2021 13:53:20 +0800
Subject: fix havoc comments (#1020)

---
 src/afl-fuzz-one.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'src/afl-fuzz-one.c')

diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index f03249e9..76e64f2a 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -2102,7 +2102,7 @@ havoc_stage:

     case 8 ... 9: {

-      /* Set word to interesting value, randomly choosing endian. */
+      /* Set word to interesting value, little endian. */

       if (temp_len < 2) { break; }

@@ -2119,7 +2119,7 @@ havoc_stage:

     case 10 ... 11: {

-      /* Set word to interesting value, randomly choosing endian. */
+      /* Set word to interesting value, big endian. */

       if (temp_len < 2) { break; }

@@ -2136,7 +2136,7 @@ havoc_stage:

     case 12 ... 13: {

-      /* Set dword to interesting value, randomly choosing endian. */
+      /* Set dword to interesting value, little endian. */

       if (temp_len < 4) { break; }

@@ -2153,7 +2153,7 @@ havoc_stage:

     case 14 ... 15: {

-      /* Set dword to interesting value, randomly choosing endian. */
+      /* Set dword to interesting value, big endian. */

       if (temp_len < 4) { break; }

-- cgit 1.4.1

From cd683ed2530d70c958c78395e7ee67b34c6821df Mon Sep 17 00:00:00 2001
From: Michael Rodler
Date: Thu, 15 Jul 2021 11:03:20 +0200
Subject: fixed potential UAF with custom mutator havoc on realloc

---
 src/afl-fuzz-one.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'src/afl-fuzz-one.c')

diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index 76e64f2a..7274f679 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -2057,7 +2057,7 @@ havoc_stage:
       temp_len = new_len;
       if (out_buf != custom_havoc_buf) {

-        afl_realloc(AFL_BUF_PARAM(out), temp_len);
+        out_buf = afl_realloc(AFL_BUF_PARAM(out), temp_len);
         if (unlikely(!afl->out_buf)) { PFATAL("alloc"); }
         memcpy(out_buf, custom_havoc_buf, temp_len);

-- cgit 1.4.1
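Note on the last commit above: afl_realloc() can move the output buffer, so the local out_buf copy of that pointer goes stale unless it is refreshed from the return value; the one-line fix (out_buf = afl_realloc(AFL_BUF_PARAM(out), temp_len);) does exactly that. Below is a minimal standalone sketch of the same pattern, not AFL++'s real afl_realloc()/AFL_BUF_PARAM() implementation; struct state, state_realloc_out() and the buffer sizes are made-up names used only for illustration.

/* uaf_realloc_sketch.c - hypothetical example, not AFL++ code */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct state {

  char  *out_buf;                    /* buffer owned by the state object   */
  size_t out_size;                   /* current allocation size in bytes   */

};

/* Grow the state-owned buffer to at least size_needed bytes. The block may
   move; the new location is returned (NULL on failure, old block intact). */
static char *state_realloc_out(struct state *st, size_t size_needed) {

  if (size_needed <= st->out_size) { return st->out_buf; }

  char *tmp = realloc(st->out_buf, size_needed);
  if (!tmp) { return NULL; }

  st->out_buf = tmp;                 /* the previous pointer value may now
                                        refer to freed memory              */
  st->out_size = size_needed;
  return tmp;

}

int main(void) {

  struct state st = {0};
  char *out_buf = state_realloc_out(&st, 64);    /* local cached pointer   */
  if (!out_buf) { return 1; }
  memcpy(out_buf, "seed", 5);

  /* Buggy pattern (what the commit removed): calling the helper but keeping
     the stale cached pointer. If realloc() moved the block, the next write
     through out_buf would be a use-after-free:

       state_realloc_out(&st, 1024 * 1024);
       memcpy(out_buf, some_big_input, 1024 * 1024);   // potential UAF    */

  /* Fixed pattern: refresh the cached pointer from the return value. */
  out_buf = state_realloc_out(&st, 1024 * 1024);
  if (!out_buf) { return 1; }
  memset(out_buf, 'A', 1024 * 1024);

  printf("buffer grown to %zu bytes\n", st.out_size);
  free(st.out_buf);
  return 0;

}

The design choice mirrored here is that the grow helper returns the (possibly moved) pointer and leaves the old block untouched on failure, so every cached copy of the buffer pointer must be treated as invalid after a successful grow and re-read from the return value.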