author      Dominik Maier <domenukk@gmail.com>    2020-08-18 00:50:52 +0200
committer   GitHub <noreply@github.com>           2020-08-18 00:50:52 +0200
commit      7470b475a9b5e65afa78ca493867d8c980bd66db (patch)
tree        827b38424f766c81db8c7732b6437c234e4001e1
parent      9532499ef5280ae4c7aa3d189dd7a924a38e8358 (diff)
download    afl++-7470b475a9b5e65afa78ca493867d8c980bd66db.tar.gz
Reworked maybe_grow to take a single ptr, renamed to afl_realloc (#505)
* maybe_grow takes a single ptr
* fixed use_deflate
* reworked maybe_grow_bufsize
* helper to access underlying buf
* remove redundant realloc_block
* code format
* fixes
* added unit tests
* renamed maybe_grow to afl_realloc
* BUF_PARAMS -> AFL_BUF_PARAM
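In practice, the API change listed above boils down to the calling convention sketched below. This is an illustrative before/after fragment, not a hunk from the patch: the helper name `grow_and_fill` and the zero-fill are made up for the example, while `afl_realloc()`, `afl_free()`, `PFATAL()` and `unlikely()` are the real identifiers this commit introduces or relies on.

```c
#include <string.h>
#include "alloc-inl.h" /* afl_realloc(), afl_free(); pulls in types.h/debug.h */

/* Old API:  buf = ck_maybe_grow((void **)&buf, &buf_size, n);
 *           the caller threads a separate size_t through every call and the
 *           allocator FATALs internally on failure.
 * New API:  only the pointer is passed; the capacity is stored in a header
 *           directly in front of the returned buffer, and failure is
 *           reported by returning NULL (the caller's pointer is reset).     */
static u8 *grow_and_fill(u8 **buf, size_t n) {

  *buf = afl_realloc((void **)buf, n);
  if (unlikely(!*buf)) { PFATAL("alloc"); }

  memset(*buf, 0, n);
  return *buf;

}

/* Usage:
 *   u8 *data = NULL;
 *   grow_and_fill(&data, 100);   // grows to at least 100 usable bytes
 *   grow_and_fill(&data, 50);    // no realloc, capacity already suffices
 *   afl_free(data);              // NULL-safe; releases header + buffer      */
```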
-rw-r--r-- | custom_mutators/radamsa/custom_mutator_helpers.h | 2
-rw-r--r-- | examples/afl_network_proxy/afl-network-server.c | 34
-rw-r--r-- | examples/custom_mutators/custom_mutator_helpers.h | 4
-rw-r--r-- | include/afl-fuzz.h | 25
-rw-r--r-- | include/alloc-inl.h | 177
-rw-r--r-- | src/afl-fuzz-extras.c | 12
-rw-r--r-- | src/afl-fuzz-mutators.c | 3
-rw-r--r-- | src/afl-fuzz-one.c | 79
-rw-r--r-- | src/afl-fuzz-python.c | 20
-rw-r--r-- | src/afl-fuzz-queue.c | 7
-rw-r--r-- | src/afl-fuzz-redqueen.c | 8
-rw-r--r-- | src/afl-fuzz-run.c | 7
-rw-r--r-- | src/afl-fuzz-state.c | 14
-rw-r--r-- | test/unittests/unit_maybe_alloc.c | 109
14 files changed, 272 insertions, 229 deletions
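Before the per-file hunks, a short self-contained sketch of the sizing policy that the new afl_realloc() in include/alloc-inl.h applies: the request is bumped by the size of the in-band header, allocations never start below INITIAL_GROWTH_SIZE (64), and anything above that is rounded up to the next power of two. The byte counts in the comments assume a typical LP64 build where the header (offsetof(struct afl_alloc_buf, buf)) is 8 bytes; they shift on other ABIs, and the include path assumes the sketch is compiled inside the AFL++ tree.

```c
#include <stdio.h>
#include "alloc-inl.h" /* afl_realloc(), afl_alloc_bufsize(), afl_free() */

int main(void) {

  size_t requests[] = {1, 100, 1000};
  u8    *buf = NULL;

  for (size_t i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {

    buf = afl_realloc((void **)&buf, requests[i]);
    if (!buf) { return 1; }

    /* request    1 -> 64-byte minimum allocation  ->   56 usable bytes
       request  100 -> next_pow2(100 + 8) = 128    ->  120 usable bytes
       request 1000 -> next_pow2(1000 + 8) = 1024  -> 1016 usable bytes    */
    printf("requested %zu, usable %zu\n", requests[i], afl_alloc_bufsize(buf));

  }

  afl_free(buf);
  return 0;

}
```

The usable sizes are exactly what the new unit test asserts via `size == next_pow2(size) - AFL_ALLOC_SIZE_OFFSET`.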
diff --git a/custom_mutators/radamsa/custom_mutator_helpers.h b/custom_mutators/radamsa/custom_mutator_helpers.h index 0848321f..e23c0b6a 100644 --- a/custom_mutators/radamsa/custom_mutator_helpers.h +++ b/custom_mutators/radamsa/custom_mutator_helpers.h @@ -324,7 +324,7 @@ static inline void *maybe_grow(void **buf, size_t *size, size_t size_needed) { } /* Swaps buf1 ptr and buf2 ptr, as well as their sizes */ -static inline void swap_bufs(void **buf1, size_t *size1, void **buf2, +static inline void afl_swap_bufs(void **buf1, size_t *size1, void **buf2, size_t *size2) { void * scratch_buf = *buf1; diff --git a/examples/afl_network_proxy/afl-network-server.c b/examples/afl_network_proxy/afl-network-server.c index ab7874fd..c70fd47d 100644 --- a/examples/afl_network_proxy/afl-network-server.c +++ b/examples/afl_network_proxy/afl-network-server.c @@ -73,9 +73,8 @@ static u8 *in_file, /* Minimizer input test case */ static u8 *in_data; /* Input data for trimming */ static u8 *buf2; -static s32 in_len; -static u32 map_size = MAP_SIZE; -static size_t buf2_len; +static s32 in_len; +static u32 map_size = MAP_SIZE; static volatile u8 stop_soon; /* Ctrl-C pressed? */ @@ -272,7 +271,7 @@ static void set_up_environment(afl_forkserver_t *fsrv) { setenv("QEMU_SET_ENV", buf, 1); - ck_free(buf); + afl_free(buf); } else { @@ -343,7 +342,7 @@ static void usage(u8 *argv0) { } -int recv_testcase(int s, void **buf, size_t *max_len) { +int recv_testcase(int s, void **buf) { u32 size; s32 ret; @@ -358,7 +357,8 @@ int recv_testcase(int s, void **buf, size_t *max_len) { if ((size & 0xff000000) != 0xff000000) { - *buf = ck_maybe_grow(buf, max_len, size); + *buf = afl_realloc((void **)&buf, size); + if (unlikely(!buf)) { PFATAL("Alloc"); } received = 0; // fprintf(stderr, "unCOMPRESS (%u)\n", size); while (received < size && @@ -370,7 +370,8 @@ int recv_testcase(int s, void **buf, size_t *max_len) { #ifdef USE_DEFLATE u32 clen; size -= 0xff000000; - *buf = ck_maybe_grow(buf, max_len, size); + *buf = afl_realloc((void **)&buf, size); + if (unlikely(!buf)) { PFATAL("Alloc"); } received = 0; while (received < 4 && (ret = recv(s, &clen + received, 4 - received, 0)) > 0) @@ -379,15 +380,15 @@ int recv_testcase(int s, void **buf, size_t *max_len) { // fprintf(stderr, "received clen information of %d\n", clen); if (clen < 1) FATAL("did not receive valid compressed len information: %u", clen); - buf2 = ck_maybe_grow((void **)&buf2, &buf2_len, clen); + buf2 = afl_realloc((void **)&buf2, clen); + if (unlikely(!buf2)) { PFATAL("Alloc"); } received = 0; while (received < clen && (ret = recv(s, buf2 + received, clen - received, 0)) > 0) received += ret; if (received != clen) FATAL("did not receive compressed information"); if (libdeflate_deflate_decompress(decompressor, buf2, clen, (char *)*buf, - *max_len, - &received) != LIBDEFLATE_SUCCESS) + size, &received) != LIBDEFLATE_SUCCESS) FATAL("decompression failed"); // fprintf(stderr, "DECOMPRESS (%u->%u):\n", clen, received); // for (u32 i = 0; i < clen; i++) fprintf(stderr, "%02x", buf2[i]); @@ -413,7 +414,6 @@ int recv_testcase(int s, void **buf, size_t *max_len) { int main(int argc, char **argv_orig, char **envp) { s32 opt, s, sock, on = 1, port = -1; - size_t max_len = 0; u8 mem_limit_given = 0, timeout_given = 0, unicorn_mode = 0, use_wine = 0; char **use_argv; struct sockaddr_in6 serveraddr, clientaddr; @@ -568,7 +568,8 @@ int main(int argc, char **argv_orig, char **envp) { sharedmem_t shm = {0}; fsrv->trace_bits = afl_shm_init(&shm, map_size, 0); - in_data = 
ck_maybe_grow((void **)&in_data, &max_len, 65536); + in_data = afl_realloc((void **)&in_data, 65536); + if (unlikely(!in_data)) { PFATAL("Alloc"); } atexit(at_exit_handler); setup_signal_handlers(); @@ -639,7 +640,8 @@ int main(int argc, char **argv_orig, char **envp) { #ifdef USE_DEFLATE compressor = libdeflate_alloc_compressor(1); decompressor = libdeflate_alloc_decompressor(); - buf2 = ck_maybe_grow((void **)&buf2, &buf2_len, map_size + 16); + buf2 = afl_realloc((void **)&buf2, map_size + 16); + if (unlikely(!buf2)) { PFATAL("alloc"); } lenptr = (u32 *)(buf2 + 4); fprintf(stderr, "Compiled with compression support\n"); #endif @@ -664,7 +666,7 @@ int main(int argc, char **argv_orig, char **envp) { #endif - while ((in_len = recv_testcase(s, (void **)&in_data, &max_len)) > 0) { + while ((in_len = recv_testcase(s, (void **)&in_data)) > 0) { // fprintf(stderr, "received %u\n", in_len); (void)run_target(fsrv, use_argv, in_data, in_len, 1); @@ -697,9 +699,9 @@ int main(int argc, char **argv_orig, char **envp) { afl_shm_deinit(&shm); afl_fsrv_deinit(fsrv); if (fsrv->target_path) { ck_free(fsrv->target_path); } - if (in_data) { ck_free(in_data); } + afl_free(in_data); #if USE_DEFLATE - if (buf2) { ck_free(buf2); } + afl_free(buf2); libdeflate_free_compressor(compressor); libdeflate_free_decompressor(decompressor); #endif diff --git a/examples/custom_mutators/custom_mutator_helpers.h b/examples/custom_mutators/custom_mutator_helpers.h index 0848321f..ad5acb08 100644 --- a/examples/custom_mutators/custom_mutator_helpers.h +++ b/examples/custom_mutators/custom_mutator_helpers.h @@ -324,8 +324,8 @@ static inline void *maybe_grow(void **buf, size_t *size, size_t size_needed) { } /* Swaps buf1 ptr and buf2 ptr, as well as their sizes */ -static inline void swap_bufs(void **buf1, size_t *size1, void **buf2, - size_t *size2) { +static inline void afl_swap_bufs(void **buf1, size_t *size1, void **buf2, + size_t *size2) { void * scratch_buf = *buf1; size_t scratch_size = *size1; diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index ca7d10fe..dca395aa 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -126,6 +126,9 @@ #define STAGE_BUF_SIZE (64) /* usable size for stage name buf in afl_state */ +// Little helper to access the ptr to afl->##name_buf - for use in afl_realloc. 
+#define AFL_BUF_PARAM(name) ((void **)&afl->name##_buf) + extern s8 interesting_8[INTERESTING_8_LEN]; extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN]; extern s32 @@ -572,7 +575,6 @@ typedef struct afl_state { // growing buf struct queue_entry **queue_buf; - size_t queue_size; struct queue_entry **top_rated; /* Top entries for bitmap bytes */ @@ -633,24 +635,18 @@ typedef struct afl_state { /*needed for afl_fuzz_one */ // TODO: see which we can reuse - u8 * out_buf; - size_t out_size; + u8 *out_buf; - u8 * out_scratch_buf; - size_t out_scratch_size; + u8 *out_scratch_buf; - u8 * eff_buf; - size_t eff_size; + u8 *eff_buf; - u8 * in_buf; - size_t in_size; + u8 *in_buf; - u8 * in_scratch_buf; - size_t in_scratch_size; + u8 *in_scratch_buf; - u8 * ex_buf; - size_t ex_size; - u32 custom_mutators_count; + u8 *ex_buf; + u32 custom_mutators_count; list_t custom_mutator_list; @@ -666,7 +662,6 @@ struct custom_mutator { char * name_short; void * dh; u8 * post_process_buf; - size_t post_process_size; u8 stacked_custom_prob, stacked_custom; void *data; /* custom mutator data ptr */ diff --git a/include/alloc-inl.h b/include/alloc-inl.h index 306cc622..90701d18 100644 --- a/include/alloc-inl.h +++ b/include/alloc-inl.h @@ -30,12 +30,13 @@ #include <stdio.h> #include <stdlib.h> #include <string.h> +#include <stddef.h> #include "config.h" #include "types.h" #include "debug.h" -/* Initial size used for ck_maybe_grow */ +/* Initial size used for afl_realloc */ #define INITIAL_GROWTH_SIZE (64) // Be careful! _WANT_ORIGINAL_AFL_ALLOC is not compatible with custom mutators @@ -76,10 +77,6 @@ \ } while (0) - /* Allocator increments for ck_realloc_block(). */ - - #define ALLOC_BLK_INC 256 - /* Allocate a buffer, explicitly not zeroing it. Returns NULL for zero-sized requests. */ @@ -149,15 +146,6 @@ static inline void *DFL_ck_realloc(void *orig, u32 size) { } -/* Re-allocate a buffer with ALLOC_BLK_INC increments (used to speed up - repeated small reallocs without complicating the user code). */ - -static inline void *DFL_ck_realloc_block(void *orig, u32 size) { - - return DFL_ck_realloc(orig, size); - -} - /* Create a buffer with a copy of a string. Returns NULL for NULL inputs. */ static inline u8 *DFL_ck_strdup(u8 *str) { @@ -183,7 +171,6 @@ static inline u8 *DFL_ck_strdup(u8 *str) { #define ck_alloc DFL_ck_alloc #define ck_alloc_nozero DFL_ck_alloc_nozero #define ck_realloc DFL_ck_realloc - #define ck_realloc_block DFL_ck_realloc_block #define ck_strdup DFL_ck_strdup #define ck_free DFL_ck_free @@ -239,10 +226,6 @@ static inline u8 *DFL_ck_strdup(u8 *str) { #define ALLOC_OFF_HEAD 8 #define ALLOC_OFF_TOTAL (ALLOC_OFF_HEAD + 1) - /* Allocator increments for ck_realloc_block(). */ - - #define ALLOC_BLK_INC 256 - /* Sanity-checking macros for pointers. */ #define CHECK_PTR(_p) \ @@ -402,29 +385,6 @@ static inline void *DFL_ck_realloc(void *orig, u32 size) { } -/* Re-allocate a buffer with ALLOC_BLK_INC increments (used to speed up - repeated small reallocs without complicating the user code). */ - -static inline void *DFL_ck_realloc_block(void *orig, u32 size) { - - #ifndef DEBUG_BUILD - - if (orig) { - - CHECK_PTR(orig); - - if (ALLOC_S(orig) >= size) return orig; - - size += ALLOC_BLK_INC; - - } - - #endif /* !DEBUG_BUILD */ - - return DFL_ck_realloc(orig, size); - -} - /* Create a buffer with a copy of a string. Returns NULL for NULL inputs. 
*/ static inline u8 *DFL_ck_strdup(u8 *str) { @@ -458,7 +418,6 @@ static inline u8 *DFL_ck_strdup(u8 *str) { #define ck_alloc DFL_ck_alloc #define ck_alloc_nozero DFL_ck_alloc_nozero #define ck_realloc DFL_ck_realloc - #define ck_realloc_block DFL_ck_realloc_block #define ck_strdup DFL_ck_strdup #define ck_free DFL_ck_free @@ -528,8 +487,8 @@ static inline void TRK_alloc_buf(void *ptr, const char *file, const char *func, /* No space available - allocate more. */ - TRK[bucket] = DFL_ck_realloc_block( - TRK[bucket], (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj)); + TRK[bucket] = DFL_ck_realloc(TRK[bucket], + (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj)); TRK[bucket][i].ptr = ptr; TRK[bucket][i].file = (char *)file; @@ -604,16 +563,6 @@ static inline void *TRK_ck_realloc(void *orig, u32 size, const char *file, } -static inline void *TRK_ck_realloc_block(void *orig, u32 size, const char *file, - const char *func, u32 line) { - - void *ret = DFL_ck_realloc_block(orig, size); - TRK_free_buf(orig, file, func, line); - TRK_alloc_buf(ret, file, func, line); - return ret; - -} - static inline void *TRK_ck_strdup(u8 *str, const char *file, const char *func, u32 line) { @@ -641,9 +590,6 @@ static inline void TRK_ck_free(void *ptr, const char *file, const char *func, #define ck_realloc(_p1, _p2) \ TRK_ck_realloc(_p1, _p2, __FILE__, __FUNCTION__, __LINE__) - #define ck_realloc_block(_p1, _p2) \ - TRK_ck_realloc_block(_p1, _p2, __FILE__, __FUNCTION__, __LINE__) - #define ck_strdup(_p1) TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__) #define ck_free(_p1) TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__) @@ -657,11 +603,14 @@ static inline void TRK_ck_free(void *ptr, const char *file, const char *func, */ static inline size_t next_pow2(size_t in) { - if (in == 0 || in > (size_t)-1) { - - return 0; /* avoid undefined behaviour under-/overflow */ + // Commented this out as this behavior doesn't change, according to unittests + // if (in == 0 || in > (size_t)-1) { - } + // + // return 0; /* avoid undefined behaviour under-/overflow + // */ + // + // } size_t out = in - 1; out |= out >> 1; @@ -673,32 +622,32 @@ static inline size_t next_pow2(size_t in) { } -/* This function makes sure *size is > size_needed after call. - It will realloc *buf otherwise. - *size will grow exponentially as per: - https://blog.mozilla.org/nnethercote/2014/11/04/please-grow-your-buffers-exponentially/ - Will return NULL and free *buf if size_needed is <1 or realloc failed. - @return For convenience, this function returns *buf. - */ -static inline void *maybe_grow(void **buf, size_t *size, size_t size_needed) { +/* AFL alloc buffer, the struct is here so we don't need to do fancy ptr + * arithmetics */ +struct afl_alloc_buf { - /* No need to realloc */ - if (likely(size_needed && *size >= size_needed)) { return *buf; } + /* The complete allocated size, including the header of len + * AFL_ALLOC_SIZE_OFFSET */ + size_t complete_size; + /* ptr to the first element of the actual buffer */ + u8 buf[0]; - /* No initial size was set */ - if (size_needed < INITIAL_GROWTH_SIZE) { size_needed = INITIAL_GROWTH_SIZE; } +}; - /* grow exponentially */ - size_t next_size = next_pow2(size_needed); +#define AFL_ALLOC_SIZE_OFFSET (offsetof(struct afl_alloc_buf, buf)) - /* handle overflow and zero size_needed */ - if (!next_size) { next_size = size_needed; } +/* Returs the container element to this ptr */ +static inline struct afl_alloc_buf *afl_alloc_bufptr(void *buf) { - /* alloc */ - *buf = realloc(*buf, next_size); - *size = *buf ? 
next_size : 0; + return (struct afl_alloc_buf *)((u8 *)buf - AFL_ALLOC_SIZE_OFFSET); - return *buf; +} + +/* Gets the maximum size of the buf contents (ptr->complete_size - + * AFL_ALLOC_SIZE_OFFSET) */ +static inline size_t afl_alloc_bufsize(void *buf) { + + return afl_alloc_bufptr(buf)->complete_size - AFL_ALLOC_SIZE_OFFSET; } @@ -706,45 +655,71 @@ static inline void *maybe_grow(void **buf, size_t *size, size_t size_needed) { It will realloc *buf otherwise. *size will grow exponentially as per: https://blog.mozilla.org/nnethercote/2014/11/04/please-grow-your-buffers-exponentially/ - Will FATAL if size_needed is <1. + Will return NULL and free *buf if size_needed is <1 or realloc failed. @return For convenience, this function returns *buf. */ -static inline void *ck_maybe_grow(void **buf, size_t *size, - size_t size_needed) { +static inline void *afl_realloc(void **buf, size_t size_needed) { + + struct afl_alloc_buf *new_buf = NULL; - /* Oops. found a bug? */ - if (unlikely(size_needed < 1)) { FATAL("cannot grow to non-positive size"); } + size_t current_size = 0; + size_t next_size = 0; + + if (likely(*buf)) { + + /* the size is always stored at buf - 1*size_t */ + new_buf = afl_alloc_bufptr(*buf); + current_size = new_buf->complete_size; + + } + + size_needed += AFL_ALLOC_SIZE_OFFSET; /* No need to realloc */ - if (likely(*size >= size_needed)) { return *buf; } + if (likely(current_size >= size_needed)) { return *buf; } /* No initial size was set */ - if (size_needed < INITIAL_GROWTH_SIZE) { size_needed = INITIAL_GROWTH_SIZE; } + if (size_needed < INITIAL_GROWTH_SIZE) { - /* grow exponentially */ - size_t next_size = next_pow2(size_needed); + next_size = INITIAL_GROWTH_SIZE; - /* handle overflow */ - if (!next_size) { next_size = size_needed; } + } else { + + /* grow exponentially */ + next_size = next_pow2(size_needed); + + /* handle overflow: fall back to the original size_needed */ + if (unlikely(!next_size)) { next_size = size_needed; } + + } /* alloc */ - *buf = ck_realloc(*buf, next_size); - *size = next_size; + new_buf = realloc(new_buf, next_size); + if (unlikely(!new_buf)) { + *buf = NULL; + return NULL; + + } + + new_buf->complete_size = next_size; + *buf = (void *)(new_buf->buf); return *buf; } +static inline void afl_free(void *buf) { + + if (buf) { free(afl_alloc_bufptr(buf)); } + +} + /* Swaps buf1 ptr and buf2 ptr, as well as their sizes */ -static inline void swap_bufs(void **buf1, size_t *size1, void **buf2, - size_t *size2) { +static inline void afl_swap_bufs(void **buf1, void **buf2) { - void * scratch_buf = *buf1; - size_t scratch_size = *size1; + void *scratch_buf = *buf1; *buf1 = *buf2; - *size1 = *size2; *buf2 = scratch_buf; - *size2 = scratch_size; } diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c index 17f02984..88262a98 100644 --- a/src/afl-fuzz-extras.c +++ b/src/afl-fuzz-extras.c @@ -152,8 +152,10 @@ void load_extras_file(afl_state_t *afl, u8 *fname, u32 *min_len, u32 *max_len, /* Okay, let's allocate memory and copy data between "...", handling \xNN escaping, \\, and \". 
*/ - afl->extras = ck_realloc_block( - afl->extras, (afl->extras_cnt + 1) * sizeof(struct extra_data)); + afl->extras = + afl_realloc((void **)&afl->extras, + (afl->extras_cnt + 1) * sizeof(struct extra_data)); + if (unlikely(!afl->extras)) { PFATAL("alloc"); } wptr = afl->extras[afl->extras_cnt].data = ck_alloc(rptr - lptr); @@ -296,8 +298,10 @@ void load_extras(afl_state_t *afl, u8 *dir) { if (min_len > st.st_size) { min_len = st.st_size; } if (max_len < st.st_size) { max_len = st.st_size; } - afl->extras = ck_realloc_block( - afl->extras, (afl->extras_cnt + 1) * sizeof(struct extra_data)); + afl->extras = + afl_realloc((void **)&afl->extras, + (afl->extras_cnt + 1) * sizeof(struct extra_data)); + if (unlikely(!afl->extras)) { PFATAL("alloc"); } afl->extras[afl->extras_cnt].data = ck_alloc(st.st_size); afl->extras[afl->extras_cnt].len = st.st_size; diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c index 0fa646f9..22578df9 100644 --- a/src/afl-fuzz-mutators.c +++ b/src/afl-fuzz-mutators.c @@ -122,9 +122,8 @@ void destroy_custom_mutators(afl_state_t *afl) { if (el->post_process_buf) { - ck_free(el->post_process_buf); + afl_free(el->post_process_buf); el->post_process_buf = NULL; - el->post_process_size = 0; } diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 0a4be320..3bf0c195 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -364,8 +364,6 @@ static void locate_diffs(u8 *ptr1, u8 *ptr2, u32 len, s32 *first, s32 *last) { #endif /* !IGNORE_FINDS */ -#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size - /* Take the current entry from the queue, fuzz it for a while. This function is a tad too long... returns 0 if fuzzed successfully, 1 if skipped or bailed out. */ @@ -384,9 +382,6 @@ u8 fuzz_one_original(afl_state_t *afl) { u8 a_collect[MAX_AUTO_EXTRA]; u32 a_len = 0; -/* Not pretty, but saves a lot of writing */ -#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size - #ifdef IGNORE_FINDS /* In IGNORE_FINDS mode, skip any entries that weren't in the @@ -484,7 +479,8 @@ u8 fuzz_one_original(afl_state_t *afl) { single byte anyway, so it wouldn't give us any performance or memory usage benefits. */ - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + out_buf = afl_realloc(AFL_BUF_PARAM(out), len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } afl->subseq_tmouts = 0; @@ -800,7 +796,8 @@ u8 fuzz_one_original(afl_state_t *afl) { /* Initialize effector map for the next step (see comments below). Always flag first and last byte as doing something. 
*/ - eff_map = ck_maybe_grow(BUF_PARAMS(eff), EFF_ALEN(len)); + eff_map = afl_realloc(AFL_BUF_PARAM(eff), EFF_ALEN(len)); + if (unlikely(!eff_map)) { PFATAL("alloc"); } eff_map[0] = 1; if (EFF_APOS(len - 1) != 0) { @@ -1557,7 +1554,8 @@ skip_interest: orig_hit_cnt = new_hit_cnt; - ex_tmp = ck_maybe_grow(BUF_PARAMS(ex), len + MAX_DICT_FILE); + ex_tmp = afl_realloc(AFL_BUF_PARAM(ex), len + MAX_DICT_FILE); + if (unlikely(!ex_tmp)) { PFATAL("alloc"); } for (i = 0; i <= (u32)len; ++i) { @@ -1733,7 +1731,8 @@ custom_mutator_stage: fd = open(target->fname, O_RDONLY); if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); } - new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), target->len); + new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), target->len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } ck_read(fd, new_buf, target->len, target->fname); close(fd); @@ -1908,7 +1907,8 @@ havoc_stage: temp_len = new_len; if (out_buf != custom_havoc_buf) { - ck_maybe_grow(BUF_PARAMS(out), temp_len); + afl_realloc(AFL_BUF_PARAM(out), temp_len); + if (unlikely(!afl->out_buf)) { PFATAL("alloc"); } memcpy(out_buf, custom_havoc_buf, temp_len); } @@ -2147,7 +2147,8 @@ havoc_stage: clone_to = rand_below(afl, temp_len); new_buf = - ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len); + afl_realloc(AFL_BUF_PARAM(out_scratch), temp_len + clone_len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } /* Head */ @@ -2172,7 +2173,7 @@ havoc_stage: memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, temp_len - clone_to); - swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch)); + afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); out_buf = new_buf; new_buf = NULL; temp_len += clone_len; @@ -2287,7 +2288,8 @@ havoc_stage: if (temp_len + extra_len >= MAX_FILE) { break; } - out_buf = ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len); + out_buf = afl_realloc(AFL_BUF_PARAM(out), temp_len + extra_len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } /* Tail */ memmove(out_buf + insert_at + extra_len, out_buf + insert_at, @@ -2343,7 +2345,8 @@ havoc_stage: } u32 new_len = target->len; - u8 *new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), new_len); + u8 *new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), new_len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } ck_read(fd, new_buf, new_len, target->fname); @@ -2383,7 +2386,8 @@ havoc_stage: clone_to = rand_below(afl, temp_len); u8 *temp_buf = - ck_maybe_grow(BUF_PARAMS(out_scratch), temp_len + clone_len); + afl_realloc(AFL_BUF_PARAM(out_scratch), temp_len + clone_len); + if (unlikely(!temp_buf)) { PFATAL("alloc"); } /* Head */ @@ -2397,7 +2401,7 @@ havoc_stage: memcpy(temp_buf + clone_to + clone_len, out_buf + clone_to, temp_len - clone_to); - swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch)); + afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); out_buf = temp_buf; temp_len += clone_len; @@ -2418,7 +2422,8 @@ havoc_stage: /* out_buf might have been mangled a bit, so let's restore it to its original size and shape. 
*/ - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + out_buf = afl_realloc(AFL_BUF_PARAM(out), len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } temp_len = len; memcpy(out_buf, in_buf, len); @@ -2513,7 +2518,8 @@ retry_splicing: if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); } - new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), target->len); + new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } ck_read(fd, new_buf, target->len, target->fname); @@ -2535,10 +2541,11 @@ retry_splicing: len = target->len; memcpy(new_buf, in_buf, split_at); - swap_bufs(BUF_PARAMS(in), BUF_PARAMS(in_scratch)); + afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); in_buf = new_buf; - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + out_buf = afl_realloc(AFL_BUF_PARAM(out), len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } memcpy(out_buf, in_buf, len); goto custom_mutator_stage; @@ -2679,7 +2686,8 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { single byte anyway, so it wouldn't give us any performance or memory usage benefits. */ - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + out_buf = afl_realloc(AFL_BUF_PARAM(out), len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } afl->subseq_tmouts = 0; @@ -3001,7 +3009,8 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { /* Initialize effector map for the next step (see comments below). Always flag first and last byte as doing something. */ - eff_map = ck_maybe_grow(BUF_PARAMS(eff), EFF_ALEN(len)); + eff_map = afl_realloc(AFL_BUF_PARAM(eff), EFF_ALEN(len)); + if (unlikely(!eff_map)) { PFATAL("alloc"); } eff_map[0] = 1; if (EFF_APOS(len - 1) != 0) { @@ -3758,7 +3767,8 @@ skip_interest: orig_hit_cnt = new_hit_cnt; - ex_tmp = ck_maybe_grow(BUF_PARAMS(ex), len + MAX_DICT_FILE); + ex_tmp = afl_realloc(AFL_BUF_PARAM(ex), len + MAX_DICT_FILE); + if (unlikely(!ex_tmp)) { PFATAL("alloc"); } for (i = 0; i <= (u32)len; ++i) { @@ -4196,8 +4206,9 @@ pacemaker_fuzzing: clone_to = rand_below(afl, temp_len); - new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), - temp_len + clone_len); + new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), + temp_len + clone_len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } /* Head */ @@ -4223,7 +4234,7 @@ pacemaker_fuzzing: memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, temp_len - clone_to); - swap_bufs(BUF_PARAMS(out), BUF_PARAMS(out_scratch)); + afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); out_buf = new_buf; temp_len += clone_len; MOpt_globals.cycles_v2[STAGE_Clone75] += 1; @@ -4340,7 +4351,8 @@ pacemaker_fuzzing: if (temp_len + extra_len >= MAX_FILE) break; - out_buf = ck_maybe_grow(BUF_PARAMS(out), temp_len + extra_len); + out_buf = afl_realloc(AFL_BUF_PARAM(out), temp_len + extra_len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } /* Tail */ memmove(out_buf + insert_at + extra_len, out_buf + insert_at, @@ -4373,7 +4385,8 @@ pacemaker_fuzzing: /* out_buf might have been mangled a bit, so let's restore it to its original size and shape. 
*/ - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + out_buf = afl_realloc(AFL_BUF_PARAM(out), len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } temp_len = len; memcpy(out_buf, in_buf, len); @@ -4518,7 +4531,8 @@ pacemaker_fuzzing: if (fd < 0) { PFATAL("Unable to open '%s'", target->fname); } - new_buf = ck_maybe_grow(BUF_PARAMS(in_scratch), target->len); + new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } ck_read(fd, new_buf, target->len, target->fname); @@ -4545,9 +4559,10 @@ pacemaker_fuzzing: len = target->len; memcpy(new_buf, in_buf, split_at); - swap_bufs(BUF_PARAMS(in), BUF_PARAMS(in_scratch)); + afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); in_buf = new_buf; - out_buf = ck_maybe_grow(BUF_PARAMS(out), len); + out_buf = afl_realloc(AFL_BUF_PARAM(out), len); + if (unlikely(!out_buf)) { PFATAL("alloc"); } memcpy(out_buf, in_buf, len); goto havoc_stage_puppet; @@ -4880,5 +4895,3 @@ u8 fuzz_one(afl_state_t *afl) { } -#undef BUF_PARAMS - diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c index a077469e..e540f548 100644 --- a/src/afl-fuzz-python.c +++ b/src/afl-fuzz-python.c @@ -40,9 +40,7 @@ static void *unsupported(afl_state_t *afl, unsigned int seed) { /* sorry for this makro... it just fills in `&py_mutator->something_buf, &py_mutator->something_size`. */ - #define BUF_PARAMS(name) \ - (void **)&((py_mutator_t *)py_mutator)->name##_buf, \ - &((py_mutator_t *)py_mutator)->name##_size + #define BUF_PARAMS(name) (void **)&((py_mutator_t *)py_mutator)->name##_buf static size_t fuzz_py(void *py_mutator, u8 *buf, size_t buf_size, u8 **out_buf, u8 *add_buf, size_t add_buf_size, size_t max_size) { @@ -97,7 +95,8 @@ static size_t fuzz_py(void *py_mutator, u8 *buf, size_t buf_size, u8 **out_buf, mutated_size = PyByteArray_Size(py_value); - *out_buf = ck_maybe_grow(BUF_PARAMS(fuzz), mutated_size); + *out_buf = afl_realloc(BUF_PARAMS(fuzz), mutated_size); + if (unlikely(!out_buf)) { PFATAL("alloc"); } memcpy(*out_buf, PyByteArray_AsString(py_value), mutated_size); Py_DECREF(py_value); @@ -317,7 +316,6 @@ struct custom_mutator *load_custom_mutator_py(afl_state_t *afl, mutator = ck_alloc(sizeof(struct custom_mutator)); mutator->post_process_buf = NULL; - mutator->post_process_size = 0; mutator->name = module_name; ACTF("Loading Python mutator library from '%s'...", module_name); @@ -419,7 +417,11 @@ size_t post_process_py(void *py_mutator, u8 *buf, size_t buf_size, py_out_buf_size = PyByteArray_Size(py_value); - ck_maybe_grow(BUF_PARAMS(post_process), py_out_buf_size); + if (unlikely(!afl_realloc(BUF_PARAMS(post_process), py_out_buf_size))) { + + PFATAL("alloc"); + + } memcpy(py->post_process_buf, PyByteArray_AsString(py_value), py_out_buf_size); @@ -527,7 +529,8 @@ size_t trim_py(void *py_mutator, u8 **out_buf) { if (py_value != NULL) { ret = PyByteArray_Size(py_value); - *out_buf = ck_maybe_grow(BUF_PARAMS(trim), ret); + *out_buf = afl_realloc(BUF_PARAMS(trim), ret); + if (unlikely(!out_buf)) { PFATAL("alloc"); } memcpy(*out_buf, PyByteArray_AsString(py_value), ret); Py_DECREF(py_value); @@ -592,7 +595,8 @@ size_t havoc_mutation_py(void *py_mutator, u8 *buf, size_t buf_size, } else { /* A new buf is needed... 
*/ - *out_buf = ck_maybe_grow(BUF_PARAMS(havoc), mutated_size); + *out_buf = afl_realloc(BUF_PARAMS(havoc), mutated_size); + if (unlikely(!out_buf)) { PFATAL("alloc"); } } diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index f35df914..0c472845 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -26,8 +26,6 @@ #include <limits.h> #include <ctype.h> -#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size - /* Mark deterministic checks as done for a particular queue entry. We use the .state file to avoid repeating deterministic fuzzing when resuming aborted scans. */ @@ -248,8 +246,9 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { } - struct queue_entry **queue_buf = ck_maybe_grow( - BUF_PARAMS(queue), afl->queued_paths * sizeof(struct queue_entry *)); + struct queue_entry **queue_buf = afl_realloc( + AFL_BUF_PARAM(queue), afl->queued_paths * sizeof(struct queue_entry *)); + if (unlikely(!queue_buf)) { PFATAL("alloc"); } queue_buf[afl->queued_paths - 1] = q; afl->last_path_time = get_cur_time(); diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index f21dd0b0..1ae6ab54 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -313,8 +313,6 @@ static unsigned long long strntoull(const char *str, size_t sz, char **end, } -#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size - static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, u64 pattern, u64 repl, u64 o_pattern, u32 idx, u8 *orig_buf, u8 *buf, u32 len, u8 do_reverse, @@ -358,7 +356,8 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, size_t old_len = endptr - buf_8; size_t num_len = snprintf(NULL, 0, "%lld", num); - u8 *new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), len + num_len); + u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } memcpy(new_buf, buf, idx); snprintf(new_buf + idx, num_len, "%lld", num); @@ -371,7 +370,8 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, size_t old_len = endptr - buf_8; size_t num_len = snprintf(NULL, 0, "%llu", unum); - u8 *new_buf = ck_maybe_grow(BUF_PARAMS(out_scratch), len + num_len); + u8 *new_buf = afl_realloc((void **)&afl->out_scratch_buf, len + num_len); + if (unlikely(!new_buf)) { PFATAL("alloc"); } memcpy(new_buf, buf, idx); snprintf(new_buf + idx, num_len, "%llu", unum); diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index d3f823c9..d71ec339 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -135,8 +135,6 @@ write_to_testcase(afl_state_t *afl, void *mem, u32 len) { } -#define BUF_PARAMS(name) (void **)&afl->name##_buf, &afl->name##_size - /* The same, but with an adjustable gap. Used for trimming. */ static void write_with_gap(afl_state_t *afl, u8 *mem, u32 len, u32 skip_at, @@ -149,7 +147,8 @@ static void write_with_gap(afl_state_t *afl, u8 *mem, u32 len, u32 skip_at, This memory is used to carry out the post_processing(if present) after copying the testcase by removing the gaps. This can break though */ - u8 *mem_trimmed = ck_maybe_grow(BUF_PARAMS(out_scratch), len - skip_len + 1); + u8 *mem_trimmed = afl_realloc(AFL_BUF_PARAM(out_scratch), len - skip_len + 1); + if (unlikely(!mem_trimmed)) { PFATAL("alloc"); } ssize_t new_size = len - skip_len; void * new_mem = mem; @@ -288,8 +287,6 @@ static void write_with_gap(afl_state_t *afl, u8 *mem, u32 len, u32 skip_at, } -#undef BUF_PARAMS - /* Calibrate a new test case. 
This is done when processing the input directory to warn about flaky or otherwise problematic test cases early on; and when new paths are discovered to detect variable behavior and so on. */ diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index d4de91a4..e68e7786 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -421,13 +421,13 @@ void afl_state_deinit(afl_state_t *afl) { if (afl->pass_stats) { ck_free(afl->pass_stats); } if (afl->orig_cmp_map) { ck_free(afl->orig_cmp_map); } - if (afl->queue_buf) { free(afl->queue_buf); } - if (afl->out_buf) { free(afl->out_buf); } - if (afl->out_scratch_buf) { free(afl->out_scratch_buf); } - if (afl->eff_buf) { free(afl->eff_buf); } - if (afl->in_buf) { free(afl->in_buf); } - if (afl->in_scratch_buf) { free(afl->in_scratch_buf); } - if (afl->ex_buf) { free(afl->ex_buf); } + afl_free(afl->queue_buf); + afl_free(afl->out_buf); + afl_free(afl->out_scratch_buf); + afl_free(afl->eff_buf); + afl_free(afl->in_buf); + afl_free(afl->in_scratch_buf); + afl_free(afl->ex_buf); ck_free(afl->virgin_bits); ck_free(afl->virgin_tmout); diff --git a/test/unittests/unit_maybe_alloc.c b/test/unittests/unit_maybe_alloc.c index 889ced8a..e452e2f2 100644 --- a/test/unittests/unit_maybe_alloc.c +++ b/test/unittests/unit_maybe_alloc.c @@ -42,7 +42,24 @@ int __wrap_printf(const char *format, ...) { return 1; } -#define BUF_PARAMS (void **)&buf, &size +#define VOID_BUF (void **)&buf + +static void *create_fake_maybe_grow_of(size_t size) { + + size += AFL_ALLOC_SIZE_OFFSET; + + // fake a realloc buf + + struct afl_alloc_buf *buf = malloc(size); + if (!buf) { + perror("Could not allocate fake buf"); + return NULL; + } + buf->complete_size = size; // The size + void *actual_buf = (void *)(buf->buf); + return actual_buf; + +} /* static int setup(void **state) { @@ -52,29 +69,55 @@ static int setup(void **state) { } */ +static void test_pow2(void **state) { + (void)state; + + assert_int_equal(next_pow2(64), 64); + assert_int_equal(next_pow2(63), 64); + assert_int_not_equal(next_pow2(65), 65); + assert_int_equal(next_pow2(0x100), 0x100); + assert_int_equal(next_pow2(0x180), 0x200); + assert_int_equal(next_pow2(108), 0x80); + assert_int_equal(next_pow2(0), 0); + assert_int_equal(next_pow2(1), 1); + assert_int_equal(next_pow2(2), 2); + assert_int_equal(next_pow2(3), 4); + assert_int_equal(next_pow2(0xFFFFFF), 0x1000000); + assert_int_equal(next_pow2(0xFFFFFFF), 0x10000000); + assert_int_equal(next_pow2(0xFFFFFF0), 0x10000000); + assert_int_equal(next_pow2(SIZE_MAX), 0); + assert_int_equal(next_pow2(-1), 0); + assert_int_equal(next_pow2(-2), 0); + +} + static void test_null_allocs(void **state) { (void)state; void *buf = NULL; - size_t size = 0; - void *ptr = ck_maybe_grow(BUF_PARAMS, 100); + void *ptr = afl_realloc(VOID_BUF, 100); + if (unlikely(!buf)) { PFATAL("alloc"); } + size_t size = afl_alloc_bufsize(buf); assert_true(buf == ptr); assert_true(size >= 100); - ck_free(ptr); + afl_free(ptr); } static void test_nonpow2_size(void **state) { (void)state; - char *buf = ck_alloc(150); - size_t size = 150; + char *buf = create_fake_maybe_grow_of(150); + buf[140] = '5'; - char *ptr = ck_maybe_grow(BUF_PARAMS, 160); + + char *ptr = afl_realloc(VOID_BUF, 160); + if (unlikely(!ptr)) { PFATAL("alloc"); } + size_t size = afl_alloc_bufsize(buf); assert_ptr_equal(buf, ptr); assert_true(size >= 160); assert_true(buf[140] == '5'); - ck_free(ptr); + afl_free(ptr); } @@ -83,32 +126,37 @@ static void test_zero_size(void **state) { char *buf = NULL; size_t size = 0; - 
assert_non_null(maybe_grow(BUF_PARAMS, 0)); - free(buf); + char *new_buf = afl_realloc(VOID_BUF, 0); + assert_non_null(new_buf); + assert_ptr_equal(buf, new_buf); + afl_free(buf); buf = NULL; size = 0; - char *ptr = ck_maybe_grow(BUF_PARAMS, 100); + char *ptr = afl_realloc(VOID_BUF, 100); + if (unlikely(!ptr)) { PFATAL("alloc"); } + size = afl_alloc_bufsize(buf); assert_non_null(ptr); assert_ptr_equal(buf, ptr); assert_true(size >= 100); - expect_assert_failure(ck_maybe_grow(BUF_PARAMS, 0)); - - ck_free(ptr); + afl_free(ptr); } + static void test_unchanged_size(void **state) { (void)state; - void *buf = ck_alloc(100); - size_t size = 100; - void *buf_before = buf; - void *buf_after = ck_maybe_grow(BUF_PARAMS, 100); - assert_ptr_equal(buf, buf_after); + // fake a realloc buf + void *actual_buf = create_fake_maybe_grow_of(100); + + void *buf_before = actual_buf; + void *buf_after = afl_realloc(&actual_buf, 100); + if (unlikely(!buf_after)) { PFATAL("alloc"); } + assert_ptr_equal(actual_buf, buf_after); assert_ptr_equal(buf_after, buf_before); - ck_free(buf); + afl_free(buf_after); } @@ -118,29 +166,35 @@ static void test_grow_multiple(void **state) { char *buf = NULL; size_t size = 0; - char *ptr = ck_maybe_grow(BUF_PARAMS, 100); + char *ptr = afl_realloc(VOID_BUF, 100); + if (unlikely(!ptr)) { PFATAL("alloc"); } + size = afl_alloc_bufsize(ptr); assert_ptr_equal(ptr, buf); assert_true(size >= 100); - assert_int_equal(size, next_pow2(size)); + assert_int_equal(size, next_pow2(size) - AFL_ALLOC_SIZE_OFFSET); buf[50] = '5'; - ptr = (char *)ck_maybe_grow(BUF_PARAMS, 1000); + ptr = (char *)afl_realloc(VOID_BUF, 1000); + if (unlikely(!ptr)) { PFATAL("alloc"); } + size = afl_alloc_bufsize(ptr); assert_ptr_equal(ptr, buf); assert_true(size >= 100); - assert_int_equal(size, next_pow2(size)); + assert_int_equal(size, next_pow2(size) - AFL_ALLOC_SIZE_OFFSET); buf[500] = '5'; - ptr = (char *)ck_maybe_grow(BUF_PARAMS, 10000); + ptr = (char *)afl_realloc(VOID_BUF, 10000); + if (unlikely(!ptr)) { PFATAL("alloc"); } + size = afl_alloc_bufsize(ptr); assert_ptr_equal(ptr, buf); assert_true(size >= 10000); - assert_int_equal(size, next_pow2(size)); + assert_int_equal(size, next_pow2(size) - AFL_ALLOC_SIZE_OFFSET); buf[5000] = '5'; assert_int_equal(buf[50], '5'); assert_int_equal(buf[500], '5'); assert_int_equal(buf[5000], '5'); - ck_free(buf); + afl_free(buf); } @@ -157,6 +211,7 @@ int main(int argc, char **argv) { (void)argv; const struct CMUnitTest tests[] = { + cmocka_unit_test(test_pow2), cmocka_unit_test(test_null_allocs), cmocka_unit_test(test_nonpow2_size), cmocka_unit_test(test_zero_size), |
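Finally, to tie the renames together: the call-site pattern that src/afl-fuzz-one.c now follows throughout is roughly the composite sketch below (not a verbatim hunk from the patch; the function name and the clone_from/clone_to/clone_len parameters are stand-ins). The scratch buffer is grown through AFL_BUF_PARAM, a NULL return is treated as fatal, and the result is handed back with the now two-argument afl_swap_bufs(), since the sizes travel inside the buffers' own headers.

```c
#include <string.h>
#include "afl-fuzz.h" /* afl_state_t, AFL_BUF_PARAM(), afl_realloc(), afl_swap_bufs() */

static void clone_bytes_example(afl_state_t *afl, u8 **out_buf, u32 *temp_len,
                                u32 clone_from, u32 clone_to, u32 clone_len) {

  u8 *new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), *temp_len + clone_len);
  if (unlikely(!new_buf)) { PFATAL("alloc"); }

  memcpy(new_buf, *out_buf, clone_to);                           /* head  */
  memcpy(new_buf + clone_to, *out_buf + clone_from, clone_len);  /* clone */
  memcpy(new_buf + clone_to + clone_len, *out_buf + clone_to,    /* tail  */
         *temp_len - clone_to);

  afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch));
  *out_buf   = new_buf;
  *temp_len += clone_len;

}
```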