author     van Hauser <vh@thc.org>      2023-11-09 15:13:51 +0100
committer  GitHub <noreply@github.com>  2023-11-09 15:13:51 +0100
commit     61e27c6b54f7641a168b6acc6ecffb1754c10918 (patch)
tree       a82934c35bd84b2893b71646080e46433083d516 /include
parent     85c5b5218c6a7b2289f309fbd1625a5d0a602a00 (diff)
parent     a6efdfdb15c8a48967ff773a0ca530a68544cd8f (diff)
download   afl++-61e27c6b54f7641a168b6acc6ecffb1754c10918.tar.gz
Merge pull request #1906 from AFLplusplus/dev
Dev
Diffstat (limited to 'include')
-rw-r--r--  include/afl-fuzz.h       2
-rw-r--r--  include/afl-mutations.h  2
-rw-r--r--  include/envs.h           1
-rw-r--r--  include/xxhash.h         30
4 files changed, 18 insertions, 17 deletions
diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h
index 217a720a..8112d430 100644
--- a/include/afl-fuzz.h
+++ b/include/afl-fuzz.h
@@ -675,7 +675,7 @@ typedef struct afl_state {
u32 cmplog_max_filesize;
u32 cmplog_lvl;
u32 colorize_success;
- u8 cmplog_enable_arith, cmplog_enable_transform,
+ u8 cmplog_enable_arith, cmplog_enable_transform, cmplog_enable_scale,
cmplog_enable_xtreme_transform, cmplog_random_colorization;
struct afl_pass_stat *pass_stats;
diff --git a/include/afl-mutations.h b/include/afl-mutations.h
index 98ba6fcf..d709b90d 100644
--- a/include/afl-mutations.h
+++ b/include/afl-mutations.h
@@ -1854,7 +1854,7 @@ inline u32 afl_mutate(afl_state_t *afl, u8 *buf, u32 len, u32 steps,
for (u32 step = 0; step < steps; ++step) {
- retry_havoc_step : {
+ retry_havoc_step: {
u32 r = rand_below(afl, MUT_STRATEGY_ARRAY_SIZE), item;
diff --git a/include/envs.h b/include/envs.h
index 734b1707..93e49e34 100644
--- a/include/envs.h
+++ b/include/envs.h
@@ -162,6 +162,7 @@ static char *afl_environment_variables[] = {
"AFL_LLVM_MAP_DYNAMIC",
"AFL_LLVM_NGRAM_SIZE",
"AFL_NGRAM_SIZE",
+ "AFL_LLVM_NO_RPATH",
"AFL_LLVM_NOT_ZERO",
"AFL_LLVM_INSTRUMENT_FILE",
"AFL_LLVM_THREADSAFE_INST",
diff --git a/include/xxhash.h b/include/xxhash.h
index 7bc0a14e..a8bd6f27 100644
--- a/include/xxhash.h
+++ b/include/xxhash.h
@@ -365,7 +365,7 @@ typedef uint32_t XXH32_hash_t;
(defined(__cplusplus) || \
(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */))
#include <stdint.h>
-typedef uint32_t XXH32_hash_t;
+typedef uint32_t XXH32_hash_t;
#else
#include <limits.h>
@@ -1082,7 +1082,7 @@ struct XXH64_state_s {
#include <stdalign.h>
#define XXH_ALIGN(n) alignas(n)
#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
- /* In C++ alignas() is a keyword */
+ /* In C++ alignas() is a keyword */
#define XXH_ALIGN(n) alignas(n)
#elif defined(__GNUC__)
#define XXH_ALIGN(n) __attribute__((aligned(n)))
@@ -3031,8 +3031,8 @@ XXH64_hashFromCanonical(const XXH64_canonical_t *src) {
__STDC_VERSION__ >= 199901L /* >= C99 */
#define XXH_RESTRICT restrict
#else
- /* Note: it might be useful to define __restrict or __restrict__ for
- * some C++ compilers */
+ /* Note: it might be useful to define __restrict or __restrict__ for
+ * some C++ compilers */
#define XXH_RESTRICT /* disable */
#endif
@@ -3492,8 +3492,8 @@ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) {
#define XXH_vec_mulo vec_mulo
#define XXH_vec_mule vec_mule
#elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
- /* Clang has a better way to control this, we can just use the builtin
- * which doesn't swap. */
+ /* Clang has a better way to control this, we can just use the builtin
+ * which doesn't swap. */
#define XXH_vec_mulo __builtin_altivec_vmulouw
#define XXH_vec_mule __builtin_altivec_vmuleuw
#else
@@ -3604,15 +3604,15 @@ XXH_FORCE_INLINE xxh_u64 XXH_mult32to64(xxh_u64 x, xxh_u64 y) {
#include <intrin.h>
#define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
#else
- /*
- * Downcast + upcast is usually better than masking on older compilers
- * like GCC 4.2 (especially 32-bit ones), all without affecting newer
- * compilers.
- *
- * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both
- * operands and perform a full 64x64 multiply -- entirely redundant on
- * 32-bit.
- */
+ /*
+ * Downcast + upcast is usually better than masking on older compilers
+ * like GCC 4.2 (especially 32-bit ones), all without affecting newer
+ * compilers.
+ *
+ * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both
+ * operands and perform a full 64x64 multiply -- entirely redundant on
+ * 32-bit.
+ */
#define XXH_mult32to64(x, y) \
((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
#endif