author     vanhauser-thc <vh@thc.org>  2023-11-06 10:13:59 +0100
committer  vanhauser-thc <vh@thc.org>  2023-11-06 10:13:59 +0100
commit     f3d2127fd815bed2ec9dfab981123898d11cea65
tree       b94d78a392120a198c1b40fdcaf884bda74470ec
parent     053334f35d0e8d6eace444781d52504585c69f76
clang-format 16->17
-rwxr-xr-x  .custom-format.py                            |  2
-rw-r--r--  frida_mode/src/main.c                        |  8
-rw-r--r--  include/afl-mutations.h                      |  2
-rw-r--r--  include/xxhash.h                             | 30
-rw-r--r--  instrumentation/cmplog-instructions-pass.cc  |  2
-rw-r--r--  instrumentation/cmplog-routines-pass.cc      |  2
-rw-r--r--  instrumentation/cmplog-switches-pass.cc      |  2
-rw-r--r--  instrumentation/split-switches-pass.so.cc    |  2
-rw-r--r--  qemu_mode/libqasan/dlmalloc.c                | 38
-rw-r--r--  qemu_mode/libqasan/malloc.c                  |  4
-rw-r--r--  src/afl-fuzz-one.c                           | 30
-rw-r--r--  src/afl-fuzz-redqueen.c                      |  2
-rw-r--r--  utils/libtokencap/libtokencap.so.c           |  6
13 files changed, 65 insertions, 65 deletions
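Nearly every changed line below is a mechanical re-format rather than a functional change; the only non-whitespace edit appears to be the LLVM_VERSION default bump from 16 to 17 in .custom-format.py. Judging from the hunks, clang-format 17 mainly differs from 16 in that it puts spaces around a binary operator that follows a parenthesized cast or macro argument (so ((_bf)&7) becomes ((_bf) & 7) and (size_t)(A)&CHUNK_ALIGN_MASK becomes (size_t)(A) & CHUNK_ALIGN_MASK), drops the space before a label's colon (retry_havoc_step : { becomes retry_havoc_step: {), and re-indents comment blocks nested inside preprocessor conditionals. The short, self-contained C sketch below illustrates the operator-spacing change; flip_bit, map and bit are illustrative names modeled on the FLIP_BIT macro from src/afl-fuzz-one.c, not code taken from this commit.

#include <stdint.h>
#include <stdio.h>

/* Operator spacing as emitted by clang-format 16:
     buf[(bit) >> 3] ^= (128 >> ((bit)&7));
   and as emitted by clang-format 17 (space around the binary '&'): */
static void flip_bit(uint8_t *buf, uint32_t bit) {

  buf[(bit) >> 3] ^= (uint8_t)(128 >> ((bit) & 7));

}

int main(void) {

  uint8_t map[4] = {0};
  flip_bit(map, 9);                        /* bit 9 lives in byte 1, mask 0x40 */
  printf("%02x %02x\n", map[0], map[1]);   /* prints: 00 40 */
  return 0;

}

Both spellings compile to identical code; only the layout differs, which is why the insertion and deletion counts balance at 65 each.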
diff --git a/.custom-format.py b/.custom-format.py
index 3521c05d..c8075ace 100755
--- a/.custom-format.py
+++ b/.custom-format.py
@@ -24,7 +24,7 @@ import importlib.metadata
 # string_re = re.compile('(\\"(\\\\.|[^"\\\\])*\\")') # TODO: for future use
-CURRENT_LLVM = os.getenv('LLVM_VERSION', 16)
+CURRENT_LLVM = os.getenv('LLVM_VERSION', 17)
 CLANG_FORMAT_BIN = os.getenv("CLANG_FORMAT_BIN", "")
diff --git a/frida_mode/src/main.c b/frida_mode/src/main.c
index f11c4b25..bd7b1351 100644
--- a/frida_mode/src/main.c
+++ b/frida_mode/src/main.c
@@ -49,10 +49,10 @@ extern void __libc_init(void *raw_args, void (*onexit)(void) __unused,
 int (*slingshot)(int, char **, char **),
 structors_array_t const *const structors);
 #else
-extern int __libc_start_main(int (*main)(int, char **, char **), int argc,
- char **ubp_av, void (*init)(void),
- void (*fini)(void), void (*rtld_fini)(void),
- void(*stack_end));
+extern int __libc_start_main(int (*main)(int, char **, char **), int argc,
+ char **ubp_av, void (*init)(void),
+ void (*fini)(void), void (*rtld_fini)(void),
+ void(*stack_end));
 #endif
 typedef int (*main_fn_t)(int argc, char **argv, char **envp);
diff --git a/include/afl-mutations.h b/include/afl-mutations.h
index 98ba6fcf..d709b90d 100644
--- a/include/afl-mutations.h
+++ b/include/afl-mutations.h
@@ -1854,7 +1854,7 @@ inline u32 afl_mutate(afl_state_t *afl, u8 *buf, u32 len, u32 steps,
 for (u32 step = 0; step < steps; ++step) {
- retry_havoc_step : {
+ retry_havoc_step: {
 u32 r = rand_below(afl, MUT_STRATEGY_ARRAY_SIZE), item;
diff --git a/include/xxhash.h b/include/xxhash.h
index 7bc0a14e..a8bd6f27 100644
--- a/include/xxhash.h
+++ b/include/xxhash.h
@@ -365,7 +365,7 @@ typedef uint32_t XXH32_hash_t;
 (defined(__cplusplus) || \
 (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */))
 #include <stdint.h>
-typedef uint32_t XXH32_hash_t;
+typedef uint32_t XXH32_hash_t;
 #else
 #include <limits.h>
@@ -1082,7 +1082,7 @@ struct XXH64_state_s {
 #include <stdalign.h>
 #define XXH_ALIGN(n) alignas(n)
 #elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
- /* In C++ alignas() is a keyword */
+ /* In C++ alignas() is a keyword */
 #define XXH_ALIGN(n) alignas(n)
 #elif defined(__GNUC__)
 #define XXH_ALIGN(n) __attribute__((aligned(n)))
@@ -3031,8 +3031,8 @@ XXH64_hashFromCanonical(const XXH64_canonical_t *src) {
 __STDC_VERSION__ >= 199901L /* >= C99 */
 #define XXH_RESTRICT restrict
 #else
- /* Note: it might be useful to define __restrict or __restrict__ for
- * some C++ compilers */
+ /* Note: it might be useful to define __restrict or __restrict__ for
+ * some C++ compilers */
 #define XXH_RESTRICT /* disable */
 #endif
@@ -3492,8 +3492,8 @@ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) {
 #define XXH_vec_mulo vec_mulo
 #define XXH_vec_mule vec_mule
 #elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
- /* Clang has a better way to control this, we can just use the builtin
- * which doesn't swap. */
+ /* Clang has a better way to control this, we can just use the builtin
+ * which doesn't swap. */
 #define XXH_vec_mulo __builtin_altivec_vmulouw
 #define XXH_vec_mule __builtin_altivec_vmuleuw
 #else
@@ -3604,15 +3604,15 @@ XXH_FORCE_INLINE xxh_u64 XXH_mult32to64(xxh_u64 x, xxh_u64 y) {
 #include <intrin.h>
 #define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
 #else
- /*
- * Downcast + upcast is usually better than masking on older compilers
- * like GCC 4.2 (especially 32-bit ones), all without affecting newer
- * compilers.
- *
- * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both
- * operands and perform a full 64x64 multiply -- entirely redundant on
- * 32-bit.
- */
+ /*
+ * Downcast + upcast is usually better than masking on older compilers
+ * like GCC 4.2 (especially 32-bit ones), all without affecting newer
+ * compilers.
+ *
+ * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both
+ * operands and perform a full 64x64 multiply -- entirely redundant on
+ * 32-bit.
+ */
 #define XXH_mult32to64(x, y) \
 ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
 #endif
diff --git a/instrumentation/cmplog-instructions-pass.cc b/instrumentation/cmplog-instructions-pass.cc
index bca1f927..9cd1dc59 100644
--- a/instrumentation/cmplog-instructions-pass.cc
+++ b/instrumentation/cmplog-instructions-pass.cc
@@ -90,7 +90,7 @@ class CmpLogInstructions : public ModulePass {
 #if LLVM_MAJOR >= 11 /* use new pass manager */
 PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
 #else
- bool runOnModule(Module &M) override;
+ bool runOnModule(Module &M) override;
 #if LLVM_VERSION_MAJOR >= 4
 StringRef getPassName() const override {
diff --git a/instrumentation/cmplog-routines-pass.cc b/instrumentation/cmplog-routines-pass.cc
index c3fbed8d..54e9ddf3 100644
--- a/instrumentation/cmplog-routines-pass.cc
+++ b/instrumentation/cmplog-routines-pass.cc
@@ -85,7 +85,7 @@ class CmpLogRoutines : public ModulePass {
 #if LLVM_VERSION_MAJOR >= 11 /* use new pass manager */
 PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
 #else
- bool runOnModule(Module &M) override;
+ bool runOnModule(Module &M) override;
 #if LLVM_VERSION_MAJOR >= 4
 StringRef getPassName() const override {
diff --git a/instrumentation/cmplog-switches-pass.cc b/instrumentation/cmplog-switches-pass.cc
index 38de669d..01da6da7 100644
--- a/instrumentation/cmplog-switches-pass.cc
+++ b/instrumentation/cmplog-switches-pass.cc
@@ -85,7 +85,7 @@ class CmplogSwitches : public ModulePass {
 #if LLVM_VERSION_MAJOR >= 11 /* use new pass manager */
 PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
 #else
- bool runOnModule(Module &M) override;
+ bool runOnModule(Module &M) override;
 #if LLVM_VERSION_MAJOR < 4
 const char *getPassName() const override {
diff --git a/instrumentation/split-switches-pass.so.cc b/instrumentation/split-switches-pass.so.cc
index dcd89652..e3dfea0d 100644
--- a/instrumentation/split-switches-pass.so.cc
+++ b/instrumentation/split-switches-pass.so.cc
@@ -84,7 +84,7 @@ class SplitSwitchesTransform : public ModulePass {
 #if LLVM_VERSION_MAJOR >= 11 /* use new pass manager */
 PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
 #else
- bool runOnModule(Module &M) override;
+ bool runOnModule(Module &M) override;
 #if LLVM_VERSION_MAJOR >= 4
 StringRef getPassName() const override {
diff --git a/qemu_mode/libqasan/dlmalloc.c b/qemu_mode/libqasan/dlmalloc.c
index b459eb7b..1919ae26 100644
--- a/qemu_mode/libqasan/dlmalloc.c
+++ b/qemu_mode/libqasan/dlmalloc.c
@@ -771,8 +771,8 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
 #include "/usr/include/malloc.h"
 #else /* HAVE_USR_INCLUDE_MALLOC_H */
 #ifndef STRUCT_MALLINFO_DECLARED
- /* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is
- * defined */
+ /* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is
+ * defined */
 #define _STRUCT_MALLINFO
 #define STRUCT_MALLINFO_DECLARED 1
 struct mallinfo {
@@ -1660,10 +1660,10 @@ extern size_t getpagesize();
 #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
 /* the number of bytes to offset an address to align it */
- #define align_offset(A) \
- ((((size_t)(A)&CHUNK_ALIGN_MASK) == 0) \
- ? 0 \
- : ((MALLOC_ALIGNMENT - ((size_t)(A)&CHUNK_ALIGN_MASK)) & \
+ #define align_offset(A) \
+ ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0) \
+ ? 0 \
+ : ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & \
 CHUNK_ALIGN_MASK))
 /* -------------------------- MMAP preliminaries ------------------------- */
@@ -1715,10 +1715,10 @@ static FORCEINLINE int unixmunmap(void *ptr, size_t size) {
 #define MUNMAP_DEFAULT(a, s) unixmunmap((a), (s))
 #else /* MAP_ANONYMOUS */
- /*
- Nearly all versions of mmap support MAP_ANONYMOUS, so the following
- is unlikely to be needed, but is supplied just in case.
- */
+ /*
+ Nearly all versions of mmap support MAP_ANONYMOUS, so the following
+ is unlikely to be needed, but is supplied just in case.
+ */
 #define MMAP_FLAGS (MAP_PRIVATE)
 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
 #define MMAP_DEFAULT(s) \
@@ -1965,7 +1965,7 @@ static FORCEINLINE void x86_clear_lock(int *sl) {
 #endif /* ... gcc spins locks ... */
- /* How to yield for a spin lock */
+ /* How to yield for a spin lock */
 #define SPINS_PER_YIELD 63
 #if defined(_MSC_VER)
 #define SLEEP_EX_DURATION 50 /* delay for yield/sleep */
@@ -2008,11 +2008,11 @@ static MLOCK_T malloc_global_mutex = 0;
 #define CURRENT_THREAD GetCurrentThreadId()
 #define EQ_OWNER(X, Y) ((X) == (Y))
 #else
- /*
- Note: the following assume that pthread_t is a type that can be
- initialized to (casted) zero. If this is not the case, you will need
- to somehow redefine these or not use spin locks.
- */
+ /*
+ Note: the following assume that pthread_t is a type that can be
+ initialized to (casted) zero. If this is not the case, you will need
+ to somehow redefine these or not use spin locks.
+ */
 #define THREAD_ID_T pthread_t
 #define CURRENT_THREAD pthread_self()
 #define EQ_OWNER(X, Y) pthread_equal(X, Y)
@@ -2169,7 +2169,7 @@ static int pthread_init_lock(MLOCK_T *lk) {
 #endif /* ... lock types ... */
- /* Common code for all lock types */
+ /* Common code for all lock types */
 #define USE_LOCK_BIT (2U)
 #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
@@ -3077,7 +3077,7 @@ static size_t traverse_and_check(mstate m);
 /* The size of the smallest chunk held in bin with index i */
 #define minsize_for_tree_index(i) \
 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
- (((size_t)((i)&SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
+ (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
 /* ------------------------ Operations on bin maps ----------------------- */
@@ -3245,7 +3245,7 @@ static size_t traverse_and_check(mstate m);
 #else /* FOOTERS */
- /* Set foot of inuse chunk to be xor of mstate and seed */
+ /* Set foot of inuse chunk to be xor of mstate and seed */
 #define mark_inuse_foot(M, p, s) \
 (((mchunkptr)((char *)(p) + (s)))->prev_foot = \
 ((size_t)(M) ^ mparams.magic))
diff --git a/qemu_mode/libqasan/malloc.c b/qemu_mode/libqasan/malloc.c
index d2db3856..4448f480 100644
--- a/qemu_mode/libqasan/malloc.c
+++ b/qemu_mode/libqasan/malloc.c
@@ -80,8 +80,8 @@ static unsigned char __tmp_alloc_zone[TMP_ZONE_SIZE];
 #else
 // From dlmalloc.c
-void *dlmalloc(size_t);
-void dlfree(void *);
+void *dlmalloc(size_t);
+void dlfree(void *);
 #define backend_malloc dlmalloc
 #define backend_free dlfree
diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c
index 2003be1f..b2306996 100644
--- a/src/afl-fuzz-one.c
+++ b/src/afl-fuzz-one.c
@@ -577,13 +577,13 @@ u8 fuzz_one_original(afl_state_t *afl) {
 * SIMPLE BITFLIP (+dictionary construction) *
 *********************************************/
-#define FLIP_BIT(_ar, _b) \
- do { \
- \
- u8 *_arf = (u8 *)(_ar); \
- u32 _bf = (_b); \
- _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \
- \
+#define FLIP_BIT(_ar, _b) \
+ do { \
+ \
+ u8 *_arf = (u8 *)(_ar); \
+ u32 _bf = (_b); \
+ _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
+ \
 } while (0)
 /* Single walking bit. */
@@ -2216,7 +2216,7 @@ havoc_stage:
 }
- retry_havoc_step : {
+ retry_havoc_step: {
 u32 r = rand_below(afl, rand_max), item;
@@ -3703,13 +3703,13 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
 * SIMPLE BITFLIP (+dictionary construction) *
 *********************************************/
-#define FLIP_BIT(_ar, _b) \
- do { \
- \
- u8 *_arf = (u8 *)(_ar); \
- u32 _bf = (_b); \
- _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \
- \
+#define FLIP_BIT(_ar, _b) \
+ do { \
+ \
+ u8 *_arf = (u8 *)(_ar); \
+ u32 _bf = (_b); \
+ _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
+ \
 } while (0)
 /* Single walking bit. */
diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c
index 43b5c8bd..86e7f1cf 100644
--- a/src/afl-fuzz-redqueen.c
+++ b/src/afl-fuzz-redqueen.c
@@ -1828,7 +1828,7 @@ static void try_to_add_to_dictN(afl_state_t *afl, u128 v, u8 size) {
 for (k = 0; k < size; ++k) {
 #else
- u32 off = 16 - size;
+ u32 off = 16 - size;
 for (k = 16 - size; k < 16; ++k) {
 #endif
diff --git a/utils/libtokencap/libtokencap.so.c b/utils/libtokencap/libtokencap.so.c
index b21f3068..f4024799 100644
--- a/utils/libtokencap/libtokencap.so.c
+++ b/utils/libtokencap/libtokencap.so.c
@@ -55,7 +55,7 @@
 #elif defined __HAIKU__
 #include <kernel/image.h>
 #elif defined __sun
- /* For map addresses the old struct is enough */
+/* For map addresses the old struct is enough */
 #include <sys/procfs.h>
 #include <limits.h>
 #endif
@@ -168,7 +168,7 @@ static void __tokencap_load_mappings(void) {
 #elif defined __FreeBSD__ || defined __OpenBSD__ || defined __NetBSD__
 #if defined __FreeBSD__
- int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, __tokencap_pid};
+ int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, __tokencap_pid};
 #elif defined __OpenBSD__
 int mib[] = {CTL_KERN, KERN_PROC_VMMAP, __tokencap_pid};
 #elif defined __NetBSD__
@@ -209,7 +209,7 @@ static void __tokencap_load_mappings(void) {
 #if defined __FreeBSD__ || defined __NetBSD__
 #if defined __FreeBSD__
- size_t size = region->kve_structsize;
+ size_t size = region->kve_structsize;
 if (size == 0) break;
 #elif defined __NetBSD__