From c25a602a0370f484e32adbf186290d2504cf3f12 Mon Sep 17 00:00:00 2001
From: van Hauser
Date: Sun, 28 Jun 2020 23:47:57 +0200
Subject: less problematic definitions

---
 include/hash.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include/hash.h')

diff --git a/include/hash.h b/include/hash.h
index 6910e0e2..9d42e44b 100644
--- a/include/hash.h
+++ b/include/hash.h
@@ -30,8 +30,8 @@

 #include "types.h"

-u32 hash32(const void *key, u32 len, u32 seed);
-u64 hash64(const void *key, u32 len, u64 seed);
+u32 hash32(u8 *key, u32 len, u32 seed);
+u64 hash64(u8 *key, u32 len, u64 seed);

 #if 0
--
cgit 1.4.1


From 6d0f086d9cb1b8eedbeb4c9654f9e44870460e8d Mon Sep 17 00:00:00 2001
From: van Hauser
Date: Sun, 28 Jun 2020 23:50:25 +0200
Subject: less problematic definitions

---
 include/hash.h        | 2 +-
 src/afl-performance.c | 6 +++++-
 2 files changed, 6 insertions(+), 2 deletions(-)

(limited to 'include/hash.h')

diff --git a/include/hash.h b/include/hash.h
index 9d42e44b..9319ab95 100644
--- a/include/hash.h
+++ b/include/hash.h
@@ -41,7 +41,7 @@ The following code is disabled because xxh3 is 30% faster

 #define ROL64(_x, _r) ((((u64)(_x)) << (_r)) | (((u64)(_x)) >> (64 - (_r))))

-static inline u32 hash32(const void *key, u32 len, u32 seed) {
+static inline u32 hash32(u8 *key, u32 len, u32 seed) {

   const u64 *data = (u64 *)key;
   u64 h1 = seed ^ len;

diff --git a/src/afl-performance.c b/src/afl-performance.c
index b3d30cbd..0832dc39 100644
--- a/src/afl-performance.c
+++ b/src/afl-performance.c
@@ -141,7 +141,11 @@ void long_jump(afl_state_t *afl) {

 /* we switch from afl's murmur implementation to xxh3 as it is 30% faster -
    and get 64 bit hashes instead of just 32 bit. Less collisions! :-) */

-u32 inline hash32(void *key, u32 len, u32 seed) {
+#ifdef _DEBUG
+u32 hash32(u8 *key, u32 len, u32 seed) {
+#else
+u32 inline hash32(u8 *key, u32 len, u32 seed) {
+#endif

   return (u32)XXH64(key, len, seed);
--
cgit 1.4.1
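
Taken together, the two patches change the hash key parameter from const void * to u8 * and keep the inline qualifier out of debug builds. Below is a minimal, self-contained sketch of the call shape after the patches. It is an illustration, not AFL++ source: it assumes the header-only xxHash library (xxhash.h) is on the include path, substitutes plain stdint.h typedefs for the u8/u32/u64 types that AFL++'s types.h provides, and uses an arbitrary placeholder seed.

    /* Sketch of the patched hash32()/hash64() call shape (assumed
       standalone setup, not the AFL++ build). */
    #include <stdint.h>
    #include <stdio.h>

    #define XXH_INLINE_ALL /* use xxHash in header-only mode */
    #include "xxhash.h"

    typedef uint8_t  u8;   /* stand-ins for AFL++'s types.h typedefs */
    typedef uint32_t u32;
    typedef uint64_t u64;

    /* Same shape as the patched definitions: the key is now u8 *
       rather than const void *, and the 32-bit variant truncates the
       64-bit XXH64 result. */
    u32 hash32(u8 *key, u32 len, u32 seed) {

      return (u32)XXH64(key, len, seed);

    }

    u64 hash64(u8 *key, u32 len, u64 seed) {

      return XXH64(key, len, seed);

    }

    int main(void) {

      u8  buf[] = "coverage map bytes";
      u32 len = (u32)(sizeof(buf) - 1);

      /* Callers now pass a writable u8 * directly; const buffers would
         need a cast at the call site under the new signatures. */
      printf("hash32 = %08x\n", hash32(buf, len, 0xdeadbeef));
      printf("hash64 = %016llx\n",
             (unsigned long long)hash64(buf, len, 0xdeadbeef));
      return 0;

    }

One plausible reading of the #ifdef _DEBUG split in the second patch: under C99 semantics, a non-static inline definition in a .c file does not by itself guarantee an external symbol, so an unoptimized debug build that declines to inline the calls can fail at link time with an undefined reference. Compiling a plain external definition whenever _DEBUG is set sidesteps that class of problem.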